source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c

Last change on this file was 100359, checked in by vboxsync, 12 months ago

Runtime/RTR0MemObj*: Add PhysHighest parameter to RTR0MemObjAllocCont to indicate the maximum allowed physical address for an allocation, bugref:10457 [Solaris build fix]

/* $Id: memobj-r0drv-solaris.c 100359 2023-07-04 07:07:15Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Solaris.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-solaris-kernel.h"
#include "internal/iprt.h"
#include <iprt/memobj.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/mem.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"
#include "memobj-r0drv-solaris.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#define SOL_IS_KRNL_ADDR(vx)    ((uintptr_t)(vx) >= kernelbase)


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * The Solaris version of the memory object structure.
 */
typedef struct RTR0MEMOBJSOL
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Pointer to kernel memory cookie. */
    ddi_umem_cookie_t   Cookie;
    /** Shadow locked pages. */
    void               *pvHandle;
    /** Access during locking. */
    int                 fAccess;
    /** Set if large pages are involved in an RTR0MEMOBJTYPE_PHYS allocation. */
    bool                fLargePage;
    /** Whether we have individual pages or a kernel-mapped virtual memory
     * block in an RTR0MEMOBJTYPE_PHYS_NC allocation. */
    bool                fIndivPages;
    /** Set if executable allocation - only RTR0MEMOBJTYPE_PHYS. */
    bool                fExecutable;
} RTR0MEMOBJSOL, *PRTR0MEMOBJSOL;


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
static vnode_t          g_PageVnode;
static kmutex_t         g_OffsetMtx;
static u_offset_t       g_offPage;

static vnode_t          g_LargePageVnode;
static kmutex_t         g_LargePageOffsetMtx;
static u_offset_t       g_offLargePage;
static bool             g_fLargePageNoReloc;


/**
 * Returns the physical address for a virtual address.
 *
 * @param pv        The virtual address.
 *
 * @returns The physical address corresponding to @a pv.
 */
static uint64_t rtR0MemObjSolVirtToPhys(void *pv)
{
    struct hat *pHat         = NULL;
    pfn_t       PageFrameNum = 0;
    uintptr_t   uVirtAddr    = (uintptr_t)pv;

    if (SOL_IS_KRNL_ADDR(pv))
        pHat = kas.a_hat;
    else
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertRelease(pProcess);
        pHat = pProcess->p_as->a_hat;
    }

    PageFrameNum = hat_getpfnum(pHat, (caddr_t)(uVirtAddr & PAGEMASK));
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv));
    return (((uint64_t)PageFrameNum << PAGE_SHIFT) | (uVirtAddr & PAGE_OFFSET_MASK));
}


/**
 * Returns the physical address for a page.
 *
 * @param pPage     Pointer to the page.
 *
 * @returns The physical address of the page.
 */
static inline uint64_t rtR0MemObjSolPagePhys(page_t *pPage)
{
    AssertPtr(pPage);
    pfn_t PageFrameNum = page_pptonum(pPage);
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolPagePhys failed pPage=%p\n", pPage));
    return (uint64_t)PageFrameNum << PAGE_SHIFT;
}


/**
 * Allocates one page.
 *
 * @param virtAddr  The virtual address to which this page may be mapped in
 *                  the future.
 *
 * @returns Pointer to the allocated page, NULL on failure.
 */
static page_t *rtR0MemObjSolPageAlloc(caddr_t virtAddr)
{
    u_offset_t offPage;
    seg_t      KernelSeg;

    /*
     * 16777215 terabytes of total memory for all VMs or
     * restart 8000 1GB VMs 2147483 times until wraparound!
     */
    mutex_enter(&g_OffsetMtx);
    AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
    g_offPage = RT_ALIGN_64(g_offPage, PAGE_SIZE) + PAGE_SIZE;
    offPage   = g_offPage;
    mutex_exit(&g_OffsetMtx);

    KernelSeg.s_as = &kas;
    page_t *pPage = page_create_va(&g_PageVnode, offPage, PAGE_SIZE, PG_WAIT | PG_NORELOC, &KernelSeg, virtAddr);
    if (RT_LIKELY(pPage))
    {
        /*
         * Lock this page into memory "long term" to prevent this page from being paged out
         * when we drop the page lock temporarily (during free). Downgrade to a shared lock
         * to prevent page relocation.
         */
        page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);
        page_io_unlock(pPage);
        page_downgrade(pPage);
        Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
    }

    return pPage;
}
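
/*
 * Minimal usage sketch (not part of the original file): each call to
 * rtR0MemObjSolPageAlloc() consumes a fresh, unique (g_PageVnode, offset)
 * name, and callers are expected to reserve availrmem via page_resv()
 * first, as rtR0MemObjSolPagesAlloc() below does.  The free side,
 * rtR0MemObjSolPageDestroy(), is defined further down; the example function
 * name is hypothetical.
 */
#if 0 /* illustrative example only, not built */
static int rtR0MemObjSolPageAllocExample(void)
{
    if (!page_resv(1, KM_NOSLEEP))              /* reserve one page of availrmem, don't sleep */
        return VERR_NO_MEMORY;
    page_t *pPage = rtR0MemObjSolPageAlloc(0 /* virtAddr, only used for page colouring */);
    if (pPage)
    {
        uint64_t uPhys = rtR0MemObjSolPagePhys(pPage); NOREF(uPhys);
        rtR0MemObjSolPageDestroy(pPage);        /* expects the shared lock the allocator returned */
        page_unresv(1);
        return VINF_SUCCESS;
    }
    page_unresv(1);
    return VERR_NO_MEMORY;
}
#endif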


/**
 * Destroys an allocated page.
 *
 * @param pPage     Pointer to the page to be destroyed.
 * @remarks This function expects the page in @a pPage to be shared locked.
 */
static void rtR0MemObjSolPageDestroy(page_t *pPage)
{
    /*
     * We need to exclusively lock the pages before freeing them. If upgrading the shared lock
     * to exclusive fails, drop the page lock and look the page up from the hash. Record the
     * page offset before we drop the page lock as we cannot touch any page_t members once the
     * lock is dropped.
     */
    AssertPtr(pPage);
    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

    u_offset_t offPage = pPage->p_offset;
    int rc = page_tryupgrade(pPage);
    if (!rc)
    {
        page_unlock(pPage);
        page_t *pFoundPage = page_lookup(&g_PageVnode, offPage, SE_EXCL);

        /*
         * Since we allocated the pages as PG_NORELOC we should only get back the exact page always.
         */
        AssertReleaseMsg(pFoundPage == pPage, ("Page lookup failed %p:%llx returned %p, expected %p\n",
                                               &g_PageVnode, offPage, pFoundPage, pPage));
    }
    Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
    page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    page_destroy(pPage, 0 /* move it to the free list */);
}


/* Currently not used on 32-bit, define it to shut up gcc. */
#if HC_ARCH_BITS == 64
/**
 * Allocates physical, non-contiguous memory of pages.
 *
 * @param puPhys    Where to store the physical address of the first page.
 *                  Optional, can be NULL.
 * @param cb        The size of the allocation.
 *
 * @return Array of allocated pages, NULL on failure.
 */
static page_t **rtR0MemObjSolPagesAlloc(uint64_t *puPhys, size_t cb)
{
    /*
     * VM1:
     * The page freelist and cachelist both hold pages that are not mapped into any address space.
     * The cachelist does not really contain free pages, but when memory is exhausted they are
     * moved to the free list; it is the total of the freelist and cachelist that shows up in the
     * 'free' column of vmstat.
     *
     * VM2:
     * @todo Document what happens behind the scenes in VM2 regarding the free and cachelist.
     */

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t cPages = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        size_t   cbPages = cPages * sizeof(page_t *);
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            /*
             * Get pages from kseg; the 'virtAddr' here is only for colouring, but unfortunately
             * we don't yet have the 'virtAddr' to which this memory may be mapped.
             */
            caddr_t virtAddr = 0;
            for (size_t i = 0; i < cPages; i++, virtAddr += PAGE_SIZE)
            {
                /*
                 * Get a page from the free list locked exclusively. The page will be named (hashed in)
                 * and we rely on it during free. The page we get will be shared locked to prevent the page
                 * from being relocated.
                 */
                page_t *pPage = rtR0MemObjSolPageAlloc(virtAddr);
                if (RT_UNLIKELY(!pPage))
                {
                    /*
                     * No page found, release whatever pages we grabbed so far.
                     */
                    for (size_t k = 0; k < i; k++)
                        rtR0MemObjSolPageDestroy(ppPages[k]);
                    kmem_free(ppPages, cbPages);
                    page_unresv(cPages);
                    return NULL;
                }

                ppPages[i] = pPage;
            }

            if (puPhys)
                *puPhys = rtR0MemObjSolPagePhys(ppPages[0]);
            return ppPages;
        }

        page_unresv(cPages);
    }

    return NULL;
}
#endif /* HC_ARCH_BITS == 64 */
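
/*
 * Minimal usage sketch (not part of the original file): pairing
 * rtR0MemObjSolPagesAlloc() with rtR0MemObjSolPagesFree() below.  Both take
 * the byte count of the allocation, not the page count.  The example
 * function name is hypothetical.
 */
#if 0 /* illustrative example only, not built */
static int rtR0MemObjSolPagesExample(size_t cb)
{
    uint64_t uPhysFirst = UINT64_MAX;
    page_t **ppPages = rtR0MemObjSolPagesAlloc(&uPhysFirst, cb);
    if (!ppPages)
        return VERR_NO_MEMORY;
    /* ... map the pages or hand their physical addresses to a device ... */
    rtR0MemObjSolPagesFree(ppPages, cb);
    return VINF_SUCCESS;
}
#endif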


/**
 * Frees the allocated pages.
 *
 * @param ppPages   Pointer to the page list.
 * @param cb        Size of the allocation.
 */
static void rtR0MemObjSolPagesFree(page_t **ppPages, size_t cb)
{
    size_t cPages  = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    for (size_t iPage = 0; iPage < cPages; iPage++)
        rtR0MemObjSolPageDestroy(ppPages[iPage]);

    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}


/**
 * Allocates one large page.
 *
 * @param puPhys        Where to store the physical address of the allocated
 *                      page. Optional, can be NULL.
 * @param cbLargePage   Size of the large page.
 *
 * @returns Pointer to a list of pages that cover the large page, NULL on
 *          failure.
 */
static page_t **rtR0MemObjSolLargePageAlloc(uint64_t *puPhys, size_t cbLargePage)
{
    /*
     * Check PG_NORELOC support for large pages. Using this helps prevent _1G page
     * fragmentation on systems that support it.
     */
    static bool fPageNoRelocChecked = false;
    if (fPageNoRelocChecked == false)
    {
        fPageNoRelocChecked = true;
        g_fLargePageNoReloc = false;
        if (   g_pfnrtR0Sol_page_noreloc_supported
            && g_pfnrtR0Sol_page_noreloc_supported(cbLargePage))
        {
            g_fLargePageNoReloc = true;
        }
    }

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t cPages  = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    u_offset_t offPage = 0;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            mutex_enter(&g_LargePageOffsetMtx);
            AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
            g_offLargePage = RT_ALIGN_64(g_offLargePage, cbLargePage) + cbLargePage;
            offPage        = g_offLargePage;
            mutex_exit(&g_LargePageOffsetMtx);

            seg_t KernelSeg;
            KernelSeg.s_as = &kas;
            page_t *pRootPage = page_create_va_large(&g_LargePageVnode, offPage, cbLargePage,
                                                     PG_EXCL | (g_fLargePageNoReloc ? PG_NORELOC : 0), &KernelSeg,
                                                     0 /* vaddr */, NULL /* locality group */);
            if (pRootPage)
            {
                /*
                 * Split it into sub-pages, downgrade each page to a shared lock to prevent page relocation.
                 */
                page_t *pPageList = pRootPage;
                for (size_t iPage = 0; iPage < cPages; iPage++)
                {
                    page_t *pPage = pPageList;
                    AssertPtr(pPage);
                    AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
                              ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
                    AssertMsg(pPage->p_szc == pRootPage->p_szc, ("Size code mismatch %p %d %d\n", pPage,
                                                                 (int)pPage->p_szc, (int)pRootPage->p_szc));

                    /*
                     * Lock the page into memory "long term". This prevents callers of page_try_demote_pages() (such as the
                     * pageout scanner) from demoting the large page into smaller pages while we temporarily release the
                     * exclusive lock (during free). We pass "0, 1" since we've already accounted for availrmem during
                     * page_resv().
                     */
                    page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);

                    page_sub(&pPageList, pPage);
                    page_io_unlock(pPage);
                    page_downgrade(pPage);
                    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

                    ppPages[iPage] = pPage;
                }
                Assert(pPageList == NULL);
                Assert(ppPages[0] == pRootPage);

                uint64_t uPhys = rtR0MemObjSolPagePhys(pRootPage);
                AssertMsg(!(uPhys & (cbLargePage - 1)), ("%llx %zx\n", uPhys, cbLargePage));
                if (puPhys)
                    *puPhys = uPhys;
                return ppPages;
            }

            /*
             * Don't restore offPrev in case of failure (race condition), we have plenty of offset space.
             * The offset must be unique (for the same vnode) or we'll encounter panics on page_create_va_large().
             */
            kmem_free(ppPages, cbPages);
        }

        page_unresv(cPages);
    }
    return NULL;
}
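
/*
 * Minimal usage sketch (not part of the original file): allocating and
 * freeing one large page.  The size must be a large page size supported by
 * the system, e.g. _2M on x86/amd64 (page-size code 1, see
 * rtR0MemObjNativeAllocPhys below); the physical address returned is
 * aligned to that size and the backing memory is contiguous.  The example
 * function name is hypothetical.
 */
#if 0 /* illustrative example only, not built */
static int rtR0MemObjSolLargePageExample(void)
{
    uint64_t uPhys = UINT64_MAX;
    page_t **ppPages = rtR0MemObjSolLargePageAlloc(&uPhys, _2M);
    if (!ppPages)
        return VERR_NO_MEMORY;
    Assert(!(uPhys & (_2M - 1)));               /* size-aligned, physically contiguous */
    rtR0MemObjSolLargePageFree(ppPages, _2M);   /* defined below */
    return VINF_SUCCESS;
}
#endif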


/**
 * Frees the large page.
 *
 * @param ppPages       Pointer to the list of small pages that cover the
 *                      large page.
 * @param cbLargePage   Size of the allocation (i.e. size of the large
 *                      page).
 */
static void rtR0MemObjSolLargePageFree(page_t **ppPages, size_t cbLargePage)
{
    Assert(ppPages);
    Assert(cbLargePage > PAGE_SIZE);

    bool    fDemoted  = false;
    size_t  cPages    = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t  cbPages   = cPages * sizeof(page_t *);
    page_t *pPageList = ppPages[0];

    for (size_t iPage = 0; iPage < cPages; iPage++)
    {
        /*
         * We need the pages exclusively locked, try upgrading the shared lock.
         * If it fails, drop the shared page lock (cannot access any page_t members once this is done)
         * and look the page up from the page hash, locking it exclusively.
         */
        page_t    *pPage   = ppPages[iPage];
        u_offset_t offPage = pPage->p_offset;
        int rc = page_tryupgrade(pPage);
        if (!rc)
        {
            page_unlock(pPage);
            page_t *pFoundPage = page_lookup(&g_LargePageVnode, offPage, SE_EXCL);
            AssertRelease(pFoundPage);

            if (g_fLargePageNoReloc)
            {
                /*
                 * This can only be guaranteed if PG_NORELOC is used while allocating the pages.
                 */
                AssertReleaseMsg(pFoundPage == pPage,
                                 ("lookup failed %p:%llu returned %p, expected %p\n", &g_LargePageVnode, offPage,
                                  pFoundPage, pPage));
            }

            /*
             * Check for page demotion (regardless of relocation). Some places in Solaris (e.g. VM1 page_retire())
             * could possibly demote the large page to _4K pages between our call to page_unlock() and page_lookup().
             */
            if (page_get_pagecnt(pFoundPage->p_szc) == 1)   /* Base size of only _4K associated with this page. */
                fDemoted = true;
            pPage = pFoundPage;
            ppPages[iPage] = pFoundPage;
        }
        Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
        page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    }

    if (fDemoted)
    {
        for (size_t iPage = 0; iPage < cPages; iPage++)
        {
            Assert(page_get_pagecnt(ppPages[iPage]->p_szc) == 1);
            page_destroy(ppPages[iPage], 0 /* move it to the free list */);
        }
    }
    else
    {
        /*
         * Although we broke up the linked list while allocating, page_destroy_pages works on
         * adjacent pages via array increments. So this does indeed free all the pages.
         */
        AssertPtr(pPageList);
        page_destroy_pages(pPageList);
    }
    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}


/**
 * Unmaps kernel/user-space mapped memory.
 *
 * @param pv        Pointer to the mapped memory block.
 * @param cb        Size of the memory block.
 */
static void rtR0MemObjSolUnmap(void *pv, size_t cb)
{
    if (SOL_IS_KRNL_ADDR(pv))
    {
        hat_unload(kas.a_hat, pv, cb, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
        vmem_free(heap_arena, pv, cb);
    }
    else
    {
        struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
        AssertPtr(pAddrSpace);
        as_rangelock(pAddrSpace);
        as_unmap(pAddrSpace, pv, cb);
        as_rangeunlock(pAddrSpace);
    }
}


/**
 * Locks down memory mappings for a virtual address.
 *
 * @param pv            Pointer to the memory to lock down.
 * @param cb            Size of the memory block.
 * @param fPageAccess   Page access rights (S_READ, S_WRITE, S_EXEC).
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolLock(void *pv, size_t cb, int fPageAccess)
{
    /*
     * Kernel memory mappings on x86/amd64 are always locked, only handle user-space memory.
     */
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProc = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProc);
        faultcode_t rc = as_fault(pProc->p_as->a_hat, pProc->p_as, (caddr_t)pv, cb, F_SOFTLOCK, fPageAccess);
        if (rc)
        {
            LogRel(("rtR0MemObjSolLock failed for pv=%p cb=%lx fPageAccess=%d rc=%d\n", pv, cb, fPageAccess, rc));
            return VERR_LOCK_FAILED;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Unlocks memory mappings for a virtual address.
 *
 * @param pv            Pointer to the locked memory.
 * @param cb            Size of the memory block.
 * @param fPageAccess   Page access rights (S_READ, S_WRITE, S_EXEC).
 */
static void rtR0MemObjSolUnlock(void *pv, size_t cb, int fPageAccess)
{
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProcess);
        as_fault(pProcess->p_as->a_hat, pProcess->p_as, (caddr_t)pv, cb, F_SOFTUNLOCK, fPageAccess);
    }
}
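
/*
 * Minimal usage sketch (not part of the original file): locking and
 * unlocking a user-space range.  The same fPageAccess value must be passed
 * to both calls, which is why the lock objects above stash it in
 * RTR0MEMOBJSOL::fAccess.  The example function name is hypothetical.
 */
#if 0 /* illustrative example only, not built */
static int rtR0MemObjSolLockExample(void *pvUser, size_t cb)
{
    int const fPageAccess = S_WRITE;
    int rc = rtR0MemObjSolLock(pvUser, cb, fPageAccess);
    if (RT_FAILURE(rc))
        return rc;
    /* ... the pages backing [pvUser, pvUser + cb) are resident and locked ... */
    rtR0MemObjSolUnlock(pvUser, cb, fPageAccess);
    return VINF_SUCCESS;
}
#endif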


/**
 * Maps a list of physical pages into user address space.
 *
 * @param pVirtAddr     Where to store the virtual address of the mapping.
 * @param fPageAccess   Page access rights (PROT_READ, PROT_WRITE,
 *                      PROT_EXEC).
 * @param paPhysAddrs   Array of physical addresses to pages.
 * @param cb            Size of memory being mapped.
 * @param cbPageSize    Size of the pages being mapped.
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolUserMap(caddr_t *pVirtAddr, unsigned fPageAccess, uint64_t *paPhysAddrs, size_t cb, size_t cbPageSize)
{
    struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
    int rc;
    SEGVBOX_CRARGS Args;

    Args.paPhysAddrs = paPhysAddrs;
    Args.fPageAccess = fPageAccess;
    Args.cbPageSize  = cbPageSize;

    as_rangelock(pAddrSpace);
    if (g_frtSolOldMapAddr)
        g_rtSolMapAddr.u.pfnSol_map_addr_old(pVirtAddr, cb, 0 /* offset */, 0 /* vacalign */, MAP_SHARED);
    else
        g_rtSolMapAddr.u.pfnSol_map_addr(pVirtAddr, cb, 0 /* offset */, MAP_SHARED);
    if (*pVirtAddr != NULL)
        rc = as_map(pAddrSpace, *pVirtAddr, cb, rtR0SegVBoxSolCreate, &Args);
    else
        rc = ENOMEM;
    as_rangeunlock(pAddrSpace);

    return RTErrConvertFromErrno(rc);
}


DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
            rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PHYS:
            if (pMemSolaris->Core.u.Phys.fAllocated)
            {
                if (pMemSolaris->fLargePage)
                    rtR0MemObjSolLargePageFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
                else
                    rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            }
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            if (pMemSolaris->fIndivPages)
                rtR0MemObjSolPagesFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
            else
                rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PAGE:
            if (!pMemSolaris->fExecutable)
                ddi_umem_free(pMemSolaris->Cookie);
            else
                segkmem_free(heaptext_arena, pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_LOCK:
            rtR0MemObjSolUnlock(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess);
            break;

        case RTR0MEMOBJTYPE_MAPPING:
            rtR0MemObjSolUnmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
            if (pMemSolaris->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
                vmem_xfree(heap_arena, pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            else
                AssertFailed();
            break;

        case RTR0MEMOBJTYPE_CONT: /* we don't use this type here. */
        default:
            AssertMsgFailed(("enmType=%d\n", pMemSolaris->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
{
    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb, pszTag);
    if (pMemSolaris)
    {
        void *pvMem;
        if (!fExecutable)
        {
            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
            pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
        }
        else
        {
            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC; /** @todo does segkmem_alloc zero the memory? */
            pvMem = segkmem_alloc(heaptext_arena, cb, KM_SLEEP);
        }
        if (pvMem)
        {
            pMemSolaris->Core.pv     = pvMem;
            pMemSolaris->pvHandle    = NULL;
            pMemSolaris->fExecutable = fExecutable;
            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_PAGE_MEMORY;
    }
    return VERR_NO_MEMORY;
}
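
/*
 * Caller's view (not part of the original file): a minimal sketch of how the
 * allocation above is reached through the public IPRT ring-0 API, assuming
 * the declarations in iprt/memobj.h.  The example function name is
 * hypothetical.
 */
#if 0 /* illustrative example only, not built */
static int rtR0MemObjAllocPageExample(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, PAGE_SIZE, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        void *pv = RTR0MemObjAddress(hMemObj);  /* kernel mapping of the allocation */
        /* ... use pv ... */
        RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif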


DECLHIDDEN(int) rtR0MemObjNativeAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
                                           const char *pszTag)
{
    return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
{
    AssertReturn(!fExecutable, VERR_NOT_SUPPORTED);

    /* Create the object */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb, pszTag);
    if (pMemSolaris)
    {
        /* Allocate physically low page-aligned memory. */
        uint64_t uPhysHi = _4G - 1;
        void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGE_SIZE, false /* fContig */);
        if (pvMem)
        {
            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
            pMemSolaris->Core.pv  = pvMem;
            pMemSolaris->pvHandle = NULL;
            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_LOW_MEMORY;
    }
    return VERR_NO_MEMORY;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest,
                                          bool fExecutable, const char *pszTag)
{
    AssertReturn(!fExecutable, VERR_NOT_SUPPORTED);
    return rtR0MemObjNativeAllocPhys(ppMem, cb, PhysHighest, PAGE_SIZE /* alignment */, pszTag);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
{
#if HC_ARCH_BITS == 64
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb, pszTag);
    if (pMemSolaris)
    {
        if (PhysHighest == NIL_RTHCPHYS)
        {
            uint64_t PhysAddr = UINT64_MAX;
            void *pvPages = rtR0MemObjSolPagesAlloc(&PhysAddr, cb);
            if (!pvPages)
            {
                LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%u.\n", cb));
                rtR0MemObjDelete(&pMemSolaris->Core);
                return VERR_NO_MEMORY;
            }
            Assert(PhysAddr != UINT64_MAX);
            Assert(!(PhysAddr & PAGE_OFFSET_MASK));

            pMemSolaris->Core.pv     = NULL;
            pMemSolaris->pvHandle    = pvPages;
            pMemSolaris->fIndivPages = true;
        }
        else
        {
            /*
             * If we must satisfy an upper limit constraint, it isn't feasible to grab individual pages.
             * We fall back to using contig_alloc().
             */
            uint64_t PhysAddr = UINT64_MAX;
            void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, PAGE_SIZE, false /* fContig */);
            if (!pvMem)
            {
                LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0SolMemAlloc failed for cb=%u PhysHighest=%RHp.\n", cb, PhysHighest));
                rtR0MemObjDelete(&pMemSolaris->Core);
                return VERR_NO_MEMORY;
            }
            Assert(PhysAddr != UINT64_MAX);
            Assert(!(PhysAddr & PAGE_OFFSET_MASK));

            pMemSolaris->Core.pv     = pvMem;
            pMemSolaris->pvHandle    = NULL;
            pMemSolaris->fIndivPages = false;
        }
        pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
        *ppMem = &pMemSolaris->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;

#else /* 32 bit: */
    return VERR_NOT_SUPPORTED; /* see the RTR0MemObjAllocPhysNC specs */
#endif
}
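
/*
 * Caller's view (not part of the original file): a sketch of allocating
 * page-aligned, physically non-contiguous memory via the public API and
 * fetching per-page physical addresses.  With PhysHighest == NIL_RTHCPHYS
 * this takes the individual-page path above (fIndivPages = true).  The
 * example function name is hypothetical.
 */
#if 0 /* illustrative example only, not built */
static int rtR0MemObjAllocPhysNCExample(size_t cb)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPhysNC(&hMemObj, cb, NIL_RTHCPHYS);
    if (RT_SUCCESS(rc))
    {
        size_t const cPages = cb >> PAGE_SHIFT;
        for (size_t iPage = 0; iPage < cPages; iPage++)
        {
            RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage);
            Assert(HCPhys != NIL_RTHCPHYS);
            /* ... hand HCPhys to the device/VMM ... */
        }
        RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif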


DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
                                          const char *pszTag)
{
    AssertMsgReturn(PhysHighest >= 16 * _1M, ("PhysHighest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);

    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    /*
     * Allocating one large page gets special treatment.
     */
    static uint32_t s_cbLargePage = UINT32_MAX;
    if (s_cbLargePage == UINT32_MAX)
    {
        if (page_num_pagesizes() > 1)
            ASMAtomicWriteU32(&s_cbLargePage, page_get_pagesize(1)); /* Page-size code 1 maps to _2M on Solaris x86/amd64. */
        else
            ASMAtomicWriteU32(&s_cbLargePage, 0);
    }

    uint64_t PhysAddr;
    if (   cb == s_cbLargePage
        && cb == uAlignment
        && PhysHighest == NIL_RTHCPHYS)
    {
        /*
         * Allocate one large page (backed by physically contiguous memory).
         */
        void *pvPages = rtR0MemObjSolLargePageAlloc(&PhysAddr, cb);
        if (RT_LIKELY(pvPages))
        {
            AssertMsg(!(PhysAddr & (cb - 1)), ("%RHp\n", PhysAddr));
            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC; /*?*/
            pMemSolaris->Core.pv                = NULL;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = pvPages;
            pMemSolaris->fLargePage             = true;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * Allocate physically contiguous memory aligned as specified.
         */
        AssertCompile(NIL_RTHCPHYS == UINT64_MAX); NOREF(RTASSERTVAR);
        PhysAddr = PhysHighest;
        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, uAlignment, true /* fContig */);
        if (RT_LIKELY(pvMem))
        {
            Assert(!(PhysAddr & PAGE_OFFSET_MASK));
            Assert(PhysAddr < PhysHighest);
            Assert(PhysAddr + cb <= PhysHighest);

            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
            pMemSolaris->Core.pv                = pvMem;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = NULL;
            pMemSolaris->fLargePage             = false;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    rtR0MemObjDelete(&pMemSolaris->Core);
    return VERR_NO_CONT_MEMORY;
}
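
/*
 * Caller's view (not part of the original file): the large-page path above
 * is only taken when all three conditions line up - size equals the large
 * page size, alignment equals the size, and there is no upper physical
 * address limit.  A sketch using the public RTR0MemObjAllocPhysEx() from
 * iprt/memobj.h; the example function name is hypothetical.
 */
#if 0 /* illustrative example only, not built */
static int rtR0MemObjAllocPhysExample(void)
{
    RTR0MEMOBJ hMemObj;
    /* cb == uAlignment == _2M and PhysHighest == NIL_RTHCPHYS => one large page. */
    int rc = RTR0MemObjAllocPhysEx(&hMemObj, _2M, NIL_RTHCPHYS, _2M /* uAlignment */);
    if (RT_SUCCESS(rc))
    {
        RTHCPHYS PhysBase = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /* iPage */);
        NOREF(PhysBase);                        /* physically contiguous for the whole _2M */
        RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif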


DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy,
                                          const char *pszTag)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* There is no allocation here, it needs to be mapped somewhere first. */
    pMemSolaris->Core.u.Phys.fAllocated   = false;
    pMemSolaris->Core.u.Phys.PhysBase     = Phys;
    pMemSolaris->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
                                         RTR0PROCESS R0Process, const char *pszTag)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_INVALID_PARAMETER);
    NOREF(fAccess);

    /* Create the locking object */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK,
                                                               (void *)R3Ptr, cb, pszTag);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down user pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
    int rc = rtR0MemObjSolLock((void *)R3Ptr, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockUser: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = R0Process;
    pMemSolaris->pvHandle              = NULL;
    pMemSolaris->fAccess               = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}
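
/*
 * Caller's view (not part of the original file): a sketch of locking down a
 * user-space buffer from ring-0 via the public API; R0Process must be the
 * calling process, as asserted above.  The example function name is
 * hypothetical.
 */
#if 0 /* illustrative example only, not built */
static int rtR0MemObjLockUserExample(RTR3PTR R3Ptr, size_t cb)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjLockUser(&hMemObj, R3Ptr, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
                                RTR0ProcHandleSelf());
    if (RT_SUCCESS(rc))
    {
        /* ... the buffer is pinned; translate pages with RTR0MemObjGetPagePhysAddr() ... */
        RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif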


DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
{
    NOREF(fAccess);

    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down kernel pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
    int rc = rtR0MemObjSolLock(pv, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockKernel: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
    pMemSolaris->pvHandle              = NULL;
    pMemSolaris->fAccess               = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
                                              const char *pszTag)
{
    PRTR0MEMOBJSOL pMemSolaris;

    /*
     * Use xalloc.
     */
    void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_NO_MEMORY;

    /* Create the object. */
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb, pszTag);
    if (!pMemSolaris)
    {
        LogRel(("rtR0MemObjNativeReserveKernel failed to alloc memory object.\n"));
        vmem_xfree(heap_arena, pv, cb);
        return VERR_NO_MEMORY;
    }

    pMemSolaris->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
                                            RTR0PROCESS R0Process, const char *pszTag)
{
    RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process, pszTag);
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
{
    /* Fail if requested to do something we can't. */
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Use xalloc to get address space.
     */
    if (!cbSub)
        cbSub = pMemToMap->cb;
    void *pv = vmem_xalloc(heap_arena, cbSub, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_MAP_FAILED;

    /*
     * Load the pages from the other object into it.
     */
    uint32_t fAttr = HAT_UNORDERED_OK | HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
    if (fProt & RTMEM_PROT_READ)
        fAttr |= PROT_READ;
    if (fProt & RTMEM_PROT_EXEC)
        fAttr |= PROT_EXEC;
    if (fProt & RTMEM_PROT_WRITE)
        fAttr |= PROT_WRITE;
    fAttr |= HAT_NOSYNC;

    int    rc  = VINF_SUCCESS;
    size_t off = 0;
    while (off < cbSub)
    {
        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(pMemToMap, (offSub + off) >> PAGE_SHIFT);
        AssertBreakStmt(HCPhys != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_2);
        pfn_t pfn = HCPhys >> PAGE_SHIFT;
        AssertBreakStmt(((RTHCPHYS)pfn << PAGE_SHIFT) == HCPhys, rc = VERR_INTERNAL_ERROR_3);

        hat_devload(kas.a_hat, (uint8_t *)pv + off, PAGE_SIZE, pfn, fAttr, HAT_LOAD_LOCK);

        /* Advance. */
        off += PAGE_SIZE;
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Create a memory object for the mapping.
         */
        PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING,
                                                                   pv, cbSub, pszTag);
        if (pMemSolaris)
        {
            pMemSolaris->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }

        LogRel(("rtR0MemObjNativeMapKernel failed to alloc memory object.\n"));
        rc = VERR_NO_MEMORY;
    }

    if (off)
        hat_unload(kas.a_hat, pv, off, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
    vmem_xfree(heap_arena, pv, cbSub);
    return rc;
}
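
/*
 * Caller's view (not part of the original file): a sketch of creating a
 * second, kernel-space mapping of an existing memory object via the public
 * API; this lands in the hat_devload() loop above.  The example function
 * name is hypothetical.
 */
#if 0 /* illustrative example only, not built */
static int rtR0MemObjMapKernelExample(RTR0MEMOBJ hMemObjToMap)
{
    RTR0MEMOBJ hMapObj;
    int rc = RTR0MemObjMapKernel(&hMapObj, hMemObjToMap, (void *)-1 /* pvFixed */,
                                 0 /* uAlignment (default) */, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    if (RT_SUCCESS(rc))
    {
        void *pv = RTR0MemObjAddress(hMapObj);
        /* ... use the mapping ... */
        RTR0MemObjFree(hMapObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif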


DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, PRTR0MEMOBJINTERNAL pMemToMap, RTR3PTR R3PtrFixed,
                                        size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub,
                                        const char *pszTag)
{
    /*
     * Fend off things we cannot do.
     */
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Get parameters from the source object and offSub/cbSub.
     */
    PRTR0MEMOBJSOL pMemToMapSolaris = (PRTR0MEMOBJSOL)pMemToMap;
    uint8_t       *pb               = pMemToMapSolaris->Core.pv ? (uint8_t *)pMemToMapSolaris->Core.pv + offSub : NULL;
    size_t const   cb               = cbSub ? cbSub : pMemToMapSolaris->Core.cb;
    size_t const   cPages           = cb >> PAGE_SHIFT;
    Assert(!offSub || cbSub);
    Assert(!(cb & PAGE_OFFSET_MASK));

    /*
     * Create the mapping object.
     */
    PRTR0MEMOBJSOL pMemSolaris;
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pb, cb, pszTag);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    /*
     * Gather the physical addresses of the pages to be mapped.
     */
    int       rc          = VINF_SUCCESS;
    uint64_t *paPhysAddrs = kmem_zalloc(sizeof(uint64_t) * cPages, KM_SLEEP);
    if (RT_LIKELY(paPhysAddrs))
    {
        if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS_NC
            && pMemToMapSolaris->fIndivPages)
        {
            /* Translate individual page_t to physical addresses. */
            page_t **papPages = pMemToMapSolaris->pvHandle;
            AssertPtr(papPages);
            papPages += offSub >> PAGE_SHIFT;
            for (size_t iPage = 0; iPage < cPages; iPage++)
                paPhysAddrs[iPage] = rtR0MemObjSolPagePhys(papPages[iPage]);
        }
        else if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS
                 && pMemToMapSolaris->fLargePage)
        {
            /* Split up the large page into page-sized chunks. */
            RTHCPHYS Phys = pMemToMapSolaris->Core.u.Phys.PhysBase;
            Phys += offSub;
            for (size_t iPage = 0; iPage < cPages; iPage++, Phys += PAGE_SIZE)
                paPhysAddrs[iPage] = Phys;
        }
        else
        {
            /* Have kernel mapping, just translate virtual to physical. */
            AssertPtr(pb);
            for (size_t iPage = 0; iPage < cPages; iPage++)
            {
                paPhysAddrs[iPage] = rtR0MemObjSolVirtToPhys(pb);
                if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1))
                {
                    LogRel(("rtR0MemObjNativeMapUser: no page to map.\n"));
                    rc = VERR_MAP_FAILED;
                    break;
                }
                pb += PAGE_SIZE;
            }
        }
        if (RT_SUCCESS(rc))
        {
            /*
             * Perform the actual mapping.
             */
            unsigned fPageAccess = PROT_READ;
            if (fProt & RTMEM_PROT_WRITE)
                fPageAccess |= PROT_WRITE;
            if (fProt & RTMEM_PROT_EXEC)
                fPageAccess |= PROT_EXEC;

            caddr_t UserAddr = NULL;
            rc = rtR0MemObjSolUserMap(&UserAddr, fPageAccess, paPhysAddrs, cb, PAGE_SIZE);
            if (RT_SUCCESS(rc))
            {
                pMemSolaris->Core.u.Mapping.R0Process = R0Process;
                pMemSolaris->Core.pv                  = UserAddr;

                *ppMem = &pMemSolaris->Core;
                kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
                return VINF_SUCCESS;
            }

            LogRel(("rtR0MemObjNativeMapUser: rtR0MemObjSolUserMap failed rc=%d.\n", rc));
        }

        rc = VERR_MAP_FAILED;
        kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
    }
    else
        rc = VERR_NO_MEMORY;
    rtR0MemObjDelete(&pMemSolaris->Core);
    return rc;
}
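
/*
 * Caller's view (not part of the original file): a sketch of mapping a
 * memory object into the calling process via the public API, which drives
 * the physical-address gathering and rtR0MemObjSolUserMap() above.  The
 * example function name is hypothetical.
 */
#if 0 /* illustrative example only, not built */
static int rtR0MemObjMapUserExample(RTR0MEMOBJ hMemObjToMap)
{
    RTR0MEMOBJ hMapObj;
    int rc = RTR0MemObjMapUser(&hMapObj, hMemObjToMap, (RTR3PTR)-1 /* R3PtrFixed */,
                               0 /* uAlignment (default) */, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
                               RTR0ProcHandleSelf());
    if (RT_SUCCESS(rc))
    {
        RTR3PTR R3Ptr = RTR0MemObjAddressR3(hMapObj);   /* user-space address of the mapping */
        NOREF(R3Ptr);
        RTR0MemObjFree(hMapObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif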


DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PHYS_NC:
            if (   pMemSolaris->Core.u.Phys.fAllocated
                || !pMemSolaris->fIndivPages)
            {
                uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
                return rtR0MemObjSolVirtToPhys(pb);
            }
            page_t **ppPages = pMemSolaris->pvHandle;
            return rtR0MemObjSolPagePhys(ppPages[iPage]);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_LOCK:
        {
            uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
            return rtR0MemObjSolVirtToPhys(pb);
        }

        /*
         * Although mapping can be handled by rtR0MemObjSolVirtToPhys(offset) like the above case,
         * request it from the parent so that we have a clear distinction between CONT/PHYS_NC.
         */
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemSolaris->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_PHYS:
            AssertFailed(); /* handled by the caller */
        case RTR0MEMOBJTYPE_RES_VIRT:
        default:
            return NIL_RTHCPHYS;
    }
}
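
/*
 * Caller's view (not part of the original file): RTR0MemObjGetPagePhysAddr()
 * dispatches to the native function above for the object types it does not
 * handle itself; a sketch of walking every page of an object.  The example
 * function name is hypothetical.
 */
#if 0 /* illustrative example only, not built */
static void rtR0MemObjGetPagePhysAddrExample(RTR0MEMOBJ hMemObj)
{
    size_t const cPages = RTR0MemObjSize(hMemObj) >> PAGE_SHIFT;
    for (size_t iPage = 0; iPage < cPages; iPage++)
    {
        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage);
        Assert(HCPhys != NIL_RTHCPHYS);     /* PAGE/LOW/LOCK objects resolve via rtR0MemObjSolVirtToPhys() */
        NOREF(HCPhys);
    }
}
#endif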