VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp

Last change on this file was 100357, checked in by vboxsync, 11 months ago

Runtime/RTR0MemObj*: Add PhysHighest parameter to RTR0MemObjAllocCont to indicate the maximum allowed physical address for an allocation, bugref:10457 [second attempt]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 61.0 KB
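
The change above adds a PhysHighest limit to the contiguous allocation API. A minimal, hypothetical ring-0 caller might look like the sketch below; the exact public signature is assumed to mirror the native worker rtR0MemObjNativeAllocCont() in this file and the snippet is not part of the listing:

    RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
    int rc = RTR0MemObjAllocCont(&hMemObj, _64K, _4G - 1 /* PhysHighest (assumed parameter) */, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        void    *pv   = RTR0MemObjAddress(hMemObj);            /* kernel mapping of the allocation */
        RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, 0); /* first page, constrained by PhysHighest */
        NOREF(pv); NOREF(Phys);
        RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
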
1/* $Id: memobj-r0drv-darwin.cpp 100357 2023-07-04 07:00:26Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Darwin.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#define RTMEM_NO_WRAP_TO_EF_APIS /* circular dependency otherwise. */
42#include "the-darwin-kernel.h"
43#include "internal/iprt.h"
44#include <iprt/memobj.h>
45
46#include <iprt/asm.h>
47#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
48# include <iprt/x86.h>
49# include <iprt/asm-amd64-x86.h>
50#endif
51#include <iprt/assert.h>
52#include <iprt/log.h>
53#include <iprt/mem.h>
54#include <iprt/param.h>
55#include <iprt/process.h>
56#include <iprt/semaphore.h>
57#include <iprt/string.h>
58#include <iprt/thread.h>
59#include "internal/memobj.h"
60
61
62/*********************************************************************************************************************************
63* Defined Constants And Macros *
64*********************************************************************************************************************************/
65#define MY_PRINTF(...) do { printf(__VA_ARGS__); kprintf(__VA_ARGS__); } while (0)
66
67/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */
68
69
70/*********************************************************************************************************************************
71* Structures and Typedefs *
72*********************************************************************************************************************************/
73/**
74 * The Darwin version of the memory object structure.
75 */
76typedef struct RTR0MEMOBJDARWIN
77{
78 /** The core structure. */
79 RTR0MEMOBJINTERNAL Core;
80 /** Pointer to the memory descriptor created for allocated and locked memory. */
81 IOMemoryDescriptor *pMemDesc;
82 /** Pointer to the memory mapping object for mapped memory. */
83 IOMemoryMap *pMemMap;
84} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
85
86/**
87 * Common thread_call_allocate/thread_call_enter argument package.
88 */
89typedef struct RTR0MEMOBJDARWINTHREADARGS
90{
91 int32_t volatile rc;
92 RTSEMEVENTMULTI hEvent;
93} RTR0MEMOBJDARWINTHREADARGS;
94
95
96/**
97 * Arguments for rtR0MemObjNativeAllockWorkerOnKernelThread.
98 */
99typedef struct RTR0MEMOBJDARWINALLOCARGS
100{
101 RTR0MEMOBJDARWINTHREADARGS Core;
102 PPRTR0MEMOBJINTERNAL ppMem;
103 size_t cb;
104 bool fExecutable;
105 bool fContiguous;
106 mach_vm_address_t PhysMask;
107 uint64_t MaxPhysAddr;
108 RTR0MEMOBJTYPE enmType;
109 size_t uAlignment;
110 const char *pszTag;
111} RTR0MEMOBJDARWINALLOCARGS;
112
113/**
114 * Arguments for rtR0MemObjNativeProtectWorkOnKernelThread.
115 */
116typedef struct RTR0MEMOBJDARWINPROTECTARGS
117{
118 RTR0MEMOBJDARWINTHREADARGS Core;
119 PRTR0MEMOBJINTERNAL pMem;
120 size_t offSub;
121 size_t cbSub;
122 uint32_t fProt;
123} RTR0MEMOBJDARWINPROTECTARGS;
124
125
126/*********************************************************************************************************************************
127* Internal Functions *
128*********************************************************************************************************************************/
129static void rtR0MemObjNativeAllockWorkerOnKernelThread(void *pvUser0, void *pvUser1);
130static int rtR0MemObjNativeProtectWorker(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt);
131static void rtR0MemObjNativeProtectWorkerOnKernelThread(void *pvUser0, void *pvUser1);
132
133
134/**
135 * Touch the pages to force the kernel to create or write-enable the page table
136 * entries.
137 *
138 * This is necessary since the kernel gets upset if we take a page fault when
139 * preemption is disabled and/or we own a simple lock (same thing). It has no
140 * problems with us disabling interrupts when taking the traps, weird stuff.
141 *
142 * (This is basically a way of invoking vm_fault on a range of pages.)
143 *
144 * @param pv Pointer to the first page.
145 * @param cb The number of bytes.
146 */
147static void rtR0MemObjDarwinTouchPages(void *pv, size_t cb)
148{
149 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
150 for (;;)
151 {
152 ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef);
153 if (cb <= PAGE_SIZE)
154 break;
155 cb -= PAGE_SIZE;
156 pu32 += PAGE_SIZE / sizeof(uint32_t);
157 }
158}
159
160
161/**
162 * Read (sniff) every page in the range to make sure there are some page table
163 * entries backing it.
164 *
165 * This is just to be sure vm_protect didn't remove stuff without re-adding it
166 * if someone should try to write-protect something.
167 *
168 * @param pv Pointer to the first page.
169 * @param cb The number of bytes.
170 */
171static void rtR0MemObjDarwinSniffPages(void const *pv, size_t cb)
172{
173 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
174 uint32_t volatile u32Counter = 0;
175 for (;;)
176 {
177 u32Counter += *pu32;
178
179 if (cb <= PAGE_SIZE)
180 break;
181 cb -= PAGE_SIZE;
182 pu32 += PAGE_SIZE / sizeof(uint32_t);
183 }
184}
185
186
187/**
188 * Gets the virtual memory map the specified object is mapped into.
189 *
190 * @returns VM map handle on success, NULL if no map.
191 * @param pMem The memory object.
192 */
193DECLINLINE(vm_map_t) rtR0MemObjDarwinGetMap(PRTR0MEMOBJINTERNAL pMem)
194{
195 switch (pMem->enmType)
196 {
197 case RTR0MEMOBJTYPE_PAGE:
198 case RTR0MEMOBJTYPE_LOW:
199 case RTR0MEMOBJTYPE_CONT:
200 return kernel_map;
201
202 case RTR0MEMOBJTYPE_PHYS:
203 case RTR0MEMOBJTYPE_PHYS_NC:
204 if (pMem->pv)
205 return kernel_map;
206 return NULL;
207
208 case RTR0MEMOBJTYPE_LOCK:
209 return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
210 ? kernel_map
211 : get_task_map((task_t)pMem->u.Lock.R0Process);
212
213 case RTR0MEMOBJTYPE_RES_VIRT:
214 return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
215 ? kernel_map
216 : get_task_map((task_t)pMem->u.ResVirt.R0Process);
217
218 case RTR0MEMOBJTYPE_MAPPING:
219 return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
220 ? kernel_map
221 : get_task_map((task_t)pMem->u.Mapping.R0Process);
222
223 default:
224 return NULL;
225 }
226}
227
228#if 0 /* not necessary after all*/
229/* My vm_map mockup. */
230struct my_vm_map
231{
232 struct { char pad[8]; } lock;
233 struct my_vm_map_header
234 {
235 struct vm_map_links
236 {
237 void *prev;
238 void *next;
239 vm_map_offset_t start;
240 vm_map_offset_t end;
241 } links;
242 int nentries;
243 boolean_t entries_pageable;
244 } hdr;
245 pmap_t pmap;
246 vm_map_size_t size;
247};
248
249
250/**
251 * Gets the minimum map address, this is similar to get_map_min.
252 *
253 * @returns The start address of the map.
254 * @param pMap The map.
255 */
256static vm_map_offset_t rtR0MemObjDarwinGetMapMin(vm_map_t pMap)
257{
258 /* lazy discovery of the correct offset. The Apple guys are a wonderfully secretive bunch. */
259 static int32_t volatile s_offAdjust = INT32_MAX;
260 int32_t off = s_offAdjust;
261 if (off == INT32_MAX)
262 {
263 for (off = 0; ; off += sizeof(pmap_t))
264 {
265 if (*(pmap_t *)((uint8_t *)kernel_map + off) == kernel_pmap)
266 break;
267 AssertReturn(off <= RT_MAX(RT_OFFSETOF(struct my_vm_map, pmap) * 4, 1024), 0x1000);
268 }
269 ASMAtomicWriteS32(&s_offAdjust, off - RT_OFFSETOF(struct my_vm_map, pmap));
270 }
271
272 /* calculate it. */
273 struct my_vm_map *pMyMap = (struct my_vm_map *)((uint8_t *)pMap + off);
274 return pMyMap->hdr.links.start;
275}
276#endif /* unused */
277
278#ifdef RT_STRICT
279# if 0 /* unused */
280
281/**
282 * Read from a physical page.
283 *
284 * @param HCPhys The address to start reading at.
285 * @param cb How many bytes to read.
286 * @param pvDst Where to put the bytes. This is zero'd on failure.
287 */
288static void rtR0MemObjDarwinReadPhys(RTHCPHYS HCPhys, size_t cb, void *pvDst)
289{
290 memset(pvDst, '\0', cb);
291
292 IOAddressRange aRanges[1] = { { (mach_vm_address_t)HCPhys, RT_ALIGN_Z(cb, PAGE_SIZE) } };
293 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
294 kIODirectionIn, NULL /*task*/);
295 if (pMemDesc)
296 {
297#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
298 IOMemoryMap *pMemMap = pMemDesc->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
299#else
300 IOMemoryMap *pMemMap = pMemDesc->map(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
301#endif
302 if (pMemMap)
303 {
304 void const *pvSrc = (void const *)(uintptr_t)pMemMap->getVirtualAddress();
305 memcpy(pvDst, pvSrc, cb);
306 pMemMap->release();
307 }
308 else
309 MY_PRINTF("rtR0MemObjDarwinReadPhys: createMappingInTask failed; HCPhys=%llx\n", HCPhys);
310
311 pMemDesc->release();
312 }
313 else
314 MY_PRINTF("rtR0MemObjDarwinReadPhys: withAddressRanges failed; HCPhys=%llx\n", HCPhys);
315}
316
317
318/**
319 * Gets the PTE for a page.
320 *
321 * @returns the PTE.
322 * @param pvPage The virtual address to get the PTE for.
323 */
324static uint64_t rtR0MemObjDarwinGetPTE(void *pvPage)
325{
326 RTUINT64U u64;
327 RTCCUINTREG cr3 = ASMGetCR3();
328 RTCCUINTREG cr4 = ASMGetCR4();
329 bool fPAE = false;
330 bool fLMA = false;
331 if (cr4 & X86_CR4_PAE)
332 {
333 fPAE = true;
334 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
335 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
336 {
337 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
338 if (efer & MSR_K6_EFER_LMA)
339 fLMA = true;
340 }
341 }
342
343 if (fLMA)
344 {
345 /* PML4 */
346 rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uint64_t)(uintptr_t)pvPage >> X86_PML4_SHIFT) & X86_PML4_MASK) * 8, 8, &u64);
347 if (!(u64.u & X86_PML4E_P))
348 {
349 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PML4E !p\n", pvPage);
350 return 0;
351 }
352
353 /* PDPTR */
354 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64) * 8, 8, &u64);
355 if (!(u64.u & X86_PDPE_P))
356 {
357 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDPTE !p\n", pvPage);
358 return 0;
359 }
360 if (u64.u & X86_PDPE_LM_PS)
361 return (u64.u & ~(uint64_t)(_1G -1)) | ((uintptr_t)pvPage & (_1G -1));
362
363 /* PD */
364 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
365 if (!(u64.u & X86_PDE_P))
366 {
367 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDE !p\n", pvPage);
368 return 0;
369 }
370 if (u64.u & X86_PDE_PS)
371 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
372
373 /* PT */
374 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
375 if (!(u64.u & X86_PTE_P))
376 {
377 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PTE !p\n", pvPage);
378 return 0;
379 }
380 return u64.u;
381 }
382
383 if (fPAE)
384 {
385 /* PDPTR */
386 rtR0MemObjDarwinReadPhys((u64.u & X86_CR3_PAE_PAGE_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE) * 8, 8, &u64);
387 if (!(u64.u & X86_PDE_P))
388 return 0;
389
390 /* PD */
391 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
392 if (!(u64.u & X86_PDE_P))
393 return 0;
394 if (u64.u & X86_PDE_PS)
395 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
396
397 /* PT */
398 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
399 if (!(u64.u & X86_PTE_P))
400 return 0;
401 return u64.u;
402 }
403
404 /* PD */
405 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_SHIFT) & X86_PD_MASK) * 4, 4, &u64);
406 if (!(u64.au32[0] & X86_PDE_P))
407 return 0;
408 if (u64.au32[0] & X86_PDE_PS)
409 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
410
411 /* PT */
412 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_SHIFT) & X86_PT_MASK) * 4, 4, &u64);
413 if (!(u64.au32[0] & X86_PTE_P))
414 return 0;
415 return u64.au32[0];
416
417 return 0;
418}
419
420# endif /* unused */
421#endif /* RT_STRICT */
422
423DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
424{
425 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
426 IPRT_DARWIN_SAVE_EFL_AC();
427
428 /*
429 * Release the IOMemoryDescriptor and/or IOMemoryMap associated with the object.
430 */
431 if (pMemDarwin->pMemDesc)
432 {
433 pMemDarwin->pMemDesc->complete();
434 pMemDarwin->pMemDesc->release();
435 pMemDarwin->pMemDesc = NULL;
436 }
437
438 if (pMemDarwin->pMemMap)
439 {
440 pMemDarwin->pMemMap->release();
441 pMemDarwin->pMemMap = NULL;
442 }
443
444 /*
445 * Release any memory that we've allocated or locked.
446 */
447 switch (pMemDarwin->Core.enmType)
448 {
449 case RTR0MEMOBJTYPE_LOW:
450 case RTR0MEMOBJTYPE_PAGE:
451 case RTR0MEMOBJTYPE_CONT:
452 break;
453
454 case RTR0MEMOBJTYPE_LOCK:
455 {
456#ifdef USE_VM_MAP_WIRE
457 vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
458 ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
459 : kernel_map;
460 kern_return_t kr = vm_map_unwire(Map,
461 (vm_map_offset_t)pMemDarwin->Core.pv,
462 (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
463 0 /* not user */);
464 AssertRC(kr == KERN_SUCCESS); /** @todo don't ignore... */
465#endif
466 break;
467 }
468
469 case RTR0MEMOBJTYPE_PHYS:
470 /*if (pMemDarwin->Core.u.Phys.fAllocated)
471 IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
472 Assert(!pMemDarwin->Core.u.Phys.fAllocated);
473 break;
474
475 case RTR0MEMOBJTYPE_PHYS_NC:
476 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
477 IPRT_DARWIN_RESTORE_EFL_AC();
478 return VERR_INTERNAL_ERROR;
479
480 case RTR0MEMOBJTYPE_RES_VIRT:
481 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
482 IPRT_DARWIN_RESTORE_EFL_AC();
483 return VERR_INTERNAL_ERROR;
484
485 case RTR0MEMOBJTYPE_MAPPING:
486 /* nothing to do here. */
487 break;
488
489 default:
490 AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
491 IPRT_DARWIN_RESTORE_EFL_AC();
492 return VERR_INTERNAL_ERROR;
493 }
494
495 IPRT_DARWIN_RESTORE_EFL_AC();
496 return VINF_SUCCESS;
497}
498
499
500/**
501 * This is a helper function that executes @a pfnWorker in the context of the
502 * kernel_task.
503 *
504 * @returns IPRT status code - result from pfnWorker or dispatching error.
505 * @param pfnWorker The function to call.
506 * @param pArgs The arguments to pass to the function.
507 */
508static int rtR0MemObjDarwinDoInKernelTaskThread(thread_call_func_t pfnWorker, RTR0MEMOBJDARWINTHREADARGS *pArgs)
509{
510 pArgs->rc = VERR_IPE_UNINITIALIZED_STATUS;
511 pArgs->hEvent = NIL_RTSEMEVENTMULTI;
512 int rc = RTSemEventMultiCreate(&pArgs->hEvent);
513 if (RT_SUCCESS(rc))
514 {
515 thread_call_t hCall = thread_call_allocate(pfnWorker, (void *)pArgs);
516 if (hCall)
517 {
518 boolean_t fRc = thread_call_enter(hCall);
519 AssertLogRel(fRc == FALSE);
520
521 rc = RTSemEventMultiWaitEx(pArgs->hEvent, RTSEMWAIT_FLAGS_INDEFINITE | RTSEMWAIT_FLAGS_UNINTERRUPTIBLE,
522 RT_INDEFINITE_WAIT);
523 AssertLogRelRC(rc);
524
525 rc = pArgs->rc;
526 thread_call_free(hCall);
527 }
528 else
529 rc = VERR_NO_MEMORY;
530 RTSemEventMultiDestroy(pArgs->hEvent);
531 }
532 return rc;
533}
534
535
536/**
537 * Signals result to thread waiting in rtR0MemObjDarwinDoInKernelTaskThread.
538 *
539 * @param pArgs The argument structure.
540 * @param rc The IPRT status code to signal.
541 */
542static void rtR0MemObjDarwinSignalThreadWaitinOnTask(RTR0MEMOBJDARWINTHREADARGS volatile *pArgs, int rc)
543{
544 if (ASMAtomicCmpXchgS32(&pArgs->rc, rc, VERR_IPE_UNINITIALIZED_STATUS))
545 {
546 rc = RTSemEventMultiSignal(pArgs->hEvent);
547 AssertLogRelRC(rc);
548 }
549}
550
551
552/**
553 * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
554 *
555 * @returns IPRT status code.
556 * @retval VERR_ADDRESS_TOO_BIG try another way.
557 *
558 * @param ppMem Where to return the memory object.
559 * @param cb The page aligned memory size.
560 * @param fExecutable Whether the mapping needs to be executable.
561 * @param fContiguous Whether the backing memory needs to be contiguous.
562 * @param PhysMask The mask for the backing memory (i.e. range). Use 0 if
563 * you don't care that much or are speculating.
564 * @param MaxPhysAddr The max address to verify the result against. Use
565 * UINT64_MAX if it doesn't matter.
566 * @param enmType The object type.
567 * @param uAlignment The allocation alignment (in bytes).
568 * @param pszTag Allocation tag used for statistics and such.
569 * @param fOnKernelThread Set if we're already on the kernel thread.
570 */
571static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
572 bool fExecutable, bool fContiguous,
573 mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
574 RTR0MEMOBJTYPE enmType, size_t uAlignment, const char *pszTag, bool fOnKernelThread)
575{
576 int rc;
577
578 /*
579 * Because of process code signing properties leaking into kernel space in
580 * XNU's vm_fault.c code, we have to defer allocations of exec memory to
581 * a thread running in the kernel_task to get consistent results here.
582 *
583 * Trouble strikes in vm_fault_enter() when cs_enforcement_enabled is determined
584 * to be true because current process has the CS_ENFORCEMENT flag, the page flag
585 * vmp_cs_validated is clear, and the protection mask includes VM_PROT_EXECUTE
586 * (pmap_cs_enforced does not apply to macOS it seems). This test seems to go
587 * back to 10.5, though I'm not sure whether it's enabled for macOS that early
588 * on. Only VM_PROT_EXECUTE is problematic for kernel memory (though
589 * VM_PROT_WRITE on code signed pages is also problematic in theory). As long as
590 * kernel_task doesn't have CS_ENFORCEMENT enabled, we'll be fine switching to it.
591 */
592 if (!fExecutable || fOnKernelThread)
593 { /* likely */ }
594 else
595 {
596 RTR0MEMOBJDARWINALLOCARGS Args;
597 Args.ppMem = ppMem;
598 Args.cb = cb;
599 Args.fExecutable = fExecutable;
600 Args.fContiguous = fContiguous;
601 Args.PhysMask = PhysMask;
602 Args.MaxPhysAddr = MaxPhysAddr;
603 Args.enmType = enmType;
604 Args.uAlignment = uAlignment;
605 Args.pszTag = pszTag;
606 return rtR0MemObjDarwinDoInKernelTaskThread(rtR0MemObjNativeAllockWorkerOnKernelThread, &Args.Core);
607 }
608
609 /*
610 * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
611 * actually respects the physical memory mask (10.5.x is certainly busted),
612 * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
613 *
614 * The kIOMemoryKernelUserShared flag just forces the result to be page aligned.
615 *
616 * The kIOMemoryMapperNone flag is required since 10.8.2 (IOMMU changes?).
617 */
618
619 /* This is an old fudge from the snow leopard days: "Is it only on snow leopard?
620 Seen allocating memory for the VM structure, last page corrupted or
621 inaccessible." Made it only apply to snow leopard and older for now. */
622 size_t cbFudged = cb;
623 if (version_major >= 11 /* 11 = 10.7.x = Lion. */)
624 { /* likely */ }
625 else
626 cbFudged += PAGE_SIZE;
627
628 IOOptionBits fOptions = kIOMemoryKernelUserShared | kIODirectionInOut;
629 if (fContiguous)
630 {
631 fOptions |= kIOMemoryPhysicallyContiguous;
632 if ( version_major > 12
633 || (version_major == 12 && version_minor >= 2) /* 10.8.2 = Mountain Kitten */ )
634 fOptions |= kIOMemoryHostPhysicallyContiguous; /* (Just to make ourselves clear, in case the xnu code changes.) */
635 }
636 if (version_major >= 12 /* 12 = 10.8.x = Mountain Kitten */)
637 fOptions |= kIOMemoryMapperNone;
638
639#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 && 0 /* enable when/if necessary */
640 /* Paranoia: Don't misrepresent our intentions, we won't map kernel executable memory into ring-0. */
641 if (fExecutable && version_major >= 11 /* 10.7.x = Lion, as below */)
642 {
643 fOptions &= ~kIOMemoryKernelUserShared;
644 if (uAlignment < PAGE_SIZE)
645 uAlignment = PAGE_SIZE;
646 }
647#endif
648
649 /* The public initWithPhysicalMask virtual method appeared in 10.7.0; in
650 versions 10.5.0 up to 10.7.0 it was private, and 10.4.8-10.5.0 it was
651 x86 only and didn't have the alignment parameter (slot was different too). */
652 uint64_t uAlignmentActual = uAlignment;
653 IOBufferMemoryDescriptor *pMemDesc;
654#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
655 if (version_major >= 11 /* 11 = 10.7.x = Lion, could probably allow 10.5.0+ here if we really wanted to. */)
656 {
657 /* Starting with 10.6.x the physical mask is ignored if alignment is higher
658 than 1. The assumption seems to be that inTaskWithPhysicalMask() should
659 be used and the alignment inferred from the PhysMask argument. */
660 if (MaxPhysAddr != UINT64_MAX)
661 {
662 Assert(RT_ALIGN_64(PhysMask, uAlignment) == PhysMask);
663 uAlignmentActual = 1;
664 }
665
666 pMemDesc = new IOBufferMemoryDescriptor;
667 if (pMemDesc)
668 {
669 if (pMemDesc->initWithPhysicalMask(kernel_task, fOptions, cbFudged, uAlignmentActual, PhysMask))
670 { /* likely */ }
671 else
672 {
673 pMemDesc->release();
674 pMemDesc = NULL;
675 }
676 }
677 }
678 else
679#endif
680 pMemDesc = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, fOptions, cbFudged, PhysMask);
681 if (pMemDesc)
682 {
683 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
684 if (IORet == kIOReturnSuccess)
685 {
686 void *pv = pMemDesc->getBytesNoCopy(0, cbFudged);
687 if (pv)
688 {
689 /*
690 * Check if it's all below 4GB.
691 */
692 addr64_t AddrPrev = 0;
693 MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
694 for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
695 {
696#ifdef __LP64__
697 addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL, kIOMemoryMapperNone);
698#else
699 addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
700#endif
701 if ( Addr > MaxPhysAddr
702 || !Addr
703 || (Addr & PAGE_OFFSET_MASK)
704 || ( fContiguous
705 && !off
706 && Addr == AddrPrev + PAGE_SIZE))
707 {
708 /* Buggy API, try allocate the memory another way. */
709 pMemDesc->complete();
710 pMemDesc->release();
711 if (PhysMask)
712 {
713 kprintf("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx fContiguous=%d fOptions=%#x - buggy API!\n",
714 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions);
715 LogRel(("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx fContiguous=%RTbool fOptions=%#x - buggy API!\n",
716 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions));
717 }
718 return VERR_ADDRESS_TOO_BIG;
719 }
720 AddrPrev = Addr;
721 }
722
723 /*
724 * Check that it's aligned correctly.
725 */
726 if ((uintptr_t)pv & (uAlignment - 1))
727 {
728 pMemDesc->complete();
729 pMemDesc->release();
730 if (PhysMask)
731 {
732 kprintf("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMask=%llx fContiguous=%d fOptions=%#x) - buggy API!!\n",
733 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions);
734 LogRel(("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMask=%llx fContiguous=%RTbool fOptions=%#x) - buggy API!\n",
735 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions));
736 }
737 return VERR_NOT_SUPPORTED;
738 }
739
740#ifdef RT_STRICT
741 /* check that the memory is actually mapped. */
742 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
743 //printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
744 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
745 RTThreadPreemptDisable(&State);
746 rtR0MemObjDarwinTouchPages(pv, cb);
747 RTThreadPreemptRestore(&State);
748#endif
749
750 /*
751 * Create the IPRT memory object.
752 */
753 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb, pszTag);
754 if (pMemDarwin)
755 {
756 if (fOptions & kIOMemoryKernelUserShared)
757 pMemDarwin->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
758 else
759 pMemDarwin->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
760 if (fContiguous)
761 {
762#ifdef __LP64__
763 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone);
764#else
765 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
766#endif
767 RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
768 if (enmType == RTR0MEMOBJTYPE_CONT)
769 pMemDarwin->Core.u.Cont.Phys = PhysBase;
770 else if (enmType == RTR0MEMOBJTYPE_PHYS)
771 pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
772 else
773 AssertMsgFailed(("enmType=%d\n", enmType));
774 }
775
776 if (fExecutable)
777 {
778 rc = rtR0MemObjNativeProtectWorker(&pMemDarwin->Core, 0, cb,
779 RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
780#ifdef RT_STRICT
781 if (RT_SUCCESS(rc))
782 {
783 /* check that the memory is actually mapped. */
784 RTTHREADPREEMPTSTATE State2 = RTTHREADPREEMPTSTATE_INITIALIZER;
785 RTThreadPreemptDisable(&State2);
786 rtR0MemObjDarwinTouchPages(pv, cb);
787 RTThreadPreemptRestore(&State2);
788 }
789#endif
790 /* Bug 6226: Ignore KERN_PROTECTION_FAILURE on Leopard and older. */
791 if ( rc == VERR_PERMISSION_DENIED
792 && version_major <= 10 /* 10 = 10.6.x = Snow Leopard. */)
793 rc = VINF_SUCCESS;
794 }
795 else
796 rc = VINF_SUCCESS;
797 if (RT_SUCCESS(rc))
798 {
799 pMemDarwin->pMemDesc = pMemDesc;
800 *ppMem = &pMemDarwin->Core;
801 return VINF_SUCCESS;
802 }
803
804 rtR0MemObjDelete(&pMemDarwin->Core);
805 }
806
807 if (enmType == RTR0MEMOBJTYPE_PHYS_NC)
808 rc = VERR_NO_PHYS_MEMORY;
809 else if (enmType == RTR0MEMOBJTYPE_LOW)
810 rc = VERR_NO_LOW_MEMORY;
811 else if (enmType == RTR0MEMOBJTYPE_CONT)
812 rc = VERR_NO_CONT_MEMORY;
813 else
814 rc = VERR_NO_MEMORY;
815 }
816 else
817 rc = VERR_MEMOBJ_INIT_FAILED;
818
819 pMemDesc->complete();
820 }
821 else
822 rc = RTErrConvertFromDarwinIO(IORet);
823 pMemDesc->release();
824 }
825 else
826 rc = VERR_MEMOBJ_INIT_FAILED;
827 Assert(rc != VERR_ADDRESS_TOO_BIG);
828 return rc;
829}
830
831
832/**
833 * rtR0MemObjNativeAllocWorker kernel_task wrapper function.
834 */
835static void rtR0MemObjNativeAllockWorkerOnKernelThread(void *pvUser0, void *pvUser1)
836{
837 AssertPtr(pvUser0); Assert(pvUser1 == NULL); NOREF(pvUser1);
838 RTR0MEMOBJDARWINALLOCARGS volatile *pArgs = (RTR0MEMOBJDARWINALLOCARGS volatile *)pvUser0;
839 int rc = rtR0MemObjNativeAllocWorker(pArgs->ppMem, pArgs->cb, pArgs->fExecutable, pArgs->fContiguous, pArgs->PhysMask,
840 pArgs->MaxPhysAddr, pArgs->enmType, pArgs->uAlignment, pArgs->pszTag,
841 true /*fOnKernelThread*/);
842 rtR0MemObjDarwinSignalThreadWaitinOnTask(&pArgs->Core, rc);
843}
844
845
846DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
847{
848 IPRT_DARWIN_SAVE_EFL_AC();
849
850 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */, 0 /* PhysMask */, UINT64_MAX,
851 RTR0MEMOBJTYPE_PAGE, PAGE_SIZE, pszTag, false /*fOnKernelThread*/);
852
853 IPRT_DARWIN_RESTORE_EFL_AC();
854 return rc;
855}
856
857
858DECLHIDDEN(int) rtR0MemObjNativeAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
859 const char *pszTag)
860{
861 return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
862}
863
864
865DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
866{
867 IPRT_DARWIN_SAVE_EFL_AC();
868
869 /*
870 * Try IOMallocPhysical/IOMallocAligned first.
871 * Then try optimistically without a physical address mask, which will always
872 * end up using IOMallocAligned.
873 *
874 * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
875 */
876 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */, ~(uint32_t)PAGE_OFFSET_MASK,
877 _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE, pszTag, false /*fOnKernelThread*/);
878 if (rc == VERR_ADDRESS_TOO_BIG)
879 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */, 0 /* PhysMask */,
880 _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE, pszTag, false /*fOnKernelThread*/);
881
882 IPRT_DARWIN_RESTORE_EFL_AC();
883 return rc;
884}
885
886
887/**
888 * Translates the PhysHighest address into a mask.
889 */
890static mach_vm_address_t rtR0MemObjDarwinCalcPhysMask(RTHCPHYS PhysHighest, size_t uAlignment)
891{
892 if (PhysHighest == NIL_RTHCPHYS)
893 return uAlignment <= PAGE_SIZE ? 0 : ~(mach_vm_address_t)(uAlignment - 1);
894
895 mach_vm_address_t PhysMask = ~(mach_vm_address_t)0;
896 while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
897 PhysMask >>= 1;
898 PhysMask &= ~(mach_vm_address_t)(uAlignment - 1);
899
900 return PhysMask;
901}
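/* Worked example: with PhysHighest = _4G - 1 and uAlignment = PAGE_SIZE, the loop
   above shrinks PhysMask down to 0x00000000ffffffff and the alignment step clears
   the low 12 bits, yielding 0x00000000fffff000 - the same ~(uint32_t)PAGE_OFFSET_MASK
   value that rtR0MemObjNativeAllocLow passes to the worker for sub-4GB allocations. */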
902
903
904DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest,
905 bool fExecutable, const char *pszTag)
906{
907 mach_vm_address_t const PhysMask = rtR0MemObjDarwinCalcPhysMask(PhysHighest, PAGE_SIZE);
908 IPRT_DARWIN_SAVE_EFL_AC();
909
910 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */, PhysMask, PhysHighest,
911 RTR0MEMOBJTYPE_CONT, PAGE_SIZE, pszTag, false /*fOnKernelThread*/);
912
913 /*
914 * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
915 * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
916 */
917 if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
918 rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */, PhysMask, PhysHighest,
919 RTR0MEMOBJTYPE_CONT, PAGE_SIZE, pszTag, false /*fOnKernelThread*/);
920 IPRT_DARWIN_RESTORE_EFL_AC();
921 return rc;
922}
923
924
925DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
926 const char *pszTag)
927{
928 if (uAlignment != PAGE_SIZE)
929 {
930 /* See rtR0MemObjNativeAllocWorker: */
931 if (version_major < 9 /* 9 = 10.5.x = Leopard */)
932 return VERR_NOT_SUPPORTED;
933 }
934 mach_vm_address_t const PhysMask = rtR0MemObjDarwinCalcPhysMask(PhysHighest, uAlignment);
935
936 IPRT_DARWIN_SAVE_EFL_AC();
937
938 /*
939 * Translate the PhysHighest address into a mask.
940 */
941 int rc;
942 if (PhysHighest == NIL_RTHCPHYS)
943 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */, PhysMask, UINT64_MAX,
944 RTR0MEMOBJTYPE_PHYS, uAlignment, pszTag, false /*fOnKernelThread*/);
945 else
946 {
947 AssertReturn(PhysMask + 1 <= cb, VERR_INVALID_PARAMETER);
948 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */, PhysMask, PhysHighest,
949 RTR0MEMOBJTYPE_PHYS, uAlignment, pszTag, false /*fOnKernelThread*/);
950 }
951
952 IPRT_DARWIN_RESTORE_EFL_AC();
953 return rc;
954}
955
956
957DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
958{
959 /** @todo rtR0MemObjNativeAllocPhys / darwin.
960 * This might be a bit problematic and may very well require having to create our own
961 * object which we populate with pages but without mapping it into any address space.
962 * Estimate is 2-3 days.
963 */
964 RT_NOREF(ppMem, cb, PhysHighest, pszTag);
965 return VERR_NOT_SUPPORTED;
966}
967
968
969DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy,
970 const char *pszTag)
971{
972 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
973 IPRT_DARWIN_SAVE_EFL_AC();
974
975 /*
976 * Create a descriptor for it (the validation is always true on intel macs, but
977 * as it doesn't harm us we keep it in).
978 */
979 int rc = VERR_ADDRESS_TOO_BIG;
980 IOAddressRange aRanges[1] = { { Phys, cb } };
981 if ( aRanges[0].address == Phys
982 && aRanges[0].length == cb)
983 {
984 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
985 kIODirectionInOut, NULL /*task*/);
986 if (pMemDesc)
987 {
988#ifdef __LP64__
989 Assert(Phys == pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone));
990#else
991 Assert(Phys == pMemDesc->getPhysicalSegment64(0, NULL));
992#endif
993
994 /*
995 * Create the IPRT memory object.
996 */
997 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS,
998 NULL, cb, pszTag);
999 if (pMemDarwin)
1000 {
1001 pMemDarwin->Core.u.Phys.PhysBase = Phys;
1002 pMemDarwin->Core.u.Phys.fAllocated = false;
1003 pMemDarwin->Core.u.Phys.uCachePolicy = uCachePolicy;
1004 pMemDarwin->pMemDesc = pMemDesc;
1005 *ppMem = &pMemDarwin->Core;
1006 IPRT_DARWIN_RESTORE_EFL_AC();
1007 return VINF_SUCCESS;
1008 }
1009
1010 rc = VERR_NO_MEMORY;
1011 pMemDesc->release();
1012 }
1013 else
1014 rc = VERR_MEMOBJ_INIT_FAILED;
1015 }
1016 else
1017 AssertMsgFailed(("%#llx %llx\n", (unsigned long long)Phys, (unsigned long long)cb));
1018 IPRT_DARWIN_RESTORE_EFL_AC();
1019 return rc;
1020}
1021
1022
1023/**
1024 * Internal worker for locking down pages.
1025 *
1026 * @return IPRT status code.
1027 *
1028 * @param ppMem Where to store the memory object pointer.
1029 * @param pv First page.
1030 * @param cb Number of bytes.
1031 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
1032 * and RTMEM_PROT_WRITE.
1033 * @param Task The task \a pv and \a cb refer to.
1034 * @param pszTag Allocation tag used for statistics and such.
1035 */
1036static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, task_t Task,
1037 const char *pszTag)
1038{
1039 IPRT_DARWIN_SAVE_EFL_AC();
1040 NOREF(fAccess);
1041#ifdef USE_VM_MAP_WIRE
1042 vm_map_t Map = get_task_map(Task);
1043 Assert(Map);
1044
1045 /*
1046 * First try to lock the memory.
1047 */
1048 int rc = VERR_LOCK_FAILED;
1049 kern_return_t kr = vm_map_wire(get_task_map(Task),
1050 (vm_map_offset_t)pv,
1051 (vm_map_offset_t)pv + cb,
1052 VM_PROT_DEFAULT,
1053 0 /* not user */);
1054 if (kr == KERN_SUCCESS)
1055 {
1056 /*
1057 * Create the IPRT memory object.
1058 */
1059 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
1060 if (pMemDarwin)
1061 {
1062 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
1063 *ppMem = &pMemDarwin->Core;
1064
1065 IPRT_DARWIN_RESTORE_EFL_AC();
1066 return VINF_SUCCESS;
1067 }
1068
1069 kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
1070 Assert(kr == KERN_SUCCESS);
1071 rc = VERR_NO_MEMORY;
1072 }
1073
1074#else
1075
1076 /*
1077 * Create a descriptor and try lock it (prepare).
1078 */
1079 int rc = VERR_MEMOBJ_INIT_FAILED;
1080 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
1081 if (pMemDesc)
1082 {
1083 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
1084 if (IORet == kIOReturnSuccess)
1085 {
1086 /*
1087 * Create the IPRT memory object.
1088 */
1089 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK,
1090 pv, cb, pszTag);
1091 if (pMemDarwin)
1092 {
1093 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
1094 pMemDarwin->pMemDesc = pMemDesc;
1095 *ppMem = &pMemDarwin->Core;
1096
1097 IPRT_DARWIN_RESTORE_EFL_AC();
1098 return VINF_SUCCESS;
1099 }
1100
1101 pMemDesc->complete();
1102 rc = VERR_NO_MEMORY;
1103 }
1104 else
1105 rc = VERR_LOCK_FAILED;
1106 pMemDesc->release();
1107 }
1108#endif
1109 IPRT_DARWIN_RESTORE_EFL_AC();
1110 return rc;
1111}
1112
1113
1114DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
1115 RTR0PROCESS R0Process, const char *pszTag)
1116{
1117 return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, fAccess, (task_t)R0Process, pszTag);
1118}
1119
1120
1121DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
1122{
1123 return rtR0MemObjNativeLock(ppMem, pv, cb, fAccess, kernel_task, pszTag);
1124}
1125
1126
1127DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
1128 const char *pszTag)
1129{
1130 RT_NOREF(ppMem, pvFixed, cb, uAlignment, pszTag);
1131 return VERR_NOT_SUPPORTED;
1132}
1133
1134
1135DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
1136 RTR0PROCESS R0Process, const char *pszTag)
1137{
1138 RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process, pszTag);
1139 return VERR_NOT_SUPPORTED;
1140}
1141
1142
1143DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
1144 unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
1145{
1146 RT_NOREF(fProt);
1147 AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);
1148
1149 /*
1150 * Check that the specified alignment is supported.
1151 */
1152 if (uAlignment > PAGE_SIZE)
1153 return VERR_NOT_SUPPORTED;
1154 Assert(!offSub || cbSub);
1155
1156 IPRT_DARWIN_SAVE_EFL_AC();
1157
1158 /*
1159 * Must have a memory descriptor that we can map.
1160 */
1161 int rc = VERR_INVALID_PARAMETER;
1162 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
1163 if (pMemToMapDarwin->pMemDesc)
1164 {
1165 /* The kIOMapPrefault option was added in 10.10.0; it causes PTEs to be populated with
1166 INTEL_PTE_WIRED set, just like we desire (see further down). However, till
1167 10.13.0 it was not available for use on kernel mappings. Oh, fudge. */
1168#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
1169 static uint32_t volatile s_fOptions = UINT32_MAX;
1170 uint32_t fOptions = s_fOptions;
1171 if (RT_UNLIKELY(fOptions == UINT32_MAX))
1172 s_fOptions = fOptions = version_major >= 17 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.13.0 (High Sierra). */
1173
1174 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
1175 0,
1176 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
1177 offSub,
1178 cbSub);
1179#else
1180 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task,
1181 0,
1182 kIOMapAnywhere | kIOMapDefaultCache,
1183 offSub,
1184 cbSub);
1185#endif
1186 if (pMemMap)
1187 {
1188 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
1189 void *pv = (void *)(uintptr_t)VirtAddr;
1190 if ((uintptr_t)pv == VirtAddr && pv != NULL)
1191 {
1192//#ifdef __LP64__
1193// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
1194//#else
1195// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
1196//#endif
1197// MY_PRINTF("pv=%p: %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
1198
1199// /*
1200// * Explicitly lock it so that we're sure it is present and that
1201// * its PTEs cannot be recycled.
1202// * Note! withAddressRange() doesn't work as it adds kIOMemoryTypeVirtual64
1203// * to the options which causes prepare() to not wire the pages.
1204// * This is probably a bug.
1205// */
1206// IOAddressRange Range = { (mach_vm_address_t)pv, cbSub };
1207// IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withOptions(&Range,
1208// 1 /* count */,
1209// 0 /* offset */,
1210// kernel_task,
1211// kIODirectionInOut | kIOMemoryTypeVirtual,
1212// kIOMapperSystem);
1213// if (pMemDesc)
1214// {
1215// IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
1216// if (IORet == kIOReturnSuccess)
1217// {
1218 /* HACK ALERT! On kernels older than 10.10 (xnu version 14), we need to fault in
1219 the pages here so they can safely be accessed from inside simple
1220 locks and when preemption is disabled (no page-ins allowed).
1221 Note! This touching does not cause INTEL_PTE_WIRED (bit 10) to be set as we go
1222 thru general #PF and vm_fault doesn't figure it should be wired or something. */
1223 rtR0MemObjDarwinTouchPages(pv, cbSub ? cbSub : pMemToMap->cb);
1224 /** @todo First, the memory should've been mapped by now, and second, it
1225 * should have the wired attribute in the PTE (bit 10). Neither seems to
1226 * be the case. The disabled locking code doesn't make any difference,
1227 * which is extremely odd, and breaks rtR0MemObjNativeGetPagePhysAddr
1228 * (getPhysicalSegment64 -> 64 for the lock descriptor. */
1229//#ifdef __LP64__
1230// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
1231//#else
1232// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
1233//#endif
1234// MY_PRINTF("pv=%p: %8llx %8llx (%d)\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr2, 2);
1235
1236 /*
1237 * Create the IPRT memory object.
1238 */
1239 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1240 pv, cbSub ? cbSub : pMemToMap->cb, pszTag);
1241 if (pMemDarwin)
1242 {
1243 pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
1244 pMemDarwin->pMemMap = pMemMap;
1245// pMemDarwin->pMemDesc = pMemDesc;
1246 *ppMem = &pMemDarwin->Core;
1247
1248 IPRT_DARWIN_RESTORE_EFL_AC();
1249 return VINF_SUCCESS;
1250 }
1251
1252// pMemDesc->complete();
1253// rc = VERR_NO_MEMORY;
1254// }
1255// else
1256// rc = RTErrConvertFromDarwinIO(IORet);
1257// pMemDesc->release();
1258// }
1259// else
1260// rc = VERR_MEMOBJ_INIT_FAILED;
1261 }
1262 else if (pv)
1263 rc = VERR_ADDRESS_TOO_BIG;
1264 else
1265 rc = VERR_MAP_FAILED;
1266 pMemMap->release();
1267 }
1268 else
1269 rc = VERR_MAP_FAILED;
1270 }
1271
1272 IPRT_DARWIN_RESTORE_EFL_AC();
1273 return rc;
1274}
1275
1276
1277DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
1278 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
1279{
1280 RT_NOREF(fProt);
1281
1282 /*
1283 * Check for unsupported things.
1284 */
1285 AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);
1286 if (uAlignment > PAGE_SIZE)
1287 return VERR_NOT_SUPPORTED;
1288 Assert(!offSub || cbSub);
1289
1290 IPRT_DARWIN_SAVE_EFL_AC();
1291
1292 /*
1293 * Must have a memory descriptor.
1294 */
1295 int rc = VERR_INVALID_PARAMETER;
1296 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
1297 if (pMemToMapDarwin->pMemDesc)
1298 {
1299#if MAC_OS_X_VERSION_MIN_REQUIRED >= 101000 /* The kIOMapPrefault option was added in 10.10.0. */
1300 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1301 0,
1302 kIOMapAnywhere | kIOMapDefaultCache | kIOMapPrefault,
1303 offSub,
1304 cbSub);
1305#elif MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
1306 static uint32_t volatile s_fOptions = UINT32_MAX;
1307 uint32_t fOptions = s_fOptions;
1308 if (RT_UNLIKELY(fOptions == UINT32_MAX))
1309 s_fOptions = fOptions = version_major >= 14 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.10.0. */
1310 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1311 0,
1312 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
1313 offSub,
1314 cbSub);
1315#else
1316 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process,
1317 0,
1318 kIOMapAnywhere | kIOMapDefaultCache,
1319 offSub,
1320 cbSub);
1321#endif
1322 if (pMemMap)
1323 {
1324 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
1325 void *pv = (void *)(uintptr_t)VirtAddr;
1326 if ((uintptr_t)pv == VirtAddr && pv != NULL)
1327 {
1328 /*
1329 * Create the IPRT memory object.
1330 */
1331 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1332 pv, cbSub ? cbSub : pMemToMap->cb, pszTag);
1333 if (pMemDarwin)
1334 {
1335 pMemDarwin->Core.u.Mapping.R0Process = R0Process;
1336 pMemDarwin->pMemMap = pMemMap;
1337 *ppMem = &pMemDarwin->Core;
1338
1339 IPRT_DARWIN_RESTORE_EFL_AC();
1340 return VINF_SUCCESS;
1341 }
1342
1343 rc = VERR_NO_MEMORY;
1344 }
1345 else if (pv)
1346 rc = VERR_ADDRESS_TOO_BIG;
1347 else
1348 rc = VERR_MAP_FAILED;
1349 pMemMap->release();
1350 }
1351 else
1352 rc = VERR_MAP_FAILED;
1353 }
1354
1355 IPRT_DARWIN_RESTORE_EFL_AC();
1356 return rc;
1357}
1358
1359
1360/**
1361 * Worker for rtR0MemObjNativeProtect that's typically called in a different
1362 * context.
1363 */
1364static int rtR0MemObjNativeProtectWorker(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1365{
1366 IPRT_DARWIN_SAVE_EFL_AC();
1367
1368 /* Get the map for the object. */
1369 vm_map_t pVmMap = rtR0MemObjDarwinGetMap(pMem);
1370 if (!pVmMap)
1371 {
1372 IPRT_DARWIN_RESTORE_EFL_AC();
1373 return VERR_NOT_SUPPORTED;
1374 }
1375
1376 /*
1377 * Convert the protection.
1378 */
1379 vm_prot_t fMachProt;
1380 switch (fProt)
1381 {
1382 case RTMEM_PROT_NONE:
1383 fMachProt = VM_PROT_NONE;
1384 break;
1385 case RTMEM_PROT_READ:
1386 fMachProt = VM_PROT_READ;
1387 break;
1388 case RTMEM_PROT_READ | RTMEM_PROT_WRITE:
1389 fMachProt = VM_PROT_READ | VM_PROT_WRITE;
1390 break;
1391 case RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1392 fMachProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1393 break;
1394 case RTMEM_PROT_WRITE:
1395 fMachProt = VM_PROT_WRITE | VM_PROT_READ; /* never write-only */
1396 break;
1397 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1398 fMachProt = VM_PROT_WRITE | VM_PROT_EXECUTE | VM_PROT_READ; /* never write-only or execute-only */
1399 break;
1400 case RTMEM_PROT_EXEC:
1401 fMachProt = VM_PROT_EXECUTE | VM_PROT_READ; /* never execute-only */
1402 break;
1403 default:
1404 AssertFailedReturn(VERR_INVALID_PARAMETER);
1405 }
1406
1407 /*
1408 * Do the job.
1409 */
1410 vm_offset_t Start = (uintptr_t)pMem->pv + offSub;
1411 kern_return_t krc = vm_protect(pVmMap,
1412 Start,
1413 cbSub,
1414 false,
1415 fMachProt);
1416 if (krc != KERN_SUCCESS)
1417 {
1418 static int s_cComplaints = 0;
1419 if (s_cComplaints < 10)
1420 {
1421 s_cComplaints++;
1422 printf("rtR0MemObjNativeProtect: vm_protect(%p,%p,%p,false,%#x) -> %d\n",
1423 (void *)pVmMap, (void *)Start, (void *)cbSub, fMachProt, krc);
1424
1425 kern_return_t krc2;
1426 vm_offset_t pvReal = Start;
1427 vm_size_t cbReal = 0;
1428 mach_msg_type_number_t cInfo = VM_REGION_BASIC_INFO_COUNT;
1429 struct vm_region_basic_info Info;
1430 RT_ZERO(Info);
1431 krc2 = vm_region(pVmMap, &pvReal, &cbReal, VM_REGION_BASIC_INFO, (vm_region_info_t)&Info, &cInfo, NULL);
1432 printf("rtR0MemObjNativeProtect: basic info - krc2=%d pv=%p cb=%p prot=%#x max=%#x inh=%#x shr=%d rvd=%d off=%#x behavior=%#x wired=%#x\n",
1433 krc2, (void *)pvReal, (void *)cbReal, Info.protection, Info.max_protection, Info.inheritance,
1434 Info.shared, Info.reserved, Info.offset, Info.behavior, Info.user_wired_count);
1435 }
1436 IPRT_DARWIN_RESTORE_EFL_AC();
1437 return RTErrConvertFromDarwinKern(krc);
1438 }
1439
1440 /*
1441 * Touch the pages if they should be writable afterwards and accessible
1442 * from code which should never fault. vm_protect() may leave pages
1443 * temporarily write protected, possibly due to pmap no-upgrade rules?
1444 *
1445 * This is the same trick (or HACK ALERT if you like) as applied in
1446 * rtR0MemObjNativeMapKernel.
1447 */
1448 if ( pMem->enmType != RTR0MEMOBJTYPE_MAPPING
1449 || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
1450 {
1451 if (fProt & RTMEM_PROT_WRITE)
1452 rtR0MemObjDarwinTouchPages((void *)Start, cbSub);
1453 /*
1454 * Sniff (read) read-only pages too, just to be sure.
1455 */
1456 else if (fProt & (RTMEM_PROT_READ | RTMEM_PROT_EXEC))
1457 rtR0MemObjDarwinSniffPages((void const *)Start, cbSub);
1458 }
1459
1460 IPRT_DARWIN_RESTORE_EFL_AC();
1461 return VINF_SUCCESS;
1462}
1463
1464
1465/**
1466 * rtR0MemObjNativeProtect kernel_task wrapper function.
1467 */
1468static void rtR0MemObjNativeProtectWorkerOnKernelThread(void *pvUser0, void *pvUser1)
1469{
1470 AssertPtr(pvUser0); Assert(pvUser1 == NULL); NOREF(pvUser1);
1471 RTR0MEMOBJDARWINPROTECTARGS *pArgs = (RTR0MEMOBJDARWINPROTECTARGS *)pvUser0;
1472 int rc = rtR0MemObjNativeProtectWorker(pArgs->pMem, pArgs->offSub, pArgs->cbSub, pArgs->fProt);
1473 rtR0MemObjDarwinSignalThreadWaitinOnTask(&pArgs->Core, rc);
1474}
1475
1476
1477DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1478{
1479 /*
1480 * The code won't work right because process codesigning properties leak
1481 * into kernel_map memory management. So, if the user process we're running
1482 * in has CS restrictions active, we cannot play around with the EXEC
1483 * protection because some vm_fault.c code thinks we're modifying the process map
1484 * or something.
1485 */
1486 int rc;
1487 if (rtR0MemObjDarwinGetMap(pMem) == kernel_map)
1488 {
1489 RTR0MEMOBJDARWINPROTECTARGS Args;
1490 Args.pMem = pMem;
1491 Args.offSub = offSub;
1492 Args.cbSub = cbSub;
1493 Args.fProt = fProt;
1494 rc = rtR0MemObjDarwinDoInKernelTaskThread(rtR0MemObjNativeProtectWorkerOnKernelThread, &Args.Core);
1495 }
1496 else
1497 rc = rtR0MemObjNativeProtectWorker(pMem, offSub, cbSub, fProt);
1498 return rc;
1499}
1500
1501
1502DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1503{
1504 RTHCPHYS PhysAddr;
1505 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
1506 IPRT_DARWIN_SAVE_EFL_AC();
1507
1508#ifdef USE_VM_MAP_WIRE
1509 /*
1510 * Locked memory doesn't have a memory descriptor and
1511 * needs to be handled differently.
1512 */
1513 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
1514 {
1515 ppnum_t PgNo;
1516 if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
1517 PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1518 else
1519 {
1520 /*
1521 * From what I can tell, Apple seems to have locked up all the
1522 * available interfaces that could help us obtain the pmap_t of a task
1523 * or vm_map_t.
1524 *
1525 * So, we'll have to figure out where in the vm_map_t structure it is
1526 * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
1527 * kernel_map->pmap, we scan kernel_map to locate the structure offset.
1528 * Not nice, but it will hopefully do the job in a reliable manner...
1529 *
1530 * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
1531 */
1532 static int s_offPmap = -1;
1533 if (RT_UNLIKELY(s_offPmap == -1))
1534 {
1535 pmap_t const *p = (pmap_t *)kernel_map;
1536 pmap_t const * const pEnd = p + 64;
1537 for (; p < pEnd; p++)
1538 if (*p == kernel_pmap)
1539 {
1540 s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
1541 break;
1542 }
1543 AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
1544 }
1545 pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
1546 PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1547 }
1548
1549 IPRT_DARWIN_RESTORE_EFL_AC();
1550 AssertReturn(PgNo, NIL_RTHCPHYS);
1551 PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
1552 Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
1553 }
1554 else
1555#endif /* USE_VM_MAP_WIRE */
1556 {
1557 /*
1558 * Get the memory descriptor.
1559 */
1560 IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
1561 if (!pMemDesc)
1562 pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
1563 AssertReturn(pMemDesc, NIL_RTHCPHYS);
1564
1565 /*
1566 * If we've got a memory descriptor, use getPhysicalSegment64().
1567 */
1568#ifdef __LP64__
1569 addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL, kIOMemoryMapperNone);
1570#else
1571 addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
1572#endif
1573 IPRT_DARWIN_RESTORE_EFL_AC();
1574 AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
1575 PhysAddr = Addr;
1576 AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
1577 }
1578
1579 return PhysAddr;
1580}
1581
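
For context, a hypothetical ring-0 caller of the locking path implemented above (rtR0MemObjNativeLockUser and rtR0MemObjNativeGetPagePhysAddr) might look like the following sketch; the public signatures are assumed from iprt/memobj.h and iprt/process.h, and the snippet is not part of this file:

    /* Hypothetical sketch: wire down a ring-3 buffer of the calling process, query
       the physical address of its first page, then release the lock again. */
    static int exampleLockUserBuffer(RTR3PTR R3PtrUser, size_t cb)
    {
        RTR0MEMOBJ hLock = NIL_RTR0MEMOBJ;
        int rc = RTR0MemObjLockUser(&hLock, R3PtrUser, cb,
                                    RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf());
        if (RT_SUCCESS(rc))
        {
            RTHCPHYS Phys0 = RTR0MemObjGetPagePhysAddr(hLock, 0); /* backed by the IOMemoryDescriptor created above */
            NOREF(Phys0);
            rc = RTR0MemObjFree(hLock, false /* fFreeMappings */);
        }
        return rc;
    }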