VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/memobj-r0drv.cpp

Last change on this file was 100357, checked in by vboxsync, 11 months ago

Runtime/RTR0MemObj*: Add PhysHighest parameter to RTR0MemObjAllocCont to indicate the maximum allowed physical address for an allocation, bugref:10457 [second attempt]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Rev Revision
File size: 31.4 KB
Line 
1/* $Id: memobj-r0drv.cpp 100357 2023-07-04 07:00:26Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Common Code.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#define LOG_GROUP RTLOGGROUP_DEFAULT /// @todo RTLOGGROUP_MEM
42#define RTMEM_NO_WRAP_TO_EF_APIS /* circular dependency otherwise. */
43#include <iprt/memobj.h>
44#include "internal/iprt.h"
45
46#include <iprt/alloc.h>
47#include <iprt/asm.h>
48#include <iprt/assert.h>
49#include <iprt/err.h>
50#include <iprt/log.h>
51#include <iprt/mp.h>
52#include <iprt/param.h>
53#include <iprt/process.h>
54#include <iprt/thread.h>
55
56#include "internal/memobj.h"
57
58
59/**
60 * Internal function for allocating a new memory object.
61 *
62 * @returns The allocated and initialized handle.
63 * @param cbSelf The size of the memory object handle. 0 mean default size.
64 * @param enmType The memory object type.
65 * @param pv The memory object mapping.
66 * @param cb The size of the memory object.
67 * @param pszTag The tag string.
68 */
69DECLHIDDEN(PRTR0MEMOBJINTERNAL) rtR0MemObjNew(size_t cbSelf, RTR0MEMOBJTYPE enmType, void *pv, size_t cb, const char *pszTag)
70{
71 PRTR0MEMOBJINTERNAL pNew;
72
73 /* validate the size */
74 if (!cbSelf)
75 cbSelf = sizeof(*pNew);
76 Assert(cbSelf >= sizeof(*pNew));
77 Assert(cbSelf == (uint32_t)cbSelf);
78 AssertMsg(RT_ALIGN_Z(cb, PAGE_SIZE) == cb, ("%#zx\n", cb));
79
80 /*
81 * Allocate and initialize the object.
82 */
83 pNew = (PRTR0MEMOBJINTERNAL)RTMemAllocZ(cbSelf);
84 if (pNew)
85 {
86 pNew->u32Magic = RTR0MEMOBJ_MAGIC;
87 pNew->cbSelf = (uint32_t)cbSelf;
88 pNew->enmType = enmType;
89 pNew->fFlags = 0;
90 pNew->cb = cb;
91 pNew->pv = pv;
92#ifdef DEBUG
93 pNew->pszTag = pszTag;
94#else
95 RT_NOREF_PV(pszTag);
96#endif
97 }
98 return pNew;
99}
100
101
102/**
103 * Deletes an incomplete memory object.
104 *
105 * This is for cleaning up after failures during object creation.
106 *
107 * @param pMem The incomplete memory object to delete.
108 */
109DECLHIDDEN(void) rtR0MemObjDelete(PRTR0MEMOBJINTERNAL pMem)
110{
111 if (pMem)
112 {
113 ASMAtomicUoWriteU32(&pMem->u32Magic, ~RTR0MEMOBJ_MAGIC);
114 pMem->enmType = RTR0MEMOBJTYPE_END;
115 RTMemFree(pMem);
116 }
117}
118
119
120/**
121 * Links a mapping object to a primary object.
122 *
123 * @returns IPRT status code.
124 * @retval VINF_SUCCESS on success.
125 * @retval VINF_NO_MEMORY if we couldn't expand the mapping array of the parent.
126 * @param pParent The parent (primary) memory object.
127 * @param pChild The child (mapping) memory object.
128 */
129static int rtR0MemObjLink(PRTR0MEMOBJINTERNAL pParent, PRTR0MEMOBJINTERNAL pChild)
130{
131 uint32_t i;
132
133 /* sanity */
134 Assert(rtR0MemObjIsMapping(pChild));
135 Assert(!rtR0MemObjIsMapping(pParent));
136
137 /* expand the array? */
138 i = pParent->uRel.Parent.cMappings;
139 if (i >= pParent->uRel.Parent.cMappingsAllocated)
140 {
141 void *pv = RTMemRealloc(pParent->uRel.Parent.papMappings,
142 (i + 32) * sizeof(pParent->uRel.Parent.papMappings[0]));
143 if (!pv)
144 return VERR_NO_MEMORY;
145 pParent->uRel.Parent.papMappings = (PPRTR0MEMOBJINTERNAL)pv;
146 pParent->uRel.Parent.cMappingsAllocated = i + 32;
147 Assert(i == pParent->uRel.Parent.cMappings);
148 }
149
150 /* do the linking. */
151 pParent->uRel.Parent.papMappings[i] = pChild;
152 pParent->uRel.Parent.cMappings++;
153 pChild->uRel.Child.pParent = pParent;
154
155 return VINF_SUCCESS;
156}
157
158
159RTR0DECL(bool) RTR0MemObjIsMapping(RTR0MEMOBJ MemObj)
160{
161 /* Validate the object handle. */
162 PRTR0MEMOBJINTERNAL pMem;
163 AssertPtrReturn(MemObj, false);
164 pMem = (PRTR0MEMOBJINTERNAL)MemObj;
165 AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), false);
166 AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), false);
167
168 /* hand it on to the inlined worker. */
169 return rtR0MemObjIsMapping(pMem);
170}
171RT_EXPORT_SYMBOL(RTR0MemObjIsMapping);
172
173
174RTR0DECL(void *) RTR0MemObjAddress(RTR0MEMOBJ MemObj)
175{
176 /* Validate the object handle. */
177 PRTR0MEMOBJINTERNAL pMem;
178 if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
179 return NULL;
180 AssertPtrReturn(MemObj, NULL);
181 pMem = (PRTR0MEMOBJINTERNAL)MemObj;
182 AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NULL);
183 AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NULL);
184
185 /* return the mapping address. */
186 return pMem->pv;
187}
188RT_EXPORT_SYMBOL(RTR0MemObjAddress);
189
190
191RTR0DECL(RTR3PTR) RTR0MemObjAddressR3(RTR0MEMOBJ MemObj)
192{
193 PRTR0MEMOBJINTERNAL pMem;
194
195 /* Validate the object handle. */
196 if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
197 return NIL_RTR3PTR;
198 AssertPtrReturn(MemObj, NIL_RTR3PTR);
199 pMem = (PRTR0MEMOBJINTERNAL)MemObj;
200 AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTR3PTR);
201 AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTR3PTR);
202 if (RT_UNLIKELY( ( pMem->enmType != RTR0MEMOBJTYPE_MAPPING
203 || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
204 && ( pMem->enmType != RTR0MEMOBJTYPE_LOCK
205 || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
206 && ( pMem->enmType != RTR0MEMOBJTYPE_PHYS_NC
207 || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
208 && ( pMem->enmType != RTR0MEMOBJTYPE_RES_VIRT
209 || pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS)))
210 return NIL_RTR3PTR;
211
212 /* return the mapping address. */
213 return (RTR3PTR)pMem->pv;
214}
215RT_EXPORT_SYMBOL(RTR0MemObjAddressR3);
216
217
218RTR0DECL(size_t) RTR0MemObjSize(RTR0MEMOBJ MemObj)
219{
220 PRTR0MEMOBJINTERNAL pMem;
221
222 /* Validate the object handle. */
223 if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
224 return 0;
225 AssertPtrReturn(MemObj, 0);
226 pMem = (PRTR0MEMOBJINTERNAL)MemObj;
227 AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), 0);
228 AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), 0);
229 AssertMsg(RT_ALIGN_Z(pMem->cb, PAGE_SIZE) == pMem->cb, ("%#zx\n", pMem->cb));
230
231 /* return the size. */
232 return pMem->cb;
233}
234RT_EXPORT_SYMBOL(RTR0MemObjSize);
235
236
237/* Work around gcc bug 55940 */
238#if defined(__GNUC__) && defined(RT_ARCH_X86) && (__GNUC__ * 100 + __GNUC_MINOR__) == 407
239 __attribute__((__optimize__ ("no-shrink-wrap")))
240#endif
241RTR0DECL(RTHCPHYS) RTR0MemObjGetPagePhysAddr(RTR0MEMOBJ MemObj, size_t iPage)
242{
243 /* Validate the object handle. */
244 PRTR0MEMOBJINTERNAL pMem;
245 size_t cPages;
246 AssertPtrReturn(MemObj, NIL_RTHCPHYS);
247 pMem = (PRTR0MEMOBJINTERNAL)MemObj;
248 AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, NIL_RTHCPHYS);
249 AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, NIL_RTHCPHYS);
250 AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTHCPHYS);
251 AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTHCPHYS);
252 cPages = (pMem->cb >> PAGE_SHIFT);
253 if (iPage >= cPages)
254 {
255 /* permit: while (RTR0MemObjGetPagePhysAddr(pMem, iPage++) != NIL_RTHCPHYS) {} */
256 if (iPage == cPages)
257 return NIL_RTHCPHYS;
258 AssertReturn(iPage < (pMem->cb >> PAGE_SHIFT), NIL_RTHCPHYS);
259 }
260
261 /*
262 * We know the address of physically contiguous allocations and mappings.
263 */
264 if (pMem->enmType == RTR0MEMOBJTYPE_CONT)
265 return pMem->u.Cont.Phys + iPage * PAGE_SIZE;
266 if (pMem->enmType == RTR0MEMOBJTYPE_PHYS)
267 return pMem->u.Phys.PhysBase + iPage * PAGE_SIZE;
268
269 /*
270 * Do the job.
271 */
272 return rtR0MemObjNativeGetPagePhysAddr(pMem, iPage);
273}
274RT_EXPORT_SYMBOL(RTR0MemObjGetPagePhysAddr);
275
276
277RTR0DECL(bool) RTR0MemObjWasZeroInitialized(RTR0MEMOBJ hMemObj)
278{
279 PRTR0MEMOBJINTERNAL pMem;
280
281 /* Validate the object handle. */
282 if (RT_UNLIKELY(hMemObj == NIL_RTR0MEMOBJ))
283 return false;
284 AssertPtrReturn(hMemObj, false);
285 pMem = (PRTR0MEMOBJINTERNAL)hMemObj;
286 AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), false);
287 AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), false);
288 Assert( (pMem->fFlags & (RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC | RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC))
289 != (RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC | RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC));
290
291 /* return the alloc init state. */
292 return (pMem->fFlags & (RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC | RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC))
293 == RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
294}
295RT_EXPORT_SYMBOL(RTR0MemObjWasZeroInitialized);
296
297
RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool fFreeMappings)
{
    /*
     * Validate the object handle.
     */
    PRTR0MEMOBJINTERNAL pMem;
    int rc;

    /* NIL is a valid "nothing to free" handle. */
    if (MemObj == NIL_RTR0MEMOBJ)
        return VINF_SUCCESS;
    AssertPtrReturn(MemObj, VERR_INVALID_HANDLE);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPTIBLE();

    /*
     * Deal with mappings according to fFreeMappings.
     */
    if (   !rtR0MemObjIsMapping(pMem)
        && pMem->uRel.Parent.cMappings > 0)
    {
        /* fail if not requested to free mappings. */
        if (!fFreeMappings)
            return VERR_MEMORY_BUSY;

        /* Free the child mappings back to front; each is detached from the
           array before the native free so a failure can restore it. */
        while (pMem->uRel.Parent.cMappings > 0)
        {
            PRTR0MEMOBJINTERNAL pChild = pMem->uRel.Parent.papMappings[--pMem->uRel.Parent.cMappings];
            pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings] = NULL;

            /* sanity checks. */
            AssertPtr(pChild);
            AssertFatal(pChild->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pChild->enmType > RTR0MEMOBJTYPE_INVALID && pChild->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(rtR0MemObjIsMapping(pChild));

            /* free the mapping. */
            rc = rtR0MemObjNativeFree(pChild);
            if (RT_FAILURE(rc))
            {
                Log(("RTR0MemObjFree: failed to free mapping %p: %p %#zx; rc=%Rrc\n", pChild, pChild->pv, pChild->cb, rc));
                /* Put the child back into the array so the object stays consistent. */
                pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings++] = pChild;
                return rc;
            }

            /* Invalidate the child handle (magic + type) before freeing it. */
            pChild->u32Magic++;
            pChild->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pChild);
        }
    }

    /*
     * Free this object.
     */
    rc = rtR0MemObjNativeFree(pMem);
    if (RT_SUCCESS(rc))
    {
        /*
         * Ok, it was freed just fine. Now, if it's a mapping we'll have to remove it from the parent.
         */
        if (rtR0MemObjIsMapping(pMem))
        {
            PRTR0MEMOBJINTERNAL pParent = pMem->uRel.Child.pParent;
            uint32_t i;

            /* sanity checks */
            AssertPtr(pParent);
            AssertFatal(pParent->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pParent->enmType > RTR0MEMOBJTYPE_INVALID && pParent->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(!rtR0MemObjIsMapping(pParent));
            AssertFatal(pParent->uRel.Parent.cMappings > 0);
            AssertPtr(pParent->uRel.Parent.papMappings);

            /* locate and remove from the array of mappings. */
            i = pParent->uRel.Parent.cMappings;
            while (i-- > 0)
            {
                if (pParent->uRel.Parent.papMappings[i] == pMem)
                {
                    /* Replace the entry with the last one (order is not preserved). */
                    pParent->uRel.Parent.papMappings[i] = pParent->uRel.Parent.papMappings[--pParent->uRel.Parent.cMappings];
                    break;
                }
            }
            Assert(i != UINT32_MAX); /* i only wraps to UINT32_MAX when pMem wasn't found. */
        }
        else
            Assert(pMem->uRel.Parent.cMappings == 0);

        /*
         * Finally, destroy the handle.
         */
        pMem->u32Magic++;
        pMem->enmType = RTR0MEMOBJTYPE_END;
        /* NOTE(review): rtR0MemObjIsMapping() is evaluated AFTER enmType was set
           to RTR0MEMOBJTYPE_END above.  If the inline worker bases its answer on
           enmType, this branch is taken for former mapping objects as well, in
           which case uRel.Parent.papMappings aliases uRel.Child.pParent — confirm
           against the definition in internal/memobj.h. */
        if (!rtR0MemObjIsMapping(pMem))
            RTMemFree(pMem->uRel.Parent.papMappings);
        RTMemFree(pMem);
    }
    else
        Log(("RTR0MemObjFree: failed to free %p: %d %p %#zx; rc=%Rrc\n",
             pMem, pMem->enmType, pMem->pv, pMem->cb, rc));
    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjFree);
402
403
404
405RTR0DECL(int) RTR0MemObjAllocPageTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag)
406{
407 /* sanity checks. */
408 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
409 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
410 *pMemObj = NIL_RTR0MEMOBJ;
411 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
412 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
413 RT_ASSERT_PREEMPTIBLE();
414
415 /* do the allocation. */
416 return rtR0MemObjNativeAllocPage(pMemObj, cbAligned, fExecutable, pszTag);
417}
418RT_EXPORT_SYMBOL(RTR0MemObjAllocPageTag);
419
420
421RTR0DECL(int) RTR0MemObjAllocLargeTag(PRTR0MEMOBJ pMemObj, size_t cb, size_t cbLargePage, uint32_t fFlags, const char *pszTag)
422{
423 /* sanity checks. */
424 const size_t cbAligned = RT_ALIGN_Z(cb, cbLargePage);
425 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
426 *pMemObj = NIL_RTR0MEMOBJ;
427#ifdef RT_ARCH_AMD64
428 AssertReturn(cbLargePage == _2M || cbLargePage == _1G, VERR_OUT_OF_RANGE);
429#elif defined(RT_ARCH_X86)
430 AssertReturn(cbLargePage == _2M || cbLargePage == _4M, VERR_OUT_OF_RANGE);
431#else
432 AssertReturn(RT_IS_POWER_OF_TWO(cbLargePage), VERR_NOT_POWER_OF_TWO);
433 AssertReturn(cbLargePage > PAGE_SIZE, VERR_OUT_OF_RANGE);
434#endif
435 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
436 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
437 AssertReturn(!(fFlags & ~RTMEMOBJ_ALLOC_LARGE_F_VALID_MASK), VERR_INVALID_PARAMETER);
438 RT_ASSERT_PREEMPTIBLE();
439
440 /* do the allocation. */
441 return rtR0MemObjNativeAllocLarge(pMemObj, cbAligned, cbLargePage, fFlags, pszTag);
442}
443RT_EXPORT_SYMBOL(RTR0MemObjAllocLargeTag);
444
445
446/**
447 * Fallback implementation of rtR0MemObjNativeAllocLarge and implements single
448 * page allocation using rtR0MemObjNativeAllocPhys.
449 */
450DECLHIDDEN(int) rtR0MemObjFallbackAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
451 const char *pszTag)
452{
453 RT_NOREF(pszTag, fFlags);
454 if (cb == cbLargePage)
455 return rtR0MemObjNativeAllocPhys(ppMem, cb, NIL_RTHCPHYS, cbLargePage, pszTag);
456 return VERR_NOT_SUPPORTED;
457}
458
459
460RTR0DECL(int) RTR0MemObjAllocLowTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag)
461{
462 /* sanity checks. */
463 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
464 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
465 *pMemObj = NIL_RTR0MEMOBJ;
466 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
467 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
468 RT_ASSERT_PREEMPTIBLE();
469
470 /* do the allocation. */
471 return rtR0MemObjNativeAllocLow(pMemObj, cbAligned, fExecutable, pszTag);
472}
473RT_EXPORT_SYMBOL(RTR0MemObjAllocLowTag);
474
475
476RTR0DECL(int) RTR0MemObjAllocContTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, bool fExecutable, const char *pszTag)
477{
478 /* sanity checks. */
479 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
480 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
481 *pMemObj = NIL_RTR0MEMOBJ;
482 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
483 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
484 AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
485 RT_ASSERT_PREEMPTIBLE();
486
487 /* do the allocation. */
488 return rtR0MemObjNativeAllocCont(pMemObj, cbAligned, PhysHighest, fExecutable, pszTag);
489}
490RT_EXPORT_SYMBOL(RTR0MemObjAllocContTag);
491
492
493RTR0DECL(int) RTR0MemObjLockUserTag(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb,
494 uint32_t fAccess, RTR0PROCESS R0Process, const char *pszTag)
495{
496 /* sanity checks. */
497 const size_t cbAligned = RT_ALIGN_Z(cb + (R3Ptr & PAGE_OFFSET_MASK), PAGE_SIZE);
498 RTR3PTR const R3PtrAligned = (R3Ptr & ~(RTR3PTR)PAGE_OFFSET_MASK);
499 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
500 *pMemObj = NIL_RTR0MEMOBJ;
501 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
502 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
503 if (R0Process == NIL_RTR0PROCESS)
504 R0Process = RTR0ProcHandleSelf();
505 AssertReturn(!(fAccess & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE)), VERR_INVALID_PARAMETER);
506 AssertReturn(fAccess, VERR_INVALID_PARAMETER);
507 RT_ASSERT_PREEMPTIBLE();
508
509 /* do the locking. */
510 return rtR0MemObjNativeLockUser(pMemObj, R3PtrAligned, cbAligned, fAccess, R0Process, pszTag);
511}
512RT_EXPORT_SYMBOL(RTR0MemObjLockUserTag);
513
514
515RTR0DECL(int) RTR0MemObjLockKernelTag(PRTR0MEMOBJ pMemObj, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
516{
517 /* sanity checks. */
518 const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
519 void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
520 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
521 *pMemObj = NIL_RTR0MEMOBJ;
522 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
523 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
524 AssertPtrReturn(pvAligned, VERR_INVALID_POINTER);
525 AssertReturn(!(fAccess & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE)), VERR_INVALID_PARAMETER);
526 AssertReturn(fAccess, VERR_INVALID_PARAMETER);
527 RT_ASSERT_PREEMPTIBLE();
528
529 /* do the allocation. */
530 return rtR0MemObjNativeLockKernel(pMemObj, pvAligned, cbAligned, fAccess, pszTag);
531}
532RT_EXPORT_SYMBOL(RTR0MemObjLockKernelTag);
533
534
535RTR0DECL(int) RTR0MemObjAllocPhysTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
536{
537 /* sanity checks. */
538 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
539 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
540 *pMemObj = NIL_RTR0MEMOBJ;
541 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
542 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
543 AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
544 RT_ASSERT_PREEMPTIBLE();
545
546 /* do the allocation. */
547 return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest, PAGE_SIZE /* page aligned */, pszTag);
548}
549RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysTag);
550
551
552RTR0DECL(int) RTR0MemObjAllocPhysExTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment, const char *pszTag)
553{
554 /* sanity checks. */
555 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
556 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
557 *pMemObj = NIL_RTR0MEMOBJ;
558 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
559 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
560 AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
561 if (uAlignment == 0)
562 uAlignment = PAGE_SIZE;
563 AssertReturn( uAlignment == PAGE_SIZE
564 || uAlignment == _2M
565 || uAlignment == _4M
566 || uAlignment == _1G,
567 VERR_INVALID_PARAMETER);
568#if HC_ARCH_BITS == 32
569 /* Memory allocated in this way is typically mapped into kernel space as well; simply
570 don't allow this on 32 bits hosts as the kernel space is too crowded already. */
571 if (uAlignment != PAGE_SIZE)
572 return VERR_NOT_SUPPORTED;
573#endif
574 RT_ASSERT_PREEMPTIBLE();
575
576 /* do the allocation. */
577 return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest, uAlignment, pszTag);
578}
579RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysExTag);
580
581
582RTR0DECL(int) RTR0MemObjAllocPhysNCTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
583{
584 /* sanity checks. */
585 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
586 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
587 *pMemObj = NIL_RTR0MEMOBJ;
588 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
589 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
590 AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
591 RT_ASSERT_PREEMPTIBLE();
592
593 /* do the allocation. */
594 return rtR0MemObjNativeAllocPhysNC(pMemObj, cbAligned, PhysHighest, pszTag);
595}
596RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysNCTag);
597
598
599RTR0DECL(int) RTR0MemObjEnterPhysTag(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy, const char *pszTag)
600{
601 /* sanity checks. */
602 const size_t cbAligned = RT_ALIGN_Z(cb + (Phys & PAGE_OFFSET_MASK), PAGE_SIZE);
603 const RTHCPHYS PhysAligned = Phys & ~(RTHCPHYS)PAGE_OFFSET_MASK;
604 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
605 *pMemObj = NIL_RTR0MEMOBJ;
606 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
607 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
608 AssertReturn(Phys != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
609 AssertReturn( uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE
610 || uCachePolicy == RTMEM_CACHE_POLICY_MMIO,
611 VERR_INVALID_PARAMETER);
612 RT_ASSERT_PREEMPTIBLE();
613
614 /* do the allocation. */
615 return rtR0MemObjNativeEnterPhys(pMemObj, PhysAligned, cbAligned, uCachePolicy, pszTag);
616}
617RT_EXPORT_SYMBOL(RTR0MemObjEnterPhysTag);
618
619
620RTR0DECL(int) RTR0MemObjReserveKernelTag(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment, const char *pszTag)
621{
622 /* sanity checks. */
623 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
624 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
625 *pMemObj = NIL_RTR0MEMOBJ;
626 if (uAlignment == 0)
627 uAlignment = PAGE_SIZE;
628 AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
629 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
630 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
631 if (pvFixed != (void *)-1)
632 AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
633 RT_ASSERT_PREEMPTIBLE();
634
635 /* do the reservation. */
636 return rtR0MemObjNativeReserveKernel(pMemObj, pvFixed, cbAligned, uAlignment, pszTag);
637}
638RT_EXPORT_SYMBOL(RTR0MemObjReserveKernelTag);
639
640
641RTR0DECL(int) RTR0MemObjReserveUserTag(PRTR0MEMOBJ pMemObj, RTR3PTR R3PtrFixed, size_t cb,
642 size_t uAlignment, RTR0PROCESS R0Process, const char *pszTag)
643{
644 /* sanity checks. */
645 const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
646 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
647 *pMemObj = NIL_RTR0MEMOBJ;
648 if (uAlignment == 0)
649 uAlignment = PAGE_SIZE;
650 AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
651 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
652 AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
653 if (R3PtrFixed != (RTR3PTR)-1)
654 AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
655 if (R0Process == NIL_RTR0PROCESS)
656 R0Process = RTR0ProcHandleSelf();
657 RT_ASSERT_PREEMPTIBLE();
658
659 /* do the reservation. */
660 return rtR0MemObjNativeReserveUser(pMemObj, R3PtrFixed, cbAligned, uAlignment, R0Process, pszTag);
661}
662RT_EXPORT_SYMBOL(RTR0MemObjReserveUserTag);
663
664
RTR0DECL(int) RTR0MemObjMapKernelTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed,
                                     size_t uAlignment, unsigned fProt, const char *pszTag)
{
    /* Convenience wrapper: offSub/cbSub of 0/0 map the entire object. */
    return RTR0MemObjMapKernelExTag(pMemObj, MemObjToMap, pvFixed, uAlignment, fProt, 0, 0, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjMapKernelTag);
671
672
RTR0DECL(int) RTR0MemObjMapKernelExTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment,
                                       unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
{
    PRTR0MEMOBJINTERNAL pMemToMap;
    PRTR0MEMOBJINTERNAL pNew;
    int rc;

    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    /* Mapping a mapping or a reservation is not allowed. */
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE; /* zero means page alignment */
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1) /* -1 is treated as "no fixed address" (skips the alignment check) */
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    /* The sub-range must be page aligned and fully within the object. */
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(offSub < pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn((!offSub && !cbSub) || (offSub + cbSub) <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* adjust the request to simplify the native code. */
    if (offSub == 0 && cbSub == pMemToMap->cb)
        cbSub = 0; /* cbSub == 0 signals "map the whole object" to the native code */

    /* do the mapping. */
    rc = rtR0MemObjNativeMapKernel(&pNew, pMemToMap, pvFixed, uAlignment, fProt, offSub, cbSub, pszTag);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            /* The mapping cannot be tracked by the parent: undo it and
               invalidate + destroy the new handle. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjMapKernelExTag);
729
730
RTR0DECL(int) RTR0MemObjMapUserTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed,
                                   size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process, const char *pszTag)
{
    /* Convenience wrapper: offSub/cbSub of 0/0 map the entire object. */
    return RTR0MemObjMapUserExTag(pMemObj, MemObjToMap, R3PtrFixed, uAlignment, fProt, R0Process, 0, 0, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjMapUserTag);
737
738
739RTR0DECL(int) RTR0MemObjMapUserExTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
740 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
741{
742 /* sanity checks. */
743 PRTR0MEMOBJINTERNAL pMemToMap;
744 PRTR0MEMOBJINTERNAL pNew;
745 int rc;
746 AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
747 pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
748 *pMemObj = NIL_RTR0MEMOBJ;
749 AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
750 AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
751 AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
752 AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
753 AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
754 if (uAlignment == 0)
755 uAlignment = PAGE_SIZE;
756 AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
757 if (R3PtrFixed != (RTR3PTR)-1)
758 AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
759 AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
760 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
761 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
762 AssertReturn(offSub < pMemToMap->cb, VERR_INVALID_PARAMETER);
763 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
764 AssertReturn(cbSub <= pMemToMap->cb, VERR_INVALID_PARAMETER);
765 AssertReturn((!offSub && !cbSub) || (offSub + cbSub) <= pMemToMap->cb, VERR_INVALID_PARAMETER);
766 if (R0Process == NIL_RTR0PROCESS)
767 R0Process = RTR0ProcHandleSelf();
768 RT_ASSERT_PREEMPTIBLE();
769
770 /* adjust the request to simplify the native code. */
771 if (offSub == 0 && cbSub == pMemToMap->cb)
772 cbSub = 0;
773
774 /* do the mapping. */
775 rc = rtR0MemObjNativeMapUser(&pNew, pMemToMap, R3PtrFixed, uAlignment, fProt, R0Process, offSub, cbSub, pszTag);
776 if (RT_SUCCESS(rc))
777 {
778 /* link it. */
779 rc = rtR0MemObjLink(pMemToMap, pNew);
780 if (RT_SUCCESS(rc))
781 *pMemObj = pNew;
782 else
783 {
784 /* damn, out of memory. bail out. */
785 int rc2 = rtR0MemObjNativeFree(pNew);
786 AssertRC(rc2);
787 pNew->u32Magic++;
788 pNew->enmType = RTR0MEMOBJTYPE_END;
789 RTMemFree(pNew);
790 }
791 }
792
793 return rc;
794}
795RT_EXPORT_SYMBOL(RTR0MemObjMapUserExTag);
796
797
798RTR0DECL(int) RTR0MemObjProtect(RTR0MEMOBJ hMemObj, size_t offSub, size_t cbSub, uint32_t fProt)
799{
800 PRTR0MEMOBJINTERNAL pMemObj;
801 int rc;
802
803 /* sanity checks. */
804 pMemObj = (PRTR0MEMOBJINTERNAL)hMemObj;
805 AssertPtrReturn(pMemObj, VERR_INVALID_HANDLE);
806 AssertReturn(pMemObj->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
807 AssertReturn(pMemObj->enmType > RTR0MEMOBJTYPE_INVALID && pMemObj->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
808 AssertReturn(rtR0MemObjIsProtectable(pMemObj), VERR_INVALID_PARAMETER);
809 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
810 AssertReturn(offSub < pMemObj->cb, VERR_INVALID_PARAMETER);
811 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
812 AssertReturn(cbSub <= pMemObj->cb, VERR_INVALID_PARAMETER);
813 AssertReturn(offSub + cbSub <= pMemObj->cb, VERR_INVALID_PARAMETER);
814 AssertReturn(!(fProt & ~(RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
815 RT_ASSERT_PREEMPTIBLE();
816
817 /* do the job */
818 rc = rtR0MemObjNativeProtect(pMemObj, offSub, cbSub, fProt);
819 if (RT_SUCCESS(rc))
820 pMemObj->fFlags |= RTR0MEMOBJ_FLAGS_PROT_CHANGED; /* record it */
821
822 return rc;
823}
824RT_EXPORT_SYMBOL(RTR0MemObjProtect);
825
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use