VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/memobj-r0drv.cpp

Last change on this file was 100357, checked in by vboxsync, 11 months ago

Runtime/RTR0MemObj*: Add PhysHighest parameter to RTR0MemObjAllocCont to indicate the maximum allowed physical address for an allocation, bugref:10457 [second attempt]

/* $Id: memobj-r0drv.cpp 100357 2023-07-04 07:00:26Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Common Code.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP RTLOGGROUP_DEFAULT /// @todo RTLOGGROUP_MEM
#define RTMEM_NO_WRAP_TO_EF_APIS /* circular dependency otherwise. */
#include <iprt/memobj.h>
#include "internal/iprt.h"

#include <iprt/alloc.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/mp.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include <iprt/thread.h>

#include "internal/memobj.h"


/**
 * Internal function for allocating a new memory object.
 *
 * @returns The allocated and initialized handle.
 * @param   cbSelf      The size of the memory object handle. 0 means default size.
 * @param   enmType     The memory object type.
 * @param   pv          The memory object mapping.
 * @param   cb          The size of the memory object.
 * @param   pszTag      The tag string.
 */
DECLHIDDEN(PRTR0MEMOBJINTERNAL) rtR0MemObjNew(size_t cbSelf, RTR0MEMOBJTYPE enmType, void *pv, size_t cb, const char *pszTag)
{
    PRTR0MEMOBJINTERNAL pNew;

    /* validate the size */
    if (!cbSelf)
        cbSelf = sizeof(*pNew);
    Assert(cbSelf >= sizeof(*pNew));
    Assert(cbSelf == (uint32_t)cbSelf);
    AssertMsg(RT_ALIGN_Z(cb, PAGE_SIZE) == cb, ("%#zx\n", cb));

    /*
     * Allocate and initialize the object.
     */
    pNew = (PRTR0MEMOBJINTERNAL)RTMemAllocZ(cbSelf);
    if (pNew)
    {
        pNew->u32Magic = RTR0MEMOBJ_MAGIC;
        pNew->cbSelf   = (uint32_t)cbSelf;
        pNew->enmType  = enmType;
        pNew->fFlags   = 0;
        pNew->cb       = cb;
        pNew->pv       = pv;
#ifdef DEBUG
        pNew->pszTag   = pszTag;
#else
        RT_NOREF_PV(pszTag);
#endif
    }
    return pNew;
}


/**
 * Deletes an incomplete memory object.
 *
 * This is for cleaning up after failures during object creation.
 *
 * @param   pMem    The incomplete memory object to delete.
 */
DECLHIDDEN(void) rtR0MemObjDelete(PRTR0MEMOBJINTERNAL pMem)
{
    if (pMem)
    {
        ASMAtomicUoWriteU32(&pMem->u32Magic, ~RTR0MEMOBJ_MAGIC);
        pMem->enmType = RTR0MEMOBJTYPE_END;
        RTMemFree(pMem);
    }
}


/**
 * Links a mapping object to a primary object.
 *
 * @returns IPRT status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if we couldn't expand the mapping array of the parent.
 * @param   pParent     The parent (primary) memory object.
 * @param   pChild      The child (mapping) memory object.
 */
static int rtR0MemObjLink(PRTR0MEMOBJINTERNAL pParent, PRTR0MEMOBJINTERNAL pChild)
{
    uint32_t i;

    /* sanity */
    Assert(rtR0MemObjIsMapping(pChild));
    Assert(!rtR0MemObjIsMapping(pParent));

    /* expand the array? */
    i = pParent->uRel.Parent.cMappings;
    if (i >= pParent->uRel.Parent.cMappingsAllocated)
    {
        void *pv = RTMemRealloc(pParent->uRel.Parent.papMappings,
                                (i + 32) * sizeof(pParent->uRel.Parent.papMappings[0]));
        if (!pv)
            return VERR_NO_MEMORY;
        pParent->uRel.Parent.papMappings = (PPRTR0MEMOBJINTERNAL)pv;
        pParent->uRel.Parent.cMappingsAllocated = i + 32;
        Assert(i == pParent->uRel.Parent.cMappings);
    }

    /* do the linking. */
    pParent->uRel.Parent.papMappings[i] = pChild;
    pParent->uRel.Parent.cMappings++;
    pChild->uRel.Child.pParent = pParent;

    return VINF_SUCCESS;
}


RTR0DECL(bool) RTR0MemObjIsMapping(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    AssertPtrReturn(MemObj, false);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), false);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), false);

    /* hand it on to the inlined worker. */
    return rtR0MemObjIsMapping(pMem);
}
RT_EXPORT_SYMBOL(RTR0MemObjIsMapping);


RTR0DECL(void *) RTR0MemObjAddress(RTR0MEMOBJ MemObj)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
        return NULL;
    AssertPtrReturn(MemObj, NULL);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NULL);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NULL);

    /* return the mapping address. */
    return pMem->pv;
}
RT_EXPORT_SYMBOL(RTR0MemObjAddress);


RTR0DECL(RTR3PTR) RTR0MemObjAddressR3(RTR0MEMOBJ MemObj)
{
    PRTR0MEMOBJINTERNAL pMem;

    /* Validate the object handle. */
    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
        return NIL_RTR3PTR;
    AssertPtrReturn(MemObj, NIL_RTR3PTR);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTR3PTR);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTR3PTR);
    if (RT_UNLIKELY(   (   pMem->enmType != RTR0MEMOBJTYPE_MAPPING
                        || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
                    && (   pMem->enmType != RTR0MEMOBJTYPE_LOCK
                        || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
                    && (   pMem->enmType != RTR0MEMOBJTYPE_PHYS_NC
                        || pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
                    && (   pMem->enmType != RTR0MEMOBJTYPE_RES_VIRT
                        || pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS)))
        return NIL_RTR3PTR;

    /* return the mapping address. */
    return (RTR3PTR)pMem->pv;
}
RT_EXPORT_SYMBOL(RTR0MemObjAddressR3);


RTR0DECL(size_t) RTR0MemObjSize(RTR0MEMOBJ MemObj)
{
    PRTR0MEMOBJINTERNAL pMem;

    /* Validate the object handle. */
    if (RT_UNLIKELY(MemObj == NIL_RTR0MEMOBJ))
        return 0;
    AssertPtrReturn(MemObj, 0);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), 0);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), 0);
    AssertMsg(RT_ALIGN_Z(pMem->cb, PAGE_SIZE) == pMem->cb, ("%#zx\n", pMem->cb));

    /* return the size. */
    return pMem->cb;
}
RT_EXPORT_SYMBOL(RTR0MemObjSize);


/* Work around gcc bug 55940 */
#if defined(__GNUC__) && defined(RT_ARCH_X86) && (__GNUC__ * 100 + __GNUC_MINOR__) == 407
 __attribute__((__optimize__ ("no-shrink-wrap")))
#endif
RTR0DECL(RTHCPHYS) RTR0MemObjGetPagePhysAddr(RTR0MEMOBJ MemObj, size_t iPage)
{
    /* Validate the object handle. */
    PRTR0MEMOBJINTERNAL pMem;
    size_t cPages;
    AssertPtrReturn(MemObj, NIL_RTHCPHYS);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), NIL_RTHCPHYS);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), NIL_RTHCPHYS);
    cPages = (pMem->cb >> PAGE_SHIFT);
    if (iPage >= cPages)
    {
        /* permit: while (RTR0MemObjGetPagePhysAddr(pMem, iPage++) != NIL_RTHCPHYS) {} */
        if (iPage == cPages)
            return NIL_RTHCPHYS;
        AssertReturn(iPage < (pMem->cb >> PAGE_SHIFT), NIL_RTHCPHYS);
    }

    /*
     * We know the address of physically contiguous allocations and mappings.
     */
    if (pMem->enmType == RTR0MEMOBJTYPE_CONT)
        return pMem->u.Cont.Phys + iPage * PAGE_SIZE;
    if (pMem->enmType == RTR0MEMOBJTYPE_PHYS)
        return pMem->u.Phys.PhysBase + iPage * PAGE_SIZE;

    /*
     * Do the job.
     */
    return rtR0MemObjNativeGetPagePhysAddr(pMem, iPage);
}
RT_EXPORT_SYMBOL(RTR0MemObjGetPagePhysAddr);
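
/*
 * Usage sketch (editor's illustration, not part of the original file): walking
 * all pages of a valid memory object handle (hMemObj is assumed) and logging
 * their host-physical addresses, relying on the documented convention above
 * that iPage == cPages returns NIL_RTHCPHYS.
 *
 * @code
 *     size_t   iPage = 0;
 *     RTHCPHYS HCPhys;
 *     while ((HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage)) != NIL_RTHCPHYS)
 *     {
 *         Log(("page %zu at %RHp\n", iPage, HCPhys));
 *         iPage++;
 *     }
 * @endcode
 */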


RTR0DECL(bool) RTR0MemObjWasZeroInitialized(RTR0MEMOBJ hMemObj)
{
    PRTR0MEMOBJINTERNAL pMem;

    /* Validate the object handle. */
    if (RT_UNLIKELY(hMemObj == NIL_RTR0MEMOBJ))
        return false;
    AssertPtrReturn(hMemObj, false);
    pMem = (PRTR0MEMOBJINTERNAL)hMemObj;
    AssertMsgReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, ("%p: %#x\n", pMem, pMem->u32Magic), false);
    AssertMsgReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, ("%p: %d\n", pMem, pMem->enmType), false);
    Assert(   (pMem->fFlags & (RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC | RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC))
           != (RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC | RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC));

    /* return the alloc init state. */
    return (pMem->fFlags & (RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC | RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC))
        == RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
}
RT_EXPORT_SYMBOL(RTR0MemObjWasZeroInitialized);
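
/*
 * Usage sketch (editor's illustration, not part of the original file): only
 * scrub an allocation when the backend did not already hand out zeroed pages.
 *
 * @code
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjAllocPageTag(&hMemObj, cb, false, "example"); // fExecutable = false
 *     if (RT_SUCCESS(rc))
 *     {
 *         if (!RTR0MemObjWasZeroInitialized(hMemObj))
 *             RT_BZERO(RTR0MemObjAddress(hMemObj), RTR0MemObjSize(hMemObj));
 *         ...
 *     }
 * @endcode
 */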


RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool fFreeMappings)
{
    /*
     * Validate the object handle.
     */
    PRTR0MEMOBJINTERNAL pMem;
    int rc;

    if (MemObj == NIL_RTR0MEMOBJ)
        return VINF_SUCCESS;
    AssertPtrReturn(MemObj, VERR_INVALID_HANDLE);
    pMem = (PRTR0MEMOBJINTERNAL)MemObj;
    AssertReturn(pMem->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMem->enmType > RTR0MEMOBJTYPE_INVALID && pMem->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPTIBLE();

    /*
     * Deal with mappings according to fFreeMappings.
     */
    if (   !rtR0MemObjIsMapping(pMem)
        && pMem->uRel.Parent.cMappings > 0)
    {
        /* fail if not requested to free mappings. */
        if (!fFreeMappings)
            return VERR_MEMORY_BUSY;

        while (pMem->uRel.Parent.cMappings > 0)
        {
            PRTR0MEMOBJINTERNAL pChild = pMem->uRel.Parent.papMappings[--pMem->uRel.Parent.cMappings];
            pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings] = NULL;

            /* sanity checks. */
            AssertPtr(pChild);
            AssertFatal(pChild->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pChild->enmType > RTR0MEMOBJTYPE_INVALID && pChild->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(rtR0MemObjIsMapping(pChild));

            /* free the mapping. */
            rc = rtR0MemObjNativeFree(pChild);
            if (RT_FAILURE(rc))
            {
                Log(("RTR0MemObjFree: failed to free mapping %p: %p %#zx; rc=%Rrc\n", pChild, pChild->pv, pChild->cb, rc));
                pMem->uRel.Parent.papMappings[pMem->uRel.Parent.cMappings++] = pChild;
                return rc;
            }

            pChild->u32Magic++;
            pChild->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pChild);
        }
    }

    /*
     * Free this object.
     */
    rc = rtR0MemObjNativeFree(pMem);
    if (RT_SUCCESS(rc))
    {
        /*
         * Ok, it was freed just fine. Now, if it's a mapping we'll have to remove it from the parent.
         */
        if (rtR0MemObjIsMapping(pMem))
        {
            PRTR0MEMOBJINTERNAL pParent = pMem->uRel.Child.pParent;
            uint32_t i;

            /* sanity checks */
            AssertPtr(pParent);
            AssertFatal(pParent->u32Magic == RTR0MEMOBJ_MAGIC);
            AssertFatal(pParent->enmType > RTR0MEMOBJTYPE_INVALID && pParent->enmType < RTR0MEMOBJTYPE_END);
            AssertFatal(!rtR0MemObjIsMapping(pParent));
            AssertFatal(pParent->uRel.Parent.cMappings > 0);
            AssertPtr(pParent->uRel.Parent.papMappings);

            /* locate and remove from the array of mappings. */
            i = pParent->uRel.Parent.cMappings;
            while (i-- > 0)
            {
                if (pParent->uRel.Parent.papMappings[i] == pMem)
                {
                    pParent->uRel.Parent.papMappings[i] = pParent->uRel.Parent.papMappings[--pParent->uRel.Parent.cMappings];
                    break;
                }
            }
            Assert(i != UINT32_MAX);
        }
        else
            Assert(pMem->uRel.Parent.cMappings == 0);

        /*
         * Finally, destroy the handle.
         */
        pMem->u32Magic++;
        pMem->enmType = RTR0MEMOBJTYPE_END;
        if (!rtR0MemObjIsMapping(pMem))
            RTMemFree(pMem->uRel.Parent.papMappings);
        RTMemFree(pMem);
    }
    else
        Log(("RTR0MemObjFree: failed to free %p: %d %p %#zx; rc=%Rrc\n",
             pMem, pMem->enmType, pMem->pv, pMem->cb, rc));
    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjFree);
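
/*
 * Usage sketch (editor's illustration, not part of the original file): freeing
 * a primary object that may still have live mappings.  With fFreeMappings
 * false the call fails with VERR_MEMORY_BUSY rather than tearing them down.
 *
 * @code
 *     int rc = RTR0MemObjFree(hMemObj, false); // fFreeMappings = false
 *     if (rc == VERR_MEMORY_BUSY)
 *         rc = RTR0MemObjFree(hMemObj, true); // tear down the child mappings as well
 *     AssertRC(rc);
 *     hMemObj = NIL_RTR0MEMOBJ;
 * @endcode
 */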


RTR0DECL(int) RTR0MemObjAllocPageTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocPage(pMemObj, cbAligned, fExecutable, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocPageTag);
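
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * page-aligned scratch buffer; cb may be any non-zero size and is rounded up
 * to PAGE_SIZE internally.
 *
 * @code
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjAllocPageTag(&hMemObj, 16384, false, "scratch"); // fExecutable = false
 *     if (RT_SUCCESS(rc))
 *     {
 *         void *pv = RTR0MemObjAddress(hMemObj);
 *         ...
 *         RTR0MemObjFree(hMemObj, false); // fFreeMappings = false
 *     }
 * @endcode
 */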


RTR0DECL(int) RTR0MemObjAllocLargeTag(PRTR0MEMOBJ pMemObj, size_t cb, size_t cbLargePage, uint32_t fFlags, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, cbLargePage);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
#ifdef RT_ARCH_AMD64
    AssertReturn(cbLargePage == _2M || cbLargePage == _1G, VERR_OUT_OF_RANGE);
#elif defined(RT_ARCH_X86)
    AssertReturn(cbLargePage == _2M || cbLargePage == _4M, VERR_OUT_OF_RANGE);
#else
    AssertReturn(RT_IS_POWER_OF_TWO(cbLargePage), VERR_NOT_POWER_OF_TWO);
    AssertReturn(cbLargePage > PAGE_SIZE, VERR_OUT_OF_RANGE);
#endif
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(!(fFlags & ~RTMEMOBJ_ALLOC_LARGE_F_VALID_MASK), VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocLarge(pMemObj, cbAligned, cbLargePage, fFlags, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocLargeTag);
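
/*
 * Usage sketch (editor's illustration, not part of the original file): asking
 * for 32 MB backed by 2 MB large pages on x86/amd64, passing zero for fFlags
 * to get the default behaviour.
 *
 * @code
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjAllocLargeTag(&hMemObj, 16 * _2M, _2M, 0, "large-buf"); // fFlags = 0
 *     if (RT_SUCCESS(rc))
 *     {
 *         ...
 *         RTR0MemObjFree(hMemObj, false); // fFreeMappings = false
 *     }
 * @endcode
 */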


/**
 * Fallback implementation of rtR0MemObjNativeAllocLarge, handling the
 * single large page case via rtR0MemObjNativeAllocPhys.
 */
DECLHIDDEN(int) rtR0MemObjFallbackAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
                                             const char *pszTag)
{
    RT_NOREF(pszTag, fFlags);
    if (cb == cbLargePage)
        return rtR0MemObjNativeAllocPhys(ppMem, cb, NIL_RTHCPHYS, cbLargePage, pszTag);
    return VERR_NOT_SUPPORTED;
}


RTR0DECL(int) RTR0MemObjAllocLowTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocLow(pMemObj, cbAligned, fExecutable, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocLowTag);


RTR0DECL(int) RTR0MemObjAllocContTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, bool fExecutable, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocCont(pMemObj, cbAligned, PhysHighest, fExecutable, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocContTag);
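
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * physically contiguous buffer kept below 4 GB, as many DMA-capable devices
 * require, using the PhysHighest parameter added in r100357.
 *
 * @code
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjAllocContTag(&hMemObj, _64K, _4G - 1, false, "dma-buf"); // fExecutable = false
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTHCPHYS HCPhysBase = RTR0MemObjGetPagePhysAddr(hMemObj, 0); // iPage 0 = base address
 *         ...
 *         RTR0MemObjFree(hMemObj, false); // fFreeMappings = false
 *     }
 * @endcode
 */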


RTR0DECL(int) RTR0MemObjLockUserTag(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb,
                                    uint32_t fAccess, RTR0PROCESS R0Process, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb + (R3Ptr & PAGE_OFFSET_MASK), PAGE_SIZE);
    RTR3PTR const R3PtrAligned = (R3Ptr & ~(RTR3PTR)PAGE_OFFSET_MASK);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();
    AssertReturn(!(fAccess & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE)), VERR_INVALID_PARAMETER);
    AssertReturn(fAccess, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the locking. */
    return rtR0MemObjNativeLockUser(pMemObj, R3PtrAligned, cbAligned, fAccess, R0Process, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjLockUserTag);
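
/*
 * Usage sketch (editor's illustration, not part of the original file): pinning
 * a user-mode buffer for read/write access in the calling process; R3Ptr need
 * not be page aligned, the API aligns both address and size itself.
 *
 * @code
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjLockUserTag(&hMemObj, R3Ptr, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
 *                                    NIL_RTR0PROCESS, "user-lock"); // NIL = calling process
 *     if (RT_SUCCESS(rc))
 *     {
 *         ... // the pages stay resident until the object is freed
 *         RTR0MemObjFree(hMemObj, false); // fFreeMappings = false
 *     }
 * @endcode
 */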


RTR0DECL(int) RTR0MemObjLockKernelTag(PRTR0MEMOBJ pMemObj, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
    void * const pvAligned = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pvAligned, VERR_INVALID_POINTER);
    AssertReturn(!(fAccess & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE)), VERR_INVALID_PARAMETER);
    AssertReturn(fAccess, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the locking. */
    return rtR0MemObjNativeLockKernel(pMemObj, pvAligned, cbAligned, fAccess, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjLockKernelTag);


RTR0DECL(int) RTR0MemObjAllocPhysTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest, PAGE_SIZE /* page aligned */, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysTag);
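
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * contiguous physical pages below 4 GB.  Unlike RTR0MemObjAllocCont, the
 * resulting PHYS object need not come with a ring-0 mapping, so map it into
 * the kernel before touching it.
 *
 * @code
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjAllocPhysTag(&hMemObj, _64K, _4G - 1, "phys-buf");
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTR0MEMOBJ hMapObj;
 *         rc = RTR0MemObjMapKernelTag(&hMapObj, hMemObj, (void *)-1, 0, // anywhere, default alignment
 *                                     RTMEM_PROT_READ | RTMEM_PROT_WRITE, "phys-map");
 *         ...
 *     }
 * @endcode
 */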


RTR0DECL(int) RTR0MemObjAllocPhysExTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(   uAlignment == PAGE_SIZE
                 || uAlignment == _2M
                 || uAlignment == _4M
                 || uAlignment == _1G,
                 VERR_INVALID_PARAMETER);
#if HC_ARCH_BITS == 32
    /* Memory allocated in this way is typically mapped into kernel space as well; simply
       don't allow this on 32-bit hosts as the kernel space is too crowded already. */
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;
#endif
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhys(pMemObj, cbAligned, PhysHighest, uAlignment, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysExTag);


RTR0DECL(int) RTR0MemObjAllocPhysNCTag(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(PhysHighest >= cb, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the allocation. */
    return rtR0MemObjNativeAllocPhysNC(pMemObj, cbAligned, PhysHighest, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjAllocPhysNCTag);


RTR0DECL(int) RTR0MemObjEnterPhysTag(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb + (Phys & PAGE_OFFSET_MASK), PAGE_SIZE);
    const RTHCPHYS PhysAligned = Phys & ~(RTHCPHYS)PAGE_OFFSET_MASK;
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    AssertReturn(Phys != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    AssertReturn(   uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE
                 || uCachePolicy == RTMEM_CACHE_POLICY_MMIO,
                 VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the job. */
    return rtR0MemObjNativeEnterPhys(pMemObj, PhysAligned, cbAligned, uCachePolicy, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjEnterPhysTag);


RTR0DECL(int) RTR0MemObjReserveKernelTag(PRTR0MEMOBJ pMemObj, void *pvFixed, size_t cb, size_t uAlignment, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the reservation. */
    return rtR0MemObjNativeReserveKernel(pMemObj, pvFixed, cbAligned, uAlignment, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjReserveKernelTag);
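
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * reserving a range of kernel address space wherever the OS likes, by passing
 * (void *)-1 for pvFixed and zero for uAlignment (defaults to PAGE_SIZE).
 *
 * @code
 *     RTR0MEMOBJ hReserveObj;
 *     int rc = RTR0MemObjReserveKernelTag(&hReserveObj, (void *)-1, _1M, 0, "krnl-resv");
 *     if (RT_SUCCESS(rc))
 *     {
 *         void *pv = RTR0MemObjAddress(hReserveObj); // start of the reserved range
 *         ...
 *     }
 * @endcode
 */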


RTR0DECL(int) RTR0MemObjReserveUserTag(PRTR0MEMOBJ pMemObj, RTR3PTR R3PtrFixed, size_t cb,
                                       size_t uAlignment, RTR0PROCESS R0Process, const char *pszTag)
{
    /* sanity checks. */
    const size_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
    if (R3PtrFixed != (RTR3PTR)-1)
        AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();
    RT_ASSERT_PREEMPTIBLE();

    /* do the reservation. */
    return rtR0MemObjNativeReserveUser(pMemObj, R3PtrFixed, cbAligned, uAlignment, R0Process, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjReserveUserTag);


RTR0DECL(int) RTR0MemObjMapKernelTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed,
                                     size_t uAlignment, unsigned fProt, const char *pszTag)
{
    return RTR0MemObjMapKernelExTag(pMemObj, MemObjToMap, pvFixed, uAlignment, fProt, 0, 0, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjMapKernelTag);


RTR0DECL(int) RTR0MemObjMapKernelExTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment,
                                       unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
{
    PRTR0MEMOBJINTERNAL pMemToMap;
    PRTR0MEMOBJINTERNAL pNew;
    int rc;

    /* sanity checks. */
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (pvFixed != (void *)-1)
        AssertReturn(!((uintptr_t)pvFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(offSub < pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn((!offSub && !cbSub) || (offSub + cbSub) <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* adjust the request to simplify the native code. */
    if (offSub == 0 && cbSub == pMemToMap->cb)
        cbSub = 0;

    /* do the mapping. */
    rc = rtR0MemObjNativeMapKernel(&pNew, pMemToMap, pvFixed, uAlignment, fProt, offSub, cbSub, pszTag);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjMapKernelExTag);
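
/*
 * Usage sketch (editor's illustration, not part of the original file): mapping
 * only the second page of an existing object (hMemObj, assumed to span at
 * least two pages) read-only into kernel space; the mapping is linked to its
 * parent and torn down with RTR0MemObjFree.
 *
 * @code
 *     RTR0MEMOBJ hMapObj;
 *     int rc = RTR0MemObjMapKernelExTag(&hMapObj, hMemObj, (void *)-1, 0, RTMEM_PROT_READ,
 *                                       PAGE_SIZE, PAGE_SIZE, "sub-map"); // offSub, cbSub
 *     if (RT_SUCCESS(rc))
 *     {
 *         void const *pv = RTR0MemObjAddress(hMapObj);
 *         ...
 *         RTR0MemObjFree(hMapObj, false); // fFreeMappings = false
 *     }
 * @endcode
 */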


RTR0DECL(int) RTR0MemObjMapUserTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed,
                                   size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process, const char *pszTag)
{
    return RTR0MemObjMapUserExTag(pMemObj, MemObjToMap, R3PtrFixed, uAlignment, fProt, R0Process, 0, 0, pszTag);
}
RT_EXPORT_SYMBOL(RTR0MemObjMapUserTag);


RTR0DECL(int) RTR0MemObjMapUserExTag(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                     unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
{
    /* sanity checks. */
    PRTR0MEMOBJINTERNAL pMemToMap;
    PRTR0MEMOBJINTERNAL pNew;
    int rc;
    AssertPtrReturn(pMemObj, VERR_INVALID_POINTER);
    pMemToMap = (PRTR0MEMOBJINTERNAL)MemObjToMap;
    *pMemObj = NIL_RTR0MEMOBJ;
    AssertPtrReturn(MemObjToMap, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemToMap->enmType > RTR0MEMOBJTYPE_INVALID && pMemToMap->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(!rtR0MemObjIsMapping(pMemToMap), VERR_INVALID_PARAMETER);
    AssertReturn(pMemToMap->enmType != RTR0MEMOBJTYPE_RES_VIRT, VERR_INVALID_PARAMETER);
    if (uAlignment == 0)
        uAlignment = PAGE_SIZE;
    AssertReturn(uAlignment == PAGE_SIZE || uAlignment == _2M || uAlignment == _4M, VERR_INVALID_PARAMETER);
    if (R3PtrFixed != (RTR3PTR)-1)
        AssertReturn(!(R3PtrFixed & (uAlignment - 1)), VERR_INVALID_PARAMETER);
    AssertReturn(fProt != RTMEM_PROT_NONE, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(offSub < pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    AssertReturn((!offSub && !cbSub) || (offSub + cbSub) <= pMemToMap->cb, VERR_INVALID_PARAMETER);
    if (R0Process == NIL_RTR0PROCESS)
        R0Process = RTR0ProcHandleSelf();
    RT_ASSERT_PREEMPTIBLE();

    /* adjust the request to simplify the native code. */
    if (offSub == 0 && cbSub == pMemToMap->cb)
        cbSub = 0;

    /* do the mapping. */
    rc = rtR0MemObjNativeMapUser(&pNew, pMemToMap, R3PtrFixed, uAlignment, fProt, R0Process, offSub, cbSub, pszTag);
    if (RT_SUCCESS(rc))
    {
        /* link it. */
        rc = rtR0MemObjLink(pMemToMap, pNew);
        if (RT_SUCCESS(rc))
            *pMemObj = pNew;
        else
        {
            /* damn, out of memory. bail out. */
            int rc2 = rtR0MemObjNativeFree(pNew);
            AssertRC(rc2);
            pNew->u32Magic++;
            pNew->enmType = RTR0MEMOBJTYPE_END;
            RTMemFree(pNew);
        }
    }

    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjMapUserExTag);
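
/*
 * Usage sketch (editor's illustration, not part of the original file): exposing
 * a whole ring-0 allocation read-only to the calling process at an address of
 * the OS's choosing, then retrieving the ring-3 address of the mapping.
 *
 * @code
 *     RTR0MEMOBJ hMapObj;
 *     int rc = RTR0MemObjMapUserTag(&hMapObj, hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ,
 *                                   NIL_RTR0PROCESS, "user-map"); // NIL = calling process
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTR3PTR R3Ptr = RTR0MemObjAddressR3(hMapObj);
 *         ...
 *     }
 * @endcode
 */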


RTR0DECL(int) RTR0MemObjProtect(RTR0MEMOBJ hMemObj, size_t offSub, size_t cbSub, uint32_t fProt)
{
    PRTR0MEMOBJINTERNAL pMemObj;
    int rc;

    /* sanity checks. */
    pMemObj = (PRTR0MEMOBJINTERNAL)hMemObj;
    AssertPtrReturn(pMemObj, VERR_INVALID_HANDLE);
    AssertReturn(pMemObj->u32Magic == RTR0MEMOBJ_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pMemObj->enmType > RTR0MEMOBJTYPE_INVALID && pMemObj->enmType < RTR0MEMOBJTYPE_END, VERR_INVALID_HANDLE);
    AssertReturn(rtR0MemObjIsProtectable(pMemObj), VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(offSub < pMemObj->cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub <= pMemObj->cb, VERR_INVALID_PARAMETER);
    AssertReturn(offSub + cbSub <= pMemObj->cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(fProt & ~(RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC)), VERR_INVALID_PARAMETER);
    RT_ASSERT_PREEMPTIBLE();

    /* do the job */
    rc = rtR0MemObjNativeProtect(pMemObj, offSub, cbSub, fProt);
    if (RT_SUCCESS(rc))
        pMemObj->fFlags |= RTR0MEMOBJ_FLAGS_PROT_CHANGED; /* record it */

    return rc;
}
RT_EXPORT_SYMBOL(RTR0MemObjProtect);
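
/*
 * Usage sketch (editor's illustration, not part of the original file): making
 * the first page of a protectable object (e.g. the hMapObj mapping from the
 * earlier sketches) read-only; offSub and cbSub must be page aligned.
 *
 * @code
 *     int rc = RTR0MemObjProtect(hMapObj, 0, PAGE_SIZE, RTMEM_PROT_READ); // offSub = 0
 *     AssertRC(rc);
 * @endcode
 */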