VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/mempage-heap.cpp

Last change on this file was 103005, checked in by vboxsync, 5 months ago

iprt/asm.h,*: Split out the ASMMem* and related stuff into a separate header, asm-mem.h, so that we can get the RT_ASM_PAGE_SIZE stuff out of the way.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 28.4 KB
RevLine 
[97908]1/* $Id: mempage-heap.cpp 103005 2024-01-23 23:55:58Z vboxsync $ */
[33269]2/** @file
[101301]3 * IPRT - RTMemPage*, optimized using heap.
[33269]4 */
5
6/*
[98103]7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
[33269]8 *
[96407]9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
[33269]11 *
[96407]12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
[33269]25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
[96407]27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
[33269]29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
[96407]33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
[33269]35 */
36
37
[57358]38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
[33269]41#include "internal/iprt.h"
42#include <iprt/mem.h>
43
[103005]44#include <iprt/asm-mem.h>
[33269]45#include <iprt/asm.h>
46#include <iprt/assert.h>
47#include <iprt/avl.h>
48#include <iprt/critsect.h>
[76452]49#include <iprt/errcore.h>
[101150]50#include <iprt/list.h>
[33269]51#include <iprt/once.h>
52#include <iprt/param.h>
53#include <iprt/string.h>
[101162]54#include "internal/mem.h"
[33269]55
56
57
[57358]58/*********************************************************************************************************************************
59* Defined Constants And Macros *
60*********************************************************************************************************************************/
/** Threshold at which we switch to simply calling mmap. */
[101151]62#define RTMEMPAGE_NATIVE_THRESHOLD _1M
[33269]63/** The size of a heap block (power of two) - in bytes. */
[101151]64#define RTMEMPAGE_BLOCK_SIZE _4M
[101150]65
[33269]66/** The number of pages per heap block. */
[101151]67#define RTMEMPAGE_BLOCK_PAGE_COUNT (RTMEMPAGE_BLOCK_SIZE / PAGE_SIZE)
68AssertCompile(RTMEMPAGE_BLOCK_SIZE == RTMEMPAGE_BLOCK_PAGE_COUNT * PAGE_SIZE);
[33269]69
70
[57358]71/*********************************************************************************************************************************
72* Structures and Typedefs *
73*********************************************************************************************************************************/
/** Pointer to a page heap block. */
typedef struct RTHEAPPAGEBLOCK *PRTHEAPPAGEBLOCK;

/**
 * A simple page heap.
 */
typedef struct RTHEAPPAGE
{
    /** Magic number (RTHEAPPAGE_MAGIC). */
    uint32_t            u32Magic;
    /** The number of pages in the heap (in BlockTree). */
    uint32_t            cHeapPages;
    /** The number of currently free pages. */
    uint32_t            cFreePages;
    /** Number of successful allocation calls. */
    uint32_t            cAllocCalls;
    /** Number of successful free calls. */
    uint32_t            cFreeCalls;
    /** The free call number at which we last tried to minimize the heap. */
    uint32_t            uLastMinimizeCall;
    /** Tree of heap blocks. */
    AVLRPVTREE          BlockTree;
    /** Allocation hint no 1 (last freed). */
    PRTHEAPPAGEBLOCK    pHint1;
    /** Allocation hint no 2 (last alloc). */
    PRTHEAPPAGEBLOCK    pHint2;
    /** The allocation chunks for the RTHEAPPAGEBLOCK allocator
     * (RTHEAPPAGEBLOCKALLOCCHUNK). */
    RTLISTANCHOR        BlockAllocatorChunks;
    /** Critical section protecting the heap. */
    RTCRITSECT          CritSect;
    /** Set if the memory must be allocated with execute access. */
    bool                fExec;
} RTHEAPPAGE;
#define RTHEAPPAGE_MAGIC    UINT32_C(0xfeedface)
/** Pointer to a page heap. */
typedef RTHEAPPAGE *PRTHEAPPAGE;
111
112
/**
 * Describes a page heap block.
 */
typedef struct RTHEAPPAGEBLOCK
{
    /** The AVL tree node core (void pointer range). */
    AVLRPVNODECORE      Core;
    /** The number of free pages. */
    uint32_t            cFreePages;
    /** Pointer back to the heap. */
    PRTHEAPPAGE         pHeap;
    /** Allocation bitmap.  Set bits mark allocated pages. */
    uint32_t            bmAlloc[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    /** Allocation boundary bitmap.  Set bits mark the start of
     * allocations. */
    uint32_t            bmFirst[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_LOCKED has been
     * successfully applied. */
    uint32_t            bmLockedAdviced[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_NO_DUMP has been
     * successfully applied. */
    uint32_t            bmNoDumpAdviced[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
} RTHEAPPAGEBLOCK;
136
137
/**
 * Allocation chunk of RTHEAPPAGEBLOCK structures.
 *
 * This is backed by a 64KB allocation and non-present blocks will be marked as
 * allocated in bmAlloc.
 */
typedef struct RTHEAPPAGEBLOCKALLOCCHUNK
{
    /** List entry. */
    RTLISTNODE      ListEntry;
    /** Number of free RTHEAPPAGEBLOCK structures here. */
    uint32_t        cFree;
    /** Number of blocks in aBlocks. */
    uint32_t        cBlocks;
    /** Allocation bitmap.  Set bits mark allocated (or non-present) blocks. */
    uint32_t        bmAlloc[ARCH_BITS == 32 ? 28 : 26];
    /** Block array. */
    RT_FLEXIBLE_ARRAY_EXTENSION
    RTHEAPPAGEBLOCK aBlocks[RT_FLEXIBLE_ARRAY];
} RTHEAPPAGEBLOCKALLOCCHUNK;
AssertCompileMemberAlignment(RTHEAPPAGEBLOCKALLOCCHUNK, bmAlloc, 8);
AssertCompileMemberAlignment(RTHEAPPAGEBLOCKALLOCCHUNK, aBlocks, 64);
/** Pointer to an allocation chunk of RTHEAPPAGEBLOCK structures. */
typedef RTHEAPPAGEBLOCKALLOCCHUNK *PRTHEAPPAGEBLOCKALLOCCHUNK;

/** Max number of blocks one RTHEAPPAGEBLOCKALLOCCHUNK can track (896/832). */
#define RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS    ((ARCH_BITS == 32 ? 28 : 26) * 32)
/** The chunk size for the block allocator. */
#define RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE    _64K
167
168
/**
 * Argument package for rtHeapPageAllocCallback.
 */
typedef struct RTHEAPPAGEALLOCARGS
{
    /** The number of pages to allocate. */
    size_t      cPages;
    /** Non-null on success. */
    void       *pvAlloc;
    /** RTMEMPAGEALLOC_F_XXX. */
    uint32_t    fFlags;
} RTHEAPPAGEALLOCARGS;
181
182
[57358]183/*********************************************************************************************************************************
184* Global Variables *
185*********************************************************************************************************************************/
[33269]186/** Initialize once structure. */
[101151]187static RTONCE g_MemPageHeapInitOnce = RTONCE_INITIALIZER;
[33269]188/** The page heap. */
[101151]189static RTHEAPPAGE g_MemPageHeap;
[33269]190/** The exec page heap. */
[101151]191static RTHEAPPAGE g_MemExecHeap;
[33269]192
193
[101148]194/**
[33269]195 * Initializes the heap.
196 *
197 * @returns IPRT status code.
198 * @param pHeap The page heap to initialize.
199 * @param fExec Whether the heap memory should be marked as
200 * executable or not.
201 */
[99775]202static int RTHeapPageInit(PRTHEAPPAGE pHeap, bool fExec)
[33269]203{
204 int rc = RTCritSectInitEx(&pHeap->CritSect,
205 RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
206 NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
207 if (RT_SUCCESS(rc))
208 {
209 pHeap->cHeapPages = 0;
210 pHeap->cFreePages = 0;
211 pHeap->cAllocCalls = 0;
212 pHeap->cFreeCalls = 0;
213 pHeap->uLastMinimizeCall = 0;
214 pHeap->BlockTree = NULL;
215 pHeap->fExec = fExec;
[101150]216 RTListInit(&pHeap->BlockAllocatorChunks);
[33269]217 pHeap->u32Magic = RTHEAPPAGE_MAGIC;
218 }
219 return rc;
220}
221
222
223/**
[39083]224 * Deletes the heap and all the memory it tracks.
[33269]225 *
226 * @returns IPRT status code.
227 * @param pHeap The page heap to delete.
228 */
[99775]229static int RTHeapPageDelete(PRTHEAPPAGE pHeap)
[33269]230{
[39083]231 NOREF(pHeap);
[101148]232 pHeap->u32Magic = ~RTHEAPPAGE_MAGIC;
233 return VINF_SUCCESS;
[33269]234}
235
236
/**
 * Allocates a RTHEAPPAGEBLOCK.
 *
 * @returns Pointer to RTHEAPPAGEBLOCK on success, NULL on failure.
 * @param   pHeap   The heap this is for.
 */
static PRTHEAPPAGEBLOCK rtHeapPageIntBlockAllocatorAlloc(PRTHEAPPAGE pHeap)
{
    /*
     * Locate a chunk with space and grab a block from it.
     */
    PRTHEAPPAGEBLOCKALLOCCHUNK pChunk;
    RTListForEach(&pHeap->BlockAllocatorChunks, pChunk, RTHEAPPAGEBLOCKALLOCCHUNK, ListEntry)
    {
        if (pChunk->cFree > 0)
        {
            /* Round the scan length up to a 64 multiple; safe because the
               bits beyond cBlocks are pre-set when the chunk is created
               (see ASMBitSetRange below), so they never scan as free. */
            uint32_t const cBits = RT_ALIGN_32(pChunk->cBlocks, 64);
            int idxBlock = ASMBitFirstClear(&pChunk->bmAlloc[0], RT_MIN(RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS, cBits));
            if (idxBlock >= 0)
            {
                ASMBitSet(&pChunk->bmAlloc[0], idxBlock);
                pChunk->cFree -= 1;
                return &pChunk->aBlocks[idxBlock];
            }
            AssertFailed(); /* cFree and the bitmap disagree. */
        }
    }

    /*
     * Allocate a new chunk and return the first block in it.
     */
    int rc = rtMemPageNativeAlloc(RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE, 0, (void **)&pChunk);
    AssertRCReturn(rc, NULL);
    pChunk->cBlocks = (RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE - RT_UOFFSETOF(RTHEAPPAGEBLOCKALLOCCHUNK, aBlocks))
                    / sizeof(pChunk->aBlocks[0]);
    AssertStmt(pChunk->cBlocks < RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS, pChunk->cBlocks = RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS);
    pChunk->cFree = pChunk->cBlocks;

    /* Mark the non-present tail entries as permanently allocated so the
       bitmap scan above can never hand them out. */
    RT_ZERO(pChunk->bmAlloc);
    ASMBitSetRange(pChunk->bmAlloc, pChunk->cBlocks, RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS);
    RTListPrepend(&pHeap->BlockAllocatorChunks, &pChunk->ListEntry);

    /*
     * Allocate the first one.
     */
    ASMBitSet(pChunk->bmAlloc, 0);
    pChunk->cFree -= 1;

    return &pChunk->aBlocks[0];
}
287
288
289/**
290 * Frees a RTHEAPPAGEBLOCK.
291 *
292 * @param pHeap The heap this is for.
293 * @param pBlock The block to free.
294 */
295static void rtHeapPageIntBlockAllocatorFree(PRTHEAPPAGE pHeap, PRTHEAPPAGEBLOCK pBlock)
296{
297 /*
298 * Locate the chunk the block belongs to and mark it as freed.
299 */
300 PRTHEAPPAGEBLOCKALLOCCHUNK pChunk;
301 RTListForEach(&pHeap->BlockAllocatorChunks, pChunk, RTHEAPPAGEBLOCKALLOCCHUNK, ListEntry)
302 {
303 if ((uintptr_t)pBlock - (uintptr_t)pChunk < RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE)
304 {
305 uintptr_t const idxBlock = (uintptr_t)(pBlock - &pChunk->aBlocks[0]);
306 if (ASMBitTestAndClear(&pChunk->bmAlloc[0], idxBlock))
307 pChunk->cFree++;
308 else
309 AssertMsgFailed(("pBlock=%p idxBlock=%#zx\n", pBlock, idxBlock));
310 return;
311 }
312 }
313 AssertFailed();
314}
315
316
/**
 * Applies flags to an allocation.
 *
 * @return  Flags that need to be reverted upon free.
 * @param   pv      The allocation.
 * @param   cb      The size of the allocation (page aligned).
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 */
DECLINLINE(uint32_t) rtMemPageApplyFlags(void *pv, size_t cb, uint32_t fFlags)
{
    uint32_t fHandled = 0;
    /* The advice flags are passed to the native layer, which reports back
       which of them actually took effect. */
    if (fFlags & (RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP))
        fHandled = rtMemPageNativeApplyFlags(pv, cb, fFlags);
    if (fFlags & RTMEMPAGEALLOC_F_ZERO)
        RT_BZERO(pv, cb);
    return fHandled;
}
334
335
/**
 * Avoids some gotos in rtHeapPageAllocFromBlock.
 *
 * Performs the bookkeeping for a successful allocation; the caller has
 * already set the relevant bits in pBlock->bmAlloc.
 *
 * @returns VINF_SUCCESS.
 * @param   pBlock  The block.
 * @param   iPage   The page to start allocating at.
 * @param   cPages  The number of pages.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the allocation address.
 */
DECLINLINE(int) rtHeapPageAllocFromBlockSuccess(PRTHEAPPAGEBLOCK pBlock, uint32_t iPage, size_t cPages, uint32_t fFlags, void **ppv)
{
    PRTHEAPPAGE pHeap = pBlock->pHeap;

    /* Mark the allocation start and update the free-page accounting. */
    ASMBitSet(&pBlock->bmFirst[0], iPage);
    pBlock->cFreePages -= (uint32_t)cPages;
    pHeap->cFreePages  -= (uint32_t)cPages;
    if (!pHeap->pHint2 || pHeap->pHint2->cFreePages < pBlock->cFreePages)
        pHeap->pHint2 = pBlock;
    pHeap->cAllocCalls++;

    void *pv = (uint8_t *)pBlock->Core.Key + (iPage << PAGE_SHIFT);
    *ppv = pv;

    if (fFlags)
    {
        /* Remember which advice flags actually took effect so they can be
           reverted when the pages are freed (see RTHeapPageFree). */
        uint32_t fHandled = rtMemPageApplyFlags(pv, cPages << PAGE_SHIFT, fFlags);
        Assert(!(fHandled & ~(RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)));
        if (fHandled & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
            ASMBitSetRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
        if (fHandled & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
            ASMBitSetRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
    }

    return VINF_SUCCESS;
}
372
373
374/**
375 * Checks if a page range is free in the specified block.
376 *
377 * @returns @c true if the range is free, @c false if not.
378 * @param pBlock The block.
379 * @param iFirst The first page to check.
380 * @param cPages The number of pages to check.
381 */
382DECLINLINE(bool) rtHeapPageIsPageRangeFree(PRTHEAPPAGEBLOCK pBlock, uint32_t iFirst, uint32_t cPages)
383{
384 uint32_t i = iFirst + cPages;
385 while (i-- > iFirst)
386 {
387 if (ASMBitTest(&pBlock->bmAlloc[0], i))
388 return false;
389 Assert(!ASMBitTest(&pBlock->bmFirst[0], i));
390 }
391 return true;
392}
393
394
/**
 * Tries to allocate a chunk of pages from a heap block.
 *
 * Uses a simple first-fit scan over the allocation bitmap.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if the allocation failed.
 * @param   pBlock  The block to allocate from.
 * @param   cPages  The size of the allocation.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the allocation address on success.
 */
DECLINLINE(int) rtHeapPageAllocFromBlock(PRTHEAPPAGEBLOCK pBlock, size_t cPages, uint32_t fFlags, void **ppv)
{
    if (pBlock->cFreePages >= cPages)
    {
        int iPage = ASMBitFirstClear(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT);
        Assert(iPage >= 0); /* cFreePages > 0, so there must be a clear bit. */

        /* special case: single page. */
        if (cPages == 1)
        {
            ASMBitSet(&pBlock->bmAlloc[0], iPage);
            return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
        }

        /* Scan each run of clear bits until one is large enough. */
        while (   iPage >= 0
               && (unsigned)iPage <= RTMEMPAGE_BLOCK_PAGE_COUNT - cPages)
        {
            if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, (uint32_t)cPages - 1))
            {
                ASMBitSetRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
            }

            /* next: skip past the current allocated run to the start of the
               next free run, if any. */
            iPage = ASMBitNextSet(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT, iPage);
            if (iPage < 0 || (unsigned)iPage >= RTMEMPAGE_BLOCK_PAGE_COUNT - 1)
                break;
            iPage = ASMBitNextClear(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT, iPage);
        }
    }

    return VERR_NO_MEMORY;
}
438
439
440/**
441 * RTAvlrPVDoWithAll callback.
442 *
443 * @returns 0 to continue the enum, non-zero to quit it.
444 * @param pNode The node.
445 * @param pvUser The user argument.
446 */
447static DECLCALLBACK(int) rtHeapPageAllocCallback(PAVLRPVNODECORE pNode, void *pvUser)
448{
449 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
450 RTHEAPPAGEALLOCARGS *pArgs = (RTHEAPPAGEALLOCARGS *)pvUser;
[78334]451 int rc = rtHeapPageAllocFromBlock(pBlock, pArgs->cPages, pArgs->fFlags, &pArgs->pvAlloc);
[33269]452 return RT_SUCCESS(rc) ? 1 : 0;
453}
454
455
/**
 * Worker for RTHeapPageAlloc.
 *
 * @returns IPRT status code
 * @param   pHeap   The heap - locked.
 * @param   cPages  The page count.
 * @param   pszTag  The tag.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the address of the allocation
 *                  on success.
 */
static int rtHeapPageAllocLocked(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
{
    int rc;
    NOREF(pszTag);

    /*
     * Use the hints first.
     */
    if (pHeap->pHint1)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint1, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }
    if (pHeap->pHint2)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint2, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }

    /*
     * Search the heap for a block with enough free space.
     *
     * N.B. This search algorithm is not optimal at all. What (hopefully) saves
     *      it are the two hints above.
     */
    if (pHeap->cFreePages >= cPages)
    {
        RTHEAPPAGEALLOCARGS Args;
        Args.cPages  = cPages;
        Args.pvAlloc = NULL;
        Args.fFlags  = fFlags;
        RTAvlrPVDoWithAll(&pHeap->BlockTree, true /*fFromLeft*/, rtHeapPageAllocCallback, &Args);
        if (Args.pvAlloc)
        {
            *ppv = Args.pvAlloc;
            return VINF_SUCCESS;
        }
    }

    /*
     * Didn't find anything, so expand the heap with a new block.
     */
    /* Grab the tracking structure while still holding the lock... */
    PRTHEAPPAGEBLOCK const pBlock = rtHeapPageIntBlockAllocatorAlloc(pHeap);
    AssertReturn(pBlock, VERR_NO_MEMORY);

    /* ... but drop it around the (potentially slow) native allocation. */
    RTCritSectLeave(&pHeap->CritSect);

    void *pvPages = NULL;
    rc = rtMemPageNativeAlloc(RTMEMPAGE_BLOCK_SIZE, pHeap->fExec ? RTMEMPAGEALLOC_F_EXECUTABLE : 0, &pvPages);

    RTCritSectEnter(&pHeap->CritSect);
    if (RT_FAILURE(rc))
    {
        rtHeapPageIntBlockAllocatorFree(pHeap, pBlock);
        return rc;
    }

    /* Initialize the new block and link it into the tree. */
    RT_ZERO(*pBlock);
    pBlock->Core.Key     = pvPages;
    pBlock->Core.KeyLast = (uint8_t *)pvPages + RTMEMPAGE_BLOCK_SIZE - 1;
    pBlock->cFreePages   = RTMEMPAGE_BLOCK_PAGE_COUNT;
    pBlock->pHeap        = pHeap;

    bool fRc = RTAvlrPVInsert(&pHeap->BlockTree, &pBlock->Core); Assert(fRc); NOREF(fRc);
    pHeap->cFreePages += RTMEMPAGE_BLOCK_PAGE_COUNT;
    pHeap->cHeapPages += RTMEMPAGE_BLOCK_PAGE_COUNT;

    /*
     * Grab memory from the new block (cannot fail).
     */
    rc = rtHeapPageAllocFromBlock(pBlock, cPages, fFlags, ppv);
    Assert(rc == VINF_SUCCESS);

    return rc;
}
544
545
546/**
547 * Allocates one or more pages off the heap.
548 *
549 * @returns IPRT status code.
550 * @param pHeap The page heap.
551 * @param cPages The number of pages to allocate.
552 * @param pszTag The allocation tag.
[78334]553 * @param fFlags RTMEMPAGEALLOC_F_XXX.
[33269]554 * @param ppv Where to return the pointer to the pages.
555 */
[99775]556static int RTHeapPageAlloc(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
[33269]557{
558 /*
559 * Validate input.
560 */
561 AssertPtr(ppv);
562 *ppv = NULL;
563 AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
564 AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);
[101151]565 AssertMsgReturn(cPages < RTMEMPAGE_BLOCK_SIZE, ("%#zx\n", cPages), VERR_OUT_OF_RANGE);
[33269]566
567 /*
568 * Grab the lock and call a worker with many returns.
569 */
570 int rc = RTCritSectEnter(&pHeap->CritSect);
571 if (RT_SUCCESS(rc))
572 {
[78334]573 rc = rtHeapPageAllocLocked(pHeap, cPages, pszTag, fFlags, ppv);
[33269]574 RTCritSectLeave(&pHeap->CritSect);
575 }
576
577 return rc;
578}
579
580
581/**
582 * RTAvlrPVDoWithAll callback.
583 *
584 * @returns 0 to continue the enum, non-zero to quit it.
585 * @param pNode The node.
586 * @param pvUser Pointer to a block pointer variable. For returning
587 * the address of the block to be freed.
588 */
589static DECLCALLBACK(int) rtHeapPageFindUnusedBlockCallback(PAVLRPVNODECORE pNode, void *pvUser)
590{
591 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
[101151]592 if (pBlock->cFreePages == RTMEMPAGE_BLOCK_PAGE_COUNT)
[33269]593 {
594 *(PRTHEAPPAGEBLOCK *)pvUser = pBlock;
595 return 1;
596 }
597 return 0;
598}
599
600
/**
 * Frees an allocation.
 *
 * @returns IPRT status code.
 * @retval  VERR_NOT_FOUND if pv isn't within any of the memory blocks in the
 *          heap.
 * @retval  VERR_INVALID_POINTER if the given memory range isn't exactly one
 *          allocation block.
 * @param   pHeap   The page heap.
 * @param   pv      Pointer to what RTHeapPageAlloc returned.
 * @param   cPages  The number of pages that was allocated.
 */
static int RTHeapPageFree(PRTHEAPPAGE pHeap, void *pv, size_t cPages)
{
    /*
     * Validate input.
     */
    if (!pv)
        return VINF_SUCCESS;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Grab the lock and look up the page.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)RTAvlrPVRangeGet(&pHeap->BlockTree, pv);
        if (pBlock)
        {
            /*
             * Validate the specified address range.
             */
            uint32_t const iPage = (uint32_t)(((uintptr_t)pv - (uintptr_t)pBlock->Core.Key) >> PAGE_SHIFT);
            /* Check the range is within the block. */
            bool fOk = iPage + cPages <= RTMEMPAGE_BLOCK_PAGE_COUNT;
            /* Check that it's the start of an allocation. */
            fOk = fOk && ASMBitTest(&pBlock->bmFirst[0], iPage);
            /* Check that the range ends at an allocation boundary. */
            fOk = fOk && (   iPage + cPages == RTMEMPAGE_BLOCK_PAGE_COUNT
                          || ASMBitTest(&pBlock->bmFirst[0], iPage + (uint32_t)cPages)
                          || !ASMBitTest(&pBlock->bmAlloc[0], iPage + (uint32_t)cPages));
            /* Check the other pages: all allocated, none an allocation start. */
            uint32_t const iLastPage = iPage + (uint32_t)cPages - 1;
            for (uint32_t i = iPage + 1; i < iLastPage && fOk; i++)
                fOk = ASMBitTest(&pBlock->bmAlloc[0], i)
                   && !ASMBitTest(&pBlock->bmFirst[0], i);
            if (fOk)
            {
                /*
                 * Free the memory, first reverting any advice flags that were
                 * applied at allocation time (see rtHeapPageAllocFromBlockSuccess).
                 */
                uint32_t fRevert = (ASMBitTest(&pBlock->bmLockedAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_LOCKED : 0)
                                 | (ASMBitTest(&pBlock->bmNoDumpAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_NO_DUMP : 0);
                if (fRevert)
                {
                    rtMemPageNativeRevertFlags(pv, cPages << PAGE_SHIFT, fRevert);
                    ASMBitClearRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
                    ASMBitClearRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
                }
                ASMBitClearRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                ASMBitClear(&pBlock->bmFirst[0], iPage);
                pBlock->cFreePages += (uint32_t)cPages;
                pHeap->cFreePages  += (uint32_t)cPages;
                pHeap->cFreeCalls++;
                if (!pHeap->pHint1 || pHeap->pHint1->cFreePages < pBlock->cFreePages)
                    pHeap->pHint1 = pBlock;

                /*
                 * Shrink the heap. Not very efficient because of the AVL tree.
                 */
                if (   pHeap->cFreePages >= RTMEMPAGE_BLOCK_PAGE_COUNT * 3
                    && pHeap->cFreePages >= pHeap->cHeapPages / 2 /* 50% free */
                    && pHeap->cFreeCalls - pHeap->uLastMinimizeCall > RTMEMPAGE_BLOCK_PAGE_COUNT
                   )
                {
                    uint32_t cFreePageTarget = pHeap->cHeapPages / 4; /* 25% free */
                    while (pHeap->cFreePages > cFreePageTarget)
                    {
                        pHeap->uLastMinimizeCall = pHeap->cFreeCalls;

                        pBlock = NULL;
                        RTAvlrPVDoWithAll(&pHeap->BlockTree, false /*fFromLeft*/,
                                          rtHeapPageFindUnusedBlockCallback, &pBlock);
                        if (!pBlock)
                            break;

                        /* Unlink and invalidate the hints before dropping the lock. */
                        void *pv2 = RTAvlrPVRemove(&pHeap->BlockTree, pBlock->Core.Key); Assert(pv2); NOREF(pv2);
                        pHeap->cHeapPages -= RTMEMPAGE_BLOCK_PAGE_COUNT;
                        pHeap->cFreePages -= RTMEMPAGE_BLOCK_PAGE_COUNT;
                        pHeap->pHint1 = NULL;
                        pHeap->pHint2 = NULL;
                        RTCritSectLeave(&pHeap->CritSect);

                        /* NOTE(review): rtHeapPageIntBlockAllocatorFree walks
                           BlockAllocatorChunks here without holding CritSect -
                           confirm this is intended/safe. */
                        rtMemPageNativeFree(pBlock->Core.Key, RTMEMPAGE_BLOCK_SIZE);
                        pBlock->Core.Key = pBlock->Core.KeyLast = NULL;
                        pBlock->cFreePages = 0;
                        rtHeapPageIntBlockAllocatorFree(pHeap, pBlock);

                        RTCritSectEnter(&pHeap->CritSect);
                    }
                }
            }
            else
                rc = VERR_INVALID_POINTER;
        }
        else
            rc = VERR_NOT_FOUND; /* Distinct return code for this so RTMemPageFree and others can try alternative heaps. */

        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}
718
719
720/**
721 * Initializes the heap.
722 *
723 * @returns IPRT status code
[43879]724 * @param pvUser Unused.
[33269]725 */
[101151]726static DECLCALLBACK(int) rtMemPageInitOnce(void *pvUser)
[33269]727{
[43879]728 NOREF(pvUser);
[101151]729 int rc = RTHeapPageInit(&g_MemPageHeap, false /*fExec*/);
[33269]730 if (RT_SUCCESS(rc))
731 {
[101151]732 rc = RTHeapPageInit(&g_MemExecHeap, true /*fExec*/);
[33269]733 if (RT_SUCCESS(rc))
734 return rc;
[101151]735 RTHeapPageDelete(&g_MemPageHeap);
[33269]736 }
737 return rc;
738}
739
740
741/**
742 * Allocates memory from the specified heap.
743 *
744 * @returns Address of the allocated memory.
745 * @param cb The number of bytes to allocate.
746 * @param pszTag The tag.
[78334]747 * @param fFlags RTMEMPAGEALLOC_F_XXX.
[33269]748 * @param pHeap The heap to use.
749 */
[101151]750static void *rtMemPageAllocInner(size_t cb, const char *pszTag, uint32_t fFlags, PRTHEAPPAGE pHeap)
[33269]751{
752 /*
753 * Validate & adjust the input.
754 */
755 Assert(cb > 0);
756 NOREF(pszTag);
757 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
758
759 /*
[101148]760 * If the allocation is relatively large, we use mmap/VirtualAlloc/DosAllocMem directly.
[33269]761 */
[99779]762 void *pv = NULL; /* shut up gcc */
[101151]763 if (cb >= RTMEMPAGE_NATIVE_THRESHOLD)
[33269]764 {
[101148]765 int rc = rtMemPageNativeAlloc(cb, fFlags, &pv);
766 if (RT_SUCCESS(rc))
[33269]767 {
768 AssertPtr(pv);
[78334]769
770 if (fFlags)
[101151]771 rtMemPageApplyFlags(pv, cb, fFlags);
[33269]772 }
773 else
774 pv = NULL;
775 }
776 else
777 {
[101151]778 int rc = RTOnce(&g_MemPageHeapInitOnce, rtMemPageInitOnce, NULL);
[33269]779 if (RT_SUCCESS(rc))
[78334]780 rc = RTHeapPageAlloc(pHeap, cb >> PAGE_SHIFT, pszTag, fFlags, &pv);
[33269]781 if (RT_FAILURE(rc))
782 pv = NULL;
783 }
784
785 return pv;
786}
787
788
[101151]789RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
[33269]790{
[101151]791 return rtMemPageAllocInner(cb, pszTag, 0, &g_MemPageHeap);
792}
793
794
795RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
796{
797 return rtMemPageAllocInner(cb, pszTag, RTMEMPAGEALLOC_F_ZERO, &g_MemPageHeap);
798}
799
800
801RTDECL(void *) RTMemPageAllocExTag(size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
802{
803 AssertReturn(!(fFlags & ~RTMEMPAGEALLOC_F_VALID_MASK), NULL);
804 return rtMemPageAllocInner(cb, pszTag, fFlags,
805 !(fFlags & RTMEMPAGEALLOC_F_EXECUTABLE) ? &g_MemPageHeap : &g_MemExecHeap);
806}
807
808
809RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW_DEF
810{
[33269]811 /*
812 * Validate & adjust the input.
813 */
814 if (!pv)
815 return;
816 AssertPtr(pv);
817 Assert(cb > 0);
818 Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
819 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
820
821 /*
[101148]822 * If the allocation is relatively large, we used mmap/VirtualAlloc/DosAllocMem directly.
[33269]823 */
[101151]824 if (cb >= RTMEMPAGE_NATIVE_THRESHOLD)
[101148]825 rtMemPageNativeFree(pv, cb);
[33269]826 else
827 {
[101151]828 int rc = RTHeapPageFree(&g_MemPageHeap, pv, cb >> PAGE_SHIFT);
[101142]829 if (rc == VERR_NOT_FOUND)
[101151]830 rc = RTHeapPageFree(&g_MemExecHeap, pv, cb >> PAGE_SHIFT);
[33269]831 AssertRC(rc);
832 }
833}
834
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use