VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/mempage-heap.cpp@101301

Last change on this file since 101301 was 101301, checked in by vboxsync, 8 months ago

IPRT: Nit. bugref:10371

/* $Id: mempage-heap.cpp 101301 2023-09-28 00:26:16Z vboxsync $ */
/** @file
 * IPRT - RTMemPage*, optimized using heap.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "internal/iprt.h"
#include <iprt/mem.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/avl.h>
#include <iprt/critsect.h>
#include <iprt/errcore.h>
#include <iprt/list.h>
#include <iprt/once.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include "internal/mem.h"



/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Threshold at which we switch to simply calling mmap. */
#define RTMEMPAGE_NATIVE_THRESHOLD _1M
/** The size of a heap block (power of two) - in bytes. */
#define RTMEMPAGE_BLOCK_SIZE _4M

/** The number of pages per heap block. */
#define RTMEMPAGE_BLOCK_PAGE_COUNT (RTMEMPAGE_BLOCK_SIZE / PAGE_SIZE)
AssertCompile(RTMEMPAGE_BLOCK_SIZE == RTMEMPAGE_BLOCK_PAGE_COUNT * PAGE_SIZE);
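/* Sizing sketch (illustrative, not from the original file; assumes the common
 * 4 KiB PAGE_SIZE): a 4 MiB block then covers 4M / 4K = 1024 pages, so each
 * of the per-block page bitmaps declared below needs 1024 / 32 = 32 uint32_t
 * words. */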


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/** Pointer to a page heap block. */
typedef struct RTHEAPPAGEBLOCK *PRTHEAPPAGEBLOCK;

/**
 * A simple page heap.
 */
typedef struct RTHEAPPAGE
{
    /** Magic number (RTHEAPPAGE_MAGIC). */
    uint32_t u32Magic;
    /** The number of pages in the heap (in BlockTree). */
    uint32_t cHeapPages;
    /** The number of currently free pages. */
    uint32_t cFreePages;
    /** Number of successful allocation calls. */
    uint32_t cAllocCalls;
    /** Number of successful free calls. */
    uint32_t cFreeCalls;
    /** The free call number at which we last tried to minimize the heap. */
    uint32_t uLastMinimizeCall;
    /** Tree of heap blocks. */
    AVLRPVTREE BlockTree;
    /** Allocation hint no 1 (last freed). */
    PRTHEAPPAGEBLOCK pHint1;
    /** Allocation hint no 2 (last alloc). */
    PRTHEAPPAGEBLOCK pHint2;
    /** The allocation chunks for the RTHEAPPAGEBLOCK allocator
     * (RTHEAPPAGEBLOCKALLOCCHUNK). */
    RTLISTANCHOR BlockAllocatorChunks;
    /** Critical section protecting the heap. */
    RTCRITSECT CritSect;
    /** Set if the memory must be allocated with execute access. */
    bool fExec;
} RTHEAPPAGE;
#define RTHEAPPAGE_MAGIC UINT32_C(0xfeedface)
/** Pointer to a page heap. */
typedef RTHEAPPAGE *PRTHEAPPAGE;


/**
 * Describes a page heap block.
 */
typedef struct RTHEAPPAGEBLOCK
{
    /** The AVL tree node core (void pointer range). */
    AVLRPVNODECORE Core;
    /** The number of free pages. */
    uint32_t cFreePages;
    /** Pointer back to the heap. */
    PRTHEAPPAGE pHeap;
    /** Allocation bitmap. Set bits mark allocated pages. */
    uint32_t bmAlloc[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    /** Allocation boundary bitmap. Set bits mark the start of
     * allocations. */
    uint32_t bmFirst[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_LOCKED has been
     * successfully applied. */
    uint32_t bmLockedAdviced[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_NO_DUMP has been
     * successfully applied. */
    uint32_t bmNoDumpAdviced[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
} RTHEAPPAGEBLOCK;


/**
 * Allocation chunk of RTHEAPPAGEBLOCK structures.
 *
 * This is backed by a 64KB allocation and non-present blocks will be marked as
 * allocated in bmAlloc.
 */
typedef struct RTHEAPPAGEBLOCKALLOCCHUNK
{
    /** List entry. */
    RTLISTNODE ListEntry;
    /** Number of free RTHEAPPAGEBLOCK structures here. */
    uint32_t cFree;
    /** Number of blocks in aBlocks. */
    uint32_t cBlocks;
    /** Allocation bitmap. */
    uint32_t bmAlloc[ARCH_BITS == 32 ? 28 : 26];
    /** Block array. */
    RT_FLEXIBLE_ARRAY_EXTENSION
    RTHEAPPAGEBLOCK aBlocks[RT_FLEXIBLE_ARRAY];
} RTHEAPPAGEBLOCKALLOCCHUNK;
AssertCompileMemberAlignment(RTHEAPPAGEBLOCKALLOCCHUNK, bmAlloc, 8);
AssertCompileMemberAlignment(RTHEAPPAGEBLOCKALLOCCHUNK, aBlocks, 64);
/** Pointer to an allocation chunk of RTHEAPPAGEBLOCK structures. */
typedef RTHEAPPAGEBLOCKALLOCCHUNK *PRTHEAPPAGEBLOCKALLOCCHUNK;

/** Max number of blocks one RTHEAPPAGEBLOCKALLOCCHUNK can track (896/832). */
#define RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS ((ARCH_BITS == 32 ? 28 : 26) * 32)
/** The chunk size for the block allocator. */
#define RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE _64K
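/* Consistency sketch (illustrative, not from the original file): the bitmap
 * capacity implied by the declarations above is 28 * 32 = 896 block slots on
 * 32-bit hosts and 26 * 32 = 832 on 64-bit ones; far more than the number of
 * RTHEAPPAGEBLOCK structures that actually fit in one 64KB chunk. */
AssertCompile(RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS == (ARCH_BITS == 32 ? 896 : 832));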


/**
 * Argument package for rtHeapPageAllocCallback.
 */
typedef struct RTHEAPPAGEALLOCARGS
{
    /** The number of pages to allocate. */
    size_t cPages;
    /** Non-null on success. */
    void *pvAlloc;
    /** RTMEMPAGEALLOC_F_XXX. */
    uint32_t fFlags;
} RTHEAPPAGEALLOCARGS;


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Initialize once structure. */
static RTONCE g_MemPageHeapInitOnce = RTONCE_INITIALIZER;
/** The page heap. */
static RTHEAPPAGE g_MemPageHeap;
/** The exec page heap. */
static RTHEAPPAGE g_MemExecHeap;


/**
 * Initializes the heap.
 *
 * @returns IPRT status code.
 * @param   pHeap   The page heap to initialize.
 * @param   fExec   Whether the heap memory should be marked as
 *                  executable or not.
 */
static int RTHeapPageInit(PRTHEAPPAGE pHeap, bool fExec)
{
    int rc = RTCritSectInitEx(&pHeap->CritSect,
                              RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
                              NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
    if (RT_SUCCESS(rc))
    {
        pHeap->cHeapPages = 0;
        pHeap->cFreePages = 0;
        pHeap->cAllocCalls = 0;
        pHeap->cFreeCalls = 0;
        pHeap->uLastMinimizeCall = 0;
        pHeap->BlockTree = NULL;
        pHeap->fExec = fExec;
        RTListInit(&pHeap->BlockAllocatorChunks);
        pHeap->u32Magic = RTHEAPPAGE_MAGIC;
    }
    return rc;
}


/**
 * Deletes the heap and all the memory it tracks.
 *
 * @returns IPRT status code.
 * @param   pHeap   The page heap to delete.
 */
static int RTHeapPageDelete(PRTHEAPPAGE pHeap)
{
    NOREF(pHeap);
    pHeap->u32Magic = ~RTHEAPPAGE_MAGIC;
    return VINF_SUCCESS;
}


/**
 * Allocates a RTHEAPPAGEBLOCK.
 *
 * @returns Pointer to RTHEAPPAGEBLOCK on success, NULL on failure.
 * @param   pHeap   The heap this is for.
 */
static PRTHEAPPAGEBLOCK rtHeapPageIntBlockAllocatorAlloc(PRTHEAPPAGE pHeap)
{
    /*
     * Locate a chunk with space and grab a block from it.
     */
    PRTHEAPPAGEBLOCKALLOCCHUNK pChunk;
    RTListForEach(&pHeap->BlockAllocatorChunks, pChunk, RTHEAPPAGEBLOCKALLOCCHUNK, ListEntry)
    {
        if (pChunk->cFree > 0)
        {
            uint32_t const cBits = RT_ALIGN_32(pChunk->cBlocks, 64);
            int idxBlock = ASMBitFirstClear(&pChunk->bmAlloc[0], RT_MIN(RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS, cBits));
            if (idxBlock >= 0)
            {
                ASMBitSet(&pChunk->bmAlloc[0], idxBlock);
                pChunk->cFree -= 1;
                return &pChunk->aBlocks[idxBlock];
            }
            AssertFailed();
        }
    }

    /*
     * Allocate a new chunk and return the first block in it.
     */
    int rc = rtMemPageNativeAlloc(RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE, 0, (void **)&pChunk);
    AssertRCReturn(rc, NULL);
    pChunk->cBlocks = (RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE - RT_UOFFSETOF(RTHEAPPAGEBLOCKALLOCCHUNK, aBlocks))
                    / sizeof(pChunk->aBlocks[0]);
    AssertStmt(pChunk->cBlocks < RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS, pChunk->cBlocks = RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS);
    pChunk->cFree = pChunk->cBlocks;

    RT_ZERO(pChunk->bmAlloc);
    ASMBitSetRange(pChunk->bmAlloc, pChunk->cBlocks, RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS);
    RTListPrepend(&pHeap->BlockAllocatorChunks, &pChunk->ListEntry);

    /*
     * Allocate the first one.
     */
    ASMBitSet(pChunk->bmAlloc, 0);
    pChunk->cFree -= 1;

    return &pChunk->aBlocks[0];
}


/**
 * Frees a RTHEAPPAGEBLOCK.
 *
 * @param   pHeap   The heap this is for.
 * @param   pBlock  The block to free.
 */
static void rtHeapPageIntBlockAllocatorFree(PRTHEAPPAGE pHeap, PRTHEAPPAGEBLOCK pBlock)
{
    /*
     * Locate the chunk the block belongs to and mark it as freed.
     */
    PRTHEAPPAGEBLOCKALLOCCHUNK pChunk;
    RTListForEach(&pHeap->BlockAllocatorChunks, pChunk, RTHEAPPAGEBLOCKALLOCCHUNK, ListEntry)
    {
        if ((uintptr_t)pBlock - (uintptr_t)pChunk < RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE)
        {
            uintptr_t const idxBlock = (uintptr_t)(pBlock - &pChunk->aBlocks[0]);
            if (ASMBitTestAndClear(&pChunk->bmAlloc[0], idxBlock))
                pChunk->cFree++;
            else
                AssertMsgFailed(("pBlock=%p idxBlock=%#zx\n", pBlock, idxBlock));
            return;
        }
    }
    AssertFailed();
}


/**
 * Applies flags to an allocation.
 *
 * @returns Flags that need to be reverted upon free.
 * @param   pv      The allocation.
 * @param   cb      The size of the allocation (page aligned).
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 */
DECLINLINE(uint32_t) rtMemPageApplyFlags(void *pv, size_t cb, uint32_t fFlags)
{
    uint32_t fHandled = 0;
    if (fFlags & (RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP))
        fHandled = rtMemPageNativeApplyFlags(pv, cb, fFlags);
    if (fFlags & RTMEMPAGEALLOC_F_ZERO)
        RT_BZERO(pv, cb);
    return fHandled;
}
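/* Behaviour sketch (illustrative, not from the original file): for a request
 * with fFlags = RTMEMPAGEALLOC_F_ZERO | RTMEMPAGEALLOC_F_ADVISE_LOCKED, the
 * zeroing is always done right here via RT_BZERO, while only the advice flags
 * that the native layer actually managed to apply come back in the return
 * value; those are the ones the caller records so they can be reverted again
 * when the pages are freed. */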


/**
 * Avoids some gotos in rtHeapPageAllocFromBlock.
 *
 * @returns VINF_SUCCESS.
 * @param   pBlock  The block.
 * @param   iPage   The page to start allocating at.
 * @param   cPages  The number of pages.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the allocation address.
 */
DECLINLINE(int) rtHeapPageAllocFromBlockSuccess(PRTHEAPPAGEBLOCK pBlock, uint32_t iPage, size_t cPages, uint32_t fFlags, void **ppv)
{
    PRTHEAPPAGE pHeap = pBlock->pHeap;

    ASMBitSet(&pBlock->bmFirst[0], iPage);
    pBlock->cFreePages -= (uint32_t)cPages;
    pHeap->cFreePages  -= (uint32_t)cPages;
    if (!pHeap->pHint2 || pHeap->pHint2->cFreePages < pBlock->cFreePages)
        pHeap->pHint2 = pBlock;
    pHeap->cAllocCalls++;

    void *pv = (uint8_t *)pBlock->Core.Key + (iPage << PAGE_SHIFT);
    *ppv = pv;

    if (fFlags)
    {
        uint32_t fHandled = rtMemPageApplyFlags(pv, cPages << PAGE_SHIFT, fFlags);
        Assert(!(fHandled & ~(RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)));
        if (fHandled & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
            ASMBitSetRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
        if (fHandled & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
            ASMBitSetRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
    }

    return VINF_SUCCESS;
}


/**
 * Checks if a page range is free in the specified block.
 *
 * @returns @c true if the range is free, @c false if not.
 * @param   pBlock  The block.
 * @param   iFirst  The first page to check.
 * @param   cPages  The number of pages to check.
 */
DECLINLINE(bool) rtHeapPageIsPageRangeFree(PRTHEAPPAGEBLOCK pBlock, uint32_t iFirst, uint32_t cPages)
{
    uint32_t i = iFirst + cPages;
    while (i-- > iFirst)
    {
        if (ASMBitTest(&pBlock->bmAlloc[0], i))
            return false;
        Assert(!ASMBitTest(&pBlock->bmFirst[0], i));
    }
    return true;
}


/**
 * Tries to allocate a chunk of pages from a heap block.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if the allocation failed.
 * @param   pBlock  The block to allocate from.
 * @param   cPages  The size of the allocation.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the allocation address on success.
 */
DECLINLINE(int) rtHeapPageAllocFromBlock(PRTHEAPPAGEBLOCK pBlock, size_t cPages, uint32_t fFlags, void **ppv)
{
    if (pBlock->cFreePages >= cPages)
    {
        int iPage = ASMBitFirstClear(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT);
        Assert(iPage >= 0);

        /* special case: single page. */
        if (cPages == 1)
        {
            ASMBitSet(&pBlock->bmAlloc[0], iPage);
            return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
        }

        while (   iPage >= 0
               && (unsigned)iPage <= RTMEMPAGE_BLOCK_PAGE_COUNT - cPages)
        {
            if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, (uint32_t)cPages - 1))
            {
                ASMBitSetRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
            }

            /* next */
            iPage = ASMBitNextSet(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT, iPage);
            if (iPage < 0 || (unsigned)iPage >= RTMEMPAGE_BLOCK_PAGE_COUNT - 1)
                break;
            iPage = ASMBitNextClear(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT, iPage);
        }
    }

    return VERR_NO_MEMORY;
}
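/* Bitmap sketch (illustrative, not from the original file): after a 3-page
 * allocation A at page 0 followed by a 2-page allocation B at page 3,
 * bmAlloc has bits 0-4 set while bmFirst has only bits 0 and 3 set.  The
 * boundary bits in bmFirst are what later let RTHeapPageFree verify that a
 * free of (A, 3 pages) or (B, 2 pages) matches exactly one allocation, and
 * reject, say, a 5-page free starting at page 0. */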


/**
 * RTAvlrPVDoWithAll callback.
 *
 * @returns 0 to continue the enum, non-zero to quit it.
 * @param   pNode   The node.
 * @param   pvUser  The user argument.
 */
static DECLCALLBACK(int) rtHeapPageAllocCallback(PAVLRPVNODECORE pNode, void *pvUser)
{
    PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
    RTHEAPPAGEALLOCARGS *pArgs = (RTHEAPPAGEALLOCARGS *)pvUser;
    int rc = rtHeapPageAllocFromBlock(pBlock, pArgs->cPages, pArgs->fFlags, &pArgs->pvAlloc);
    return RT_SUCCESS(rc) ? 1 : 0;
}


/**
 * Worker for RTHeapPageAlloc.
 *
 * @returns IPRT status code
 * @param   pHeap   The heap - locked.
 * @param   cPages  The page count.
 * @param   pszTag  The tag.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the address of the allocation
 *                  on success.
 */
static int rtHeapPageAllocLocked(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
{
    int rc;
    NOREF(pszTag);

    /*
     * Use the hints first.
     */
    if (pHeap->pHint1)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint1, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }
    if (pHeap->pHint2)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint2, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }

    /*
     * Search the heap for a block with enough free space.
     *
     * N.B. This search algorithm is not optimal at all. What (hopefully) saves
     *      it are the two hints above.
     */
    if (pHeap->cFreePages >= cPages)
    {
        RTHEAPPAGEALLOCARGS Args;
        Args.cPages  = cPages;
        Args.pvAlloc = NULL;
        Args.fFlags  = fFlags;
        RTAvlrPVDoWithAll(&pHeap->BlockTree, true /*fFromLeft*/, rtHeapPageAllocCallback, &Args);
        if (Args.pvAlloc)
        {
            *ppv = Args.pvAlloc;
            return VINF_SUCCESS;
        }
    }

    /*
     * Didn't find anything, so expand the heap with a new block.
     */
    PRTHEAPPAGEBLOCK const pBlock = rtHeapPageIntBlockAllocatorAlloc(pHeap);
    AssertReturn(pBlock, VERR_NO_MEMORY);

    RTCritSectLeave(&pHeap->CritSect);

    void *pvPages = NULL;
    rc = rtMemPageNativeAlloc(RTMEMPAGE_BLOCK_SIZE, pHeap->fExec ? RTMEMPAGEALLOC_F_EXECUTABLE : 0, &pvPages);

    RTCritSectEnter(&pHeap->CritSect);
    if (RT_FAILURE(rc))
    {
        rtHeapPageIntBlockAllocatorFree(pHeap, pBlock);
        return rc;
    }

    RT_ZERO(*pBlock);
    pBlock->Core.Key     = pvPages;
    pBlock->Core.KeyLast = (uint8_t *)pvPages + RTMEMPAGE_BLOCK_SIZE - 1;
    pBlock->cFreePages   = RTMEMPAGE_BLOCK_PAGE_COUNT;
    pBlock->pHeap        = pHeap;

    bool fRc = RTAvlrPVInsert(&pHeap->BlockTree, &pBlock->Core); Assert(fRc); NOREF(fRc);
    pHeap->cFreePages += RTMEMPAGE_BLOCK_PAGE_COUNT;
    pHeap->cHeapPages += RTMEMPAGE_BLOCK_PAGE_COUNT;

    /*
     * Grab memory from the new block (cannot fail).
     */
    rc = rtHeapPageAllocFromBlock(pBlock, cPages, fFlags, ppv);
    Assert(rc == VINF_SUCCESS);

    return rc;
}


/**
 * Allocates one or more pages off the heap.
 *
 * @returns IPRT status code.
 * @param   pHeap   The page heap.
 * @param   cPages  The number of pages to allocate.
 * @param   pszTag  The allocation tag.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the pointer to the pages.
 */
static int RTHeapPageAlloc(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
{
    /*
     * Validate input.
     */
    AssertPtr(ppv);
    *ppv = NULL;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);
    AssertMsgReturn(cPages < RTMEMPAGE_BLOCK_SIZE, ("%#zx\n", cPages), VERR_OUT_OF_RANGE);

    /*
     * Grab the lock and call a worker with many returns.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        rc = rtHeapPageAllocLocked(pHeap, cPages, pszTag, fFlags, ppv);
        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}


/**
 * RTAvlrPVDoWithAll callback.
 *
 * @returns 0 to continue the enum, non-zero to quit it.
 * @param   pNode   The node.
 * @param   pvUser  Pointer to a block pointer variable. For returning
 *                  the address of the block to be freed.
 */
static DECLCALLBACK(int) rtHeapPageFindUnusedBlockCallback(PAVLRPVNODECORE pNode, void *pvUser)
{
    PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
    if (pBlock->cFreePages == RTMEMPAGE_BLOCK_PAGE_COUNT)
    {
        *(PRTHEAPPAGEBLOCK *)pvUser = pBlock;
        return 1;
    }
    return 0;
}


/**
 * Frees an allocation.
 *
 * @returns IPRT status code.
 * @retval  VERR_NOT_FOUND if pv isn't within any of the memory blocks in the
 *          heap.
 * @retval  VERR_INVALID_POINTER if the given memory range isn't exactly one
 *          allocation block.
 * @param   pHeap   The page heap.
 * @param   pv      Pointer to what RTHeapPageAlloc returned.
 * @param   cPages  The number of pages that were allocated.
 */
static int RTHeapPageFree(PRTHEAPPAGE pHeap, void *pv, size_t cPages)
{
    /*
     * Validate input.
     */
    if (!pv)
        return VINF_SUCCESS;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Grab the lock and look up the page.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)RTAvlrPVRangeGet(&pHeap->BlockTree, pv);
        if (pBlock)
        {
            /*
             * Validate the specified address range.
             */
            uint32_t const iPage = (uint32_t)(((uintptr_t)pv - (uintptr_t)pBlock->Core.Key) >> PAGE_SHIFT);
            /* Check the range is within the block. */
            bool fOk = iPage + cPages <= RTMEMPAGE_BLOCK_PAGE_COUNT;
            /* Check that it's the start of an allocation. */
            fOk = fOk && ASMBitTest(&pBlock->bmFirst[0], iPage);
            /* Check that the range ends at an allocation boundary. */
            fOk = fOk && (   iPage + cPages == RTMEMPAGE_BLOCK_PAGE_COUNT
                          || ASMBitTest(&pBlock->bmFirst[0], iPage + (uint32_t)cPages)
                          || !ASMBitTest(&pBlock->bmAlloc[0], iPage + (uint32_t)cPages));
            /* Check the other pages. */
            uint32_t const iLastPage = iPage + (uint32_t)cPages - 1;
            for (uint32_t i = iPage + 1; i < iLastPage && fOk; i++)
                fOk = ASMBitTest(&pBlock->bmAlloc[0], i)
                   && !ASMBitTest(&pBlock->bmFirst[0], i);
            if (fOk)
            {
                /*
                 * Free the memory.
                 */
                uint32_t fRevert = (ASMBitTest(&pBlock->bmLockedAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_LOCKED : 0)
                                 | (ASMBitTest(&pBlock->bmNoDumpAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_NO_DUMP : 0);
                if (fRevert)
                {
                    rtMemPageNativeRevertFlags(pv, cPages << PAGE_SHIFT, fRevert);
                    ASMBitClearRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
                    ASMBitClearRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
                }
                ASMBitClearRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                ASMBitClear(&pBlock->bmFirst[0], iPage);
                pBlock->cFreePages += (uint32_t)cPages;
                pHeap->cFreePages  += (uint32_t)cPages;
                pHeap->cFreeCalls++;
                if (!pHeap->pHint1 || pHeap->pHint1->cFreePages < pBlock->cFreePages)
                    pHeap->pHint1 = pBlock;

                /*
                 * Shrink the heap. Not very efficient because of the AVL tree.
                 */
                if (   pHeap->cFreePages >= RTMEMPAGE_BLOCK_PAGE_COUNT * 3
                    && pHeap->cFreePages >= pHeap->cHeapPages / 2 /* 50% free */
                    && pHeap->cFreeCalls - pHeap->uLastMinimizeCall > RTMEMPAGE_BLOCK_PAGE_COUNT
                   )
                {
                    uint32_t cFreePageTarget = pHeap->cHeapPages / 4; /* 25% free */
                    while (pHeap->cFreePages > cFreePageTarget)
                    {
                        pHeap->uLastMinimizeCall = pHeap->cFreeCalls;

                        pBlock = NULL;
                        RTAvlrPVDoWithAll(&pHeap->BlockTree, false /*fFromLeft*/,
                                          rtHeapPageFindUnusedBlockCallback, &pBlock);
                        if (!pBlock)
                            break;

                        void *pv2 = RTAvlrPVRemove(&pHeap->BlockTree, pBlock->Core.Key); Assert(pv2); NOREF(pv2);
                        pHeap->cHeapPages -= RTMEMPAGE_BLOCK_PAGE_COUNT;
                        pHeap->cFreePages -= RTMEMPAGE_BLOCK_PAGE_COUNT;
                        pHeap->pHint1      = NULL;
                        pHeap->pHint2      = NULL;
                        RTCritSectLeave(&pHeap->CritSect);

                        rtMemPageNativeFree(pBlock->Core.Key, RTMEMPAGE_BLOCK_SIZE);
                        pBlock->Core.Key = pBlock->Core.KeyLast = NULL;
                        pBlock->cFreePages = 0;
                        rtHeapPageIntBlockAllocatorFree(pHeap, pBlock);

                        RTCritSectEnter(&pHeap->CritSect);
                    }
                }
            }
            else
                rc = VERR_INVALID_POINTER;
        }
        else
            rc = VERR_NOT_FOUND; /* Distinct return code for this so RTMemPageFree and others can try alternative heaps. */

        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}
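/* Worked example of the shrink heuristic above (illustrative, not from the
 * original file; assumes 4 KiB pages, i.e. 1024 pages per 4 MiB block): with
 * 8 blocks (8192 pages) in the heap, 5000 of them free, and more than 1024
 * frees since the last minimize, all three conditions hold, so wholly unused
 * blocks are returned to the OS until the free count drops to the 25% target
 * (8192 / 4 = 2048 pages) or no completely free block remains. */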


/**
 * Initializes the heap.
 *
 * @returns IPRT status code
 * @param   pvUser  Unused.
 */
static DECLCALLBACK(int) rtMemPageInitOnce(void *pvUser)
{
    NOREF(pvUser);
    int rc = RTHeapPageInit(&g_MemPageHeap, false /*fExec*/);
    if (RT_SUCCESS(rc))
    {
        rc = RTHeapPageInit(&g_MemExecHeap, true /*fExec*/);
        if (RT_SUCCESS(rc))
            return rc;
        RTHeapPageDelete(&g_MemPageHeap);
    }
    return rc;
}


/**
 * Allocates memory from the specified heap.
 *
 * @returns Address of the allocated memory.
 * @param   cb      The number of bytes to allocate.
 * @param   pszTag  The tag.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   pHeap   The heap to use.
 */
static void *rtMemPageAllocInner(size_t cb, const char *pszTag, uint32_t fFlags, PRTHEAPPAGE pHeap)
{
    /*
     * Validate & adjust the input.
     */
    Assert(cb > 0);
    NOREF(pszTag);
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);

    /*
     * If the allocation is relatively large, we use mmap/VirtualAlloc/DosAllocMem directly.
     */
    void *pv = NULL; /* shut up gcc */
    if (cb >= RTMEMPAGE_NATIVE_THRESHOLD)
    {
        int rc = rtMemPageNativeAlloc(cb, fFlags, &pv);
        if (RT_SUCCESS(rc))
        {
            AssertPtr(pv);

            if (fFlags)
                rtMemPageApplyFlags(pv, cb, fFlags);
        }
        else
            pv = NULL;
    }
    else
    {
        int rc = RTOnce(&g_MemPageHeapInitOnce, rtMemPageInitOnce, NULL);
        if (RT_SUCCESS(rc))
            rc = RTHeapPageAlloc(pHeap, cb >> PAGE_SHIFT, pszTag, fFlags, &pv);
        if (RT_FAILURE(rc))
            pv = NULL;
    }

    return pv;
}


RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPageAllocInner(cb, pszTag, 0, &g_MemPageHeap);
}


RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPageAllocInner(cb, pszTag, RTMEMPAGEALLOC_F_ZERO, &g_MemPageHeap);
}


RTDECL(void *) RTMemPageAllocExTag(size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
{
    AssertReturn(!(fFlags & ~RTMEMPAGEALLOC_F_VALID_MASK), NULL);
    return rtMemPageAllocInner(cb, pszTag, fFlags,
                               !(fFlags & RTMEMPAGEALLOC_F_EXECUTABLE) ? &g_MemPageHeap : &g_MemExecHeap);
}


RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW_DEF
{
    /*
     * Validate & adjust the input.
     */
    if (!pv)
        return;
    AssertPtr(pv);
    Assert(cb > 0);
    Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);

    /*
     * If the allocation is relatively large, we used mmap/VirtualAlloc/DosAllocMem directly.
     */
    if (cb >= RTMEMPAGE_NATIVE_THRESHOLD)
        rtMemPageNativeFree(pv, cb);
    else
    {
        int rc = RTHeapPageFree(&g_MemPageHeap, pv, cb >> PAGE_SHIFT);
        if (rc == VERR_NOT_FOUND)
            rc = RTHeapPageFree(&g_MemExecHeap, pv, cb >> PAGE_SHIFT);
        AssertRC(rc);
    }
}
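

/* Usage sketch (illustrative, not part of the original file; compiled out).
 * The RTMemPage* functions above are the public entry points; the caller must
 * pass RTMemPageFree the same size it requested from the allocator.  The tag
 * string is an arbitrary example. */
#if 0
static int rtMemPageExample(void)
{
    /* Allocate three zeroed pages from the non-exec heap. */
    void *pv = RTMemPageAllocZTag(3 * PAGE_SIZE, "example/demo");
    if (!pv)
        return VERR_NO_MEMORY;

    /* ... use the buffer ... */

    /* Free it again, stating the identical size. */
    RTMemPageFree(pv, 3 * PAGE_SIZE);
    return VINF_SUCCESS;
}
#endif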
