VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/mempage-heap.cpp@ 103914

Last change on this file since 103914 was 103005, checked in by vboxsync, 11 months ago

iprt/asm.h,*: Split out the ASMMem* and related stuff into a separate header, asm-mem.h, so that we can get the RT_ASM_PAGE_SIZE stuff out of the way.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 28.4 KB
Line 
1/* $Id: mempage-heap.cpp 103005 2024-01-23 23:55:58Z vboxsync $ */
2/** @file
3 * IPRT - RTMemPage*, optimized using heap.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#include "internal/iprt.h"
42#include <iprt/mem.h>
43
44#include <iprt/asm-mem.h>
45#include <iprt/asm.h>
46#include <iprt/assert.h>
47#include <iprt/avl.h>
48#include <iprt/critsect.h>
49#include <iprt/errcore.h>
50#include <iprt/list.h>
51#include <iprt/once.h>
52#include <iprt/param.h>
53#include <iprt/string.h>
54#include "internal/mem.h"
55
56
57
58/*********************************************************************************************************************************
59* Defined Constants And Macros *
60*********************************************************************************************************************************/
/** Threshold at which we switch to simply calling mmap. */
62#define RTMEMPAGE_NATIVE_THRESHOLD _1M
63/** The size of a heap block (power of two) - in bytes. */
64#define RTMEMPAGE_BLOCK_SIZE _4M
65
66/** The number of pages per heap block. */
67#define RTMEMPAGE_BLOCK_PAGE_COUNT (RTMEMPAGE_BLOCK_SIZE / PAGE_SIZE)
68AssertCompile(RTMEMPAGE_BLOCK_SIZE == RTMEMPAGE_BLOCK_PAGE_COUNT * PAGE_SIZE);
69
70
71/*********************************************************************************************************************************
72* Structures and Typedefs *
73*********************************************************************************************************************************/
/** Pointer to a page heap block. */
typedef struct RTHEAPPAGEBLOCK *PRTHEAPPAGEBLOCK;

/**
 * A simple page heap.
 */
typedef struct RTHEAPPAGE
{
    /** Magic number (RTHEAPPAGE_MAGIC).  Inverted on deletion. */
    uint32_t u32Magic;
    /** The number of pages in the heap (in BlockTree). */
    uint32_t cHeapPages;
    /** The number of currently free pages. */
    uint32_t cFreePages;
    /** Number of successful allocation calls. */
    uint32_t cAllocCalls;
    /** Number of successful free calls. */
    uint32_t cFreeCalls;
    /** The free call number at which we last tried to minimize the heap. */
    uint32_t uLastMinimizeCall;
    /** Tree of heap blocks, keyed by the address range each block covers. */
    AVLRPVTREE BlockTree;
    /** Allocation hint no 1 (last freed). */
    PRTHEAPPAGEBLOCK pHint1;
    /** Allocation hint no 2 (last alloc). */
    PRTHEAPPAGEBLOCK pHint2;
    /** The allocation chunks for the RTHEAPPAGEBLOCK allocator
     * (RTHEAPPAGEBLOCKALLOCCHUNK). */
    RTLISTANCHOR BlockAllocatorChunks;
    /** Critical section protecting the heap. */
    RTCRITSECT CritSect;
    /** Set if the memory must be allocated with execute access. */
    bool fExec;
} RTHEAPPAGE;
/** Magic value for RTHEAPPAGE::u32Magic identifying a valid heap. */
#define RTHEAPPAGE_MAGIC UINT32_C(0xfeedface)
/** Pointer to a page heap. */
typedef RTHEAPPAGE *PRTHEAPPAGE;
111
112
/**
 * Describes a page heap block.
 */
typedef struct RTHEAPPAGEBLOCK
{
    /** The AVL tree node core (void pointer range covering the block). */
    AVLRPVNODECORE Core;
    /** The number of free pages. */
    uint32_t cFreePages;
    /** Pointer back to the heap. */
    PRTHEAPPAGE pHeap;
    /** Allocation bitmap.  Set bits mark allocated pages. */
    uint32_t bmAlloc[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    /** Allocation boundary bitmap.  Set bits mark the start of
     * allocations (used to validate ranges passed to RTHeapPageFree). */
    uint32_t bmFirst[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_LOCKED has been
     * successfully applied, so it can be reverted on free. */
    uint32_t bmLockedAdviced[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_NO_DUMP has been
     * successfully applied, so it can be reverted on free. */
    uint32_t bmNoDumpAdviced[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
} RTHEAPPAGEBLOCK;
136
137
/**
 * Allocation chunk of RTHEAPPAGEBLOCK structures.
 *
 * This is backed by a 64KB allocation and non-present blocks will be marked as
 * allocated in bmAlloc.
 */
typedef struct RTHEAPPAGEBLOCKALLOCCHUNK
{
    /** List entry (RTHEAPPAGE::BlockAllocatorChunks). */
    RTLISTNODE ListEntry;
    /** Number of free RTHEAPPAGEBLOCK structures here. */
    uint32_t cFree;
    /** Number of blocks in aBlocks. */
    uint32_t cBlocks;
    /** Allocation bitmap.  Sized to hold RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS
     * bits (28*32 on 32-bit hosts, 26*32 on 64-bit hosts). */
    uint32_t bmAlloc[ARCH_BITS == 32 ? 28 : 26];
    /** Block array. */
    RT_FLEXIBLE_ARRAY_EXTENSION
    RTHEAPPAGEBLOCK aBlocks[RT_FLEXIBLE_ARRAY];
} RTHEAPPAGEBLOCKALLOCCHUNK;
AssertCompileMemberAlignment(RTHEAPPAGEBLOCKALLOCCHUNK, bmAlloc, 8);
AssertCompileMemberAlignment(RTHEAPPAGEBLOCKALLOCCHUNK, aBlocks, 64);
/** Pointer to an allocation chunk of RTHEAPPAGEBLOCK structures. */
typedef RTHEAPPAGEBLOCKALLOCCHUNK *PRTHEAPPAGEBLOCKALLOCCHUNK;

/** Max number of blocks one RTHEAPPAGEBLOCKALLOCCHUNK can track (896/832). */
#define RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS ((ARCH_BITS == 32 ? 28 : 26) * 32)
/** The chunk size for the block allocator. */
#define RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE _64K
167
168
/**
 * Argument package for rtHeapPageAllocCallback.
 */
typedef struct RTHEAPPAGEALLOCARGS
{
    /** The number of pages to allocate. */
    size_t cPages;
    /** Non-null on success; also terminates the AVL enumeration. */
    void *pvAlloc;
    /** RTMEMPAGEALLOC_F_XXX. */
    uint32_t fFlags;
} RTHEAPPAGEALLOCARGS;
181
182
183/*********************************************************************************************************************************
184* Global Variables *
185*********************************************************************************************************************************/
186/** Initialize once structure. */
187static RTONCE g_MemPageHeapInitOnce = RTONCE_INITIALIZER;
188/** The page heap. */
189static RTHEAPPAGE g_MemPageHeap;
190/** The exec page heap. */
191static RTHEAPPAGE g_MemExecHeap;
192
193
194/**
195 * Initializes the heap.
196 *
197 * @returns IPRT status code.
198 * @param pHeap The page heap to initialize.
199 * @param fExec Whether the heap memory should be marked as
200 * executable or not.
201 */
202static int RTHeapPageInit(PRTHEAPPAGE pHeap, bool fExec)
203{
204 int rc = RTCritSectInitEx(&pHeap->CritSect,
205 RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
206 NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
207 if (RT_SUCCESS(rc))
208 {
209 pHeap->cHeapPages = 0;
210 pHeap->cFreePages = 0;
211 pHeap->cAllocCalls = 0;
212 pHeap->cFreeCalls = 0;
213 pHeap->uLastMinimizeCall = 0;
214 pHeap->BlockTree = NULL;
215 pHeap->fExec = fExec;
216 RTListInit(&pHeap->BlockAllocatorChunks);
217 pHeap->u32Magic = RTHEAPPAGE_MAGIC;
218 }
219 return rc;
220}
221
222
223/**
224 * Deletes the heap and all the memory it tracks.
225 *
226 * @returns IPRT status code.
227 * @param pHeap The page heap to delete.
228 */
229static int RTHeapPageDelete(PRTHEAPPAGE pHeap)
230{
231 NOREF(pHeap);
232 pHeap->u32Magic = ~RTHEAPPAGE_MAGIC;
233 return VINF_SUCCESS;
234}
235
236
/**
 * Allocates a RTHEAPPAGEBLOCK.
 *
 * @returns Pointer to RTHEAPPAGEBLOCK on success, NULL on failure.
 * @param   pHeap   The heap this is for.
 */
static PRTHEAPPAGEBLOCK rtHeapPageIntBlockAllocatorAlloc(PRTHEAPPAGE pHeap)
{
    /*
     * Locate a chunk with space and grab a block from it.
     */
    PRTHEAPPAGEBLOCKALLOCCHUNK pChunk;
    RTListForEach(&pHeap->BlockAllocatorChunks, pChunk, RTHEAPPAGEBLOCKALLOCCHUNK, ListEntry)
    {
        if (pChunk->cFree > 0)
        {
            /* Round the scan length up to a multiple of 64 bits, capped at the
               bitmap capacity.  Slots beyond cBlocks were pre-marked as
               allocated at chunk creation, so the scan cannot return a slot
               without a backing aBlocks entry. */
            uint32_t const cBits = RT_ALIGN_32(pChunk->cBlocks, 64);
            int idxBlock = ASMBitFirstClear(&pChunk->bmAlloc[0], RT_MIN(RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS, cBits));
            if (idxBlock >= 0)
            {
                ASMBitSet(&pChunk->bmAlloc[0], idxBlock);
                pChunk->cFree -= 1;
                return &pChunk->aBlocks[idxBlock];
            }
            AssertFailed(); /* cFree and the bitmap are out of sync. */
        }
    }

    /*
     * Allocate a new chunk and return the first block in it.
     */
    int rc = rtMemPageNativeAlloc(RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE, 0, (void **)&pChunk);
    AssertRCReturn(rc, NULL);
    pChunk->cBlocks = (RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE - RT_UOFFSETOF(RTHEAPPAGEBLOCKALLOCCHUNK, aBlocks))
                    / sizeof(pChunk->aBlocks[0]);
    AssertStmt(pChunk->cBlocks < RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS, pChunk->cBlocks = RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS);
    pChunk->cFree = pChunk->cBlocks;

    /* Clear the bitmap, then mark the tail bits without backing aBlocks
       entries as allocated so the scan above always skips them. */
    RT_ZERO(pChunk->bmAlloc);
    ASMBitSetRange(pChunk->bmAlloc, pChunk->cBlocks, RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS);
    RTListPrepend(&pHeap->BlockAllocatorChunks, &pChunk->ListEntry);

    /*
     * Allocate the first one.
     */
    ASMBitSet(pChunk->bmAlloc, 0);
    pChunk->cFree -= 1;

    return &pChunk->aBlocks[0];
}
287
288
/**
 * Frees a RTHEAPPAGEBLOCK.
 *
 * @param   pHeap   The heap this is for.
 * @param   pBlock  The block to free.
 */
static void rtHeapPageIntBlockAllocatorFree(PRTHEAPPAGE pHeap, PRTHEAPPAGEBLOCK pBlock)
{
    /*
     * Locate the chunk the block belongs to and mark it as freed.
     */
    PRTHEAPPAGEBLOCKALLOCCHUNK pChunk;
    RTListForEach(&pHeap->BlockAllocatorChunks, pChunk, RTHEAPPAGEBLOCKALLOCCHUNK, ListEntry)
    {
        /* Unsigned containment test: if pBlock is below pChunk the subtraction
           wraps to a huge value, so a single compare covers both bounds. */
        if ((uintptr_t)pBlock - (uintptr_t)pChunk < RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE)
        {
            uintptr_t const idxBlock = (uintptr_t)(pBlock - &pChunk->aBlocks[0]);
            if (ASMBitTestAndClear(&pChunk->bmAlloc[0], idxBlock))
                pChunk->cFree++;
            else
                AssertMsgFailed(("pBlock=%p idxBlock=%#zx\n", pBlock, idxBlock));
            return;
        }
    }
    AssertFailed(); /* pBlock doesn't belong to any chunk in this heap. */
}
315
316
/**
 * Applies flags to an allocation.
 *
 * @return  Flags that need to be reverted upon free (subset of
 *          RTMEMPAGEALLOC_F_ADVISE_LOCKED and RTMEMPAGEALLOC_F_ADVISE_NO_DUMP
 *          actually applied by the native code).
 * @param   pv      The allocation.
 * @param   cb      The size of the allocation (page aligned).
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 */
DECLINLINE(uint32_t) rtMemPageApplyFlags(void *pv, size_t cb, uint32_t fFlags)
{
    uint32_t fHandled = 0;
    if (fFlags & (RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP))
        fHandled = rtMemPageNativeApplyFlags(pv, cb, fFlags);
    if (fFlags & RTMEMPAGEALLOC_F_ZERO)
        RT_BZERO(pv, cb);
    return fHandled;
}
334
335
/**
 * Avoids some gotos in rtHeapPageAllocFromBlock.
 *
 * Does the common bookkeeping after pages have been marked allocated in
 * bmAlloc by the caller.
 *
 * @returns VINF_SUCCESS.
 * @param   pBlock  The block.
 * @param   iPage   The page to start allocating at.
 * @param   cPages  The number of pages.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the allocation address.
 */
DECLINLINE(int) rtHeapPageAllocFromBlockSuccess(PRTHEAPPAGEBLOCK pBlock, uint32_t iPage, size_t cPages, uint32_t fFlags, void **ppv)
{
    PRTHEAPPAGE pHeap = pBlock->pHeap;

    /* Mark the first page of the allocation so its boundary can be validated
       when it is freed again. */
    ASMBitSet(&pBlock->bmFirst[0], iPage);
    pBlock->cFreePages -= (uint32_t)cPages;
    pHeap->cFreePages -= (uint32_t)cPages;
    /* Keep the block with the most free pages as the 'last alloc' hint. */
    if (!pHeap->pHint2 || pHeap->pHint2->cFreePages < pBlock->cFreePages)
        pHeap->pHint2 = pBlock;
    pHeap->cAllocCalls++;

    void *pv = (uint8_t *)pBlock->Core.Key + (iPage << PAGE_SHIFT);
    *ppv = pv;

    if (fFlags)
    {
        /* Apply zero/advice flags and record which advice actually took
           effect, so it can be reverted when the pages are freed. */
        uint32_t fHandled = rtMemPageApplyFlags(pv, cPages << PAGE_SHIFT, fFlags);
        Assert(!(fHandled & ~(RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)));
        if (fHandled & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
            ASMBitSetRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
        if (fHandled & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
            ASMBitSetRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
    }

    return VINF_SUCCESS;
}
372
373
374/**
375 * Checks if a page range is free in the specified block.
376 *
377 * @returns @c true if the range is free, @c false if not.
378 * @param pBlock The block.
379 * @param iFirst The first page to check.
380 * @param cPages The number of pages to check.
381 */
382DECLINLINE(bool) rtHeapPageIsPageRangeFree(PRTHEAPPAGEBLOCK pBlock, uint32_t iFirst, uint32_t cPages)
383{
384 uint32_t i = iFirst + cPages;
385 while (i-- > iFirst)
386 {
387 if (ASMBitTest(&pBlock->bmAlloc[0], i))
388 return false;
389 Assert(!ASMBitTest(&pBlock->bmFirst[0], i));
390 }
391 return true;
392}
393
394
/**
 * Tries to allocate a chunk of pages from a heap block.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if the allocation failed.
 * @param   pBlock  The block to allocate from.
 * @param   cPages  The size of the allocation.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the allocation address on success.
 */
DECLINLINE(int) rtHeapPageAllocFromBlock(PRTHEAPPAGEBLOCK pBlock, size_t cPages, uint32_t fFlags, void **ppv)
{
    if (pBlock->cFreePages >= cPages)
    {
        int iPage = ASMBitFirstClear(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT);
        Assert(iPage >= 0); /* cFreePages > 0 guarantees a clear bit. */

        /* special case: single page. */
        if (cPages == 1)
        {
            ASMBitSet(&pBlock->bmAlloc[0], iPage);
            return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
        }

        /* Multi-page: first-fit scan over the free runs in the block. */
        while (   iPage >= 0
               && (unsigned)iPage <= RTMEMPAGE_BLOCK_PAGE_COUNT - cPages)
        {
            if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, (uint32_t)cPages - 1))
            {
                ASMBitSetRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
            }

            /* next: skip past the allocated run we just hit, then advance to
               the start of the following free run. */
            iPage = ASMBitNextSet(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT, iPage);
            if (iPage < 0 || (unsigned)iPage >= RTMEMPAGE_BLOCK_PAGE_COUNT - 1)
                break;
            iPage = ASMBitNextClear(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT, iPage);
        }
    }

    return VERR_NO_MEMORY;
}
438
439
440/**
441 * RTAvlrPVDoWithAll callback.
442 *
443 * @returns 0 to continue the enum, non-zero to quit it.
444 * @param pNode The node.
445 * @param pvUser The user argument.
446 */
447static DECLCALLBACK(int) rtHeapPageAllocCallback(PAVLRPVNODECORE pNode, void *pvUser)
448{
449 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
450 RTHEAPPAGEALLOCARGS *pArgs = (RTHEAPPAGEALLOCARGS *)pvUser;
451 int rc = rtHeapPageAllocFromBlock(pBlock, pArgs->cPages, pArgs->fFlags, &pArgs->pvAlloc);
452 return RT_SUCCESS(rc) ? 1 : 0;
453}
454
455
/**
 * Worker for RTHeapPageAlloc.
 *
 * @returns IPRT status code
 * @param   pHeap   The heap - locked.
 * @param   cPages  The page count.
 * @param   pszTag  The tag (currently unused).
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the address of the allocation
 *                  on success.
 */
static int rtHeapPageAllocLocked(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
{
    int rc;
    NOREF(pszTag);

    /*
     * Use the hints first.
     */
    if (pHeap->pHint1)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint1, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }
    if (pHeap->pHint2)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint2, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }

    /*
     * Search the heap for a block with enough free space.
     *
     * N.B. This search algorithm is not optimal at all. What (hopefully) saves
     *      it are the two hints above.
     */
    if (pHeap->cFreePages >= cPages)
    {
        RTHEAPPAGEALLOCARGS Args;
        Args.cPages = cPages;
        Args.pvAlloc = NULL;
        Args.fFlags = fFlags;
        RTAvlrPVDoWithAll(&pHeap->BlockTree, true /*fFromLeft*/, rtHeapPageAllocCallback, &Args);
        if (Args.pvAlloc)
        {
            *ppv = Args.pvAlloc;
            return VINF_SUCCESS;
        }
    }

    /*
     * Didn't find anything, so expand the heap with a new block.
     */
    PRTHEAPPAGEBLOCK const pBlock = rtHeapPageIntBlockAllocatorAlloc(pHeap);
    AssertReturn(pBlock, VERR_NO_MEMORY);

    /* Drop the lock while doing the (potentially slow) native allocation.
       The new block isn't linked into any heap structure yet, so it cannot
       be seen by other threads. */
    RTCritSectLeave(&pHeap->CritSect);

    void *pvPages = NULL;
    rc = rtMemPageNativeAlloc(RTMEMPAGE_BLOCK_SIZE, pHeap->fExec ? RTMEMPAGEALLOC_F_EXECUTABLE : 0, &pvPages);

    RTCritSectEnter(&pHeap->CritSect);
    if (RT_FAILURE(rc))
    {
        /* Native allocation failed: return the unused block structure. */
        rtHeapPageIntBlockAllocatorFree(pHeap, pBlock);
        return rc;
    }

    /* Initialize the block and link it into the heap. */
    RT_ZERO(*pBlock);
    pBlock->Core.Key = pvPages;
    pBlock->Core.KeyLast = (uint8_t *)pvPages + RTMEMPAGE_BLOCK_SIZE - 1;
    pBlock->cFreePages = RTMEMPAGE_BLOCK_PAGE_COUNT;
    pBlock->pHeap = pHeap;

    bool fRc = RTAvlrPVInsert(&pHeap->BlockTree, &pBlock->Core); Assert(fRc); NOREF(fRc);
    pHeap->cFreePages += RTMEMPAGE_BLOCK_PAGE_COUNT;
    pHeap->cHeapPages += RTMEMPAGE_BLOCK_PAGE_COUNT;

    /*
     * Grab memory from the new block (cannot fail).
     */
    rc = rtHeapPageAllocFromBlock(pBlock, cPages, fFlags, ppv);
    Assert(rc == VINF_SUCCESS);

    return rc;
}
544
545
546/**
547 * Allocates one or more pages off the heap.
548 *
549 * @returns IPRT status code.
550 * @param pHeap The page heap.
551 * @param cPages The number of pages to allocate.
552 * @param pszTag The allocation tag.
553 * @param fFlags RTMEMPAGEALLOC_F_XXX.
554 * @param ppv Where to return the pointer to the pages.
555 */
556static int RTHeapPageAlloc(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
557{
558 /*
559 * Validate input.
560 */
561 AssertPtr(ppv);
562 *ppv = NULL;
563 AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
564 AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);
565 AssertMsgReturn(cPages < RTMEMPAGE_BLOCK_SIZE, ("%#zx\n", cPages), VERR_OUT_OF_RANGE);
566
567 /*
568 * Grab the lock and call a worker with many returns.
569 */
570 int rc = RTCritSectEnter(&pHeap->CritSect);
571 if (RT_SUCCESS(rc))
572 {
573 rc = rtHeapPageAllocLocked(pHeap, cPages, pszTag, fFlags, ppv);
574 RTCritSectLeave(&pHeap->CritSect);
575 }
576
577 return rc;
578}
579
580
581/**
582 * RTAvlrPVDoWithAll callback.
583 *
584 * @returns 0 to continue the enum, non-zero to quit it.
585 * @param pNode The node.
586 * @param pvUser Pointer to a block pointer variable. For returning
587 * the address of the block to be freed.
588 */
589static DECLCALLBACK(int) rtHeapPageFindUnusedBlockCallback(PAVLRPVNODECORE pNode, void *pvUser)
590{
591 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
592 if (pBlock->cFreePages == RTMEMPAGE_BLOCK_PAGE_COUNT)
593 {
594 *(PRTHEAPPAGEBLOCK *)pvUser = pBlock;
595 return 1;
596 }
597 return 0;
598}
599
600
/**
 * Frees an allocation.
 *
 * @returns IPRT status code.
 * @retval  VERR_NOT_FOUND if pv isn't within any of the memory blocks in the
 *          heap.
 * @retval  VERR_INVALID_POINTER if the given memory range isn't exactly one
 *          allocation block.
 * @param   pHeap   The page heap.
 * @param   pv      Pointer to what RTHeapPageAlloc returned.
 * @param   cPages  The number of pages that was allocated.
 */
static int RTHeapPageFree(PRTHEAPPAGE pHeap, void *pv, size_t cPages)
{
    /*
     * Validate input.
     */
    if (!pv)
        return VINF_SUCCESS;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Grab the lock and look up the page.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)RTAvlrPVRangeGet(&pHeap->BlockTree, pv);
        if (pBlock)
        {
            /*
             * Validate the specified address range.
             */
            uint32_t const iPage = (uint32_t)(((uintptr_t)pv - (uintptr_t)pBlock->Core.Key) >> PAGE_SHIFT);
            /* Check the range is within the block. */
            bool fOk = iPage + cPages <= RTMEMPAGE_BLOCK_PAGE_COUNT;
            /* Check that it's the start of an allocation. */
            fOk = fOk && ASMBitTest(&pBlock->bmFirst[0], iPage);
            /* Check that the range ends at an allocation boundary: either the
               block end, the start of the next allocation, or a free page. */
            fOk = fOk && (   iPage + cPages == RTMEMPAGE_BLOCK_PAGE_COUNT
                          || ASMBitTest(&pBlock->bmFirst[0], iPage + (uint32_t)cPages)
                          || !ASMBitTest(&pBlock->bmAlloc[0], iPage + (uint32_t)cPages));
            /* Check the other pages: allocated and not allocation starts.
               NOTE(review): the loop stops at i < iLastPage, so the final page
               of the range is never checked - looks like an off-by-one; confirm
               against upstream intent before changing. */
            uint32_t const iLastPage = iPage + (uint32_t)cPages - 1;
            for (uint32_t i = iPage + 1; i < iLastPage && fOk; i++)
                fOk = ASMBitTest(&pBlock->bmAlloc[0], i)
                   && !ASMBitTest(&pBlock->bmFirst[0], i);
            if (fOk)
            {
                /*
                 * Free the memory.
                 */
                /* Revert any lock/no-dump advice recorded at allocation time
                   (tracked in the bmLockedAdviced/bmNoDumpAdviced bitmaps). */
                uint32_t fRevert = (ASMBitTest(&pBlock->bmLockedAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_LOCKED : 0)
                                 | (ASMBitTest(&pBlock->bmNoDumpAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_NO_DUMP : 0);
                if (fRevert)
                {
                    rtMemPageNativeRevertFlags(pv, cPages << PAGE_SHIFT, fRevert);
                    ASMBitClearRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
                    ASMBitClearRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
                }
                ASMBitClearRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                ASMBitClear(&pBlock->bmFirst[0], iPage);
                pBlock->cFreePages += (uint32_t)cPages;
                pHeap->cFreePages += (uint32_t)cPages;
                pHeap->cFreeCalls++;
                /* 'Last freed' hint: prefer the block with the most free pages. */
                if (!pHeap->pHint1 || pHeap->pHint1->cFreePages < pBlock->cFreePages)
                    pHeap->pHint1 = pBlock;

                /*
                 * Shrink the heap. Not very efficient because of the AVL tree.
                 */
                if (   pHeap->cFreePages >= RTMEMPAGE_BLOCK_PAGE_COUNT * 3
                    && pHeap->cFreePages >= pHeap->cHeapPages / 2 /* 50% free */
                    && pHeap->cFreeCalls - pHeap->uLastMinimizeCall > RTMEMPAGE_BLOCK_PAGE_COUNT
                   )
                {
                    uint32_t cFreePageTarget = pHeap->cHeapPages / 4; /* 25% free */
                    while (pHeap->cFreePages > cFreePageTarget)
                    {
                        pHeap->uLastMinimizeCall = pHeap->cFreeCalls;

                        /* Find a completely unused block, scanning from the right. */
                        pBlock = NULL;
                        RTAvlrPVDoWithAll(&pHeap->BlockTree, false /*fFromLeft*/,
                                          rtHeapPageFindUnusedBlockCallback, &pBlock);
                        if (!pBlock)
                            break;

                        /* Unlink the block and invalidate the hints before
                           dropping the lock for the native free call. */
                        void *pv2 = RTAvlrPVRemove(&pHeap->BlockTree, pBlock->Core.Key); Assert(pv2); NOREF(pv2);
                        pHeap->cHeapPages -= RTMEMPAGE_BLOCK_PAGE_COUNT;
                        pHeap->cFreePages -= RTMEMPAGE_BLOCK_PAGE_COUNT;
                        pHeap->pHint1 = NULL;
                        pHeap->pHint2 = NULL;
                        RTCritSectLeave(&pHeap->CritSect);

                        rtMemPageNativeFree(pBlock->Core.Key, RTMEMPAGE_BLOCK_SIZE);
                        pBlock->Core.Key = pBlock->Core.KeyLast = NULL;
                        pBlock->cFreePages = 0;
                        rtHeapPageIntBlockAllocatorFree(pHeap, pBlock);

                        RTCritSectEnter(&pHeap->CritSect);
                    }
                }
            }
            else
                rc = VERR_INVALID_POINTER;
        }
        else
            rc = VERR_NOT_FOUND; /* Distinct return code for this so RTMemPageFree and others can try alternative heaps. */

        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}
718
719
720/**
721 * Initializes the heap.
722 *
723 * @returns IPRT status code
724 * @param pvUser Unused.
725 */
726static DECLCALLBACK(int) rtMemPageInitOnce(void *pvUser)
727{
728 NOREF(pvUser);
729 int rc = RTHeapPageInit(&g_MemPageHeap, false /*fExec*/);
730 if (RT_SUCCESS(rc))
731 {
732 rc = RTHeapPageInit(&g_MemExecHeap, true /*fExec*/);
733 if (RT_SUCCESS(rc))
734 return rc;
735 RTHeapPageDelete(&g_MemPageHeap);
736 }
737 return rc;
738}
739
740
741/**
742 * Allocates memory from the specified heap.
743 *
744 * @returns Address of the allocated memory.
745 * @param cb The number of bytes to allocate.
746 * @param pszTag The tag.
747 * @param fFlags RTMEMPAGEALLOC_F_XXX.
748 * @param pHeap The heap to use.
749 */
750static void *rtMemPageAllocInner(size_t cb, const char *pszTag, uint32_t fFlags, PRTHEAPPAGE pHeap)
751{
752 /*
753 * Validate & adjust the input.
754 */
755 Assert(cb > 0);
756 NOREF(pszTag);
757 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
758
759 /*
760 * If the allocation is relatively large, we use mmap/VirtualAlloc/DosAllocMem directly.
761 */
762 void *pv = NULL; /* shut up gcc */
763 if (cb >= RTMEMPAGE_NATIVE_THRESHOLD)
764 {
765 int rc = rtMemPageNativeAlloc(cb, fFlags, &pv);
766 if (RT_SUCCESS(rc))
767 {
768 AssertPtr(pv);
769
770 if (fFlags)
771 rtMemPageApplyFlags(pv, cb, fFlags);
772 }
773 else
774 pv = NULL;
775 }
776 else
777 {
778 int rc = RTOnce(&g_MemPageHeapInitOnce, rtMemPageInitOnce, NULL);
779 if (RT_SUCCESS(rc))
780 rc = RTHeapPageAlloc(pHeap, cb >> PAGE_SHIFT, pszTag, fFlags, &pv);
781 if (RT_FAILURE(rc))
782 pv = NULL;
783 }
784
785 return pv;
786}
787
788
/**
 * Allocates page aligned memory from the default page heap.
 *
 * @returns Pointer to the allocated memory, NULL on failure.
 * @param   cb      The number of bytes to allocate (rounded up to page size).
 * @param   pszTag  The allocation tag (currently unused by this backend).
 */
RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPageAllocInner(cb, pszTag, 0, &g_MemPageHeap);
}
793
794
/**
 * Allocates zero-initialized page aligned memory from the default page heap.
 *
 * @returns Pointer to the allocated (zeroed) memory, NULL on failure.
 * @param   cb      The number of bytes to allocate (rounded up to page size).
 * @param   pszTag  The allocation tag (currently unused by this backend).
 */
RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPageAllocInner(cb, pszTag, RTMEMPAGEALLOC_F_ZERO, &g_MemPageHeap);
}
799
800
/**
 * Allocates page aligned memory with extended allocation flags.
 *
 * @returns Pointer to the allocated memory, NULL on failure or invalid flags.
 * @param   cb      The number of bytes to allocate (rounded up to page size).
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX; must be within
 *                  RTMEMPAGEALLOC_F_VALID_MASK.
 * @param   pszTag  The allocation tag (currently unused by this backend).
 */
RTDECL(void *) RTMemPageAllocExTag(size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
{
    AssertReturn(!(fFlags & ~RTMEMPAGEALLOC_F_VALID_MASK), NULL);
    /* Executable allocations come from the dedicated exec heap. */
    return rtMemPageAllocInner(cb, pszTag, fFlags,
                               !(fFlags & RTMEMPAGEALLOC_F_EXECUTABLE) ? &g_MemPageHeap : &g_MemExecHeap);
}
807
808
809RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW_DEF
810{
811 /*
812 * Validate & adjust the input.
813 */
814 if (!pv)
815 return;
816 AssertPtr(pv);
817 Assert(cb > 0);
818 Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
819 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
820
821 /*
822 * If the allocation is relatively large, we used mmap/VirtualAlloc/DosAllocMem directly.
823 */
824 if (cb >= RTMEMPAGE_NATIVE_THRESHOLD)
825 rtMemPageNativeFree(pv, cb);
826 else
827 {
828 int rc = RTHeapPageFree(&g_MemPageHeap, pv, cb >> PAGE_SHIFT);
829 if (rc == VERR_NOT_FOUND)
830 rc = RTHeapPageFree(&g_MemExecHeap, pv, cb >> PAGE_SHIFT);
831 AssertRC(rc);
832 }
833}
834
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette