VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/alloc-ef.cpp@ 100313

Last change on this file since 100313 was 100313, checked in by vboxsync, 11 months ago

Runtime/r3/alloc-ef.{cpp,h}: Replace occurrences of PAGE_SIZE/PAGE_OFFSET_MASK with RTSystemGetPageSize()/RTSystemGetPageOffsetMask() in code which is used by the linux.arm64 guest additions / validationkit, bugref:10476

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 33.9 KB
Line 
1/* $Id: alloc-ef.cpp 100313 2023-06-28 10:29:57Z vboxsync $ */
2/** @file
3 * IPRT - Memory Allocation, electric fence.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#include "alloc-ef.h"
42#include <iprt/mem.h>
43#include <iprt/log.h>
44#include <iprt/asm.h>
45#include <iprt/thread.h>
46#include <VBox/sup.h>
47#include <iprt/errcore.h>
48#ifndef IPRT_NO_CRT
49# include <errno.h>
50# include <stdio.h>
51# include <stdlib.h>
52#endif
53
54#include <iprt/alloc.h>
55#include <iprt/assert.h>
56#include <iprt/param.h>
57#include <iprt/string.h>
58#include <iprt/system.h>
59
60#ifdef RTALLOC_REPLACE_MALLOC
61# include <VBox/dis.h>
62# include <VBox/disopcode.h>
63# include <dlfcn.h>
64# ifdef RT_OS_DARWIN
65# include <malloc/malloc.h>
66# endif
67#endif
68
69
70/*********************************************************************************************************************************
71* Defined Constants And Macros *
72*********************************************************************************************************************************/
73#ifdef RTALLOC_REPLACE_MALLOC
74# define RTMEM_REPLACMENT_ALIGN(a_cb) ((a_cb) >= 16 ? RT_ALIGN_Z(a_cb, 16) \
75 : (a_cb) >= sizeof(uintptr_t) ? RT_ALIGN_Z(a_cb, sizeof(uintptr_t)) : (a_cb))
76#endif
77
78
79/*********************************************************************************************************************************
80* Global Variables *
81*********************************************************************************************************************************/
#ifdef RTALLOC_EFENCE_TRACE
/** Spinlock protecting all the block globals below. */
static volatile uint32_t g_BlocksLock;
/** Tree tracking the allocations. */
static AVLPVTREE g_BlocksTree;
# ifdef RTALLOC_EFENCE_FREE_DELAYED
/** Head of the delayed blocks (most recently freed first). */
static volatile PRTMEMBLOCK g_pBlocksDelayHead;
/** Tail of the delayed blocks (oldest; next to be actually freed). */
static volatile PRTMEMBLOCK g_pBlocksDelayTail;
/** Number of bytes in the delay list (includes fences). */
static volatile size_t g_cbBlocksDelay;
# endif /* RTALLOC_EFENCE_FREE_DELAYED */
# ifdef RTALLOC_REPLACE_MALLOC
/** @name For calling the real allocation API we've replaced.
 * @{ */
void * (*g_pfnOrgMalloc)(size_t);
void * (*g_pfnOrgCalloc)(size_t, size_t);
void * (*g_pfnOrgRealloc)(void *, size_t);
void   (*g_pfnOrgFree)(void *);
size_t (*g_pfnOrgMallocSize)(void *);
/** @} */
# endif
#endif /* RTALLOC_EFENCE_TRACE */
/** Array of pointers to watch for in the free operations (panic on match). */
void *gapvRTMemFreeWatch[4] = {NULL, NULL, NULL, NULL};
/** Enable logging of all freed memory. */
bool gfRTMemFreeLog = false;
110
111
112/*********************************************************************************************************************************
113* Internal Functions *
114*********************************************************************************************************************************/
115#ifdef RTALLOC_REPLACE_MALLOC
116static void rtMemReplaceMallocAndFriends(void);
117#endif
118
119
120/**
121 * Complains about something.
122 */
123static void rtmemComplain(const char *pszOp, const char *pszFormat, ...)
124{
125 va_list args;
126 fprintf(stderr, "RTMem error: %s: ", pszOp);
127 va_start(args, pszFormat);
128 vfprintf(stderr, pszFormat, args);
129 va_end(args);
130 RTAssertDoPanic();
131}
132
/**
 * Logs an informational allocator event.
 *
 * The logging code is compiled out by default (#if 0); flip the condition to
 * get stderr traces of every tracked operation.
 *
 * @param   pszOp       Name of the operation.
 * @param   pszFormat   Details, printf style.
 */
DECLINLINE(void) rtmemLog(const char *pszOp, const char *pszFormat, ...)
{
#if 0
    va_list args;
    fprintf(stderr, "RTMem info: %s: ", pszOp);
    va_start(args, pszFormat);
    vfprintf(stderr, pszFormat, args);
    va_end(args);
#else
    NOREF(pszOp); NOREF(pszFormat);
#endif
}
148
149
150#ifdef RTALLOC_EFENCE_TRACE
151
152/**
153 * Acquires the lock.
154 */
155DECLINLINE(void) rtmemBlockLock(void)
156{
157 unsigned c = 0;
158 while (!ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
159 RTThreadSleepNoLog(((++c) >> 2) & 31);
160}
161
162
/**
 * Releases the spinlock protecting the block globals.
 * Caller must own the lock (asserted).
 */
DECLINLINE(void) rtmemBlockUnlock(void)
{
    Assert(g_BlocksLock == 1);
    ASMAtomicXchgU32(&g_BlocksLock, 0);
}
171
172
/**
 * Creates a tracker block for a new allocation.
 *
 * Uses the original malloc when the C heap has been replaced, so the tracker
 * itself is never fenced.
 *
 * @returns The initialized block, or NULL if the heap is exhausted.
 * @param   enmType     The allocation type (RTMEMTYPE_XXX).
 * @param   cbUnaligned The exact number of bytes the user requested.
 * @param   cbAligned   The request with alignment padding applied.
 * @param   pszTag      Allocation tag string (not copied).
 * @param   pvCaller    Return address of the external caller.
 * @param   RT_SRC_POS_DECL  Expands to the iLine/pszFile/pszFunction params
 *                      recorded below.
 */
DECLINLINE(PRTMEMBLOCK) rtmemBlockCreate(RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
                                         const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
{
# ifdef RTALLOC_REPLACE_MALLOC
    /* Lazily install the malloc replacements on first use. */
    if (!g_pfnOrgMalloc)
        rtMemReplaceMallocAndFriends();
    PRTMEMBLOCK pBlock = (PRTMEMBLOCK)g_pfnOrgMalloc(sizeof(*pBlock));
# else
    PRTMEMBLOCK pBlock = (PRTMEMBLOCK)malloc(sizeof(*pBlock));
# endif
    if (pBlock)
    {
        pBlock->enmType     = enmType;
        pBlock->cbUnaligned = cbUnaligned;
        pBlock->cbAligned   = cbAligned;
        pBlock->pszTag      = pszTag;
        pBlock->pvCaller    = pvCaller;
        pBlock->iLine       = iLine;
        pBlock->pszFile     = pszFile;
        pBlock->pszFunction = pszFunction;
    }
    return pBlock;
}
199
200
/**
 * Frees a tracker block, using the original free() when the C heap has been
 * replaced (matching rtmemBlockCreate).
 */
DECLINLINE(void) rtmemBlockFree(PRTMEMBLOCK pBlock)
{
# ifdef RTALLOC_REPLACE_MALLOC
    g_pfnOrgFree(pBlock);
# else
    free(pBlock);
# endif
}
212
213
/**
 * Inserts a block into the tracking tree, keyed by the user pointer.
 * Insertion must succeed; a duplicate key triggers a release assertion.
 */
DECLINLINE(void) rtmemBlockInsert(PRTMEMBLOCK pBlock, void *pv)
{
    pBlock->Core.Key = pv;
    rtmemBlockLock();
    bool fRc = RTAvlPVInsert(&g_BlocksTree, &pBlock->Core);
    rtmemBlockUnlock();
    AssertRelease(fRc);
}
225
226
/**
 * Removes a block from the tracking tree and returns it to the caller.
 *
 * @returns The block for @a pv, NULL if not tracked.
 */
DECLINLINE(PRTMEMBLOCK) rtmemBlockRemove(void *pv)
{
    rtmemBlockLock();
    PRTMEMBLOCK pBlock = (PRTMEMBLOCK)RTAvlPVRemove(&g_BlocksTree, pv);
    rtmemBlockUnlock();
    return pBlock;
}
237
/**
 * Looks up a block in the tracking tree without removing it.
 *
 * @returns The block for @a pv, NULL if not tracked.
 */
DECLINLINE(PRTMEMBLOCK) rtmemBlockGet(void *pv)
{
    rtmemBlockLock();
    PRTMEMBLOCK pBlock = (PRTMEMBLOCK)RTAvlPVGet(&g_BlocksTree, pv);
    rtmemBlockUnlock();
    return pBlock;
}
248
249/**
250 * Dumps one allocation.
251 */
252static DECLCALLBACK(int) RTMemDumpOne(PAVLPVNODECORE pNode, void *pvUser)
253{
254 PRTMEMBLOCK pBlock = (PRTMEMBLOCK)pNode;
255 fprintf(stderr, "%p %08lx(+%02lx) %p\n",
256 pBlock->Core.Key,
257 (unsigned long)pBlock->cbUnaligned,
258 (unsigned long)(pBlock->cbAligned - pBlock->cbUnaligned),
259 pBlock->pvCaller);
260 NOREF(pvUser);
261 return 0;
262}
263
/**
 * Dumps the allocated blocks.
 * This is something which you should call from gdb.
 */
extern "C" void RTMemDump(void);
void RTMemDump(void)
{
    fprintf(stderr, "address  size(alg)  caller\n");
    /* NOTE(review): enumerates without taking g_BlocksLock - fine from a
       debugger, racy if called from a running thread. */
    RTAvlPVDoWithAll(&g_BlocksTree, true, RTMemDumpOne, NULL);
}
274
275# ifdef RTALLOC_EFENCE_FREE_DELAYED
276
/**
 * Pushes a freed block onto the head of the delayed-free list.
 *
 * The caller has already protected the user area; the size of the whole
 * mapping (page-aligned user area + fence) is added to g_cbBlocksDelay so
 * rtmemBlockDelayRemove() can enforce the byte budget.
 *
 * @param   pBlock      Tracker block of the freed allocation.  Its AVL core
 *                      left/right pointers are reused as list links.
 */
DECLINLINE(void) rtmemBlockDelayInsert(PRTMEMBLOCK pBlock)
{
    size_t const cbFence = RTALLOC_EFENCE_SIZE_FACTOR * RTSystemGetPageSize();
    size_t const cbBlock = RTSystemPageAlignSize(pBlock->cbAligned) + cbFence;
    pBlock->Core.pRight = NULL;
    pBlock->Core.pLeft = NULL;
    rtmemBlockLock();
    if (g_pBlocksDelayHead)
    {
        g_pBlocksDelayHead->Core.pLeft = (PAVLPVNODECORE)pBlock;
        pBlock->Core.pRight = (PAVLPVNODECORE)g_pBlocksDelayHead;
        g_pBlocksDelayHead = pBlock;
    }
    else
    {
        /* Empty list: the new block is both head and tail. */
        g_pBlocksDelayTail = pBlock;
        g_pBlocksDelayHead = pBlock;
    }
    g_cbBlocksDelay += cbBlock;
    rtmemBlockUnlock();
}
301
/**
 * Pops the oldest delayed block once the list exceeds its byte budget.
 *
 * @returns The tail (oldest) block when g_cbBlocksDelay is above
 *          RTALLOC_EFENCE_FREE_DELAYED, otherwise NULL.
 */
DECLINLINE(PRTMEMBLOCK) rtmemBlockDelayRemove(void)
{
    PRTMEMBLOCK pBlock = NULL;
    rtmemBlockLock();
    if (g_cbBlocksDelay > RTALLOC_EFENCE_FREE_DELAYED)
    {
        pBlock = g_pBlocksDelayTail;
        if (pBlock)
        {
            /* Unlink from the tail; pLeft points towards the head. */
            g_pBlocksDelayTail = (PRTMEMBLOCK)pBlock->Core.pLeft;
            if (pBlock->Core.pLeft)
                pBlock->Core.pLeft->pRight = NULL;
            else
                g_pBlocksDelayHead = NULL;

            size_t const cbFence = RTALLOC_EFENCE_SIZE_FACTOR * RTSystemGetPageSize();
            g_cbBlocksDelay -= RTSystemPageAlignSize(pBlock->cbAligned) + cbFence;
        }
    }
    rtmemBlockUnlock();
    return pBlock;
}
327
328
329/**
330 * Dumps the freed blocks.
331 * This is something which you should call from gdb.
332 */
333extern "C" void RTMemDumpFreed(void);
334void RTMemDumpFreed(void)
335{
336 fprintf(stderr, "address size(alg) caller\n");
337 for (PRTMEMBLOCK pCur = g_pBlocksDelayHead; pCur; pCur = (PRTMEMBLOCK)pCur->Core.pRight)
338 RTMemDumpOne(&pCur->Core, NULL);
339
340}
341
342# endif /* RTALLOC_EFENCE_FREE_DELAYED */
343
344#endif /* RTALLOC_EFENCE_TRACE */
345
346
347#if defined(RTALLOC_REPLACE_MALLOC) && defined(RTALLOC_EFENCE_TRACE)
348/*
349 *
350 * Replacing malloc, calloc, realloc, & free.
351 *
352 */
353
/** Replacement for malloc.
 * Routes the request through the efenced heap, falling back on the original
 * malloc if that fails (e.g. during early init or address-space exhaustion). */
static void *rtMemReplacementMalloc(size_t cb)
{
    size_t cbAligned = RTMEM_REPLACMENT_ALIGN(cb);
    void *pv = rtR3MemAlloc("r-malloc", RTMEMTYPE_RTMEMALLOC, cb, cbAligned, "heap", ASMReturnAddress(), RT_SRC_POS);
    if (!pv)
        pv = g_pfnOrgMalloc(cb);
    return pv;
}
363
364/** Replacement for calloc. */
365static void *rtMemReplacementCalloc(size_t cbItem, size_t cItems)
366{
367 size_t cb = cbItem * cItems;
368 size_t cbAligned = RTMEM_REPLACMENT_ALIGN(cb);
369 void *pv = rtR3MemAlloc("r-calloc", RTMEMTYPE_RTMEMALLOCZ, cb, cbAligned, "heap", ASMReturnAddress(), RT_SRC_POS);
370 if (!pv)
371 pv = g_pfnOrgCalloc(cbItem, cItems);
372 return pv;
373}
374
/** Replacement for realloc.
 * Blocks we track are reallocated on the efenced heap; anything else (e.g.
 * allocated before the replacement was installed) goes to the original
 * realloc.  NULL input degenerates to malloc, per realloc semantics. */
static void *rtMemReplacementRealloc(void *pvOld, size_t cbNew)
{
    if (pvOld)
    {
        /* We're not strict about where the memory was allocated. */
        PRTMEMBLOCK pBlock = rtmemBlockGet(pvOld);
        if (pBlock)
        {
            size_t cbAligned = RTMEM_REPLACMENT_ALIGN(cbNew);
            return rtR3MemRealloc("r-realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbAligned, "heap", ASMReturnAddress(), RT_SRC_POS);
        }
        return g_pfnOrgRealloc(pvOld, cbNew);
    }
    return rtMemReplacementMalloc(cbNew);
}
391
/** Replacement for free().
 * Tracked blocks are released via the efenced heap; unknown pointers are
 * handed to the original free().  free(NULL) is a no-op. */
static void rtMemReplacementFree(void *pv)
{
    if (pv)
    {
        /* We're not strict about where the memory was allocated. */
        PRTMEMBLOCK pBlock = rtmemBlockGet(pv);
        if (pBlock)
            rtR3MemFree("r-free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), RT_SRC_POS);
        else
            g_pfnOrgFree(pv);
    }
}
405
# ifdef RT_OS_DARWIN
/** Replacement for malloc_size() (darwin).
 * @returns The usable size of the allocation; 0 for NULL. */
static size_t rtMemReplacementMallocSize(void *pv)
{
    if (!pv)
        return 0;
    /* We're not strict about where the memory was allocated. */
    PRTMEMBLOCK pBlock = rtmemBlockGet(pv);
    return pBlock ? pBlock->cbUnaligned : g_pfnOrgMallocSize(pv);
}
# endif
425
426
/**
 * Patches malloc, calloc, realloc and free (plus malloc_size on darwin) so
 * that they route through the efenced heap.
 *
 * For each API a small executable "jump back" thunk is generated: the first
 * instructions of the original function are copied out, followed by a jump to
 * the remainder of the original.  The start of the original is then
 * overwritten with a jump to our replacement.  The g_pfnOrg* pointers end up
 * pointing at the thunks, so the replacements can still reach the real heap.
 *
 * Set IPRT_DONT_REPLACE_MALLOC in the environment to skip the patching.
 */
static void rtMemReplaceMallocAndFriends(void)
{
    struct
    {
        const char *pszName;
        PFNRT       pfnReplacement;
        PFNRT       pfnOrg;
        PFNRT      *ppfnJumpBack;
    } aApis[] =
    {
        { "free",    (PFNRT)rtMemReplacementFree,    (PFNRT)free,    (PFNRT *)&g_pfnOrgFree },
        { "realloc", (PFNRT)rtMemReplacementRealloc, (PFNRT)realloc, (PFNRT *)&g_pfnOrgRealloc },
        { "calloc",  (PFNRT)rtMemReplacementCalloc,  (PFNRT)calloc,  (PFNRT *)&g_pfnOrgCalloc },
        { "malloc",  (PFNRT)rtMemReplacementMalloc,  (PFNRT)malloc,  (PFNRT *)&g_pfnOrgMalloc },
#ifdef RT_OS_DARWIN
        { "malloc_size", (PFNRT)rtMemReplacementMallocSize, (PFNRT)malloc_size, (PFNRT *)&g_pfnOrgMallocSize },
#endif
    };

    /*
     * Initialize the jump backs to avoid recursivly entering this function.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
        *aApis[i].ppfnJumpBack = aApis[i].pfnOrg;

    /*
     * Give the user an option to skip replacing malloc.
     */
    if (getenv("IPRT_DONT_REPLACE_MALLOC"))
        return;

    /*
     * Allocate a page for jump back code (we leak it).
     */
    size_t const cbPage = RTSystemGetPageSize();
    uint8_t *pbExecPage = (uint8_t *)RTMemPageAlloc(cbPage); AssertFatal(pbExecPage);
    int rc = RTMemProtect(pbExecPage, cbPage, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); AssertFatalRC(rc);

    /*
     * Do the ground work.
     */
    uint8_t *pb = pbExecPage;
    for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
    {
        /* Resolve it. */
        PFNRT pfnOrg = (PFNRT)(uintptr_t)dlsym(RTLD_DEFAULT, aApis[i].pszName);
        if (pfnOrg)
            aApis[i].pfnOrg = pfnOrg;
        else
            pfnOrg = aApis[i].pfnOrg;

        /* Figure what we can replace and how much to duplicate in the jump back code. */
# ifdef RT_ARCH_AMD64
        uint32_t cbNeeded = 12; /* mov rax, imm64 + jmp rax */
        DISCPUMODE const enmCpuMode = DISCPUMODE_64BIT;
# elif defined(RT_ARCH_X86)
        uint32_t const cbNeeded = 5; /* jmp rel32 */
        DISCPUMODE const enmCpuMode = DISCPUMODE_32BIT;
# else
#  error "Port me"
# endif
        /* Disassemble forward until we have cbNeeded bytes of relocatable
           instructions (no control flow, no RIP-relative addressing). */
        uint32_t offJmpBack = 0;
        uint32_t cbCopy = 0;
        while (offJmpBack < cbNeeded)
        {
            DISCPUSTATE Dis;
            uint32_t cbInstr = 1;
            rc = DISInstr((void *)((uintptr_t)pfnOrg + offJmpBack), enmCpuMode, &Dis, &cbInstr); AssertFatalRC(rc);
            AssertFatal(!(Dis.pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW)));
# ifdef RT_ARCH_AMD64
#  ifdef RT_OS_DARWIN
            /* Kludge for: cmp [malloc_def_zone_state], 1; jg 2; call _malloc_initialize; 2: */
            if (   Dis.ModRM.Bits.Mod == 0
                && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */
                && (Dis.Param2.fUse & (DISUSE_IMMEDIATE16_SX8 | DISUSE_IMMEDIATE32_SX8 | DISUSE_IMMEDIATE64_SX8))
                && Dis.Param2.uValue == 1
                && Dis.pCurInstr->uOpcode == OP_CMP)
            {
                cbCopy = offJmpBack;

                offJmpBack += cbInstr;
                rc = DISInstr((void *)((uintptr_t)pfnOrg + offJmpBack), enmCpuMode, &Dis, &cbInstr); AssertFatalRC(rc);
                if (   Dis.pCurInstr->uOpcode == OP_JNBE
                    && Dis.Param1.uDisp.i8 == 5)
                {
                    /* Skip the jg + the 5 byte call it jumps over. */
                    offJmpBack += cbInstr + 5;
                    AssertFatal(offJmpBack >= cbNeeded);
                    break;
                }
            }
#  endif
            AssertFatal(!(Dis.ModRM.Bits.Mod == 0 && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */));
# endif
            offJmpBack += cbInstr;
        }
        if (!cbCopy)
            cbCopy = offJmpBack;

        /* Assemble the jump back. */
        memcpy(pb, (void *)(uintptr_t)pfnOrg, cbCopy);
        uint32_t off = cbCopy;
# ifdef RT_ARCH_AMD64
        pb[off++] = 0xff; /* jmp qword [$+8 wrt RIP] */
        pb[off++] = 0x25;
        *(uint32_t *)&pb[off] = 0;
        off += 4;
        *(uint64_t *)&pb[off] = (uintptr_t)pfnOrg + offJmpBack;
        off += 8;
        off = RT_ALIGN_32(off, 16);
# elif defined(RT_ARCH_X86)
        pb[off++] = 0xe9; /* jmp rel32 */
        *(uint32_t *)&pb[off] = (uintptr_t)pfnOrg + offJmpBack - (uintptr_t)&pb[4];
        off += 4;
        off = RT_ALIGN_32(off, 8);
# else
#  error "Port me"
# endif
        *aApis[i].ppfnJumpBack = (PFNRT)(uintptr_t)pb;
        pb += off;
    }

    /*
     * Modify the APIs.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
    {
        pb = (uint8_t *)(uintptr_t)aApis[i].pfnOrg;
        rc = RTMemProtect(pb, 16, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); AssertFatalRC(rc);

# ifdef RT_ARCH_AMD64
        /* Assemble the LdrLoadDll patch. */
        *pb++ = 0x48; /* mov rax, qword */
        *pb++ = 0xb8;
        *(uint64_t *)pb = (uintptr_t)aApis[i].pfnReplacement;
        pb += 8;
        *pb++ = 0xff; /* jmp rax */
        *pb++ = 0xe0;
# elif defined(RT_ARCH_X86)
        *pb++ = 0xe9; /* jmp rel32 */
        *(uint32_t *)pb = (uintptr_t)aApis[i].pfnReplacement - (uintptr_t)&pb[4];
# else
#  error "Port me"
# endif
    }
}
572
573#endif /* RTALLOC_REPLACE_MALLOC && RTALLOC_EFENCE_TRACE */
574
575
/**
 * Internal allocator.
 *
 * Allocates page-granular memory with an inaccessible "electric fence" region
 * placed after (default) or before (RTALLOC_EFENCE_IN_FRONT) the user block,
 * so overruns fault immediately.
 *
 * @returns Pointer to the user block on success, NULL on failure.
 * @param   pszOp       Operation name, used in complaints and logging.
 * @param   enmType     RTMEMTYPE_RTMEMALLOCZ requests a zeroed block.
 * @param   cbUnaligned The exact number of bytes requested.
 * @param   cbAligned   The request with alignment padding; may be adjusted.
 * @param   pszTag      Allocation tag string (not copied).
 * @param   pvCaller    Return address of the external caller.
 * @param   RT_SRC_POS_DECL  Source position for the tracker block.
 */
RTDECL(void *) rtR3MemAlloc(const char *pszOp, RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
                            const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
{
    /*
     * Sanity.
     */
    size_t const cbFence = RTALLOC_EFENCE_SIZE_FACTOR * RTSystemGetPageSize();
    size_t const cbPage = RTSystemGetPageSize();
    if (RTALLOC_EFENCE_SIZE_FACTOR <= 0)
    {
        rtmemComplain(pszOp, "Invalid E-fence size! %#x\n", RTALLOC_EFENCE_SIZE_FACTOR);
        return NULL;
    }
    if (!cbUnaligned)
    {
#if 0
        rtmemComplain(pszOp, "Request of ZERO bytes allocation!\n");
        return NULL;
#else
        /* Treat zero-byte requests as one byte so the fence still works. */
        cbAligned = cbUnaligned = 1;
#endif
    }

#ifndef RTALLOC_EFENCE_IN_FRONT
    /* Alignment decreases fence accuracy, but this is at least partially
     * counteracted by filling and checking the alignment padding. When the
     * fence is in front then no extra alignment is needed. */
    cbAligned = RT_ALIGN_Z(cbAligned, RTALLOC_EFENCE_ALIGNMENT);
#endif

#ifdef RTALLOC_EFENCE_TRACE
    /*
     * Allocate the trace block.
     */
    PRTMEMBLOCK pBlock = rtmemBlockCreate(enmType, cbUnaligned, cbAligned, pszTag, pvCaller, RT_SRC_POS_ARGS);
    if (!pBlock)
    {
        rtmemComplain(pszOp, "Failed to allocate trace block!\n");
        return NULL;
    }
#endif

    /*
     * Allocate a block with page alignment space + the size of the E-fence.
     */
    size_t cbBlock = RT_ALIGN_Z(cbAligned, cbPage) + cbFence;
    void *pvBlock = RTMemPageAlloc(cbBlock);
    if (pvBlock)
    {
        /*
         * Calc the start of the fence and the user block
         * and then change the page protection of the fence.
         */
#ifdef RTALLOC_EFENCE_IN_FRONT
        void *pvEFence = pvBlock;
        void *pv = (char *)pvEFence + cbFence;
# ifdef RTALLOC_EFENCE_NOMAN_FILLER
        memset((char *)pv + cbUnaligned, RTALLOC_EFENCE_NOMAN_FILLER, cbBlock - cbFence - cbUnaligned);
# endif
#else
        /* Fence at the end; user block ends flush against it so overruns fault. */
        void *pvEFence = (char *)pvBlock + (cbBlock - cbFence);
        void *pv = (char *)pvEFence - cbAligned;
# ifdef RTALLOC_EFENCE_NOMAN_FILLER
        memset(pvBlock, RTALLOC_EFENCE_NOMAN_FILLER, cbBlock - cbFence - cbAligned);
        memset((char *)pv + cbUnaligned, RTALLOC_EFENCE_NOMAN_FILLER, cbAligned - cbUnaligned);
# endif
#endif

#ifdef RTALLOC_EFENCE_FENCE_FILLER
        memset(pvEFence, RTALLOC_EFENCE_FENCE_FILLER, cbFence);
#endif
        int rc = RTMemProtect(pvEFence, cbFence, RTMEM_PROT_NONE);
        if (!rc)
        {
#ifdef RTALLOC_EFENCE_TRACE
            rtmemBlockInsert(pBlock, pv);
#endif
            if (enmType == RTMEMTYPE_RTMEMALLOCZ)
                memset(pv, 0, cbUnaligned);
#ifdef RTALLOC_EFENCE_FILLER
            else
                memset(pv, RTALLOC_EFENCE_FILLER, cbUnaligned);
#endif

            rtmemLog(pszOp, "returns %p (pvBlock=%p cbBlock=%#x pvEFence=%p cbUnaligned=%#x)\n", pv, pvBlock, cbBlock, pvEFence, cbUnaligned);
            return pv;
        }
        rtmemComplain(pszOp, "RTMemProtect failed, pvEFence=%p size %d, rc=%d\n", pvEFence, cbFence, rc);
        RTMemPageFree(pvBlock, cbBlock);
    }
    else
        rtmemComplain(pszOp, "Failed to allocated %lu (%lu) bytes.\n", (unsigned long)cbBlock, (unsigned long)cbUnaligned);

#ifdef RTALLOC_EFENCE_TRACE
    rtmemBlockFree(pBlock);
#endif
    return NULL;
}
677
678
/**
 * Internal free.
 *
 * With tracking enabled the block is looked up, its no-man's-land padding is
 * verified, and the memory is either parked on the delayed-free list (fully
 * protected) or unprotected and returned to the page allocator.
 *
 * @param   pszOp       Operation name, used in complaints and logging.
 * @param   enmType     RTMEMTYPE_RTMEMFREEZ additionally zeroes the user block.
 * @param   pv          User pointer to free; NULL is ignored.
 * @param   cbUser      Caller-supplied size, only used/checked for FREEZ.
 * @param   pvCaller    Return address of the external caller.
 * @param   RT_SRC_POS_DECL  Source position (unused here).
 */
RTDECL(void) rtR3MemFree(const char *pszOp, RTMEMTYPE enmType, void *pv, size_t cbUser, void *pvCaller, RT_SRC_POS_DECL)
{
    NOREF(enmType); RT_SRC_POS_NOREF();

    /*
     * Simple case.
     */
    if (!pv)
        return;

    /*
     * Check watch points.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(gapvRTMemFreeWatch); i++)
        if (gapvRTMemFreeWatch[i] == pv)
            RTAssertDoPanic();

    size_t cbPage = RTSystemGetPageSize();
#ifdef RTALLOC_EFENCE_TRACE
    /*
     * Find the block.
     */
    PRTMEMBLOCK pBlock = rtmemBlockRemove(pv);
    if (pBlock)
    {
        if (gfRTMemFreeLog)
            RTLogPrintf("RTMem %s: pv=%p pvCaller=%p cbUnaligned=%#x\n", pszOp, pv, pvCaller, pBlock->cbUnaligned);

# ifdef RTALLOC_EFENCE_NOMAN_FILLER
        /*
         * Check whether the no man's land is untouched.
         */
#  ifdef RTALLOC_EFENCE_IN_FRONT
        void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
                                                 RT_ALIGN_Z(pBlock->cbAligned, cbPage) - pBlock->cbUnaligned,
                                                 RTALLOC_EFENCE_NOMAN_FILLER);
#  else
        /* Alignment must match allocation alignment in rtMemAlloc(). */
        void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
                                                 pBlock->cbAligned - pBlock->cbUnaligned,
                                                 RTALLOC_EFENCE_NOMAN_FILLER);
        if (pvWrong)
            RTAssertDoPanic();
        /* Also check the slack at the start of the first page. */
        pvWrong = ASMMemFirstMismatchingU8((void *)((uintptr_t)pv & ~RTSystemGetPageOffsetMask()),
                                           RT_ALIGN_Z(pBlock->cbAligned, cbPage) - pBlock->cbAligned,
                                           RTALLOC_EFENCE_NOMAN_FILLER);
#  endif
        if (pvWrong)
            RTAssertDoPanic();
# endif

        /*
         * Fill the user part of the block.
         */
        AssertMsg(enmType != RTMEMTYPE_RTMEMFREEZ || cbUser == pBlock->cbUnaligned,
                  ("cbUser=%#zx cbUnaligned=%#zx\n", cbUser, pBlock->cbUnaligned));
        RT_NOREF(cbUser);
        if (enmType == RTMEMTYPE_RTMEMFREEZ)
            RT_BZERO(pv, pBlock->cbUnaligned);
# ifdef RTALLOC_EFENCE_FREE_FILL
        else
            memset(pv, RTALLOC_EFENCE_FREE_FILL, pBlock->cbUnaligned);
# endif

        size_t const cbFence = RTALLOC_EFENCE_SIZE_FACTOR * RTSystemGetPageSize();
# if defined(RTALLOC_EFENCE_FREE_DELAYED) && RTALLOC_EFENCE_FREE_DELAYED > 0
        /*
         * We're doing delayed freeing.
         * That means we'll expand the E-fence to cover the entire block.
         */
        int rc = RTMemProtect(pv, pBlock->cbAligned, RTMEM_PROT_NONE);
        if (RT_SUCCESS(rc))
        {
            /*
             * Insert it into the free list and process pending frees.
             */
            rtmemBlockDelayInsert(pBlock);
            while ((pBlock = rtmemBlockDelayRemove()) != NULL)
            {
                pv = pBlock->Core.Key;
# ifdef RTALLOC_EFENCE_IN_FRONT
                void *pvBlock = (char *)pv - cbFence;
# else
                void *pvBlock = (void *)((uintptr_t)pv & ~RTSystemGetPageOffsetMask());
# endif
                size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, cbPage) + cbFence;
                rc = RTMemProtect(pvBlock, cbBlock, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                if (RT_SUCCESS(rc))
                    RTMemPageFree(pvBlock, RT_ALIGN_Z(pBlock->cbAligned, cbPage) + cbFence);
                else
                    rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvBlock, cbBlock, rc);
                rtmemBlockFree(pBlock);
            }
        }
        else
            /* NOTE(review): pBlock (a pointer) is passed for the %d 'cb' field;
               looks like it should be pBlock->cbAligned - confirm before changing. */
            rtmemComplain(pszOp, "Failed to expand the efence of pv=%p cb=%d, rc=%d.\n", pv, pBlock, rc);

# else /* !RTALLOC_EFENCE_FREE_DELAYED */

        /*
         * Turn of the E-fence and free it.
         */
# ifdef RTALLOC_EFENCE_IN_FRONT
        void *pvBlock = (char *)pv - cbFence;
        void *pvEFence = pvBlock;
# else
        void *pvBlock = (void *)((uintptr_t)pv & ~RTSystemGetPageOffsetMask());
        /* NOTE(review): 'cb' is not a member referenced anywhere else in this
           file (cbUnaligned/cbAligned are); this branch is only compiled when
           delayed freeing is disabled - verify it still builds. */
        void *pvEFence = (char *)pv + pBlock->cb;
# endif
        int rc = RTMemProtect(pvEFence, cbFence, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        if (RT_SUCCESS(rc))
            RTMemPageFree(pvBlock, RT_ALIGN_Z(pBlock->cbAligned, cbPage) + cbFence);
        else
            rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvEFence, cbFence, rc);
        rtmemBlockFree(pBlock);

# endif /* !RTALLOC_EFENCE_FREE_DELAYED */
    }
    else
        rtmemComplain(pszOp, "pv=%p not found! Incorrect free!\n", pv);

#else /* !RTALLOC_EFENCE_TRACE */

    /*
     * We have no size tracking, so we're not doing any freeing because
     * we cannot if the E-fence is after the block.
     * Let's just expand the E-fence to the first page of the user bit
     * since we know that it's around.
     */
    if (enmType == RTMEMTYPE_RTMEMFREEZ)
        RT_BZERO(pv, cbUser);
    int rc = RTMemProtect((void *)((uintptr_t)pv & ~RTSystemGetPageOffsetMask()), cbPage, RTMEM_PROT_NONE);
    if (RT_FAILURE(rc))
        rtmemComplain(pszOp, "RTMemProtect(%p, cbPage, RTMEM_PROT_NONE) -> %d\n", (void *)((uintptr_t)pv & ~RTSystemGetPageOffsetMask()), rc);
#endif /* !RTALLOC_EFENCE_TRACE */
}
818
819
/**
 * Internal realloc.
 *
 * Implemented as allocate-copy-free; in-place growth is impossible because
 * the fence sits flush against the user block.  NULL input degenerates to an
 * allocation, zero size to a free (standard realloc semantics).
 *
 * @returns New user pointer on success, NULL on failure or when cbNew is 0.
 * @param   pszOp       Operation name, used in complaints and logging.
 * @param   enmType     The allocation type for the new block.
 * @param   pvOld       Block to reallocate (must be tracked, or NULL).
 * @param   cbNew       The new size.
 * @param   pszTag      Allocation tag string (not copied).
 * @param   pvCaller    Return address of the external caller.
 * @param   RT_SRC_POS_DECL  Source position for the new tracker block.
 */
RTDECL(void *) rtR3MemRealloc(const char *pszOp, RTMEMTYPE enmType, void *pvOld, size_t cbNew,
                              const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
{
    /*
     * Allocate new and copy.
     */
    if (!pvOld)
        return rtR3MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
    if (!cbNew)
    {
        rtR3MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, 0, pvCaller, RT_SRC_POS_ARGS);
        return NULL;
    }

#ifdef RTALLOC_EFENCE_TRACE

    /*
     * Get the block, allocate the new, copy the data, free the old one.
     */
    PRTMEMBLOCK pBlock = rtmemBlockGet(pvOld);
    if (pBlock)
    {
        void *pvRet = rtR3MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
        if (pvRet)
        {
            memcpy(pvRet, pvOld, RT_MIN(cbNew, pBlock->cbUnaligned));
            rtR3MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, 0, pvCaller, RT_SRC_POS_ARGS);
        }
        return pvRet;
    }
    else
        rtmemComplain(pszOp, "pvOld=%p was not found!\n", pvOld);
    return NULL;

#else /* !RTALLOC_EFENCE_TRACE */

    rtmemComplain(pszOp, "Not supported if RTALLOC_EFENCE_TRACE isn't defined!\n");
    return NULL;

#endif /* !RTALLOC_EFENCE_TRACE */
}
864
865
866
867
/** Allocates cb bytes of efenced temporary memory. */
RTDECL(void *) RTMemEfTmpAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    return rtR3MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
}
872
873
874RTDECL(void *) RTMemEfTmpAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
875{
876 return rtR3MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
877}
878
879
/** Frees memory allocated with RTMemEfTmpAlloc() / RTMemEfTmpAllocZ(). */
RTDECL(void) RTMemEfTmpFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    if (pv)
        rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


/** Zeroes (cb bytes) and frees memory from the temporary allocators. */
RTDECL(void) RTMemEfTmpFreeZ(void *pv, size_t cb, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    if (pv)
        rtR3MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


/** Allocates cb bytes of efenced memory. */
RTDECL(void *) RTMemEfAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


/** Allocates cb bytes of zero-initialized efenced memory. */
RTDECL(void *) RTMemEfAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


/** Allocates cbUnaligned bytes, padding the aligned size like the default
 *  heap would (16 bytes for requests >= 16, pointer size otherwise). */
RTDECL(void *) RTMemEfAllocVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    size_t cbAligned;
    if (cbUnaligned >= 16)
        cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
    else
        cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
    return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


/** Zero-initializing variant of RTMemEfAllocVar(). */
RTDECL(void *) RTMemEfAllocZVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    size_t cbAligned;
    if (cbUnaligned >= 16)
        cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
    else
        cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
    return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


/** Reallocates pvOld to cbNew bytes (alloc on NULL, free on zero size). */
RTDECL(void *) RTMemEfRealloc(void *pvOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    return rtR3MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


/** Like RTMemEfRealloc(), but zeroes the grown tail (cbOld..cbNew). */
RTDECL(void *) RTMemEfReallocZ(void *pvOld, size_t cbOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    void *pvDst = rtR3MemRealloc("ReallocZ", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
    if (pvDst && cbNew > cbOld)
        memset((uint8_t *)pvDst + cbOld, 0, cbNew - cbOld);
    return pvDst;
}


/** Frees efenced memory; NULL is ignored. */
RTDECL(void) RTMemEfFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    if (pv)
        rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


/** Zeroes (cb bytes) and frees efenced memory; NULL is ignored. */
RTDECL(void) RTMemEfFreeZ(void *pv, size_t cb, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    if (pv)
        rtR3MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


/** Duplicates cb bytes at pvSrc into a new efenced allocation. */
RTDECL(void *) RTMemEfDup(const void *pvSrc, size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    void *pvDst = RTMemEfAlloc(cb, pszTag, RT_SRC_POS_ARGS);
    if (pvDst)
        memcpy(pvDst, pvSrc, cb);
    return pvDst;
}


/** Duplicates cbSrc bytes at pvSrc followed by cbExtra zero bytes into a new
 *  efenced allocation. */
RTDECL(void *) RTMemEfDupEx(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, RT_SRC_POS_ARGS);
    if (pvDst)
    {
        memcpy(pvDst, pvSrc, cbSrc);
        memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
    }
    return pvDst;
}
976
977
978
979
980/*
981 *
982 * The NP (no position) versions.
983 *
984 */
985
986
987
/** RTMemEfTmpAlloc() without source position. */
RTDECL(void *) RTMemEfTmpAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtR3MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
}


/** RTMemEfTmpAllocZ() without source position. */
RTDECL(void *) RTMemEfTmpAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtR3MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
}


/** RTMemEfTmpFree() without source position. */
RTDECL(void) RTMemEfTmpFreeNP(void *pv) RT_NO_THROW_DEF
{
    if (pv)
        rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), NULL, 0, NULL);
}


/** RTMemEfTmpFreeZ() without source position. */
RTDECL(void) RTMemEfTmpFreeZNP(void *pv, size_t cb) RT_NO_THROW_DEF
{
    if (pv)
        rtR3MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), NULL, 0, NULL);
}


/** RTMemEfAlloc() without source position. */
RTDECL(void *) RTMemEfAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
}


/** RTMemEfAllocZ() without source position. */
RTDECL(void *) RTMemEfAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
}


/** RTMemEfAllocVar() without source position. */
RTDECL(void *) RTMemEfAllocVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
{
    size_t cbAligned;
    if (cbUnaligned >= 16)
        cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
    else
        cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
    return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
}


/** RTMemEfAllocZVar() without source position. */
RTDECL(void *) RTMemEfAllocZVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
{
    size_t cbAligned;
    if (cbUnaligned >= 16)
        cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
    else
        cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
    return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
}


/** RTMemEfRealloc() without source position. */
RTDECL(void *) RTMemEfReallocNP(void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
{
    return rtR3MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
}


/** RTMemEfReallocZ() without source position. */
RTDECL(void *) RTMemEfReallocZNP(void *pvOld, size_t cbOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
{
    void *pvDst = rtR3MemRealloc("ReallocZ", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
    if (pvDst && cbNew > cbOld)
        memset((uint8_t *)pvDst + cbOld, 0, cbNew - cbOld);
    return pvDst;
}


/** RTMemEfFree() without source position. */
RTDECL(void) RTMemEfFreeNP(void *pv) RT_NO_THROW_DEF
{
    if (pv)
        rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), NULL, 0, NULL);
}
1068
1069
1070RTDECL(void) RTMemEfFreeZNP(void *pv, size_t cb) RT_NO_THROW_DEF
1071{
1072 if (pv)
1073 rtR3MemFree("Free", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), NULL, 0, NULL);
1074}
1075
1076
/** RTMemEfDup() without source position. */
RTDECL(void *) RTMemEfDupNP(const void *pvSrc, size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    void *pvDst = RTMemEfAlloc(cb, pszTag, NULL, 0, NULL);
    if (pvDst)
        memcpy(pvDst, pvSrc, cb);
    return pvDst;
}


/** RTMemEfDupEx() without source position. */
RTDECL(void *) RTMemEfDupExNP(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag) RT_NO_THROW_DEF
{
    void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, NULL, 0, NULL);
    if (pvDst)
    {
        memcpy(pvDst, pvSrc, cbSrc);
        memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
    }
    return pvDst;
}
1096
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use