VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/memsafer-r3.cpp

Last change on this file was 100442, checked in by vboxsync, 11 months ago

IPRT,OpenSSL: Support ECDSA for verification purposes when IPRT links with OpenSSL. This required quite a bit of cleanup, so not entirely risk-free. bugref:10479 ticketref:21621

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.9 KB
/* $Id: memsafer-r3.cpp 100442 2023-07-08 11:10:51Z vboxsync $ */
/** @file
 * IPRT - Memory Allocate for Sensitive Data, generic heap-based implementation.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */

/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "internal/iprt.h"
#include <iprt/memsafer.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/avl.h>
#include <iprt/critsect.h>
#include <iprt/err.h>
#include <iprt/mem.h>
#include <iprt/once.h>
#include <iprt/rand.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include <iprt/system.h>
#ifdef IN_SUP_R3
# include <VBox/sup.h>
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Allocation size alignment (power of two). */
#define RTMEMSAFER_ALIGN    16

/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * Allocators.
 */
typedef enum RTMEMSAFERALLOCATOR
{
    /** Invalid method. */
    RTMEMSAFERALLOCATOR_INVALID = 0,
    /** RTMemPageAlloc. */
    RTMEMSAFERALLOCATOR_RTMEMPAGE,
    /** SUPR3PageAllocEx. */
    RTMEMSAFERALLOCATOR_SUPR3
} RTMEMSAFERALLOCATOR;

/**
 * Tracking node (lives on normal heap).
 */
typedef struct RTMEMSAFERNODE
{
    /** Node core.
     * The core key is a scrambled pointer to the user memory. */
    AVLPVNODECORE       Core;
    /** The allocation flags. */
    uint32_t            fFlags;
    /** The offset into the allocation of the user memory. */
    uint32_t            offUser;
    /** The requested allocation size. */
    size_t              cbUser;
    /** The allocation size in pages, this includes the two guard pages. */
    uint32_t            cPages;
    /** The allocator used for this node. */
    RTMEMSAFERALLOCATOR enmAllocator;
    /** XOR scrambler value for the memory. */
    uintptr_t           uScramblerXor;
} RTMEMSAFERNODE;
/** Pointer to an allocation tracking node. */
typedef RTMEMSAFERNODE *PRTMEMSAFERNODE;

/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Init once structure for this module. */
static RTONCE       g_MemSaferOnce = RTONCE_INITIALIZER;
/** Critical section protecting the allocation tree. */
static RTCRITSECTRW g_MemSaferCritSect;
/** Tree of allocation nodes. */
static AVLPVTREE    g_pMemSaferTree;
/** XOR scrambler value for pointers. */
static uintptr_t    g_uMemSaferPtrScramblerXor;
/** Pointer rotate shift count. */
static uintptr_t    g_cMemSaferPtrScramblerRotate;

/**
 * @callback_method_impl{FNRTONCE, Inits globals.}
 */
static DECLCALLBACK(int32_t) rtMemSaferOnceInit(void *pvUserIgnore)
{
    RT_NOREF_PV(pvUserIgnore);

    g_uMemSaferPtrScramblerXor    = (uintptr_t)RTRandU64();
    g_cMemSaferPtrScramblerRotate = RTRandU32Ex(0, ARCH_BITS - 1);
    return RTCritSectRwInit(&g_MemSaferCritSect);
}


/**
 * @callback_method_impl{PFNRTONCECLEANUP, Cleans up globals.}
 */
static DECLCALLBACK(void) rtMemSaferOnceTerm(void *pvUser, bool fLazyCleanUpOk)
{
    RT_NOREF_PV(pvUser);

    if (!fLazyCleanUpOk)
    {
        RTCritSectRwDelete(&g_MemSaferCritSect);
        Assert(!g_pMemSaferTree);
    }
}

DECLINLINE(void *) rtMemSaferScramblePointer(void *pvUser)
{
    uintptr_t uPtr = (uintptr_t)pvUser;
    uPtr ^= g_uMemSaferPtrScramblerXor;
#if ARCH_BITS == 64
    uPtr = ASMRotateRightU64(uPtr, g_cMemSaferPtrScramblerRotate);
#elif ARCH_BITS == 32
    uPtr = ASMRotateRightU32(uPtr, g_cMemSaferPtrScramblerRotate);
#else
# error "Unsupported/missing ARCH_BITS."
#endif
    return (void *)uPtr;
}

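/*
 * Aside (illustrative sketch): the XOR + rotate above is cheap and, crucially,
 * deterministic for the lifetime of the process, which is what lets it double
 * as the AVL key transform.  Scrambling the same user pointer always yields
 * the same key, so the tree never has to store the raw pointer:
 *
 * @code
 *     void *pvKey1 = rtMemSaferScramblePointer(pvUser);
 *     void *pvKey2 = rtMemSaferScramblePointer(pvUser);
 *     Assert(pvKey1 == pvKey2); // same input, same key, for the whole process
 * @endcode
 */
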
/**
 * Inserts a tracking node into the tree.
 *
 * @param   pThis       The allocation tracking node to insert.
 */
static void rtMemSaferNodeInsert(PRTMEMSAFERNODE pThis)
{
    RTCritSectRwEnterExcl(&g_MemSaferCritSect);
    pThis->Core.Key = rtMemSaferScramblePointer(pThis->Core.Key);
    bool fRc = RTAvlPVInsert(&g_pMemSaferTree, &pThis->Core);
    RTCritSectRwLeaveExcl(&g_MemSaferCritSect);
    Assert(fRc); NOREF(fRc);
}


/**
 * Finds a tracking node in the tree.
 *
 * @returns The allocation tracking node for @a pvUser. NULL if not found.
 * @param   pvUser      The user pointer to the allocation.
 */
static PRTMEMSAFERNODE rtMemSaferNodeLookup(void *pvUser)
{
    void *pvKey = rtMemSaferScramblePointer(pvUser);
    RTCritSectRwEnterShared(&g_MemSaferCritSect);
    PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTAvlPVGet(&g_pMemSaferTree, pvKey);
    RTCritSectRwLeaveShared(&g_MemSaferCritSect);
    return pThis;
}


/**
 * Removes a tracking node from the tree.
 *
 * @returns The allocation tracking node for @a pvUser. NULL if not found.
 * @param   pvUser      The user pointer to the allocation.
 */
static PRTMEMSAFERNODE rtMemSaferNodeRemove(void *pvUser)
{
    void *pvKey = rtMemSaferScramblePointer(pvUser);
    RTCritSectRwEnterExcl(&g_MemSaferCritSect);
    PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTAvlPVRemove(&g_pMemSaferTree, pvKey);
    RTCritSectRwLeaveExcl(&g_MemSaferCritSect);
    return pThis;
}

RTDECL(int) RTMemSaferScramble(void *pv, size_t cb)
{
    PRTMEMSAFERNODE pThis = rtMemSaferNodeLookup(pv);
    AssertReturn(pThis, VERR_INVALID_POINTER);
    AssertMsgReturn(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser), VERR_INVALID_PARAMETER);

    /* The first time around we pick a new xor value. */
    if (!pThis->uScramblerXor)
        pThis->uScramblerXor = (uintptr_t)RTRandU64();

    /* Note! This isn't supposed to be safe, just less obvious. */
    uintptr_t *pu = (uintptr_t *)pv;
    cb = RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN);
    while (cb > 0)
    {
        *pu ^= pThis->uScramblerXor;
        pu++;
        cb -= sizeof(*pu);
    }

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMemSaferScramble);


RTDECL(int) RTMemSaferUnscramble(void *pv, size_t cb)
{
    PRTMEMSAFERNODE pThis = rtMemSaferNodeLookup(pv);
    AssertReturn(pThis, VERR_INVALID_POINTER);
    AssertMsgReturn(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser), VERR_INVALID_PARAMETER);

    /* Note! This isn't supposed to be safe, just less obvious. */
    uintptr_t *pu = (uintptr_t *)pv;
    cb = RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN);
    while (cb > 0)
    {
        *pu ^= pThis->uScramblerXor;
        pu++;
        cb -= sizeof(*pu);
    }

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMemSaferUnscramble);

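/*
 * Usage sketch (illustrative; pvSecret/cbSecret are hypothetical names for an
 * existing safer allocation and its exact allocated size): scrambling while a
 * secret lies dormant makes it harder to fish out of a crash dump or swap
 * file, and unscrambling restores the exact bytes because XOR with the same
 * key is its own inverse:
 *
 * @code
 *     int rc = RTMemSaferScramble(pvSecret, cbSecret);
 *     AssertRC(rc);
 *     // ... secret not needed for a while ...
 *     rc = RTMemSaferUnscramble(pvSecret, cbSecret);
 *     AssertRC(rc);
 *     // pvSecret is back in the clear here.
 * @endcode
 */
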
/**
 * Initializes the pages.
 *
 * Fills the memory with random bytes in order to make it less obvious where
 * the secret data starts and ends.  We also zero the user memory in case the
 * allocator does not do this.
 *
 * @param   pThis       The allocation tracker node.  The Core.Key member
 *                      will be set.
 * @param   pvPages     The pages to initialize.
 */
static void rtMemSaferInitializePages(PRTMEMSAFERNODE pThis, void *pvPages)
{
    uint32_t const cbPage = RTSystemGetPageSize();
    RTRandBytes(pvPages, cbPage + pThis->offUser);

    uint8_t *pbUser = (uint8_t *)pvPages + cbPage + pThis->offUser;
    pThis->Core.Key = pbUser;
    RT_BZERO(pbUser, pThis->cbUser); /* paranoia */

    RTRandBytes(pbUser + pThis->cbUser, (size_t)pThis->cPages * cbPage - cbPage - pThis->offUser - pThis->cbUser);
}

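/*
 * Resulting layout sketch (illustrative; the guard pages are made
 * inaccessible afterwards by the allocator-specific code below):
 *
 *   pvPages
 *   |
 *   v
 *   +------------+--------------+-------------------+--------------+------------+
 *   | guard page | random bytes | user data, zeroed | random bytes | guard page |
 *   +------------+--------------+-------------------+--------------+------------+
 *   |<- cbPage ->|<- offUser -->|<---- cbUser ----->|              |<- cbPage ->|
 *                               ^
 *                               Core.Key (the pointer handed to the user)
 */
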
/**
 * Allocates pages from the support driver and initializes them.
 *
 * @returns VBox status code.
 * @param   pThis   The allocator node.  Core.Key will be set on successful
 *                  return (unscrambled).
 */
static int rtMemSaferSupR3AllocPages(PRTMEMSAFERNODE pThis)
{
#ifdef IN_SUP_R3
    /*
     * Try to allocate the memory.
     */
    void *pvPages;
    int rc = SUPR3PageAllocEx(pThis->cPages, 0 /* fFlags */, &pvPages, NULL /* pR0Ptr */, NULL /* paPages */);
    if (RT_SUCCESS(rc))
    {
        rtMemSaferInitializePages(pThis, pvPages);

        /*
         * On darwin we cannot allocate pages without an R0 mapping and
         * SUPR3PageAllocEx falls back to another method which is incompatible
         * with the way SUPR3PageProtect works.  Skip changing the protection
         * of the guard pages there.
         */
# ifdef RT_OS_DARWIN
        return VINF_SUCCESS;
# else
        /*
         * Configure the guard pages.
         * SUPR3PageProtect isn't supported on all hosts; we ignore that.
         */
        uint32_t const cbPage = RTSystemGetPageSize();
        rc = SUPR3PageProtect(pvPages, NIL_RTR0PTR, 0, cbPage, RTMEM_PROT_NONE);
        if (RT_SUCCESS(rc))
        {
            rc = SUPR3PageProtect(pvPages, NIL_RTR0PTR, (pThis->cPages - 1) * cbPage, cbPage, RTMEM_PROT_NONE);
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;
            SUPR3PageProtect(pvPages, NIL_RTR0PTR, 0, cbPage, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        }
        else if (rc == VERR_NOT_SUPPORTED)
            return VINF_SUCCESS;

        /* failed. */
        int rc2 = SUPR3PageFreeEx(pvPages, pThis->cPages); AssertRC(rc2);
# endif
    }
    return rc;

#else  /* !IN_SUP_R3 */
    RT_NOREF_PV(pThis);
    return VERR_NOT_SUPPORTED;
#endif /* !IN_SUP_R3 */
}

/**
 * Allocates and initializes pages using the IPRT page allocator API.
 *
 * @returns VBox status code.
 * @param   pThis   The allocator node.  Core.Key will be set on successful
 *                  return (unscrambled).
 */
static int rtMemSaferMemAllocPages(PRTMEMSAFERNODE pThis)
{
    /*
     * Try to allocate the memory.
     */
    uint32_t const cbPage = RTSystemGetPageSize();
    int rc = VINF_SUCCESS;
    void *pvPages = RTMemPageAllocEx((size_t)pThis->cPages * cbPage,
                                     RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP | RTMEMPAGEALLOC_F_ZERO);
    if (pvPages)
    {
        rtMemSaferInitializePages(pThis, pvPages);

        /*
         * Configure the guard pages.
         */
        rc = RTMemProtect(pvPages, cbPage, RTMEM_PROT_NONE);
        if (RT_SUCCESS(rc))
        {
            rc = RTMemProtect((uint8_t *)pvPages + (size_t)(pThis->cPages - 1U) * cbPage, cbPage, RTMEM_PROT_NONE);
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;
            rc = RTMemProtect(pvPages, cbPage, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        }

        /* failed. */
        RTMemPageFree(pvPages, (size_t)pThis->cPages * cbPage);
    }
    else
        rc = VERR_NO_PAGE_MEMORY;

    return rc;
}

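/*
 * Illustrative note: whichever allocator succeeds, the first and last page of
 * the block end up RTMEM_PROT_NONE, so a linear overrun off either end of the
 * user buffer faults instead of silently reading or corrupting neighbouring
 * heap data.  A deliberate probe would crash by design:
 *
 * @code
 *     uint32_t const cbPage = RTSystemGetPageSize();
 *     uint8_t *pbUser      = (uint8_t *)pThis->Core.Key;   // before scrambling
 *     uint8_t *pbHighGuard = pbUser - pThis->offUser
 *                          + (size_t)(pThis->cPages - 2) * cbPage;
 *     pbHighGuard[0] = 0;  // write into the top guard page => SIGSEGV/#PF
 * @endcode
 */
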
RTDECL(int) RTMemSaferAllocZExTag(void **ppvNew, size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
{
    RT_NOREF_PV(pszTag);

    /*
     * Validate input.
     */
    AssertPtrReturn(ppvNew, VERR_INVALID_PARAMETER);
    *ppvNew = NULL;
    AssertReturn(cb, VERR_INVALID_PARAMETER);
    uint32_t const cbPage = RTSystemGetPageSize();
    AssertReturn(cb <= 32U*_1M - cbPage * 3U, VERR_ALLOCATION_TOO_BIG); /* Max 32 MB minus padding and guard pages. */
    AssertReturn(!(fFlags & ~RTMEMSAFER_F_VALID_MASK), VERR_INVALID_FLAGS);

    /*
     * Initialize globals.
     */
    int rc = RTOnceEx(&g_MemSaferOnce, rtMemSaferOnceInit, rtMemSaferOnceTerm, NULL);
    if (RT_SUCCESS(rc))
    {
        /*
         * Allocate a tracker node first.
         */
        PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTMemAllocZ(sizeof(RTMEMSAFERNODE));
        if (pThis)
        {
            /*
             * Prepare the allocation.
             */
            pThis->cbUser  = cb;
            pThis->offUser = (RTRandU32Ex(0, 128) * RTMEMSAFER_ALIGN) & RTSystemGetPageOffsetMask();

            size_t cbNeeded = pThis->offUser + pThis->cbUser;
            cbNeeded = RT_ALIGN_Z(cbNeeded, cbPage);

            pThis->cPages = (uint32_t)(cbNeeded / cbPage) + 2; /* +2 for guard pages */

            /*
             * Try to allocate the memory, using the best allocator by default
             * and falling back on the less safe one.
             */
            rc = rtMemSaferSupR3AllocPages(pThis);
            if (RT_SUCCESS(rc))
                pThis->enmAllocator = RTMEMSAFERALLOCATOR_SUPR3;
            else if (!(fFlags & RTMEMSAFER_F_REQUIRE_NOT_PAGABLE))
            {
                rc = rtMemSaferMemAllocPages(pThis);
                if (RT_SUCCESS(rc))
                    pThis->enmAllocator = RTMEMSAFERALLOCATOR_RTMEMPAGE;
            }
            if (RT_SUCCESS(rc))
            {
                /*
                 * Insert the node.
                 */
                *ppvNew = pThis->Core.Key;
                rtMemSaferNodeInsert(pThis); /* (Scrambles Core.Key.) */
                return VINF_SUCCESS;
            }

            RTMemFree(pThis);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    return rc;
}
RT_EXPORT_SYMBOL(RTMemSaferAllocZExTag);

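/*
 * Usage sketch (illustrative; assumes the RTMemSaferAllocZEx convenience
 * macro from iprt/memsafer.h, which supplies the default allocation tag):
 *
 * @code
 *     void *pvKeyMaterial = NULL;
 *     int rc = RTMemSaferAllocZEx(&pvKeyMaterial, 64, 0); // no special flags
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTRandBytes(pvKeyMaterial, 64);      // generate a 64 byte secret
 *         // ... use the key material ...
 *         RTMemSaferFree(pvKeyMaterial, 64);   // wipes thoroughly, then frees
 *     }
 * @endcode
 */
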
RTDECL(void) RTMemSaferFree(void *pv, size_t cb) RT_NO_THROW_DEF
{
    if (pv)
    {
        PRTMEMSAFERNODE pThis = rtMemSaferNodeRemove(pv);
        AssertReturnVoid(pThis);
        if (cb == 0) /* for openssl use */
            cb = pThis->cbUser;
        else
            AssertMsg(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser));

        /*
         * Wipe the user memory first.
         */
        RTMemWipeThoroughly(pv, RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN), 3);

        /*
         * Free the pages.
         */
        uint32_t const cbPage = RTSystemGetPageSize();
        uint8_t *pbPages = (uint8_t *)pv - pThis->offUser - cbPage;
        size_t   cbPages = (size_t)pThis->cPages * cbPage;
        switch (pThis->enmAllocator)
        {
#ifdef IN_SUP_R3
            case RTMEMSAFERALLOCATOR_SUPR3:
                SUPR3PageProtect(pbPages, NIL_RTR0PTR, 0, cbPage, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                SUPR3PageProtect(pbPages, NIL_RTR0PTR, (uint32_t)(cbPages - cbPage), cbPage, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                SUPR3PageFreeEx(pbPages, pThis->cPages);
                break;
#endif
            case RTMEMSAFERALLOCATOR_RTMEMPAGE:
                RTMemProtect(pbPages, cbPage, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                RTMemProtect(pbPages + cbPages - cbPage, cbPage, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                RTMemPageFree(pbPages, cbPages);
                break;

            default:
                AssertFailed();
        }

        /*
         * Free the tracking node.
         */
        pThis->Core.Key = NULL;
        pThis->offUser  = 0;
        pThis->cbUser   = 0;
        RTMemFree(pThis);
    }
    else
        Assert(cb == 0);
}
RT_EXPORT_SYMBOL(RTMemSaferFree);

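/*
 * Sketch of the cb == 0 path above (per the "for openssl use" remark): it
 * lets a free()-style callback that only receives the pointer release a safer
 * allocation, since the tracking node remembers the real size.  A
 * hypothetical wrapper:
 *
 * @code
 *     static void mySecureFreeCallback(void *pv) // hypothetical name
 *     {
 *         RTMemSaferFree(pv, 0); // 0 => use the recorded cbUser
 *     }
 * @endcode
 */
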
RTDECL(size_t) RTMemSaferGetSize(void *pv) RT_NO_THROW_DEF
{
    size_t cbRet = 0;
    if (pv)
    {
        /*
         * We use this API for testing whether pv is a safer allocation or not,
         * so we may be called before the allocators.  Thus, it's prudent to
         * make sure initialization has taken place before attempting to enter
         * the critical section and such.
         */
        int rc = RTOnceEx(&g_MemSaferOnce, rtMemSaferOnceInit, rtMemSaferOnceTerm, NULL);
        if (RT_SUCCESS(rc))
        {
            void *pvKey = rtMemSaferScramblePointer(pv);
            RTCritSectRwEnterShared(&g_MemSaferCritSect);
            PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTAvlPVGet(&g_pMemSaferTree, pvKey);
            if (pThis)
                cbRet = pThis->cbUser;
            RTCritSectRwLeaveShared(&g_MemSaferCritSect);
        }
    }
    return cbRet;
}
RT_EXPORT_SYMBOL(RTMemSaferGetSize);

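/*
 * Illustrative sketch: a return of 0 means "not one of ours" (cb is asserted
 * non-zero at allocation time), so RTMemSaferGetSize doubles as a membership
 * test when dispatching frees:
 *
 * @code
 *     size_t cbSafer = RTMemSaferGetSize(pv);
 *     if (cbSafer)
 *         RTMemSaferFree(pv, cbSafer);    // safer allocation
 *     else
 *         RTMemFree(pv);                  // ordinary heap block
 * @endcode
 */
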
/**
 * The simplest reallocation method: allocate new block, copy over the data,
 * free old block.
 */
static int rtMemSaferReallocSimpler(size_t cbOld, void *pvOld, size_t cbNew, void **ppvNew, uint32_t fFlags, const char *pszTag)
{
    void *pvNew;
    int rc = RTMemSaferAllocZExTag(&pvNew, cbNew, fFlags, pszTag);
    if (RT_SUCCESS(rc))
    {
        memcpy(pvNew, pvOld, RT_MIN(cbNew, cbOld));
        RTMemSaferFree(pvOld, cbOld);
        *ppvNew = pvNew;
    }
    return rc;
}

RTDECL(int) RTMemSaferReallocZExTag(size_t cbOld, void *pvOld, size_t cbNew, void **ppvNew, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
{
    int rc;
    /* Real realloc. */
    if (cbNew && cbOld)
    {
        PRTMEMSAFERNODE pThis = rtMemSaferNodeLookup(pvOld);
        AssertReturn(pThis, VERR_INVALID_POINTER);
        AssertMsgStmt(cbOld == pThis->cbUser, ("cbOld=%#zx != %#zx\n", cbOld, pThis->cbUser), cbOld = pThis->cbUser);

        if (pThis->fFlags == fFlags)
        {
            if (cbNew > cbOld)
            {
                /*
                 * Is there enough room for us to grow?
                 */
                size_t cbMax = (size_t)(pThis->cPages - 2) * RTSystemGetPageSize();
                if (cbNew <= cbMax)
                {
                    size_t const cbAdded = (cbNew - cbOld);
                    size_t const cbAfter = cbMax - pThis->offUser - cbOld;
                    if (cbAfter >= cbAdded)
                    {
                        /*
                         * Sufficient space after the current allocation.
                         */
                        uint8_t *pbNewSpace = (uint8_t *)pvOld + cbOld;
                        RT_BZERO(pbNewSpace, cbAdded);
                        *ppvNew = pvOld;
                    }
                    else
                    {
                        /*
                         * Have to move the allocation to make enough room at the
                         * end.  In order to make it a little less predictable and
                         * maybe avoid a relocation or two in the next call, halve
                         * the page offset until it fits.
                         */
                        AssertReturn(rtMemSaferNodeRemove(pvOld) == pThis, VERR_INTERNAL_ERROR_3);
                        uint32_t offNewUser = pThis->offUser;
                        do
                            offNewUser = offNewUser / 2;
                        while ((pThis->offUser - offNewUser) + cbAfter < cbAdded);
                        offNewUser &= ~(RTMEMSAFER_ALIGN - 1U);

                        uint32_t const cbMove = pThis->offUser - offNewUser;
                        uint8_t *pbNew = (uint8_t *)pvOld - cbMove;
                        memmove(pbNew, pvOld, cbOld);

                        RT_BZERO(pbNew + cbOld, cbAdded);
                        if (cbMove > cbAdded)
                            RTMemWipeThoroughly(pbNew + cbNew, cbMove - cbAdded, 3);

                        pThis->offUser  = offNewUser;
                        pThis->Core.Key = pbNew;
                        *ppvNew = pbNew;

                        rtMemSaferNodeInsert(pThis);
                    }
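                    /*
                     * Worked example (illustrative, assuming cbPage = 0x1000):
                     * offUser = 0xe00, cbOld = 0x100 and one data page give
                     * cbMax = 0x1000 and cbAfter = 0x1000 - 0xe00 - 0x100 = 0x100.
                     * Growing to cbNew = 0x300 means cbAdded = 0x200 > cbAfter,
                     * so the loop halves offUser once to 0x700 (already 16-byte
                     * aligned): (0xe00 - 0x700) + 0x100 = 0x800 >= 0x200.  The
                     * data then moves down cbMove = 0x700 bytes, the 0x200 new
                     * bytes are zeroed, and the 0x500 vacated bytes beyond the
                     * new end are wiped.
                     */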
                    Assert(((uintptr_t)*ppvNew & RTSystemGetPageOffsetMask()) == pThis->offUser);
                    pThis->cbUser = cbNew;
                    rc = VINF_SUCCESS;
                }
                else
                {
                    /*
                     * Not enough space, allocate a new block and copy over the data.
                     */
                    rc = rtMemSaferReallocSimpler(cbOld, pvOld, cbNew, ppvNew, fFlags, pszTag);
                }
            }
            else
            {
                /*
                 * Shrinking the allocation, just wipe the memory that is no longer
                 * being used.
                 */
                if (cbNew != cbOld)
                {
                    uint8_t *pbAbandoned = (uint8_t *)pvOld + cbNew;
                    RTMemWipeThoroughly(pbAbandoned, cbOld - cbNew, 3);
                }
                pThis->cbUser = cbNew;
                *ppvNew = pvOld;
                rc = VINF_SUCCESS;
            }
        }
        else if (!pThis->fFlags)
        {
            /*
             * New flags added.  Allocate a new block and copy over the old one.
             */
            rc = rtMemSaferReallocSimpler(cbOld, pvOld, cbNew, ppvNew, fFlags, pszTag);
        }
        else
        {
            /* Incompatible flags. */
            AssertMsgFailed(("fFlags=%#x old=%#x\n", fFlags, pThis->fFlags));
            rc = VERR_INVALID_FLAGS;
        }
    }
    /*
     * First allocation. Pass it on.
     */
    else if (!cbOld)
    {
        Assert(pvOld == NULL);
        rc = RTMemSaferAllocZExTag(ppvNew, cbNew, fFlags, pszTag);
    }
    /*
     * Free operation. Pass it on.
     */
    else
    {
        RTMemSaferFree(pvOld, cbOld);
        *ppvNew = NULL;
        rc = VINF_SUCCESS;
    }
    return rc;
}
RT_EXPORT_SYMBOL(RTMemSaferReallocZExTag);

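/*
 * Semantics recap (illustrative sketch): RTMemSaferReallocZExTag covers the
 * classic realloc triple -- cbOld == 0 is a pure allocation, cbNew == 0 is a
 * free, and anything else resizes (growing in place when the slack within the
 * existing data pages allows it).  pszTag is not used by this implementation,
 * so NULL is passed here for brevity:
 *
 * @code
 *     void *pv = NULL;
 *     int rc = RTMemSaferReallocZExTag( 0, NULL, 32, &pv, 0, NULL); // alloc 32 bytes
 *     rc     = RTMemSaferReallocZExTag(32, pv,   64, &pv, 0, NULL); // grow, new tail zeroed
 *     rc     = RTMemSaferReallocZExTag(64, pv,    0, &pv, 0, NULL); // free, pv set to NULL
 * @endcode
 */
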
RTDECL(void *) RTMemSaferAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    void *pvNew = NULL;
    int rc = RTMemSaferAllocZExTag(&pvNew, cb, 0 /*fFlags*/, pszTag);
    if (RT_SUCCESS(rc))
        return pvNew;
    return NULL;
}
RT_EXPORT_SYMBOL(RTMemSaferAllocZTag);


RTDECL(void *) RTMemSaferReallocZTag(size_t cbOld, void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
{
    void *pvNew = NULL;
    int rc = RTMemSaferReallocZExTag(cbOld, pvOld, cbNew, &pvNew, 0 /*fFlags*/, pszTag);
    if (RT_SUCCESS(rc))
        return pvNew;
    return NULL;
}
RT_EXPORT_SYMBOL(RTMemSaferReallocZTag);