VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/alloc/memcache.cpp

Last change on this file was 103005, checked in by vboxsync, 4 months ago

iprt/asm.h,*: Split out the ASMMem* and related stuff into a separate header, asm-mem.h, so that we can get the RT_ASM_PAGE_SIZE stuff out of the way.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.8 KB
RevLine 
[26416]1/* $Id: memcache.cpp 103005 2024-01-23 23:55:58Z vboxsync $ */
2/** @file
3 * IPRT - Memory Object Allocation Cache.
4 */
5
6/*
[98103]7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
[26416]8 *
[96407]9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
[26416]11 *
[96407]12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
[26416]25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
[96407]27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
[26416]29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
[96407]33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
[26416]35 */
36
37
[57358]38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
[26416]41#include <iprt/memcache.h>
42#include "internal/iprt.h"
43
44#include <iprt/assert.h>
45#include <iprt/asm.h>
46#include <iprt/critsect.h>
47#include <iprt/err.h>
48#include <iprt/mem.h>
[100309]49#include <iprt/system.h>
[26416]50
51#include "internal/magics.h"
52
53
/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/** Pointer to a cache instance. */
typedef struct RTMEMCACHEINT *PRTMEMCACHEINT;
/** Pointer to a cache page. */
typedef struct RTMEMCACHEPAGE *PRTMEMCACHEPAGE;



/**
 * A free object.
 *
 * Freed objects are kept on a lock-free LIFO stack, linked through the first
 * pointer-sized field of the object memory itself.
 *
 * @remarks This only works if the objects don't have a constructor or
 *          destructor and are big enough to hold the next pointer.
 */
typedef struct RTMEMCACHEFREEOBJ
{
    /** Pointer to the next free object. */
    struct RTMEMCACHEFREEOBJ * volatile pNext;
} RTMEMCACHEFREEOBJ;
/** Pointer to a free object. */
typedef RTMEMCACHEFREEOBJ *PRTMEMCACHEFREEOBJ;


/**
 * A cache page.
 *
 * This is a page of memory that we split up in to a bunch object sized chunks
 * and hand out to the cache users.  The bitmap is updated in an atomic fashion
 * so that we don't have to take any locks when freeing or allocating memory.
 */
typedef struct RTMEMCACHEPAGE
{
    /** Pointer to the cache owning this page.
     * This is used for validation purposes only. */
    PRTMEMCACHEINT              pCache;
    /** Pointer to the next page.
     * This is marked as volatile since we'll be adding new entries to the list
     * without taking any locks. */
    PRTMEMCACHEPAGE volatile    pNext;
    /** Bitmap tracking allocated blocks. */
    void volatile              *pbmAlloc;
    /** Bitmap tracking which blocks that has been through the constructor. */
    void volatile              *pbmCtor;
    /** Pointer to the object array. */
    uint8_t                    *pbObjects;
    /** The number of objects on this page.  */
    uint32_t                    cObjects;

    /** Padding to force cFree into the next cache line. (ASSUMES CL = 64) */
    uint8_t                     abPadding[ARCH_BITS == 32 ? 64 - 6*4 : 64 - 5*8 - 4];
    /** The number of free objects. */
    int32_t volatile            cFree;
} RTMEMCACHEPAGE;
/* Guard against padding miscalculation breaking the cache-line separation of cFree. */
AssertCompileMemberOffset(RTMEMCACHEPAGE, cFree, 64);


/**
 * Memory object cache instance.
 */
typedef struct RTMEMCACHEINT
{
    /** Magic value (RTMEMCACHE_MAGIC). */
    uint32_t                    u32Magic;
    /** The object size (rounded up to the alignment). */
    uint32_t                    cbObject;
    /** Object alignment. */
    uint32_t                    cbAlignment;
    /** The per page object count. */
    uint32_t                    cPerPage;
    /** Number of bits in the bitmap.
     * @remarks This is higher or equal to cPerPage and it is aligned such that
     *          the search operation will be most efficient on x86/AMD64. */
    uint32_t                    cBits;
    /** The maximum number of objects. */
    uint32_t                    cMax;
    /** Whether to use the free list or not. */
    bool                        fUseFreeList;
    /** Head of the page list. */
    PRTMEMCACHEPAGE             pPageHead;
    /** Pointer to the insertion point in the page list. */
    PRTMEMCACHEPAGE volatile   *ppPageNext;
    /** Constructor callback. */
    PFNMEMCACHECTOR             pfnCtor;
    /** Destructor callback. */
    PFNMEMCACHEDTOR             pfnDtor;
    /** Callback argument. */
    void                       *pvUser;
    /** Critical section serializing page allocation and similar. */
    RTCRITSECT                  CritSect;

    /** The total object count. */
    uint32_t volatile           cTotal;
    /** The number of free objects. */
    int32_t volatile            cFree;
    /** This may point to a page with free entries. */
    PRTMEMCACHEPAGE volatile    pPageHint;
    /** Stack of free items.
     * These are marked as used in the allocation bitmaps.
     *
     * @todo This doesn't scale well when several threads are beating on the
     *       cache.  Also, it totally doesn't work when the objects are too
     *       small. */
    PRTMEMCACHEFREEOBJ volatile pFreeTop;
} RTMEMCACHEINT;


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static void rtMemCacheFreeList(RTMEMCACHEINT *pThis, PRTMEMCACHEFREEOBJ pHead);
[26416]166
[55252]167
/**
 * Creates a memory object allocation cache.
 *
 * @returns IPRT status code.
 * @param   phMemCache      Where to return the cache handle.
 * @param   cbObject        The size of one object (must not exceed 1/8 of the
 *                          system page size).
 * @param   cbAlignment     Object alignment; 0 picks a sensible default,
 *                          otherwise it must be a power of two <= 64.
 * @param   cMaxObjects     Maximum number of objects the cache may hand out.
 * @param   pfnCtor         Optional object constructor callback.
 * @param   pfnDtor         Optional object destructor (requires pfnCtor).
 * @param   pvUser          User argument for the callbacks.
 * @param   fFlags          Reserved, must be zero.
 */
RTDECL(int) RTMemCacheCreate(PRTMEMCACHE phMemCache, size_t cbObject, size_t cbAlignment, uint32_t cMaxObjects,
                             PFNMEMCACHECTOR pfnCtor, PFNMEMCACHEDTOR pfnDtor, void *pvUser, uint32_t fFlags)

{
    AssertPtr(phMemCache);
    AssertPtrNull(pfnCtor);
    AssertPtrNull(pfnDtor);
    AssertReturn(!pfnDtor || pfnCtor, VERR_INVALID_PARAMETER);
    AssertReturn(cbObject > 0, VERR_INVALID_PARAMETER);

    size_t const cbPage = RTSystemGetPageSize();
    AssertReturn(cbObject <= cbPage / 8, VERR_INVALID_PARAMETER);
    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);

    if (cbAlignment == 0)
    {
        /* Default alignment: the smallest power of two >= cbObject, capped at 64. */
        if (cbObject <= 2)
            cbAlignment = cbObject;
        else if (cbObject <= 4)
            cbAlignment = 4;
        else if (cbObject <= 8)
            cbAlignment = 8;
        else if (cbObject <= 16)
            cbAlignment = 16;
        else if (cbObject <= 32)
            cbAlignment = 32;
        else
            cbAlignment = 64;
    }
    else
    {
        AssertReturn(!((cbAlignment - 1) & cbAlignment), VERR_NOT_POWER_OF_TWO);
        AssertReturn(cbAlignment <= 64, VERR_OUT_OF_RANGE);
    }

    /*
     * Allocate and initialize the instance memory.
     */
    RTMEMCACHEINT *pThis = (RTMEMCACHEINT *)RTMemAlloc(sizeof(*pThis));
    if (!pThis)
        return VERR_NO_MEMORY;
    int rc = RTCritSectInit(&pThis->CritSect);
    if (RT_FAILURE(rc))
    {
        RTMemFree(pThis);
        return rc;
    }

    pThis->u32Magic     = RTMEMCACHE_MAGIC;
    pThis->cbObject     = (uint32_t)RT_ALIGN_Z(cbObject, cbAlignment);
    pThis->cbAlignment  = (uint32_t)cbAlignment;
    /* Optimistic per-page count first... */
    pThis->cPerPage     = (uint32_t)((cbPage - RT_ALIGN_Z(sizeof(RTMEMCACHEPAGE), cbAlignment)) / pThis->cbObject);
    /* ...then shrink it until the page header, object array and the two
       bitmaps (alloc + ctor, one bit each per object, 64-bit aligned) all
       fit within a single page. */
    while (  RT_ALIGN_Z(sizeof(RTMEMCACHEPAGE), 8)
           + pThis->cPerPage * pThis->cbObject
           + RT_ALIGN(pThis->cPerPage, 64) / 8 * 2
           > cbPage)
        pThis->cPerPage--;
    pThis->cBits        = RT_ALIGN(pThis->cPerPage, 64);
    pThis->cMax         = cMaxObjects;
    /* The free list reuses the object memory for the next pointer, so it only
       works when objects are large enough and have no ctor/dtor to respect. */
    pThis->fUseFreeList = cbObject >= sizeof(RTMEMCACHEFREEOBJ)
                       && !pfnCtor
                       && !pfnDtor;
    pThis->pPageHead    = NULL;
    pThis->ppPageNext   = &pThis->pPageHead;
    pThis->pfnCtor      = pfnCtor;
    pThis->pfnDtor      = pfnDtor;
    pThis->pvUser       = pvUser;
    pThis->cTotal       = 0;
    pThis->cFree        = 0;
    pThis->pPageHint    = NULL;
    pThis->pFreeTop     = NULL;

    *phMemCache = pThis;
    return VINF_SUCCESS;
}
243
244
/**
 * Destroys a memory cache, freeing all pages.
 *
 * Objects still marked as constructed get their destructor called.  A NIL
 * handle is silently accepted.
 *
 * @returns IPRT status code.
 * @param   hMemCache       The cache handle (NIL is okay).
 */
RTDECL(int) RTMemCacheDestroy(RTMEMCACHE hMemCache)
{
    RTMEMCACHEINT *pThis = hMemCache;
    if (!pThis)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTMEMCACHE_MAGIC, VERR_INVALID_HANDLE);

#if 0 /*def RT_STRICT - don't require everything to be freed. Caches are very convenient for lazy cleanup. */
    uint32_t cFree = pThis->cFree;
    for (PRTMEMCACHEFREEOBJ pFree = pThis->pFreeTop; pFree && cFree < pThis->cTotal + 5; pFree = pFree->pNext)
        cFree++;
    AssertMsg(cFree == pThis->cTotal, ("cFree=%u cTotal=%u\n", cFree, pThis->cTotal));
#endif

    /*
     * Destroy it.  Invalidate the magic atomically so concurrent users fail
     * their handle checks, then tear down the lock and the page list.
     */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, RTMEMCACHE_MAGIC_DEAD, RTMEMCACHE_MAGIC), VERR_INVALID_HANDLE);
    RTCritSectDelete(&pThis->CritSect);

    while (pThis->pPageHead)
    {
        PRTMEMCACHEPAGE pPage = pThis->pPageHead;
        pThis->pPageHead = pPage->pNext;
        pPage->cFree = 0;

        if (pThis->pfnDtor)
        {
            /* Only objects that went through the constructor (pbmCtor bit set)
               get the destructor treatment. */
            uint32_t iObj = pPage->cObjects;
            while (iObj-- > 0)
                if (ASMBitTestAndClear(pPage->pbmCtor, iObj))
                    pThis->pfnDtor(hMemCache, pPage->pbObjects + iObj * pThis->cbObject, pThis->pvUser);
        }

        RTMemPageFree(pPage, RTSystemGetPageSize());
    }

    RTMemFree(pThis);
    return VINF_SUCCESS;
}
286
287
/**
 * Grows the cache.
 *
 * Allocates one new page, carves it into a constructor bitmap, an allocation
 * bitmap and an object array, then links it onto the end of the page list.
 *
 * @returns IPRT status code.
 * @param   pThis               The memory cache instance.
 */
static int rtMemCacheGrow(RTMEMCACHEINT *pThis)
{
    /*
     * Enter the critical section here to avoid allocation races leading to
     * wasted memory (++) and make it easier to link in the new page.
     */
    RTCritSectEnter(&pThis->CritSect);
    int rc = VINF_SUCCESS;
    if (pThis->cFree < 0)   /* Re-check under the lock; another thread may have grown the cache already. */
    {
        /*
         * Allocate and initialize the new page.
         *
         * We put the constructor bitmap at the lower end right after cFree.
         * We then push the object array to the end of the page and place the
         * allocation bitmap below it.  The hope is to increase the chance that
         * the allocation bitmap is in a different cache line than cFree since
         * this increases performance markably when lots of threads are beating
         * on the cache.
         */
        size_t const cbPage = RTSystemGetPageSize();
        PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)RTMemPageAllocZ(cbPage);
        if (pPage)
        {
            /* Clip the object count so we never exceed the configured maximum. */
            uint32_t const cObjects = RT_MIN(pThis->cPerPage, pThis->cMax - pThis->cTotal);

            pPage->pCache   = pThis;
            pPage->pNext    = NULL;
            pPage->cFree    = cObjects;
            pPage->cObjects = cObjects;
            /* Constructor bitmap: 8-byte aligned, directly after the page header. */
            uint8_t *pb = (uint8_t *)(pPage + 1);
            pb = RT_ALIGN_PT(pb, 8, uint8_t *);
            pPage->pbmCtor  = pb;
            /* Object array: pushed to the very end of the page. */
            pb = (uint8_t *)pPage + cbPage - pThis->cbObject * cObjects;
            pPage->pbObjects = pb;   Assert(RT_ALIGN_P(pb, pThis->cbAlignment) == pb);
            /* Allocation bitmap: just below the object array, rounded down to 8 bytes. */
            pb -= pThis->cBits / 8;
            pb = (uint8_t *)((uintptr_t)pb & ~(uintptr_t)7);
            pPage->pbmAlloc = pb;
            Assert((uintptr_t)pPage->pbmCtor + pThis->cBits / 8 <= (uintptr_t)pPage->pbmAlloc);

            /* Mark the bitmap padding and any unused objects as allocated. */
            for (uint32_t iBit = cObjects; iBit < pThis->cBits; iBit++)
                ASMBitSet(pPage->pbmAlloc, iBit);

            /* Make it the hint. */
            ASMAtomicWritePtr(&pThis->pPageHint, pPage);

            /* Link the page in at the end of the list. */
            ASMAtomicWritePtr(pThis->ppPageNext, pPage);
            pThis->ppPageNext = &pPage->pNext;

            /* Add it to the page counts. */
            ASMAtomicAddS32(&pThis->cFree, cObjects);
            ASMAtomicAddU32(&pThis->cTotal, cObjects);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    RTCritSectLeave(&pThis->CritSect);
    return rc;
}
355
356
357/**
358 * Grabs a an object in a page.
359 * @returns New cFree value on success (0 or higher), -1 on failure.
360 * @param pPage Pointer to the page.
361 */
362DECL_FORCE_INLINE(int32_t) rtMemCacheGrabObj(PRTMEMCACHEPAGE pPage)
363{
[55256]364 if (ASMAtomicUoReadS32(&pPage->cFree) > 0)
[26416]365 {
[55256]366 int32_t cFreeNew = ASMAtomicDecS32(&pPage->cFree);
367 if (cFreeNew >= 0)
368 return cFreeNew;
[26416]369 ASMAtomicIncS32(&pPage->cFree);
370 }
[55256]371 return -1;
[26416]372}
373
374
/**
 * Allocates one object from the cache.
 *
 * Tries the lock-free free stack first, then the per-cache free counter
 * (growing the cache when necessary), and finally scans the pages for a free
 * slot in the allocation bitmap.  Runs the constructor on first-time slots.
 *
 * @returns IPRT status code, VERR_MEM_CACHE_MAX_SIZE when cMax is reached.
 * @param   hMemCache       The cache handle.
 * @param   ppvObj          Where to return the allocated object.
 */
RTDECL(int) RTMemCacheAllocEx(RTMEMCACHE hMemCache, void **ppvObj)
{
    RTMEMCACHEINT *pThis = hMemCache;
    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
    AssertReturn(pThis->u32Magic == RTMEMCACHE_MAGIC, VERR_INVALID_PARAMETER);

    /*
     * Try grab a free object from the stack.
     */
    PRTMEMCACHEFREEOBJ pObj = ASMAtomicUoReadPtrT(&pThis->pFreeTop, PRTMEMCACHEFREEOBJ);
    if (pObj)
    {
        /* Steal the whole stack by exchanging in NULL; this avoids the ABA
           problem of popping a single node. */
        pObj = ASMAtomicXchgPtrT(&pThis->pFreeTop, NULL, PRTMEMCACHEFREEOBJ);
        if (pObj)
        {
            if (pObj->pNext)
            {
                /* Put the remainder of the stack back.  If someone pushed
                   entries meanwhile, merge them by really freeing them. */
                Assert(pObj->pNext != pObj);
                PRTMEMCACHEFREEOBJ pAllocRace = ASMAtomicXchgPtrT(&pThis->pFreeTop, pObj->pNext, PRTMEMCACHEFREEOBJ);
                if (pAllocRace)
                    rtMemCacheFreeList(pThis, pAllocRace);
            }

            pObj->pNext = NULL;
            *ppvObj = pObj;
            return VINF_SUCCESS;
        }
    }

    /*
     * Try grab a free object at the cache level.  A negative result means
     * there were no free objects and we must grow the cache (or fail).
     */
    int32_t cNewFree = ASMAtomicDecS32(&pThis->cFree);
    if (RT_LIKELY(cNewFree < 0))
    {
        uint32_t cTotal = ASMAtomicUoReadU32(&pThis->cTotal);
        /* Second condition catches unsigned overflow of cTotal + -cNewFree. */
        if (   (uint32_t)(cTotal + -cNewFree) > pThis->cMax
            || (uint32_t)(cTotal + -cNewFree) <= cTotal)
        {
            ASMAtomicIncS32(&pThis->cFree);
            return VERR_MEM_CACHE_MAX_SIZE;
        }

        int rc = rtMemCacheGrow(pThis);
        if (RT_FAILURE(rc))
        {
            ASMAtomicIncS32(&pThis->cFree);
            return rc;
        }
    }

    /*
     * Grab a free object at the page level.  Start with the hint page, then
     * walk the whole list; we hold a reservation (the cFree decrement above),
     * so an object must turn up within a few laps.
     */
    PRTMEMCACHEPAGE pPage = ASMAtomicUoReadPtrT(&pThis->pPageHint, PRTMEMCACHEPAGE);
    int32_t iObj = pPage ? rtMemCacheGrabObj(pPage) : -1;
    if (iObj < 0)
    {
        for (unsigned cLoops = 0; ; cLoops++)
        {
            for (pPage = pThis->pPageHead; pPage; pPage = pPage->pNext)
            {
                iObj = rtMemCacheGrabObj(pPage);
                if (iObj >= 0)
                {
                    if (iObj > 0)
                        ASMAtomicWritePtr(&pThis->pPageHint, pPage);
                    break;
                }
            }
            if (iObj >= 0)
                break;
            Assert(cLoops != 2);
            Assert(cLoops < 10);
        }
    }
    Assert(iObj >= 0);
    Assert((uint32_t)iObj < pThis->cMax);

    /*
     * Find a free object in the allocation bitmap.  Use the new cFree count
     * as a hint.
     */
    if (ASMAtomicBitTestAndSet(pPage->pbmAlloc, iObj))
    {
        /* Hint slot taken - scan for the first clear bit, retrying on races. */
        for (unsigned cLoops2 = 0;; cLoops2++)
        {
            iObj = ASMBitFirstClear(pPage->pbmAlloc, pThis->cBits);
            if (RT_LIKELY(iObj >= 0))
            {
                if (!ASMAtomicBitTestAndSet(pPage->pbmAlloc, iObj))
                    break;
            }
            else
                ASMMemoryFence();
            Assert(cLoops2 != 40);
        }
        Assert(iObj >= 0);
    }
    void *pvObj = &pPage->pbObjects[iObj * pThis->cbObject];
    Assert((uintptr_t)pvObj - (uintptr_t)pPage < RTSystemGetPageSize());

    /*
     * Call the constructor?  Only on the first allocation of the slot; the
     * pbmCtor bit stays set until the cache is destroyed.
     */
    if (   pThis->pfnCtor
        && !ASMAtomicBitTestAndSet(pPage->pbmCtor, iObj))
    {
        int rc = pThis->pfnCtor(hMemCache, pvObj, pThis->pvUser);
        if (RT_FAILURE(rc))
        {
            ASMAtomicBitClear(pPage->pbmCtor, iObj);
            RTMemCacheFree(pThis, pvObj);
            return rc;
        }
    }

    *ppvObj = pvObj;
    return VINF_SUCCESS;
}
495
496
497RTDECL(void *) RTMemCacheAlloc(RTMEMCACHE hMemCache)
498{
499 void *pvObj;
500 int rc = RTMemCacheAllocEx(hMemCache, &pvObj);
501 if (RT_SUCCESS(rc))
502 return pvObj;
503 return NULL;
504}
505
506
[55252]507
/**
 * Really frees one object.
 *
 * Locates the owning page from the object address (the page header lives at
 * the start of the page), clears the allocation bitmap bit and bumps the
 * free counters.
 *
 * @param   pThis               The memory cache.
 * @param   pvObj               The memory object to free.
 */
static void rtMemCacheFreeOne(RTMEMCACHEINT *pThis, void *pvObj)
{
    /* Note: Do *NOT* attempt to poison the object! */

    /*
     * Find the cache page.  The page structure is at the start of the page.
     */
    PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)(((uintptr_t)pvObj) & ~RTSystemGetPageOffsetMask());
    Assert(pPage->pCache == pThis);
    Assert(ASMAtomicUoReadS32(&pPage->cFree) < (int32_t)pThis->cPerPage);

    /*
     * Clear the bitmap bit and update the two object counter. Order matters!
     * The bit must be clear before the counters say an object is available,
     * otherwise an allocator could find no clear bit despite cFree > 0.
     */
    uintptr_t offObj = (uintptr_t)pvObj - (uintptr_t)pPage->pbObjects;
    uintptr_t iObj   = offObj / pThis->cbObject;
    Assert(iObj * pThis->cbObject == offObj);
    Assert(iObj < pThis->cPerPage);
    AssertReturnVoid(ASMAtomicBitTestAndClear(pPage->pbmAlloc, iObj));

    ASMAtomicIncS32(&pPage->cFree);
    ASMAtomicIncS32(&pThis->cFree);
}
537
538
539/**
540 * Really frees a list of 'freed' object.
541 *
542 * @param pThis The memory cache.
543 * @param pHead The head of the list.
544 */
545static void rtMemCacheFreeList(RTMEMCACHEINT *pThis, PRTMEMCACHEFREEOBJ pHead)
546{
547 while (pHead)
548 {
549 PRTMEMCACHEFREEOBJ pFreeMe = pHead;
550 pHead = pHead->pNext;
551 pFreeMe->pNext = NULL;
552 ASMCompilerBarrier();
553 rtMemCacheFreeOne(pThis, pFreeMe);
554 }
555}
556
557
558
/**
 * Frees an object previously allocated from the cache.
 *
 * When the free list is in use the object is pushed onto the lock-free free
 * stack (its bitmap bit stays set); otherwise it is returned to its page
 * immediately.  NULL is silently ignored.
 *
 * @param   hMemCache       The cache handle.
 * @param   pvObj           The object to free (NULL is okay).
 */
RTDECL(void) RTMemCacheFree(RTMEMCACHE hMemCache, void *pvObj)
{
    if (!pvObj)
        return;

    RTMEMCACHEINT *pThis = hMemCache;
    AssertPtrReturnVoid(pThis);
    AssertReturnVoid(pThis->u32Magic == RTMEMCACHE_MAGIC);

    AssertPtr(pvObj);
    Assert(RT_ALIGN_P(pvObj, pThis->cbAlignment) == pvObj);

    if (!pThis->fUseFreeList)
        rtMemCacheFreeOne(pThis, pvObj);
    else
    {
# ifdef RT_STRICT
        /* This is the same as the other branch, except it's not actually freed. */
        PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)(((uintptr_t)pvObj) & ~RTSystemGetPageOffsetMask());
        Assert(pPage->pCache == pThis);
        Assert(ASMAtomicUoReadS32(&pPage->cFree) < (int32_t)pThis->cPerPage);
        uintptr_t offObj = (uintptr_t)pvObj - (uintptr_t)pPage->pbObjects;
        uintptr_t iObj   = offObj / pThis->cbObject;
        Assert(iObj * pThis->cbObject == offObj);
        Assert(iObj < pThis->cPerPage);
        AssertReturnVoid(ASMBitTest(pPage->pbmAlloc, (int32_t)iObj));
# endif

        /*
         * Push it onto the free stack.  Grab the whole stack via xchg-NULL to
         * dodge ABA, chain it behind us, then put everything back.  Entries
         * pushed by racing threads in between are really freed instead.
         */
        PRTMEMCACHEFREEOBJ pObj = (PRTMEMCACHEFREEOBJ)pvObj;
        pObj->pNext = ASMAtomicXchgPtrT(&pThis->pFreeTop, NULL, PRTMEMCACHEFREEOBJ);
        PRTMEMCACHEFREEOBJ pFreeRace = ASMAtomicXchgPtrT(&pThis->pFreeTop, pObj, PRTMEMCACHEFREEOBJ);
        if (pFreeRace)
            rtMemCacheFreeList(pThis, pFreeRace);
    }
}
597
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use