VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/string/strcache.cpp

Last change on this file was 98103, checked in by vboxsync, 16 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 42.3 KB
Line 
1/* $Id: strcache.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
2/** @file
3 * IPRT - String Cache.
4 */
5
6/*
7 * Copyright (C) 2009-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#include <iprt/strcache.h>
42#include "internal/iprt.h"
43
44#include <iprt/alloca.h>
45#include <iprt/asm.h>
46#include <iprt/assert.h>
47#include <iprt/critsect.h>
48#include <iprt/errcore.h>
49#include <iprt/list.h>
50#include <iprt/mem.h>
51#include <iprt/once.h>
52#include <iprt/param.h>
53#include <iprt/string.h>
54
55#include "internal/strhash.h"
56#include "internal/magics.h"
57
58
59/*********************************************************************************************************************************
60* Defined Constants And Macros *
61*********************************************************************************************************************************/
62/** Special NIL pointer for the hash table. It differs from NULL in that it is
63 * a valid hash table entry when doing a lookup. */
64#define PRTSTRCACHEENTRY_NIL ((PRTSTRCACHEENTRY)~(uintptr_t)1)
65
/** Calculates the increment when handling a collision.
 * The current formula makes sure it's always odd so we cannot possibly end
 * up in a cyclic loop with an even sized table.  It also takes more bits from
 * the length part. */
70#define RTSTRCACHE_COLLISION_INCR(uHashLen) ( ((uHashLen >> 8) | 1) )
71
72/** The initial hash table size. Must be power of two. */
73#define RTSTRCACHE_INITIAL_HASH_SIZE 512
74/** The hash table growth factor. */
75#define RTSTRCACHE_HASH_GROW_FACTOR 4
76
77/**
78 * The RTSTRCACHEENTRY size threshold at which we stop using our own allocator
79 * and switch to the application heap, expressed as a power of two.
80 *
81 * Using a 1KB as a reasonable limit here.
82 */
83#ifdef RTSTRCACHE_WITH_MERGED_ALLOCATOR
84# define RTSTRCACHE_HEAP_THRESHOLD_BIT 10
85#else
86# define RTSTRCACHE_HEAP_THRESHOLD_BIT 9
87#endif
88/** The RTSTRCACHE_HEAP_THRESHOLD_BIT as a byte limit. */
89#define RTSTRCACHE_HEAP_THRESHOLD RT_BIT_32(RTSTRCACHE_HEAP_THRESHOLD_BIT)
90/** Big (heap) entry size alignment. */
91#define RTSTRCACHE_HEAP_ENTRY_SIZE_ALIGN 16
92
93#ifdef RTSTRCACHE_WITH_MERGED_ALLOCATOR
94/**
95 * The RTSTRCACHEENTRY size threshold at which we start using the merge free
96 * list for allocations, expressed as a power of two.
97 */
98# define RTSTRCACHE_MERGED_THRESHOLD_BIT 6
99
100/** The number of bytes (power of two) that the merged allocation lists should
101 * be grown by. Must be much greater than RTSTRCACHE_MERGED_THRESHOLD. */
102# define RTSTRCACHE_MERGED_GROW_SIZE _32K
103#endif
104
105/** The number of bytes (power of two) that the fixed allocation lists should
106 * be grown by. */
107#define RTSTRCACHE_FIXED_GROW_SIZE _32K
108
109/** The number of fixed sized lists. */
110#define RTSTRCACHE_NUM_OF_FIXED_SIZES 12
111
112
113/** Validates a string cache handle, translating RTSTRCACHE_DEFAULT when found,
114 * and returns rc if not valid. */
115#define RTSTRCACHE_VALID_RETURN_RC(pStrCache, rc) \
116 do { \
117 if ((pStrCache) == RTSTRCACHE_DEFAULT) \
118 { \
119 int rcOnce = RTOnce(&g_rtStrCacheOnce, rtStrCacheInitDefault, NULL); \
120 if (RT_FAILURE(rcOnce)) \
121 return (rc); \
122 (pStrCache) = g_hrtStrCacheDefault; \
123 } \
124 else \
125 { \
126 AssertPtrReturn((pStrCache), (rc)); \
127 AssertReturn((pStrCache)->u32Magic == RTSTRCACHE_MAGIC, (rc)); \
128 } \
129 } while (0)
130
131
132
133/*********************************************************************************************************************************
134* Structures and Typedefs *
135*********************************************************************************************************************************/
136/**
137 * String cache entry.
138 */
139typedef struct RTSTRCACHEENTRY
140{
141 /** The number of references. */
142 uint32_t volatile cRefs;
143 /** The lower 16-bit hash value. */
144 uint16_t uHash;
145 /** The string length (excluding the terminator).
146 * If this is set to RTSTRCACHEENTRY_BIG_LEN, this is a BIG entry
147 * (RTSTRCACHEBIGENTRY). */
148 uint16_t cchString;
149 /** The string. */
150 char szString[8];
151} RTSTRCACHEENTRY;
152AssertCompileSize(RTSTRCACHEENTRY, 16);
153/** Pointer to a string cache entry. */
154typedef RTSTRCACHEENTRY *PRTSTRCACHEENTRY;
155/** Pointer to a const string cache entry. */
156typedef RTSTRCACHEENTRY *PCRTSTRCACHEENTRY;
157
158/** RTSTCACHEENTRY::cchString value for big cache entries. */
159#define RTSTRCACHEENTRY_BIG_LEN UINT16_MAX
160
161/**
162 * Big string cache entry.
163 *
164 * These are allocated individually from the application heap.
165 */
166typedef struct RTSTRCACHEBIGENTRY
167{
168 /** List entry. */
169 RTLISTNODE ListEntry;
170 /** The string length. */
171 uint32_t cchString;
172 /** The full hash value / padding. */
173 uint32_t uHash;
174 /** The core entry. */
175 RTSTRCACHEENTRY Core;
176} RTSTRCACHEBIGENTRY;
177AssertCompileSize(RTSTRCACHEENTRY, 16);
178/** Pointer to a big string cache entry. */
179typedef RTSTRCACHEBIGENTRY *PRTSTRCACHEBIGENTRY;
180/** Pointer to a const big string cache entry. */
181typedef RTSTRCACHEBIGENTRY *PCRTSTRCACHEBIGENTRY;
182
183
/**
 * A free string cache entry.
 *
 * Overlays RTSTRCACHEENTRY in memory; the compile-time asserts below verify
 * that the discriminating fields line up (cRefs/uZero, szString/pNext).
 */
typedef struct RTSTRCACHEFREE
{
    /** Zero value indicating that it's a free entry (no refs, no hash). */
    uint32_t                uZero;
    /** Number of free bytes.  Only used for > 32 byte allocations. */
    uint32_t                cbFree;
    /** Pointer to the next free item. */
    struct RTSTRCACHEFREE  *pNext;
} RTSTRCACHEFREE;
AssertCompileSize(RTSTRCACHEENTRY, 16);
AssertCompileMembersAtSameOffset(RTSTRCACHEENTRY, cRefs, RTSTRCACHEFREE, uZero);
AssertCompileMembersAtSameOffset(RTSTRCACHEENTRY, szString, RTSTRCACHEFREE, pNext);
/** Pointer to a free string cache entry. */
typedef RTSTRCACHEFREE *PRTSTRCACHEFREE;
201
202#ifdef RTSTRCACHE_WITH_MERGED_ALLOCATOR
203
/**
 * A free string cache entry with merging.
 *
 * This differs from RTSTRCACHEFREE only in having a back pointer for more
 * efficient list management (doubly vs. singly linked lists).
 */
typedef struct RTSTRCACHEFREEMERGE
{
    /** Marker that indicates what kind of entry this is: either
     * RTSTRCACHEFREEMERGE_MAIN (block head) or RTSTRCACHEFREEMERGE_PART. */
    uint32_t                     uMarker;
    /** Number of free bytes.  Only used for > 32 byte allocations. */
    uint32_t                     cbFree;
    /** Pointer to the main node.  NULL for main nodes. */
    struct RTSTRCACHEFREEMERGE  *pMain;
    /** The free list entry. */
    RTLISTNODE                   ListEntry;
    /** Pads the size up to the minimum allocation unit for the merge list.
     * This both defines the minimum allocation unit and simplifies pointer
     * manipulation during merging and splitting. */
    uint8_t                      abPadding[ARCH_BITS == 32 ? 44 : 32];
} RTSTRCACHEFREEMERGE;
AssertCompileSize(RTSTRCACHEFREEMERGE, RT_BIT_32(RTSTRCACHE_MERGED_THRESHOLD_BIT));
/** Pointer to a free cache string in the merge list. */
typedef RTSTRCACHEFREEMERGE *PRTSTRCACHEFREEMERGE;

/** RTSTRCACHEFREEMERGE::uMarker value indicating that it's the real free chunk
 * header.  Must be something that's invalid UTF-8 for both little and big
 * endian system. */
# define RTSTRCACHEFREEMERGE_MAIN   UINT32_C(0xfffffff1)
/** RTSTRCACHEFREEMERGE::uMarker value indicating that it's part of a larger
 * chunk of free memory.  Must be something that's invalid UTF-8 for both little
 * and big endian system. */
# define RTSTRCACHEFREEMERGE_PART   UINT32_C(0xfffffff2)
237
238#endif /* RTSTRCACHE_WITH_MERGED_ALLOCATOR */
239
/**
 * Tracking structure for a chunk of memory used by the 16 byte or 32 byte
 * allocations.
 *
 * This occupies the first entry in the chunk.
 */
typedef struct RTSTRCACHECHUNK
{
    /** The size of the chunk. */
    size_t                   cb;
    /** Pointer to the next chunk. */
    struct RTSTRCACHECHUNK  *pNext;
} RTSTRCACHECHUNK;
AssertCompile(sizeof(RTSTRCACHECHUNK) <= sizeof(RTSTRCACHEENTRY));
/** Pointer to the chunk tracking structure. */
typedef RTSTRCACHECHUNK *PRTSTRCACHECHUNK;
256
257
/**
 * Cache instance data.
 */
typedef struct RTSTRCACHEINT
{
    /** The string cache magic (RTSTRCACHE_MAGIC). */
    uint32_t                u32Magic;
    /** Ref counter for the cache handle. */
    uint32_t volatile       cRefs;
    /** The number of strings currently entered in the cache. */
    uint32_t                cStrings;
    /** The size of the hash table. */
    uint32_t                cHashTab;
    /** Pointer to the hash table. */
    PRTSTRCACHEENTRY       *papHashTab;
    /** Free list for allocations of the sizes defined by g_acbFixedLists. */
    PRTSTRCACHEFREE         apFreeLists[RTSTRCACHE_NUM_OF_FIXED_SIZES];
#ifdef RTSTRCACHE_WITH_MERGED_ALLOCATOR
    /** Free lists for the merged allocator, indexed by the power-of-two size
     * class of the free block (see rtStrCacheRelinkMerged). */
    RTLISTANCHOR            aMergedFreeLists[RTSTRCACHE_HEAP_THRESHOLD_BIT - RTSTRCACHE_MERGED_THRESHOLD_BIT + 1];
#endif
    /** List of allocated memory chunks. */
    PRTSTRCACHECHUNK        pChunkList;
    /** List of big cache entries. */
    RTLISTANCHOR            BigEntryList;

    /** @name Statistics
     * @{ */
    /** The total size of all chunks. */
    size_t                  cbChunks;
    /** The total length of all the strings, terminators included. */
    size_t                  cbStrings;
    /** The total size of all the big entries. */
    size_t                  cbBigEntries;
    /** Hash collisions. */
    uint32_t                cHashCollisions;
    /** Secondary hash collisions. */
    uint32_t                cHashCollisions2;
    /** The number of inserts to compare cHashCollisions to. */
    uint32_t                cHashInserts;
    /** The number of rehashes. */
    uint32_t                cRehashes;
    /** @} */

    /** Critical section protecting the cache structures. */
    RTCRITSECT              CritSect;
} RTSTRCACHEINT;
/** Pointer to a cache instance. */
typedef RTSTRCACHEINT *PRTSTRCACHEINT;
307
308
309
310/*********************************************************************************************************************************
311* Global Variables *
312*********************************************************************************************************************************/
/** The entry sizes of the fixed lists (RTSTRCACHEINT::apFreeLists).
 * Must be in ascending order; rtStrCacheSelectFixedList scans it linearly. */
static const uint32_t g_acbFixedLists[RTSTRCACHE_NUM_OF_FIXED_SIZES] =
{
    16, 32, 48, 64, 96, 128, 192, 256, 320, 384, 448, 512
};

/** Init once for the default string cache (RTSTRCACHE_DEFAULT). */
static RTONCE       g_rtStrCacheOnce     = RTONCE_INITIALIZER;
/** The default string cache, created on demand. */
static RTSTRCACHE   g_hrtStrCacheDefault = NIL_RTSTRCACHE;
323
324
325/** @callback_method_impl{FNRTONCE, Initializes g_hrtStrCacheDefault} */
326static DECLCALLBACK(int) rtStrCacheInitDefault(void *pvUser)
327{
328 NOREF(pvUser);
329 return RTStrCacheCreate(&g_hrtStrCacheDefault, "Default");
330}
331
332
333RTDECL(int) RTStrCacheCreate(PRTSTRCACHE phStrCache, const char *pszName)
334{
335 int rc = VERR_NO_MEMORY;
336 PRTSTRCACHEINT pThis = (PRTSTRCACHEINT)RTMemAllocZ(sizeof(*pThis));
337 if (pThis)
338 {
339 pThis->cHashTab = RTSTRCACHE_INITIAL_HASH_SIZE;
340 pThis->papHashTab = (PRTSTRCACHEENTRY*)RTMemAllocZ(sizeof(pThis->papHashTab[0]) * pThis->cHashTab);
341 if (pThis->papHashTab)
342 {
343 rc = RTCritSectInit(&pThis->CritSect);
344 if (RT_SUCCESS(rc))
345 {
346 RTListInit(&pThis->BigEntryList);
347#ifdef RTSTRCACHE_WITH_MERGED_ALLOCATOR
348 for (uint32_t i = 0; i < RT_ELEMENTS(pThis->aMergedFreeLists); i++)
349 RTListInit(&pThis->aMergedFreeLists[i]);
350#endif
351 pThis->cRefs = 1;
352 pThis->u32Magic = RTSTRCACHE_MAGIC;
353
354 *phStrCache = pThis;
355 return VINF_SUCCESS;
356 }
357 RTMemFree(pThis->papHashTab);
358 }
359 RTMemFree(pThis);
360 }
361
362 RT_NOREF_PV(pszName);
363 return rc;
364}
365RT_EXPORT_SYMBOL(RTStrCacheCreate);
366
367
RTDECL(int) RTStrCacheDestroy(RTSTRCACHE hStrCache)
{
    /* NIL and the (lazily created) default cache are never destroyed. */
    if (   hStrCache == NIL_RTSTRCACHE
        || hStrCache == RTSTRCACHE_DEFAULT)
        return VINF_SUCCESS;

    PRTSTRCACHEINT pThis = hStrCache;
    RTSTRCACHE_VALID_RETURN_RC(pThis, VERR_INVALID_HANDLE);

    /*
     * Invalidate it. Enter the crit sect just to be on the safe side.
     */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, RTSTRCACHE_MAGIC_DEAD, RTSTRCACHE_MAGIC), VERR_INVALID_HANDLE);
    RTCritSectEnter(&pThis->CritSect);
    Assert(pThis->cRefs == 1);

    /* Free the page chunks backing the fixed/merged allocators; this drops
       all small string entries in one go. */
    PRTSTRCACHECHUNK pChunk;
    while ((pChunk = pThis->pChunkList) != NULL)
    {
        pThis->pChunkList = pChunk->pNext;
        RTMemPageFree(pChunk, pChunk->cb);
    }

    RTMemFree(pThis->papHashTab);
    pThis->papHashTab = NULL;
    pThis->cHashTab   = 0;

    /* Big entries were allocated individually from the heap, free them one
       by one. */
    PRTSTRCACHEBIGENTRY pCur, pNext;
    RTListForEachSafe(&pThis->BigEntryList, pCur, pNext, RTSTRCACHEBIGENTRY, ListEntry)
    {
        RTMemFree(pCur);
    }

    RTCritSectLeave(&pThis->CritSect);
    RTCritSectDelete(&pThis->CritSect);

    RTMemFree(pThis);
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTStrCacheDestroy);
408
409
410/**
411 * Selects the fixed free list index for a given minimum entry size.
412 *
413 * @returns Free list index.
414 * @param cbMin Minimum entry size.
415 */
416DECLINLINE(uint32_t) rtStrCacheSelectFixedList(uint32_t cbMin)
417{
418 Assert(cbMin <= g_acbFixedLists[RT_ELEMENTS(g_acbFixedLists) - 1]);
419 unsigned i = 0;
420 while (cbMin > g_acbFixedLists[i])
421 i++;
422 return i;
423}
424
425
#ifdef RT_STRICT
/* Fix: expand the macro argument instead of silently relying on the caller
   having a local named 'pThis' (all current callers pass pThis, so this is
   behavior-identical but removes the trap). */
# define RTSTRCACHE_CHECK(a_pThis)  do { rtStrCacheCheck(a_pThis); } while (0)
/**
 * Internal cache consistency check (strict builds only).
 *
 * Walks the merged free lists asserting that every node is a main header
 * with a positive, unit-aligned free size.
 */
static void rtStrCacheCheck(PRTSTRCACHEINT pThis)
{
# ifdef RTSTRCACHE_WITH_MERGED_ALLOCATOR
    for (uint32_t i = 0; i < RT_ELEMENTS(pThis->aMergedFreeLists); i++)
    {
        PRTSTRCACHEFREEMERGE pFree;
        RTListForEach(&pThis->aMergedFreeLists[i], pFree, RTSTRCACHEFREEMERGE, ListEntry)
        {
            Assert(pFree->uMarker == RTSTRCACHEFREEMERGE_MAIN);
            Assert(pFree->cbFree > 0);
            Assert(RT_ALIGN_32(pFree->cbFree, sizeof(*pFree)) == pFree->cbFree);
        }
    }
# endif
    RT_NOREF_PV(pThis);
}
#else
# define RTSTRCACHE_CHECK(a_pThis)  do { } while (0)
#endif
450
451
452/**
453 * Finds the first empty hash table entry given a hash+length value.
454 *
455 * ASSUMES that the hash table isn't full.
456 *
457 * @returns Hash table index.
458 * @param pThis The string cache instance.
459 * @param uHashLen The hash + length (not RTSTRCACHEENTRY_BIG_LEN).
460 */
461static uint32_t rtStrCacheFindEmptyHashTabEntry(PRTSTRCACHEINT pThis, uint32_t uHashLen)
462{
463 uint32_t iHash = uHashLen % pThis->cHashTab;
464 for (;;)
465 {
466 PRTSTRCACHEENTRY pEntry = pThis->papHashTab[iHash];
467 if (pEntry == NULL || pEntry == PRTSTRCACHEENTRY_NIL)
468 return iHash;
469
470 /* Advance. */
471 iHash += RTSTRCACHE_COLLISION_INCR(uHashLen);
472 iHash %= pThis->cHashTab;
473 }
474}
475
/**
 * Grows the hash table.
 *
 * @returns VINF_SUCCESS or VERR_NO_MEMORY.
 * @param   pThis       The string cache instance.
 */
static int rtStrCacheGrowHashTab(PRTSTRCACHEINT pThis)
{
    /*
     * Allocate a new hash table, RTSTRCACHE_HASH_GROW_FACTOR times the size
     * of the old one.
     */
    uint32_t          cNew   = pThis->cHashTab * RTSTRCACHE_HASH_GROW_FACTOR;
    PRTSTRCACHEENTRY *papNew = (PRTSTRCACHEENTRY *)RTMemAllocZ(sizeof(papNew[0]) * cNew);
    if (papNew == NULL)
        return VERR_NO_MEMORY;

    /*
     * Install the new table and move the items from the old table and into the new one.
     */
    PRTSTRCACHEENTRY *papOld = pThis->papHashTab;
    uint32_t          iOld   = pThis->cHashTab;

    pThis->papHashTab = papNew;
    pThis->cHashTab   = cNew;
    pThis->cRehashes++;

    while (iOld-- > 0)
    {
        PRTSTRCACHEENTRY pEntry = papOld[iOld];
        if (pEntry != NULL && pEntry != PRTSTRCACHEENTRY_NIL)
        {
            /* Big entries store their real length out of line; recover it so
               the probe key matches the one used at insertion time. */
            uint32_t cchString = pEntry->cchString;
            if (cchString == RTSTRCACHEENTRY_BIG_LEN)
                cchString = RT_FROM_MEMBER(pEntry, RTSTRCACHEBIGENTRY, Core)->cchString;

            uint32_t iHash = rtStrCacheFindEmptyHashTabEntry(pThis, RT_MAKE_U32(pEntry->uHash, cchString));
            pThis->papHashTab[iHash] = pEntry;
        }
    }

    /*
     * Free the old hash table.
     */
    RTMemFree(papOld);
    return VINF_SUCCESS;
}
522
523#ifdef RTSTRCACHE_WITH_MERGED_ALLOCATOR
524
/**
 * Link/Relink into the right free list.
 *
 * @param   pThis   The string cache instance.
 * @param   pFree   The free string entry.
 */
static void rtStrCacheRelinkMerged(PRTSTRCACHEINT pThis, PRTSTRCACHEFREEMERGE pFree)
{
    Assert(pFree->uMarker == RTSTRCACHEFREEMERGE_MAIN);
    Assert(pFree->cbFree > 0);
    Assert(RT_ALIGN_32(pFree->cbFree, sizeof(*pFree)) == pFree->cbFree);

    if (!RTListIsEmpty(&pFree->ListEntry))
        RTListNodeRemove(&pFree->ListEntry);

    /* List index = log2(cbFree) relative to the merged threshold, clamped to
       the last list which takes everything of the largest class and up. */
    uint32_t iList = (ASMBitLastSetU32(pFree->cbFree) - 1) - RTSTRCACHE_MERGED_THRESHOLD_BIT;
    if (iList >= RT_ELEMENTS(pThis->aMergedFreeLists))
        iList = RT_ELEMENTS(pThis->aMergedFreeLists) - 1;

    RTListPrepend(&pThis->aMergedFreeLists[iList], &pFree->ListEntry);
}
546
547
/**
 * Allocate a cache entry from the merged free lists.
 *
 * @returns Pointer to the cache entry on success, NULL on allocation error.
 * @param   pThis       The string cache instance.
 * @param   uHash       The full hash of the string.
 * @param   pchString   The string.
 * @param   cchString   The string length.
 * @param   cbEntry     The required entry size.
 */
static PRTSTRCACHEENTRY rtStrCacheAllocMergedEntry(PRTSTRCACHEINT pThis, uint32_t uHash,
                                                   const char *pchString, uint32_t cchString, uint32_t cbEntry)
{
    cbEntry = RT_ALIGN_32(cbEntry, sizeof(RTSTRCACHEFREEMERGE));
    Assert(cbEntry > cchString);

    /*
     * Search the list heads first, starting with the size class guaranteed
     * to fit cbEntry (round the class up when cbEntry isn't a power of two).
     */
    PRTSTRCACHEFREEMERGE pFree = NULL;

    uint32_t iList = ASMBitLastSetU32(cbEntry) - 1;
    if (!RT_IS_POWER_OF_TWO(cbEntry))
        iList++;
    iList -= RTSTRCACHE_MERGED_THRESHOLD_BIT;

    while (iList < RT_ELEMENTS(pThis->aMergedFreeLists))
    {
        pFree = RTListGetFirst(&pThis->aMergedFreeLists[iList], RTSTRCACHEFREEMERGE, ListEntry);
        if (pFree)
        {
            /*
             * Found something. Should we split it? We split from the end
             * to avoid having to update all the sub entries.
             */
            Assert(pFree->uMarker == RTSTRCACHEFREEMERGE_MAIN);
            Assert(pFree->cbFree >= cbEntry);
            Assert(RT_ALIGN_32(pFree->cbFree, sizeof(*pFree)) == pFree->cbFree);

            if (pFree->cbFree == cbEntry)
                RTListNodeRemove(&pFree->ListEntry);
            else
            {
                uint32_t             cRemainder = (pFree->cbFree - cbEntry) / sizeof(*pFree);
                PRTSTRCACHEFREEMERGE pRemainder = pFree;
                pFree += cRemainder;

                Assert((pRemainder->cbFree - cbEntry) == cRemainder * sizeof(*pFree));
                pRemainder->cbFree = cRemainder * sizeof(*pFree);

                rtStrCacheRelinkMerged(pThis, pRemainder);
            }
            break;
        }
        iList++;
    }
    if (!pFree)
    {
        /*
         * Allocate a new block. (We could search the list below in some
         * cases, but it's too much effort to write and execute).
         */
        size_t const cbChunk = RTSTRCACHE_MERGED_GROW_SIZE; AssertReturn(cbChunk > cbEntry * 2, NULL);
        PRTSTRCACHECHUNK pChunk = (PRTSTRCACHECHUNK)RTMemPageAlloc(cbChunk);
        if (!pChunk)
            return NULL;
        pChunk->cb = cbChunk;
        pChunk->pNext = pThis->pChunkList;
        pThis->pChunkList = pChunk;
        pThis->cbChunks  += cbChunk;
        AssertCompile(sizeof(*pChunk) <= sizeof(*pFree));

        /*
         * Get one node for the allocation at hand (first unit after the
         * chunk header).
         */
        pFree = (PRTSTRCACHEFREEMERGE)((uintptr_t)pChunk + sizeof(*pFree));

        /*
         * Create a free block out of the remainder (always a remainder).
         */
        PRTSTRCACHEFREEMERGE pNewFree = (PRTSTRCACHEFREEMERGE)((uintptr_t)pFree + cbEntry);
        pNewFree->uMarker = RTSTRCACHEFREEMERGE_MAIN;
        pNewFree->cbFree  = cbChunk - sizeof(*pNewFree) - cbEntry; Assert(pNewFree->cbFree < cbChunk && pNewFree->cbFree > 0);
        pNewFree->pMain   = NULL;
        RTListInit(&pNewFree->ListEntry);

        /* Mark the trailing sub units as PART with a back pointer to the
           main header, so interior pointers can be resolved later. */
        uint32_t iInternalBlock = pNewFree->cbFree / sizeof(*pNewFree);
        while (iInternalBlock-- > 1)
        {
            pNewFree[iInternalBlock].uMarker = RTSTRCACHEFREEMERGE_PART;
            pNewFree[iInternalBlock].cbFree  = 0;
            pNewFree[iInternalBlock].pMain   = pNewFree;
        }

        rtStrCacheRelinkMerged(pThis, pNewFree);
    }

    /*
     * Initialize the entry. We zero all bytes we don't use so they cannot
     * accidentally be mistaken for a free entry.
     */
    ASMCompilerBarrier();
    PRTSTRCACHEENTRY pEntry = (PRTSTRCACHEENTRY)pFree;
    pEntry->cRefs     = 1;
    pEntry->uHash     = (uint16_t)uHash;
    pEntry->cchString = (uint16_t)cchString;
    memcpy(pEntry->szString, pchString, cchString);
    RT_BZERO(&pEntry->szString[cchString], cbEntry - RT_UOFFSETOF(RTSTRCACHEENTRY, szString) - cchString);

    RTSTRCACHE_CHECK(pThis);

    return pEntry;
}
661
662#endif /* RTSTRCACHE_WITH_MERGED_ALLOCATOR */
663
/**
 * Allocate a cache entry from the heap.
 *
 * @returns Pointer to the cache entry on success, NULL on allocation error.
 * @param   pThis       The string cache instance.
 * @param   uHash       The full hash of the string.
 * @param   pchString   The string.
 * @param   cchString   The string length.
 */
static PRTSTRCACHEENTRY rtStrCacheAllocHeapEntry(PRTSTRCACHEINT pThis, uint32_t uHash,
                                                 const char *pchString, uint32_t cchString)
{
    /*
     * Allocate a heap block for storing the string. We do some size aligning
     * here to encourage the heap to give us optimal alignment.
     */
    size_t              cbEntry   = RT_UOFFSETOF_DYN(RTSTRCACHEBIGENTRY, Core.szString[cchString + 1]);
    PRTSTRCACHEBIGENTRY pBigEntry = (PRTSTRCACHEBIGENTRY)RTMemAlloc(RT_ALIGN_Z(cbEntry, RTSTRCACHE_HEAP_ENTRY_SIZE_ALIGN));
    if (!pBigEntry)
        return NULL;

    /*
     * Initialize the block.  The real length goes in the big entry; the core
     * entry carries the RTSTRCACHEENTRY_BIG_LEN sentinel instead.
     */
    RTListAppend(&pThis->BigEntryList, &pBigEntry->ListEntry);
    pThis->cbBigEntries      += cbEntry;
    pBigEntry->cchString      = cchString;
    pBigEntry->uHash          = uHash;
    pBigEntry->Core.cRefs     = 1;
    pBigEntry->Core.uHash     = (uint16_t)uHash;
    pBigEntry->Core.cchString = RTSTRCACHEENTRY_BIG_LEN;
    /* The following is to try avoid gcc warnings/errors regarding array bounds: */
    char *pszDst = (char *)memcpy(pBigEntry->Core.szString, pchString, cchString);
    pszDst[cchString] = '\0';
    ASMCompilerBarrier();

    return &pBigEntry->Core;
}
702
703
/**
 * Allocate a cache entry from a fixed size free list.
 *
 * @returns Pointer to the cache entry on success, NULL on allocation error.
 * @param   pThis       The string cache instance.
 * @param   uHash       The full hash of the string.
 * @param   pchString   The string.
 * @param   cchString   The string length.
 * @param   iFreeList   Which free list.
 */
static PRTSTRCACHEENTRY rtStrCacheAllocFixedEntry(PRTSTRCACHEINT pThis, uint32_t uHash,
                                                  const char *pchString, uint32_t cchString, uint32_t iFreeList)
{
    /*
     * Get an entry from the free list. If empty, allocate another chunk of
     * memory and split it up into free entries of the desired size.
     */
    PRTSTRCACHEFREE pFree = pThis->apFreeLists[iFreeList];
    if (!pFree)
    {
        PRTSTRCACHECHUNK pChunk = (PRTSTRCACHECHUNK)RTMemPageAlloc(RTSTRCACHE_FIXED_GROW_SIZE);
        if (!pChunk)
            return NULL;
        pChunk->cb = RTSTRCACHE_FIXED_GROW_SIZE;
        pChunk->pNext = pThis->pChunkList;
        pThis->pChunkList = pChunk;
        pThis->cbChunks  += RTSTRCACHE_FIXED_GROW_SIZE;

        /* The first cbEntry bytes hold the chunk header; carve the rest into
           free entries, building the singly linked list back to front. */
        PRTSTRCACHEFREE pPrev   = NULL;
        uint32_t const  cbEntry = g_acbFixedLists[iFreeList];
        uint32_t        cLeft   = RTSTRCACHE_FIXED_GROW_SIZE / cbEntry - 1;
        pFree = (PRTSTRCACHEFREE)((uintptr_t)pChunk + cbEntry);

        Assert(sizeof(*pChunk) <= cbEntry);
        Assert(sizeof(*pFree)  <= cbEntry);
        Assert(cbEntry < RTSTRCACHE_FIXED_GROW_SIZE / 16);

        while (cLeft-- > 0)
        {
            pFree->uZero  = 0;
            pFree->cbFree = cbEntry;
            pFree->pNext  = pPrev;
            pPrev = pFree;
            pFree = (PRTSTRCACHEFREE)((uintptr_t)pFree + cbEntry);
        }

        Assert(pPrev);
        pThis->apFreeLists[iFreeList] = pFree = pPrev;
    }

    /*
     * Unlink it.
     */
    pThis->apFreeLists[iFreeList] = pFree->pNext;
    ASMCompilerBarrier();

    /*
     * Initialize the entry.
     */
    PRTSTRCACHEENTRY pEntry = (PRTSTRCACHEENTRY)pFree;
    pEntry->cRefs     = 1;
    pEntry->uHash     = (uint16_t)uHash;
    pEntry->cchString = (uint16_t)cchString;
    memcpy(pEntry->szString, pchString, cchString);
    pEntry->szString[cchString] = '\0';

    return pEntry;
}
772
773
/**
 * Looks up a string in the hash table.
 *
 * @returns Pointer to the string cache entry, NULL + piFreeHashTabEntry if not
 *          found.
 * @param   pThis               The string cache instance.
 * @param   uHashLen            The hash + length (not RTSTRCACHEENTRY_BIG_LEN).
 * @param   cchString           The real length.
 * @param   pchString           The string.
 * @param   piFreeHashTabEntry  Where to store the insertion index if NULL
 *                              is returned (same as what
 *                              rtStrCacheFindEmptyHashTabEntry would return).
 * @param   pcCollisions        Where to return a collision counter.
 */
static PRTSTRCACHEENTRY rtStrCacheLookUp(PRTSTRCACHEINT pThis, uint32_t uHashLen, uint32_t cchString, const char *pchString,
                                         uint32_t *piFreeHashTabEntry, uint32_t *pcCollisions)
{
    *piFreeHashTabEntry = UINT32_MAX;
    *pcCollisions = 0;

    /* Predict the entry's cchString field: small entries store the length
       inline, big ones store the RTSTRCACHEENTRY_BIG_LEN sentinel. */
    uint16_t cchStringFirst = RT_UOFFSETOF_DYN(RTSTRCACHEENTRY, szString[cchString + 1]) < RTSTRCACHE_HEAP_THRESHOLD
                            ? (uint16_t)cchString : RTSTRCACHEENTRY_BIG_LEN;
    uint32_t iHash          = uHashLen % pThis->cHashTab;
    for (;;)
    {
        PRTSTRCACHEENTRY pEntry = pThis->papHashTab[iHash];

        /* Give up if NULL, but record the index for insertion. */
        if (pEntry == NULL)
        {
            if (*piFreeHashTabEntry == UINT32_MAX)
                *piFreeHashTabEntry = iHash;
            return NULL;
        }

        if (pEntry != PRTSTRCACHEENTRY_NIL)
        {
            /* Compare. */
            if (   pEntry->uHash == (uint16_t)uHashLen
                && pEntry->cchString == cchStringFirst)
            {
                if (pEntry->cchString != RTSTRCACHEENTRY_BIG_LEN)
                {
                    if (   !memcmp(pEntry->szString, pchString, cchString)
                        && pEntry->szString[cchString] == '\0')
                        return pEntry;
                }
                else
                {
                    PRTSTRCACHEBIGENTRY pBigEntry = RT_FROM_MEMBER(pEntry, RTSTRCACHEBIGENTRY, Core);
                    if (   pBigEntry->cchString == cchString
                        && !memcmp(pBigEntry->Core.szString, pchString, cchString))
                        return &pBigEntry->Core;
                }
            }

            /* Only count collisions until the first reusable (NIL) slot has
               been recorded. */
            if (*piFreeHashTabEntry == UINT32_MAX)
                *pcCollisions += 1;
        }
        /* Record the first NIL index for insertion in case we don't get a hit. */
        else if (*piFreeHashTabEntry == UINT32_MAX)
            *piFreeHashTabEntry = iHash;

        /* Advance. */
        iHash += RTSTRCACHE_COLLISION_INCR(uHashLen);
        iHash %= pThis->cHashTab;
    }
}
842
843
RTDECL(const char *) RTStrCacheEnterN(RTSTRCACHE hStrCache, const char *pchString, size_t cchString)
{
    PRTSTRCACHEINT pThis = hStrCache;
    RTSTRCACHE_VALID_RETURN_RC(pThis, NULL);


    /*
     * Calculate the hash and figure the exact string length, then look for an existing entry.
     */
    uint32_t const uHash    = sdbmN(pchString, cchString, &cchString);
    uint32_t const uHashLen = RT_MAKE_U32(uHash, cchString);
    AssertReturn(cchString < _1G, NULL);
    uint32_t const cchString32 = (uint32_t)cchString;

    RTCritSectEnter(&pThis->CritSect);
    RTSTRCACHE_CHECK(pThis);

    uint32_t cCollisions;
    uint32_t iFreeHashTabEntry;
    PRTSTRCACHEENTRY pEntry = rtStrCacheLookUp(pThis, uHashLen, cchString32, pchString, &iFreeHashTabEntry, &cCollisions);
    if (pEntry)
    {
        /* Hit: just add a reference to the existing entry. */
        uint32_t cRefs = ASMAtomicIncU32(&pEntry->cRefs);
        Assert(cRefs < UINT32_MAX / 2); NOREF(cRefs);
    }
    else
    {
        /*
         * Allocate a new entry, picking the allocator by entry size: heap
         * for big strings, merged/fixed free lists for the rest.
         */
        uint32_t cbEntry = cchString32 + 1U + RT_UOFFSETOF(RTSTRCACHEENTRY, szString);
        if (cbEntry >= RTSTRCACHE_HEAP_THRESHOLD)
            pEntry = rtStrCacheAllocHeapEntry(pThis, uHash, pchString, cchString32);
#ifdef RTSTRCACHE_WITH_MERGED_ALLOCATOR
        else if (cbEntry >= RT_BIT_32(RTSTRCACHE_MERGED_THRESHOLD_BIT))
            pEntry = rtStrCacheAllocMergedEntry(pThis, uHash, pchString, cchString32, cbEntry);
#endif
        else
            pEntry = rtStrCacheAllocFixedEntry(pThis, uHash, pchString, cchString32,
                                               rtStrCacheSelectFixedList(cbEntry));
        if (!pEntry)
        {
            RTSTRCACHE_CHECK(pThis);
            RTCritSectLeave(&pThis->CritSect);
            return NULL;
        }

        /*
         * Insert it into the hash table, growing the table when less than
         * half the slots are free.
         */
        if (pThis->cHashTab - pThis->cStrings < pThis->cHashTab / 2)
        {
            int rc = rtStrCacheGrowHashTab(pThis);
            if (RT_SUCCESS(rc))
                iFreeHashTabEntry = rtStrCacheFindEmptyHashTabEntry(pThis, uHashLen);
            else if (pThis->cHashTab - pThis->cStrings <= pThis->cHashTab / 8) /* no more than 12.5% free slots left => error */
            {
                /* Growing failed and the table is nearly full: enter the new
                   entry temporarily so RTStrCacheRelease can free it through
                   the regular code path, then fail the call. */
                pThis->papHashTab[iFreeHashTabEntry] = pEntry;
                pThis->cStrings++;
                pThis->cHashInserts++;
                pThis->cHashCollisions  += cCollisions > 0;
                pThis->cHashCollisions2 += cCollisions > 1;
                pThis->cbStrings        += cchString32 + 1;
                RTStrCacheRelease(hStrCache, pEntry->szString);

                RTSTRCACHE_CHECK(pThis);
                RTCritSectLeave(&pThis->CritSect);
                return NULL;
            }
        }

        pThis->papHashTab[iFreeHashTabEntry] = pEntry;
        pThis->cStrings++;
        pThis->cHashInserts++;
        pThis->cHashCollisions  += cCollisions > 0;
        pThis->cHashCollisions2 += cCollisions > 1;
        pThis->cbStrings        += cchString32 + 1;
        Assert(pThis->cStrings < pThis->cHashTab && pThis->cStrings > 0);
    }

    RTSTRCACHE_CHECK(pThis);
    RTCritSectLeave(&pThis->CritSect);
    return pEntry->szString;
}
RT_EXPORT_SYMBOL(RTStrCacheEnterN);
929
930
931RTDECL(const char *) RTStrCacheEnter(RTSTRCACHE hStrCache, const char *psz)
932{
933 return RTStrCacheEnterN(hStrCache, psz, strlen(psz));
934}
935RT_EXPORT_SYMBOL(RTStrCacheEnter);
936
937
/**
 * Common worker for RTStrCacheEnterLower and RTStrCacheEnterLowerN: makes a
 * lower-cased copy of the input and enters that into the cache.
 *
 * @returns Pointer to the cached lower-cased string, NULL on failure.
 * @param   pThis       The string cache instance (already validated).
 * @param   pchString   The string to lower-case and enter.
 * @param   cchString   The exact string length.
 */
static const char *rtStrCacheEnterLowerWorker(PRTSTRCACHEINT pThis, const char *pchString, size_t cchString)
{
    /*
     * Try use a stack (alloca) buffer first for short strings.
     */
    if (cchString < 512)
    {
        char *pszStackBuf = (char *)alloca(cchString + 1);
        if (pszStackBuf)
        {
            memcpy(pszStackBuf, pchString, cchString);
            pszStackBuf[cchString] = '\0';
            RTStrToLower(pszStackBuf);
            return RTStrCacheEnterN(pThis, pszStackBuf, cchString);
        }
    }

    /*
     * Fall back on heap.
     */
    char *pszHeapBuf = (char *)RTMemTmpAlloc(cchString + 1);
    if (!pszHeapBuf)
        return NULL;
    memcpy(pszHeapBuf, pchString, cchString);
    pszHeapBuf[cchString] = '\0';
    RTStrToLower(pszHeapBuf);
    const char *pszRet = RTStrCacheEnterN(pThis, pszHeapBuf, cchString);
    RTMemTmpFree(pszHeapBuf);
    return pszRet;
}
968
969RTDECL(const char *) RTStrCacheEnterLowerN(RTSTRCACHE hStrCache, const char *pchString, size_t cchString)
970{
971 PRTSTRCACHEINT pThis = hStrCache;
972 RTSTRCACHE_VALID_RETURN_RC(pThis, NULL);
973 return rtStrCacheEnterLowerWorker(pThis, pchString, RTStrNLen(pchString, cchString));
974}
975RT_EXPORT_SYMBOL(RTStrCacheEnterLowerN);
976
977
978RTDECL(const char *) RTStrCacheEnterLower(RTSTRCACHE hStrCache, const char *psz)
979{
980 PRTSTRCACHEINT pThis = hStrCache;
981 RTSTRCACHE_VALID_RETURN_RC(pThis, NULL);
982 return rtStrCacheEnterLowerWorker(pThis, psz, strlen(psz));
983}
984RT_EXPORT_SYMBOL(RTStrCacheEnterLower);
985
986
987RTDECL(uint32_t) RTStrCacheRetain(const char *psz)
988{
989 AssertPtr(psz);
990
991 PRTSTRCACHEENTRY pStr = RT_FROM_MEMBER(psz, RTSTRCACHEENTRY, szString);
992 Assert(!((uintptr_t)pStr & 15) || pStr->cchString == RTSTRCACHEENTRY_BIG_LEN);
993
994 uint32_t cRefs = ASMAtomicIncU32(&pStr->cRefs);
995 Assert(cRefs > 1);
996 Assert(cRefs < UINT32_MAX / 2);
997
998 return cRefs;
999}
1000RT_EXPORT_SYMBOL(RTStrCacheRetain);
1001
1002
/**
 * Frees a cache entry whose reference count has reached zero.
 *
 * Removes the entry from the hash table, updates the statistics, and returns
 * the memory to the appropriate place: a fixed-size free list, the merged
 * free-block allocator (when RTSTRCACHE_WITH_MERGED_ALLOCATOR is defined), or
 * the heap for big entries.
 *
 * @returns 0 (the new reference count, for the caller to pass along).
 * @param   pThis   The string cache instance.
 * @param   pStr    The entry to free.
 */
static uint32_t rtStrCacheFreeEntry(PRTSTRCACHEINT pThis, PRTSTRCACHEENTRY pStr)
{
    RTCritSectEnter(&pThis->CritSect);
    RTSTRCACHE_CHECK(pThis);

    /* Remove it from the hash table.  Big entries keep their real length in
       the RTSTRCACHEBIGENTRY wrapper; regular ones in the core entry. */
    uint32_t cchString = pStr->cchString == RTSTRCACHEENTRY_BIG_LEN
                       ? RT_FROM_MEMBER(pStr, RTSTRCACHEBIGENTRY, Core)->cchString
                       : pStr->cchString;
    uint32_t uHashLen = RT_MAKE_U32(pStr->uHash, cchString);
    uint32_t iHash = uHashLen % pThis->cHashTab;
    if (pThis->papHashTab[iHash] == pStr)
        pThis->papHashTab[iHash] = PRTSTRCACHEENTRY_NIL;
    else
    {
        /* Not in the primary slot: walk the same open-addressing probe
           sequence used on insertion until we hit the entry.  A NULL slot
           means the entry is missing from the table (assert + break). */
        do
        {
            AssertBreak(pThis->papHashTab[iHash] != NULL);
            iHash += RTSTRCACHE_COLLISION_INCR(uHashLen);
            iHash %= pThis->cHashTab;
        } while (pThis->papHashTab[iHash] != pStr);
        if (RT_LIKELY(pThis->papHashTab[iHash] == pStr))
            pThis->papHashTab[iHash] = PRTSTRCACHEENTRY_NIL;
        else
        {
            /* Strict-build diagnostics only: scan the whole table to report
               where (if anywhere) the entry actually is. */
            AssertFailed();
            iHash = pThis->cHashTab;
            while (iHash-- > 0)
                if (pThis->papHashTab[iHash] == pStr)
                    break;
            AssertMsgFailed(("iHash=%u cHashTab=%u\n", iHash, pThis->cHashTab));
        }
    }

    pThis->cStrings--;
    pThis->cbStrings -= cchString;
    Assert(pThis->cStrings < pThis->cHashTab);

    /* Free it. */
    if (pStr->cchString != RTSTRCACHEENTRY_BIG_LEN)
    {
        uint32_t const cbMin = pStr->cchString + 1U + RT_UOFFSETOF(RTSTRCACHEENTRY, szString);
#ifdef RTSTRCACHE_WITH_MERGED_ALLOCATOR
        if (cbMin <= RTSTRCACHE_MAX_FIXED)
#endif
        {
            /*
             * No merging, just add it to the list.
             */
            uint32_t const iFreeList = rtStrCacheSelectFixedList(cbMin);
            ASMCompilerBarrier();
            PRTSTRCACHEFREE pFreeStr = (PRTSTRCACHEFREE)pStr;
            pFreeStr->cbFree = cbMin;
            pFreeStr->uZero = 0;
            pFreeStr->pNext = pThis->apFreeLists[iFreeList];
            pThis->apFreeLists[iFreeList] = pFreeStr;
        }
#ifdef RTSTRCACHE_WITH_MERGED_ALLOCATOR
        else
        {
            /*
             * Complicated mode, we merge with adjacent nodes.
             */
            ASMCompilerBarrier();
            PRTSTRCACHEFREEMERGE pFreeStr = (PRTSTRCACHEFREEMERGE)pStr;
            pFreeStr->cbFree = RT_ALIGN_32(cbMin, sizeof(*pFreeStr));
            pFreeStr->uMarker = RTSTRCACHEFREEMERGE_MAIN;
            pFreeStr->pMain = NULL;
            RTListInit(&pFreeStr->ListEntry);

            /*
             * Merge with previous?
             * (Reading one block back is safe because there is always the
             * RTSTRCACHECHUNK structure at the head of each memory chunk.)
             */
            uint32_t cInternalBlocks = pFreeStr->cbFree / sizeof(*pFreeStr);
            PRTSTRCACHEFREEMERGE pMain = pFreeStr - 1;
            if (   pMain->uMarker == RTSTRCACHEFREEMERGE_MAIN
                || pMain->uMarker == RTSTRCACHEFREEMERGE_PART)
            {
                /* Previous block is free: rewind to its MAIN header and grow it. */
                while (pMain->uMarker != RTSTRCACHEFREEMERGE_MAIN)
                    pMain--;
                pMain->cbFree += pFreeStr->cbFree;
            }
            else
            {
                /* No free predecessor: this block becomes its own MAIN header. */
                pMain = pFreeStr;
                pFreeStr++;
                cInternalBlocks--;
            }

            /*
             * Mark internal blocks in the string we're freeing.
             */
            while (cInternalBlocks-- > 0)
            {
                pFreeStr->uMarker = RTSTRCACHEFREEMERGE_PART;
                pFreeStr->cbFree = 0;
                pFreeStr->pMain = pMain;
                RTListInit(&pFreeStr->ListEntry);
                pFreeStr++;
            }

            /*
             * Merge with next? Limitation: We won't try cross page boundaries.
             * (pFreeStr points to the first free entry after the string now.)
             */
            if (   PAGE_ADDRESS(pFreeStr) == PAGE_ADDRESS(&pFreeStr[-1])
                && pFreeStr->uMarker == RTSTRCACHEFREEMERGE_MAIN)
            {
                pMain->cbFree += pFreeStr->cbFree;
                cInternalBlocks = pFreeStr->cbFree / sizeof(*pFreeStr);
                Assert(cInternalBlocks > 0);

                /* Update the main block we merge with. */
                pFreeStr->cbFree = 0;
                pFreeStr->uMarker = RTSTRCACHEFREEMERGE_PART;
                RTListNodeRemove(&pFreeStr->ListEntry);
                RTListInit(&pFreeStr->ListEntry);

                /* Change the internal blocks we merged in. */
                cInternalBlocks--;
                while (cInternalBlocks-- > 0)
                {
                    pFreeStr++;
                    pFreeStr->pMain = pMain;
                    Assert(pFreeStr->uMarker == RTSTRCACHEFREEMERGE_PART);
                    Assert(!pFreeStr->cbFree);
                }
            }

            /*
             * Add/relink into the appropriate free list.
             */
            rtStrCacheRelinkMerged(pThis, pMain);
        }
#endif /* RTSTRCACHE_WITH_MERGED_ALLOCATOR */
        RTSTRCACHE_CHECK(pThis);
        RTCritSectLeave(&pThis->CritSect);
    }
    else
    {
        /* Big string: unlink from the big-entry list, adjust accounting, and
           free it on the heap (after leaving the critsect). */
        PRTSTRCACHEBIGENTRY pBigStr = RT_FROM_MEMBER(pStr, RTSTRCACHEBIGENTRY, Core);
        RTListNodeRemove(&pBigStr->ListEntry);
        pThis->cbBigEntries -= RT_ALIGN_32(RT_UOFFSETOF_DYN(RTSTRCACHEBIGENTRY, Core.szString[cchString + 1]),
                                           RTSTRCACHE_HEAP_ENTRY_SIZE_ALIGN);

        RTSTRCACHE_CHECK(pThis);
        RTCritSectLeave(&pThis->CritSect);

        RTMemFree(pBigStr);
    }

    return 0;
}
1159
1160RTDECL(uint32_t) RTStrCacheRelease(RTSTRCACHE hStrCache, const char *psz)
1161{
1162 if (!psz)
1163 return 0;
1164
1165 PRTSTRCACHEINT pThis = hStrCache;
1166 RTSTRCACHE_VALID_RETURN_RC(pThis, UINT32_MAX);
1167
1168 AssertPtr(psz);
1169 PRTSTRCACHEENTRY pStr = RT_FROM_MEMBER(psz, RTSTRCACHEENTRY, szString);
1170 Assert(!((uintptr_t)pStr & 15) || pStr->cchString == RTSTRCACHEENTRY_BIG_LEN);
1171
1172 /*
1173 * Drop a reference and maybe free the entry.
1174 */
1175 uint32_t cRefs = ASMAtomicDecU32(&pStr->cRefs);
1176 Assert(cRefs < UINT32_MAX / 2);
1177 if (!cRefs)
1178 return rtStrCacheFreeEntry(pThis, pStr);
1179
1180 return cRefs;
1181}
1182RT_EXPORT_SYMBOL(RTStrCacheRelease);
1183
1184
1185RTDECL(size_t) RTStrCacheLength(const char *psz)
1186{
1187 if (!psz)
1188 return 0;
1189
1190 AssertPtr(psz);
1191 PRTSTRCACHEENTRY pStr = RT_FROM_MEMBER(psz, RTSTRCACHEENTRY, szString);
1192 if (pStr->cchString == RTSTRCACHEENTRY_BIG_LEN)
1193 {
1194 PRTSTRCACHEBIGENTRY pBigStr = RT_FROM_MEMBER(psz, RTSTRCACHEBIGENTRY, Core.szString);
1195 return pBigStr->cchString;
1196 }
1197 Assert(!((uintptr_t)pStr & 15));
1198 return pStr->cchString;
1199}
1200RT_EXPORT_SYMBOL(RTStrCacheLength);
1201
1202
1203RTDECL(bool) RTStrCacheIsRealImpl(void)
1204{
1205 return true;
1206}
1207RT_EXPORT_SYMBOL(RTStrCacheIsRealImpl);
1208
1209
1210RTDECL(uint32_t) RTStrCacheGetStats(RTSTRCACHE hStrCache, size_t *pcbStrings, size_t *pcbChunks, size_t *pcbBigEntries,
1211 uint32_t *pcHashCollisions, uint32_t *pcHashCollisions2, uint32_t *pcHashInserts,
1212 uint32_t *pcRehashes)
1213{
1214 PRTSTRCACHEINT pThis = hStrCache;
1215 RTSTRCACHE_VALID_RETURN_RC(pThis, UINT32_MAX);
1216
1217 RTCritSectEnter(&pThis->CritSect);
1218
1219 if (pcbStrings)
1220 *pcbStrings = pThis->cbStrings;
1221 if (pcbChunks)
1222 *pcbChunks = pThis->cbChunks;
1223 if (pcbBigEntries)
1224 *pcbBigEntries = pThis->cbBigEntries;
1225 if (pcHashCollisions)
1226 *pcHashCollisions = pThis->cHashCollisions;
1227 if (pcHashCollisions2)
1228 *pcHashCollisions2 = pThis->cHashCollisions2;
1229 if (pcHashInserts)
1230 *pcHashInserts = pThis->cHashInserts;
1231 if (pcRehashes)
1232 *pcRehashes = pThis->cRehashes;
1233 uint32_t cStrings = pThis->cStrings;
1234
1235 RTCritSectLeave(&pThis->CritSect);
1236 return cStrings;
1237}
1238RT_EXPORT_SYMBOL(RTStrCacheRelease);
1239
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use