VirtualBox

Changeset 101162 in vbox


Ignore:
Timestamp:
Sep 18, 2023 8:03:52 PM (12 months ago)
Author:
vboxsync
Message:

IPRT/mem: Use mempage with heap code everywhere all the time. bugref:10370

Location:
trunk/src/VBox/Runtime
Files:
1 added
2 edited
2 copied
2 moved

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/Runtime/Makefile.kmk

    r100931 r101162  
    812812        r3/init-data.cpp \
    813813        r3/process-data.cpp \
     814        r3/mempage-heap.cpp \
    814815        r3/memsafer-r3.cpp \
    815816        r3/path.cpp \
     
    10611062        r3/win/RTSystemQueryTotalRam-win.cpp \
    10621063        r3/win/RTTimeZoneGetCurrent-win.cpp \
    1063         r3/win/alloc-win.cpp \
     1064        r3/win/RTMemProtect-win.cpp \
    10641065        r3/win/allocex-win.cpp \
    10651066        r3/win/dir-win.cpp \
     
    10721073        r3/win/ldrNative-win.cpp \
    10731074        r3/win/localipc-win.cpp \
     1075        r3/win/mempage-native-win.cpp \
    10741076        r3/win/mp-win.cpp \
    10751077        r3/win/path-win.cpp \
     
    11581160        r3/posix/RTTimeSet-posix.cpp \
    11591161        r3/posix/RTTimeZoneGetCurrent-posix.cpp \
    1160         r3/posix/rtmempage-exec-mmap-heap-posix.cpp \
     1162        r3/posix/mempage-native-posix.cpp \
    11611163        r3/posix/dir-posix.cpp \
    11621164        r3/posix/env-posix.cpp \
     
    12751277        r3/os2/RTTimeSet-os2.cpp \
    12761278        r3/os2/filelock-os2.cpp \
     1279        r3/os2/mempage-native-os2.cpp \
    12771280        r3/os2/mp-os2.cpp \
    12781281        r3/os2/pipe-os2.cpp \
     
    13031306        r3/posix/path2-posix.cpp \
    13041307        r3/posix/pathhost-posix.cpp \
    1305         r3/posix/rtmempage-exec-mmap-heap-posix.cpp \
    13061308        r3/posix/RTPathUserDocuments-posix.cpp \
    13071309        r3/posix/process-posix.cpp \
     
    13821384        r3/posix/ldrNative-posix.cpp \
    13831385        r3/posix/localipc-posix.cpp \
    1384         r3/posix/rtmempage-exec-mmap-heap-posix.cpp \
     1386        r3/posix/mempage-native-posix.cpp \
    13851387        r3/posix/path-posix.cpp \
    13861388        r3/posix/path2-posix.cpp \
     
    14831485        r3/posix/ldrNative-posix.cpp \
    14841486        r3/posix/localipc-posix.cpp \
    1485         r3/posix/rtmempage-exec-mmap-heap-posix.cpp \
     1487        r3/posix/mempage-native-posix.cpp \
    14861488        r3/posix/path-posix.cpp \
    14871489        r3/posix/path2-posix.cpp \
     
    15671569        r3/posix/fs3-posix.cpp \
    15681570        r3/posix/ldrNative-posix.cpp \
    1569         r3/posix/rtmempage-exec-mmap-heap-posix.cpp \
     1571        r3/posix/mempage-native-posix.cpp \
    15701572        r3/posix/path-posix.cpp \
    15711573        r3/posix/path2-posix.cpp \
     
    16421644        r3/posix/ldrNative-posix.cpp \
    16431645        r3/posix/localipc-posix.cpp \
    1644         r3/posix/rtmempage-exec-mmap-heap-posix.cpp \
     1646        r3/posix/mempage-native-posix.cpp \
    16451647        r3/posix/path-posix.cpp \
    16461648        r3/posix/path2-posix.cpp \
     
    17491751        r3/posix/ldrNative-posix.cpp \
    17501752        r3/posix/localipc-posix.cpp \
    1751         r3/posix/rtmempage-exec-mmap-heap-posix.cpp \
     1753        r3/posix/mempage-native-posix.cpp \
    17521754        r3/posix/path-posix.cpp \
    17531755        r3/posix/path2-posix.cpp \
     
    25492551        r3/init-data.cpp \
    25502552        r3/process-data.cpp \
     2553        r3/mempage-heap.cpp \
    25512554        r3/memsafer-r3.cpp \
    25522555        r3/path.cpp \
     
    26342637        r3/nt/RTPathSetMode-r3-nt.cpp \
    26352638        r3/nt/RTProcQueryParent-r3-nt.cpp \
    2636         r3/win/alloc-win.cpp \
     2639        r3/win/RTMemProtect-win.cpp \
    26372640        r3/win/allocex-win.cpp \
    26382641        r3/win/dir-win.cpp \
     
    26422645        r3/win/init-win.cpp \
    26432646        r3/win/ldrNative-win.cpp \
     2647        r3/win/mempage-native-win.cpp \
    26442648        r3/win/path-win.cpp \
    26452649        r3/win/pathint-win.cpp \
     
    27162720        r3/generic/dirrel-r3-generic.cpp \
    27172721        r3/os2/filelock-os2.cpp \
     2722        r3/os2/mempage-native-os2.cpp \
    27182723        r3/os2/mp-os2.cpp \
    27192724        r3/os2/pipe-os2.cpp \
     
    27402745        r3/posix/RTFileQueryFsSizes-posix.cpp \
    27412746        r3/posix/RTHandleGetStandard-posix.cpp \
    2742         r3/posix/rtmempage-exec-mmap-heap-posix.cpp \
    27432747        r3/posix/RTMemProtect-posix.cpp \
    27442748        r3/posix/RTPathUserDocuments-posix.cpp \
     
    27512755
    27522756RuntimeBldProg_SOURCES.linux = $(filter-out \
    2753                 r3/posix/rtmempage-exec-mmap-heap-posix.cpp \
    27542757                r3/memsafer-r3.cpp \
    2755                 , $(RuntimeBaseR3_SOURCES.linux)) \
    2756         r3/posix/rtmempage-exec-mmap-posix.cpp
     2758                , $(RuntimeBaseR3_SOURCES.linux))
    27572759
    27582760## @todo reduce linux, solaris and freebsd sources too.
     
    27952797
    27962798RuntimeGuestR3_SOURCES.linux             = $(filter-out \
    2797                 r3/posix/rtmempage-exec-mmap-heap-posix.cpp \
    27982799                r3/memsafer-r3.cpp \
    2799                 , $(RuntimeR3_SOURCES.linux)) \
    2800         r3/posix/rtmempage-exec-mmap-posix.cpp
     2800                , $(RuntimeR3_SOURCES.linux))
    28012801
    28022802
  • trunk/src/VBox/Runtime/include/internal/mem.h

    r98103 r101162  
    8282#endif
    8383
     84#ifdef IN_RING3
     85
     86/**
     87 * Native allocation worker for the heap-based RTMemPage implementation.
     88 */
     89DECLHIDDEN(int) rtMemPageNativeAlloc(size_t cb, uint32_t fFlags, void **ppvRet);
     90
     91/**
     92 * Native allocation worker for the heap-based RTMemPage implementation.
     93 */
     94DECLHIDDEN(int) rtMemPageNativeFree(void *pv, size_t cb);
     95
     96/**
     97 * Native page allocator worker that applies advisory flags to the memory.
     98 *
     99 * @returns Set of flags successfully applied
     100 * @param   pv      The memory block address.
     101 * @param   cb      The size of the memory block.
     102 * @param   fFlags  The flags to apply (may include other flags too, ignore).
     103 */
     104DECLHIDDEN(uint32_t) rtMemPageNativeApplyFlags(void *pv, size_t cb, uint32_t fFlags);
     105
     106/**
     107 * Reverts flags previously applied by rtMemPageNativeApplyFlags().
     108 *
     109 * @param   pv      The memory block address.
     110 * @param   cb      The size of the memory block.
     111 * @param   fFlags  The flags to revert.
     112 */
     113DECLHIDDEN(void) rtMemPageNativeRevertFlags(void *pv, size_t cb, uint32_t fFlags);
     114
     115#endif /* IN_RING3 */
     116
    84117RT_C_DECLS_END
    85118
  • trunk/src/VBox/Runtime/r3/mempage-heap.cpp

    r101152 r101162  
    5151#include <iprt/param.h>
    5252#include <iprt/string.h>
    53 /*#include "internal/mem.h"*/
    54 
    55 #include <stdlib.h>
    56 #include <errno.h>
    57 #include <sys/mman.h>
    58 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
    59 # define MAP_ANONYMOUS MAP_ANON
    60 #endif
     53#include "internal/mem.h"
     54
    6155
    6256
     
    198192
    199193/**
    200  * Native allocation worker for the heap-based RTMemPage implementation.
    201  */
    202 DECLHIDDEN(int) rtMemPageNativeAlloc(size_t cb, uint32_t fFlags, void **ppvRet)
    203 {
    204 #ifdef RT_OS_OS2
    205     ULONG fAlloc = OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE;
    206     if (fFlags & RTMEMPAGEALLOC_F_EXECUTABLE)
    207         fAlloc |= PAG_EXECUTE;
    208     APIRET rc = DosAllocMem(ppvRet, cb, fAlloc);
    209     if (rc == NO_ERROR)
    210         return VINF_SUCCESS;
    211     return RTErrConvertFromOS2(rc);
    212 
    213 #else
    214     void *pvRet = mmap(NULL, cb,
    215                        PROT_READ | PROT_WRITE | (fFlags & RTMEMPAGEALLOC_F_EXECUTABLE ? PROT_EXEC : 0),
    216                        MAP_PRIVATE | MAP_ANONYMOUS,
    217                        -1, 0);
    218     if (pvRet != MAP_FAILED)
    219     {
    220         *ppvRet = pvRet;
    221         return VINF_SUCCESS;
    222     }
    223     *ppvRet = NULL;
    224     return RTErrConvertFromErrno(errno);
    225 #endif
    226 }
    227 
    228 
    229 /**
    230  * Native allocation worker for the heap-based RTMemPage implementation.
    231  */
    232 DECLHIDDEN(int) rtMemPageNativeFree(void *pv, size_t cb)
    233 {
    234 #ifdef RT_OS_OS2
    235     APIRET rc = DosFreeMem(pv);
    236     AssertMsgReturn(rc == NO_ERROR, ("rc=%d pv=%p cb=%#zx\n", rc, pv, cb), RTErrConvertFromOS2(rc));
    237     RT_NOREF(cb);
    238 #else
    239     int rc = munmap(pv, cb);
    240     AssertMsgReturn(rc == 0, ("rc=%d pv=%p cb=%#zx errno=%d\n", rc, pv, cb, errno), RTErrConvertFromErrno(errno));
    241 #endif
    242     return VINF_SUCCESS;
    243 }
    244 
    245 
    246 /**
    247  * Native page allocator worker that applies advisory flags to the memory.
    248  *
    249  * @returns Set of flags successfully applied
    250  * @param   pv      The memory block address.
    251  * @param   cb      The size of the memory block.
    252  * @param   fFlags  The flags to apply (may include other flags too, ignore).
    253  */
    254 DECLHIDDEN(uint32_t) rtMemPageNativeApplyFlags(void *pv, size_t cb, uint32_t fFlags)
    255 {
    256     uint32_t fRet = 0;
    257 #ifdef RT_OS_OS2
    258     RT_NOREF(pv, cb, fFlags);
    259 #else /* !RT_OS_OS2 */
    260     if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
    261     {
    262         int rc = mlock(pv, cb);
    263 # ifndef RT_OS_SOLARIS /* mlock(3C) on Solaris requires the priv_lock_memory privilege */
    264         AssertMsg(rc == 0, ("mlock %p LB %#zx -> %d errno=%d\n", pv, cb, rc, errno));
    265 # endif
    266         if (rc == 0)
    267             fRet |= RTMEMPAGEALLOC_F_ADVISE_LOCKED;
    268     }
    269 
    270 # ifdef MADV_DONTDUMP
    271     if (fFlags & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
    272     {
    273         int rc = madvise(pv, cb, MADV_DONTDUMP);
    274         AssertMsg(rc == 0, ("madvice %p LB %#zx MADV_DONTDUMP -> %d errno=%d\n", pv, cb, rc, errno));
    275         if (rc == 0)
    276             fRet |= RTMEMPAGEALLOC_F_ADVISE_NO_DUMP;
    277     }
    278 # endif
    279 #endif /* !RT_OS_OS2 */
    280     return fRet;
    281 }
    282 
    283 
    284 /**
    285  * Reverts flags previously applied by rtMemPageNativeApplyFlags().
    286  *
    287  * @param   pv      The memory block address.
    288  * @param   cb      The size of the memory block.
    289  * @param   fFlags  The flags to revert.
    290  */
    291 DECLHIDDEN(void) rtMemPageNativeRevertFlags(void *pv, size_t cb, uint32_t fFlags)
    292 {
    293 #ifdef RT_OS_OS2
    294     RT_NOREF(pv, cb, fFlags);
    295 #else /* !RT_OS_OS2 */
    296     if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
    297     {
    298         int rc = munlock(pv, cb);
    299         AssertMsg(rc == 0, ("munlock %p LB %#zx -> %d errno=%d\n", pv, cb, rc, errno));
    300         RT_NOREF(rc);
    301     }
    302 
    303 # if defined(MADV_DONTDUMP) && defined(MADV_DODUMP)
    304     if (fFlags & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
    305     {
    306         int rc = madvise(pv, cb, MADV_DODUMP);
    307         AssertMsg(rc == 0, ("madvice %p LB %#zx MADV_DODUMP -> %d errno=%d\n", pv, cb, rc, errno));
    308         RT_NOREF(rc);
    309     }
    310 # endif
    311 #endif /* !RT_OS_OS2 */
    312 }
    313 
    314 
    315 /**
    316194 * Initializes the heap.
    317195 *
     
    469347
    470348    ASMBitSet(&pBlock->bmFirst[0], iPage);
    471     pBlock->cFreePages -= cPages;
    472     pHeap->cFreePages  -= cPages;
     349    pBlock->cFreePages -= (uint32_t)cPages;
     350    pHeap->cFreePages  -= (uint32_t)cPages;
    473351    if (!pHeap->pHint2 || pHeap->pHint2->cFreePages < pBlock->cFreePages)
    474352        pHeap->pHint2 = pBlock;
     
    540418               && (unsigned)iPage <= RTMEMPAGE_BLOCK_PAGE_COUNT - cPages)
    541419        {
    542             if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, cPages - 1))
     420            if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, (uint32_t)cPages - 1))
    543421            {
    544422                ASMBitSetRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
     
    760638            /* Check that the range ends at an allocation boundary. */
    761639            fOk = fOk && (   iPage + cPages == RTMEMPAGE_BLOCK_PAGE_COUNT
    762                           || ASMBitTest(&pBlock->bmFirst[0], iPage + cPages)
    763                           || !ASMBitTest(&pBlock->bmAlloc[0], iPage + cPages));
     640                          || ASMBitTest(&pBlock->bmFirst[0], iPage + (uint32_t)cPages)
     641                          || !ASMBitTest(&pBlock->bmAlloc[0], iPage + (uint32_t)cPages));
    764642            /* Check the other pages. */
    765             uint32_t const iLastPage = iPage + cPages - 1;
     643            uint32_t const iLastPage = iPage + (uint32_t)cPages - 1;
    766644            for (uint32_t i = iPage + 1; i < iLastPage && fOk; i++)
    767645                fOk = ASMBitTest(&pBlock->bmAlloc[0], i)
     
    782660                ASMBitClearRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
    783661                ASMBitClear(&pBlock->bmFirst[0], iPage);
    784                 pBlock->cFreePages += cPages;
    785                 pHeap->cFreePages  += cPages;
     662                pBlock->cFreePages += (uint32_t)cPages;
     663                pHeap->cFreePages  += (uint32_t)cPages;
    786664                pHeap->cFreeCalls++;
    787665                if (!pHeap->pHint1 || pHeap->pHint1->cFreePages < pBlock->cFreePages)
  • trunk/src/VBox/Runtime/r3/posix/mempage-native-posix.cpp

    r101161 r101162  
    11/* $Id$ */
    22/** @file
    3  * IPRT - RTMemPage*, POSIX with heap.
     3 * IPRT - rtMemPageNative*, POSIX implementation.
    44 */
    55
     
    4242#include <iprt/mem.h>
    4343
    44 #include <iprt/asm.h>
    4544#include <iprt/assert.h>
    46 #include <iprt/avl.h>
    47 #include <iprt/critsect.h>
    4845#include <iprt/errcore.h>
    49 #include <iprt/list.h>
    50 #include <iprt/once.h>
    5146#include <iprt/param.h>
    52 #include <iprt/string.h>
    53 /*#include "internal/mem.h"*/
     47#include "internal/mem.h"
    5448
    5549#include <stdlib.h>
     
    6155
    6256
    63 /*********************************************************************************************************************************
    64 *   Defined Constants And Macros                                                                                                 *
    65 *********************************************************************************************************************************/
    66 /** Threshold at which to we switch to simply calling mmap. */
    67 #define RTMEMPAGE_NATIVE_THRESHOLD      _1M
    68 /** The size of a heap block (power of two) - in bytes. */
    69 #define RTMEMPAGE_BLOCK_SIZE            _4M
    7057
    71 /** The number of pages per heap block. */
    72 #define RTMEMPAGE_BLOCK_PAGE_COUNT      (RTMEMPAGE_BLOCK_SIZE / PAGE_SIZE)
    73 AssertCompile(RTMEMPAGE_BLOCK_SIZE == RTMEMPAGE_BLOCK_PAGE_COUNT * PAGE_SIZE);
    74 
    75 
    76 /*********************************************************************************************************************************
    77 *   Structures and Typedefs                                                                                                      *
    78 *********************************************************************************************************************************/
    79 /** Pointer to a page heap block. */
    80 typedef struct RTHEAPPAGEBLOCK *PRTHEAPPAGEBLOCK;
    81 
    82 /**
    83  * A simple page heap.
    84  */
    85 typedef struct RTHEAPPAGE
    86 {
    87     /** Magic number (RTHEAPPAGE_MAGIC). */
    88     uint32_t            u32Magic;
    89     /** The number of pages in the heap (in BlockTree). */
    90     uint32_t            cHeapPages;
    91     /** The number of currently free pages. */
    92     uint32_t            cFreePages;
    93     /** Number of successful calls. */
    94     uint32_t            cAllocCalls;
    95     /** Number of successful free calls. */
    96     uint32_t            cFreeCalls;
    97     /** The free call number at which we last tried to minimize the heap. */
    98     uint32_t            uLastMinimizeCall;
    99     /** Tree of heap blocks. */
    100     AVLRPVTREE          BlockTree;
    101     /** Allocation hint no 1 (last freed). */
    102     PRTHEAPPAGEBLOCK    pHint1;
    103     /** Allocation hint no 2 (last alloc). */
    104     PRTHEAPPAGEBLOCK    pHint2;
    105     /** The allocation chunks for the RTHEAPPAGEBLOCK allocator
    106      * (RTHEAPPAGEBLOCKALLOCCHUNK). */
    107     RTLISTANCHOR        BlockAllocatorChunks;
    108     /** Critical section protecting the heap. */
    109     RTCRITSECT          CritSect;
    110     /** Set if the memory must allocated with execute access. */
    111     bool                fExec;
    112 } RTHEAPPAGE;
    113 #define RTHEAPPAGE_MAGIC     UINT32_C(0xfeedface)
    114 /** Pointer to a page heap. */
    115 typedef RTHEAPPAGE *PRTHEAPPAGE;
    116 
    117 
    118 /**
    119  * Describes a page heap block.
    120  */
    121 typedef struct RTHEAPPAGEBLOCK
    122 {
    123     /** The AVL tree node core (void pointer range). */
    124     AVLRPVNODECORE      Core;
    125     /** The number of free pages. */
    126     uint32_t            cFreePages;
    127     /** Pointer back to the heap. */
    128     PRTHEAPPAGE         pHeap;
    129     /** Allocation bitmap.  Set bits marks allocated pages. */
    130     uint32_t            bmAlloc[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    131     /** Allocation boundary bitmap.  Set bits mark the start of
    132      *  allocations. */
    133     uint32_t            bmFirst[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    134     /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_LOCKED has been
    135      *  successfully applied. */
    136     uint32_t            bmLockedAdviced[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    137     /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_NO_DUMP has been
    138      *  successfully applied. */
    139     uint32_t            bmNoDumpAdviced[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    140 } RTHEAPPAGEBLOCK;
    141 
    142 
    143 /**
    144  * Allocation chunk of RTHEAPPAGEBLOCKALLOCCHUNK structures.
    145  *
    146  * This is backed by an 64KB allocation and non-present blocks will be marked as
    147  * allocated in bmAlloc.
    148  */
    149 typedef struct RTHEAPPAGEBLOCKALLOCCHUNK
    150 {
    151     /** List entry. */
    152     RTLISTNODE          ListEntry;
    153     /** Number of free RTHEAPPAGEBLOCK structures here. */
    154     uint32_t            cFree;
    155     /** Number of blocks in aBlocks. */
    156     uint32_t            cBlocks;
    157     /** Allocation bitmap. */
    158     uint32_t            bmAlloc[ARCH_BITS == 32 ? 28 : 26];
    159     /** Block array. */
    160     RT_FLEXIBLE_ARRAY_EXTENSION
    161     RTHEAPPAGEBLOCK     aBlocks[RT_FLEXIBLE_ARRAY];
    162 } RTHEAPPAGEBLOCKALLOCCHUNK;
    163 AssertCompileMemberAlignment(RTHEAPPAGEBLOCKALLOCCHUNK, bmAlloc, 8);
    164 AssertCompileMemberAlignment(RTHEAPPAGEBLOCKALLOCCHUNK, aBlocks, 64);
    165 /** Pointer to an allocation chunk of RTHEAPPAGEBLOCKALLOCCHUNK structures. */
    166 typedef RTHEAPPAGEBLOCKALLOCCHUNK *PRTHEAPPAGEBLOCKALLOCCHUNK;
    167 
    168 /** Max number of blocks one RTHEAPPAGEBLOCKALLOCCHUNK can track (896/832). */
    169 #define RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS    ((ARCH_BITS == 32 ? 28 : 26) * 32)
    170 /** The chunk size for the block allocator. */
    171 #define RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE    _64K
    172 
    173 
    174 /**
    175  * Argument package for rtHeapPageAllocCallback.
    176  */
    177 typedef struct RTHEAPPAGEALLOCARGS
    178 {
    179     /** The number of pages to allocate. */
    180     size_t          cPages;
    181     /** Non-null on success.  */
    182     void           *pvAlloc;
    183     /** RTMEMPAGEALLOC_F_XXX. */
    184     uint32_t        fFlags;
    185 } RTHEAPPAGEALLOCARGS;
    186 
    187 
    188 /*********************************************************************************************************************************
    189 *   Global Variables                                                                                                             *
    190 *********************************************************************************************************************************/
    191 /** Initialize once structure. */
    192 static RTONCE       g_MemPageHeapInitOnce = RTONCE_INITIALIZER;
    193 /** The page heap. */
    194 static RTHEAPPAGE   g_MemPageHeap;
    195 /** The exec page heap. */
    196 static RTHEAPPAGE   g_MemExecHeap;
    197 
    198 
    199 /**
    200  * Native allocation worker for the heap-based RTMemPage implementation.
    201  */
    20258DECLHIDDEN(int) rtMemPageNativeAlloc(size_t cb, uint32_t fFlags, void **ppvRet)
    20359{
    204 #ifdef RT_OS_OS2
    205     ULONG fAlloc = OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE;
    206     if (fFlags & RTMEMPAGEALLOC_F_EXECUTABLE)
    207         fAlloc |= PAG_EXECUTE;
    208     APIRET rc = DosAllocMem(ppvRet, cb, fAlloc);
    209     if (rc == NO_ERROR)
    210         return VINF_SUCCESS;
    211     return RTErrConvertFromOS2(rc);
    212 
    213 #else
    21460    void *pvRet = mmap(NULL, cb,
    21561                       PROT_READ | PROT_WRITE | (fFlags & RTMEMPAGEALLOC_F_EXECUTABLE ? PROT_EXEC : 0),
     
    22369    *ppvRet = NULL;
    22470    return RTErrConvertFromErrno(errno);
    225 #endif
    22671}
    22772
    22873
    229 /**
    230  * Native allocation worker for the heap-based RTMemPage implementation.
    231  */
    23274DECLHIDDEN(int) rtMemPageNativeFree(void *pv, size_t cb)
    23375{
    234 #ifdef RT_OS_OS2
    235     APIRET rc = DosFreeMem(pv);
    236     AssertMsgReturn(rc == NO_ERROR, ("rc=%d pv=%p cb=%#zx\n", rc, pv, cb), RTErrConvertFromOS2(rc));
    237     RT_NOREF(cb);
    238 #else
    23976    int rc = munmap(pv, cb);
    24077    AssertMsgReturn(rc == 0, ("rc=%d pv=%p cb=%#zx errno=%d\n", rc, pv, cb, errno), RTErrConvertFromErrno(errno));
    241 #endif
    24278    return VINF_SUCCESS;
    24379}
    24480
    24581
    246 /**
    247  * Native page allocator worker that applies advisory flags to the memory.
    248  *
    249  * @returns Set of flags successfully applied
    250  * @param   pv      The memory block address.
    251  * @param   cb      The size of the memory block.
    252  * @param   fFlags  The flags to apply (may include other flags too, ignore).
    253  */
    25482DECLHIDDEN(uint32_t) rtMemPageNativeApplyFlags(void *pv, size_t cb, uint32_t fFlags)
    25583{
    25684    uint32_t fRet = 0;
    257 #ifdef RT_OS_OS2
    258     RT_NOREF(pv, cb, fFlags);
    259 #else /* !RT_OS_OS2 */
    26085    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
    26186    {
    26287        int rc = mlock(pv, cb);
    263 # ifndef RT_OS_SOLARIS /* mlock(3C) on Solaris requires the priv_lock_memory privilege */
     88#ifndef RT_OS_SOLARIS /* mlock(3C) on Solaris requires the priv_lock_memory privilege */
    26489        AssertMsg(rc == 0, ("mlock %p LB %#zx -> %d errno=%d\n", pv, cb, rc, errno));
    265 # endif
     90#endif
    26691        if (rc == 0)
    26792            fRet |= RTMEMPAGEALLOC_F_ADVISE_LOCKED;
    26893    }
    26994
    270 # ifdef MADV_DONTDUMP
     95#ifdef MADV_DONTDUMP
    27196    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
    27297    {
     
    276101            fRet |= RTMEMPAGEALLOC_F_ADVISE_NO_DUMP;
    277102    }
    278 # endif
    279 #endif /* !RT_OS_OS2 */
     103#endif
    280104    return fRet;
    281105}
    282106
    283107
    284 /**
    285  * Reverts flags previously applied by rtMemPageNativeApplyFlags().
    286  *
    287  * @param   pv      The memory block address.
    288  * @param   cb      The size of the memory block.
    289  * @param   fFlags  The flags to revert.
    290  */
    291108DECLHIDDEN(void) rtMemPageNativeRevertFlags(void *pv, size_t cb, uint32_t fFlags)
    292109{
    293 #ifdef RT_OS_OS2
    294     RT_NOREF(pv, cb, fFlags);
    295 #else /* !RT_OS_OS2 */
    296110    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
    297111    {
     
    301115    }
    302116
    303 # if defined(MADV_DONTDUMP) && defined(MADV_DODUMP)
     117#if defined(MADV_DONTDUMP) && defined(MADV_DODUMP)
    304118    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
    305119    {
     
    308122        RT_NOREF(rc);
    309123    }
    310 # endif
    311 #endif /* !RT_OS_OS2 */
     124#endif
    312125}
    313126
    314 
    315 /**
    316  * Initializes the heap.
    317  *
    318  * @returns IPRT status code.
    319  * @param   pHeap           The page heap to initialize.
    320  * @param   fExec           Whether the heap memory should be marked as
    321  *                          executable or not.
    322  */
    323 static int RTHeapPageInit(PRTHEAPPAGE pHeap, bool fExec)
    324 {
    325     int rc = RTCritSectInitEx(&pHeap->CritSect,
    326                               RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
    327                               NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
    328     if (RT_SUCCESS(rc))
    329     {
    330         pHeap->cHeapPages           = 0;
    331         pHeap->cFreePages           = 0;
    332         pHeap->cAllocCalls          = 0;
    333         pHeap->cFreeCalls           = 0;
    334         pHeap->uLastMinimizeCall    = 0;
    335         pHeap->BlockTree            = NULL;
    336         pHeap->fExec                = fExec;
    337         RTListInit(&pHeap->BlockAllocatorChunks);
    338         pHeap->u32Magic             = RTHEAPPAGE_MAGIC;
    339     }
    340     return rc;
    341 }
    342 
    343 
    344 /**
    345  * Deletes the heap and all the memory it tracks.
    346  *
    347  * @returns IPRT status code.
    348  * @param   pHeap           The page heap to delete.
    349  */
    350 static int RTHeapPageDelete(PRTHEAPPAGE pHeap)
    351 {
    352     NOREF(pHeap);
    353     pHeap->u32Magic = ~RTHEAPPAGE_MAGIC;
    354     return VINF_SUCCESS;
    355 }
    356 
    357 
    358 /**
    359  * Allocates a RTHEAPPAGEBLOCK.
    360  *
    361  * @returns Pointer to RTHEAPPAGEBLOCK on success, NULL on failure.
    362  * @param   pHeap   The heap this is for.
    363  */
    364 static PRTHEAPPAGEBLOCK rtHeapPageIntBlockAllocatorAlloc(PRTHEAPPAGE pHeap)
    365 {
    366     /*
    367      * Locate a chunk with space and grab a block from it.
    368      */
    369     PRTHEAPPAGEBLOCKALLOCCHUNK pChunk;
    370     RTListForEach(&pHeap->BlockAllocatorChunks, pChunk, RTHEAPPAGEBLOCKALLOCCHUNK, ListEntry)
    371     {
    372         if (pChunk->cFree > 0)
    373         {
    374             int idxBlock = ASMBitFirstClear(&pChunk->bmAlloc[0], RT_MIN(RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS, pChunk->cBlocks));
    375             if (idxBlock >= 0)
    376             {
    377                 ASMBitSet(&pChunk->bmAlloc[0], idxBlock);
    378                 pChunk->cFree -= 1;
    379                 return &pChunk->aBlocks[idxBlock];
    380             }
    381             AssertFailed();
    382         }
    383     }
    384 
    385     /*
    386      * Allocate a new chunk and return the first block in it.
    387      */
    388     int rc = rtMemPageNativeAlloc(RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE, 0, (void **)&pChunk);
    389     AssertRCReturn(rc, NULL);
    390     pChunk->cBlocks = (RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE - RT_UOFFSETOF(RTHEAPPAGEBLOCKALLOCCHUNK, aBlocks))
    391                     / sizeof(pChunk->aBlocks[0]);
    392     AssertStmt(pChunk->cBlocks < RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS, pChunk->cBlocks = RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS);
    393     pChunk->cFree   = pChunk->cBlocks;
    394 
    395     RT_ZERO(pChunk->bmAlloc);
    396     ASMBitSetRange(pChunk->bmAlloc, pChunk->cBlocks, RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS);
    397     RTListPrepend(&pHeap->BlockAllocatorChunks, &pChunk->ListEntry);
    398 
    399     /*
    400      * Allocate the first one.
    401      */
    402     ASMBitSet(pChunk->bmAlloc, 0);
    403     pChunk->cFree -= 1;
    404 
    405     return &pChunk->aBlocks[0];
    406 }
    407 
    408 
    409 /**
    410  * Frees a RTHEAPPAGEBLOCK.
    411  *
    412  * @param   pHeap   The heap this is for.
    413  * @param   pBlock  The block to free.
    414  */
    415 static void rtHeapPageIntBlockAllocatorFree(PRTHEAPPAGE pHeap, PRTHEAPPAGEBLOCK pBlock)
    416 {
    417     /*
    418      * Locate the chunk the block belongs to and mark it as freed.
    419      */
    420     PRTHEAPPAGEBLOCKALLOCCHUNK pChunk;
    421     RTListForEach(&pHeap->BlockAllocatorChunks, pChunk, RTHEAPPAGEBLOCKALLOCCHUNK, ListEntry)
    422     {
    423         if ((uintptr_t)pBlock - (uintptr_t)pChunk < RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE)
    424         {
    425             uintptr_t const idxBlock = (uintptr_t)(pBlock - &pChunk->aBlocks[0]);
    426             if (ASMBitTestAndClear(&pChunk->bmAlloc[0], idxBlock))
    427                 pChunk->cFree++;
    428             else
    429                 AssertMsgFailed(("pBlock=%p idxBlock=%#zx\n", pBlock, idxBlock));
    430             return;
    431         }
    432     }
    433     AssertFailed();
    434 }
    435 
    436 
    437 /**
    438  * Applies flags to an allocation.
    439  *
    440  * @return  Flags that need to be reverted upon free.
    441  * @param   pv              The allocation.
    442  * @param   cb              The size of the allocation (page aligned).
    443  * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
    444  */
    445 DECLINLINE(uint32_t) rtMemPageApplyFlags(void *pv, size_t cb, uint32_t fFlags)
    446 {
    447     uint32_t fHandled = 0;
    448     if (fFlags & (RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP))
    449         fHandled = rtMemPageNativeApplyFlags(pv, cb, fFlags);
    450     if (fFlags & RTMEMPAGEALLOC_F_ZERO)
    451         RT_BZERO(pv, cb);
    452     return fHandled;
    453 }
    454 
    455 
    456 /**
    457  * Avoids some gotos in rtHeapPageAllocFromBlock.
    458  *
    459  * @returns VINF_SUCCESS.
    460  * @param   pBlock          The block.
    461  * @param   iPage           The page to start allocating at.
    462  * @param   cPages          The number of pages.
    463  * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
    464  * @param   ppv             Where to return the allocation address.
    465  */
    466 DECLINLINE(int) rtHeapPageAllocFromBlockSuccess(PRTHEAPPAGEBLOCK pBlock, uint32_t iPage, size_t cPages, uint32_t fFlags, void **ppv)
    467 {
    468     PRTHEAPPAGE pHeap = pBlock->pHeap;
    469 
    470     ASMBitSet(&pBlock->bmFirst[0], iPage);
    471     pBlock->cFreePages -= cPages;
    472     pHeap->cFreePages  -= cPages;
    473     if (!pHeap->pHint2 || pHeap->pHint2->cFreePages < pBlock->cFreePages)
    474         pHeap->pHint2 = pBlock;
    475     pHeap->cAllocCalls++;
    476 
    477     void *pv = (uint8_t *)pBlock->Core.Key + (iPage << PAGE_SHIFT);
    478     *ppv = pv;
    479 
    480     if (fFlags)
    481     {
    482         uint32_t fHandled = rtMemPageApplyFlags(pv, cPages << PAGE_SHIFT, fFlags);
    483         Assert(!(fHandled & ~(RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)));
    484         if (fHandled & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
    485             ASMBitSetRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
    486         if (fHandled & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
    487             ASMBitSetRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
    488     }
    489 
    490     return VINF_SUCCESS;
    491 }
    492 
    493 
    494 /**
    495  * Checks if a page range is free in the specified block.
    496  *
    497  * @returns @c true if the range is free, @c false if not.
    498  * @param   pBlock          The block.
    499  * @param   iFirst          The first page to check.
    500  * @param   cPages          The number of pages to check.
    501  */
    502 DECLINLINE(bool) rtHeapPageIsPageRangeFree(PRTHEAPPAGEBLOCK pBlock, uint32_t iFirst, uint32_t cPages)
    503 {
    504     uint32_t i = iFirst + cPages;
    505     while (i-- > iFirst)
    506     {
    507         if (ASMBitTest(&pBlock->bmAlloc[0], i))
    508             return false;
    509         Assert(!ASMBitTest(&pBlock->bmFirst[0], i));
    510     }
    511     return true;
    512 }
    513 
    514 
/**
 * Tries to allocate a chunk of pages from a heap block.
 *
 * First-fit scan over the block's allocation bitmap (bmAlloc).
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if the allocation failed.
 * @param   pBlock          The block to allocate from.
 * @param   cPages          The size of the allocation.
 * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
 * @param   ppv             Where to return the allocation address on success.
 */
DECLINLINE(int) rtHeapPageAllocFromBlock(PRTHEAPPAGEBLOCK pBlock, size_t cPages, uint32_t fFlags, void **ppv)
{
    if (pBlock->cFreePages >= cPages)
    {
        /* cFreePages >= 1 guarantees at least one clear bit. */
        int iPage = ASMBitFirstClear(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT);
        Assert(iPage >= 0);

        /* special case: single page. */
        if (cPages == 1)
        {
            ASMBitSet(&pBlock->bmAlloc[0], iPage);
            return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
        }

        /* Multi-page: walk candidate start pages; each candidate must leave
           room for cPages before the end of the block. */
        while (   iPage >= 0
               && (unsigned)iPage <= RTMEMPAGE_BLOCK_PAGE_COUNT - cPages)
        {
            /* iPage itself is known clear; check the cPages - 1 that follow. */
            if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, cPages - 1))
            {
                ASMBitSetRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
            }

            /* next */
            /* Skip past the allocated run that blocked us, then to the next
               free page; either step may run off the end of the bitmap. */
            iPage = ASMBitNextSet(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT, iPage);
            if (iPage < 0 || (unsigned)iPage >= RTMEMPAGE_BLOCK_PAGE_COUNT - 1)
                break;
            iPage = ASMBitNextClear(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT, iPage);
        }
    }

    return VERR_NO_MEMORY;
}
    558 
    559 
    560 /**
    561  * RTAvlrPVDoWithAll callback.
    562  *
    563  * @returns 0 to continue the enum, non-zero to quit it.
    564  * @param   pNode           The node.
    565  * @param   pvUser          The user argument.
    566  */
    567 static DECLCALLBACK(int) rtHeapPageAllocCallback(PAVLRPVNODECORE pNode, void *pvUser)
    568 {
    569     PRTHEAPPAGEBLOCK        pBlock = RT_FROM_MEMBER(pNode,  RTHEAPPAGEBLOCK, Core);
    570     RTHEAPPAGEALLOCARGS    *pArgs  = (RTHEAPPAGEALLOCARGS *)pvUser;
    571     int rc = rtHeapPageAllocFromBlock(pBlock, pArgs->cPages, pArgs->fFlags, &pArgs->pvAlloc);
    572     return RT_SUCCESS(rc) ? 1 : 0;
    573 }
    574 
    575 
/**
 * Worker for RTHeapPageAlloc.
 *
 * Note that the critical section is temporarily left and re-entered around
 * the native page allocation when the heap has to be grown.
 *
 * @returns IPRT status code
 * @param   pHeap           The heap - locked.
 * @param   cPages          The page count.
 * @param   pszTag          The tag (currently unused).
 * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
 * @param   ppv             Where to return the address of the allocation
 *                          on success.
 */
static int rtHeapPageAllocLocked(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
{
    int rc;
    NOREF(pszTag);

    /*
     * Use the hints first.
     */
    if (pHeap->pHint1)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint1, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }
    if (pHeap->pHint2)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint2, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }

    /*
     * Search the heap for a block with enough free space.
     *
     * N.B. This search algorithm is not optimal at all. What (hopefully) saves
     *      it are the two hints above.
     */
    if (pHeap->cFreePages >= cPages)
    {
        RTHEAPPAGEALLOCARGS Args;
        Args.cPages  = cPages;
        Args.pvAlloc = NULL;
        Args.fFlags  = fFlags;
        RTAvlrPVDoWithAll(&pHeap->BlockTree, true /*fFromLeft*/, rtHeapPageAllocCallback, &Args);
        if (Args.pvAlloc)
        {
            *ppv = Args.pvAlloc;
            return VINF_SUCCESS;
        }
    }

    /*
     * Didn't find anything, so expand the heap with a new block.
     */
    /* Grab a block descriptor before dropping the lock. */
    PRTHEAPPAGEBLOCK const pBlock = rtHeapPageIntBlockAllocatorAlloc(pHeap);
    AssertReturn(pBlock, VERR_NO_MEMORY);

    /* Do the native page allocation outside the lock. */
    RTCritSectLeave(&pHeap->CritSect);

    void *pvPages = NULL;
    rc = rtMemPageNativeAlloc(RTMEMPAGE_BLOCK_SIZE, pHeap->fExec ? RTMEMPAGEALLOC_F_EXECUTABLE : 0, &pvPages);

    RTCritSectEnter(&pHeap->CritSect);
    if (RT_FAILURE(rc))
    {
        rtHeapPageIntBlockAllocatorFree(pHeap, pBlock);
        return rc;
    }

    /* Initialize the new block and link it into the address-keyed AVL tree. */
    RT_ZERO(*pBlock);
    pBlock->Core.Key        = pvPages;
    pBlock->Core.KeyLast    = (uint8_t *)pvPages + RTMEMPAGE_BLOCK_SIZE - 1;
    pBlock->cFreePages      = RTMEMPAGE_BLOCK_PAGE_COUNT;
    pBlock->pHeap           = pHeap;

    bool fRc = RTAvlrPVInsert(&pHeap->BlockTree, &pBlock->Core); Assert(fRc); NOREF(fRc);
    pHeap->cFreePages      +=  RTMEMPAGE_BLOCK_PAGE_COUNT;
    pHeap->cHeapPages      +=  RTMEMPAGE_BLOCK_PAGE_COUNT;

    /*
     * Grab memory from the new block (cannot fail).
     */
    rc = rtHeapPageAllocFromBlock(pBlock, cPages, fFlags, ppv);
    Assert(rc == VINF_SUCCESS);

    return rc;
}
    664 
    665 
    666 /**
    667  * Allocates one or more pages off the heap.
    668  *
    669  * @returns IPRT status code.
    670  * @param   pHeap           The page heap.
    671  * @param   cPages          The number of pages to allocate.
    672  * @param   pszTag          The allocation tag.
    673  * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
    674  * @param   ppv             Where to return the pointer to the pages.
    675  */
    676 static int RTHeapPageAlloc(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
    677 {
    678     /*
    679      * Validate input.
    680      */
    681     AssertPtr(ppv);
    682     *ppv = NULL;
    683     AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    684     AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);
    685     AssertMsgReturn(cPages < RTMEMPAGE_BLOCK_SIZE, ("%#zx\n", cPages), VERR_OUT_OF_RANGE);
    686 
    687     /*
    688      * Grab the lock and call a worker with many returns.
    689      */
    690     int rc = RTCritSectEnter(&pHeap->CritSect);
    691     if (RT_SUCCESS(rc))
    692     {
    693         rc = rtHeapPageAllocLocked(pHeap, cPages, pszTag, fFlags, ppv);
    694         RTCritSectLeave(&pHeap->CritSect);
    695     }
    696 
    697     return rc;
    698 }
    699 
    700 
    701 /**
    702  * RTAvlrPVDoWithAll callback.
    703  *
    704  * @returns 0 to continue the enum, non-zero to quit it.
    705  * @param   pNode           The node.
    706  * @param   pvUser          Pointer to a block pointer variable. For returning
    707  *                          the address of the block to be freed.
    708  */
    709 static DECLCALLBACK(int) rtHeapPageFindUnusedBlockCallback(PAVLRPVNODECORE pNode, void *pvUser)
    710 {
    711     PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
    712     if (pBlock->cFreePages == RTMEMPAGE_BLOCK_PAGE_COUNT)
    713     {
    714         *(PRTHEAPPAGEBLOCK *)pvUser = pBlock;
    715         return 1;
    716     }
    717     return 0;
    718 }
    719 
    720 
/**
 * Frees an allocation.
 *
 * @returns IPRT status code.
 * @retval  VERR_NOT_FOUND if pv isn't within any of the memory blocks in the
 *          heap.
 * @retval  VERR_INVALID_POINTER if the given memory range isn't exactly one
 *          allocation block.
 * @param   pHeap           The page heap.
 * @param   pv              Pointer to what RTHeapPageAlloc returned.
 * @param   cPages          The number of pages that was allocated.
 */
static int RTHeapPageFree(PRTHEAPPAGE pHeap, void *pv, size_t cPages)
{
    /*
     * Validate input.
     */
    if (!pv)
        return VINF_SUCCESS;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Grab the lock and look up the page.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)RTAvlrPVRangeGet(&pHeap->BlockTree, pv);
        if (pBlock)
        {
            /*
             * Validate the specified address range.
             */
            uint32_t const iPage = (uint32_t)(((uintptr_t)pv - (uintptr_t)pBlock->Core.Key) >> PAGE_SHIFT);
            /* Check the range is within the block. */
            bool fOk = iPage + cPages <= RTMEMPAGE_BLOCK_PAGE_COUNT;
            /* Check that it's the start of an allocation. */
            fOk = fOk && ASMBitTest(&pBlock->bmFirst[0], iPage);
            /* Check that the range ends at an allocation boundary. */
            fOk = fOk && (   iPage + cPages == RTMEMPAGE_BLOCK_PAGE_COUNT
                          || ASMBitTest(&pBlock->bmFirst[0], iPage + cPages)
                          || !ASMBitTest(&pBlock->bmAlloc[0], iPage + cPages));
            /* Check the other pages: each must be allocated and not itself the
               start of an allocation.
               NOTE(review): the loop stops before iLastPage, so the last page's
               own bmAlloc/bmFirst bits are not inspected - presumably good
               enough given the boundary check above; confirm. */
            uint32_t const iLastPage = iPage + cPages - 1;
            for (uint32_t i = iPage + 1; i < iLastPage && fOk; i++)
                fOk = ASMBitTest(&pBlock->bmAlloc[0], i)
                   && !ASMBitTest(&pBlock->bmFirst[0], i);
            if (fOk)
            {
                /*
                 * Free the memory.
                 */
                /* Revert any locking/no-dump advice applied at allocation time
                   (tracked in the bmLockedAdviced/bmNoDumpAdviced bitmaps by
                   rtHeapPageAllocFromBlockSuccess). */
                uint32_t fRevert = (ASMBitTest(&pBlock->bmLockedAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_LOCKED  : 0)
                                 | (ASMBitTest(&pBlock->bmNoDumpAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_NO_DUMP : 0);
                if (fRevert)
                {
                    rtMemPageNativeRevertFlags(pv, cPages << PAGE_SHIFT, fRevert);
                    ASMBitClearRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
                    ASMBitClearRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
                }
                ASMBitClearRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                ASMBitClear(&pBlock->bmFirst[0], iPage);
                pBlock->cFreePages += cPages;
                pHeap->cFreePages  += cPages;
                pHeap->cFreeCalls++;
                /* Keep the emptiest block seen as the free-path hint. */
                if (!pHeap->pHint1 || pHeap->pHint1->cFreePages < pBlock->cFreePages)
                    pHeap->pHint1 = pBlock;

                /*
                 * Shrink the heap. Not very efficient because of the AVL tree.
                 */
                if (   pHeap->cFreePages >= RTMEMPAGE_BLOCK_PAGE_COUNT * 3
                    && pHeap->cFreePages >= pHeap->cHeapPages / 2 /* 50% free */
                    && pHeap->cFreeCalls - pHeap->uLastMinimizeCall > RTMEMPAGE_BLOCK_PAGE_COUNT
                   )
                {
                    uint32_t cFreePageTarget = pHeap->cHeapPages / 4; /* 25% free */
                    while (pHeap->cFreePages > cFreePageTarget)
                    {
                        pHeap->uLastMinimizeCall = pHeap->cFreeCalls;

                        /* Find a completely unused block (searching from the right). */
                        pBlock = NULL;
                        RTAvlrPVDoWithAll(&pHeap->BlockTree, false /*fFromLeft*/,
                                          rtHeapPageFindUnusedBlockCallback, &pBlock);
                        if (!pBlock)
                            break;

                        /* Unlink it and invalidate the hints before dropping the
                           lock; the hints may point at the block being removed. */
                        void *pv2 = RTAvlrPVRemove(&pHeap->BlockTree, pBlock->Core.Key); Assert(pv2); NOREF(pv2);
                        pHeap->cHeapPages -= RTMEMPAGE_BLOCK_PAGE_COUNT;
                        pHeap->cFreePages -= RTMEMPAGE_BLOCK_PAGE_COUNT;
                        pHeap->pHint1      = NULL;
                        pHeap->pHint2      = NULL;
                        RTCritSectLeave(&pHeap->CritSect);

                        /* Return the pages to the OS outside the lock. */
                        rtMemPageNativeFree(pBlock->Core.Key, RTMEMPAGE_BLOCK_SIZE);
                        pBlock->Core.Key = pBlock->Core.KeyLast = NULL;
                        pBlock->cFreePages = 0;
                        rtHeapPageIntBlockAllocatorFree(pHeap, pBlock);

                        RTCritSectEnter(&pHeap->CritSect);
                    }
                }
            }
            else
                rc = VERR_INVALID_POINTER;
        }
        else
            rc = VERR_NOT_FOUND; /* Distinct return code for this so RTMemPageFree and others can try alternative heaps. */

        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}
    838 
    839 
    840 /**
    841  * Initializes the heap.
    842  *
    843  * @returns IPRT status code
    844  * @param   pvUser              Unused.
    845  */
    846 static DECLCALLBACK(int) rtMemPageInitOnce(void *pvUser)
    847 {
    848     NOREF(pvUser);
    849     int rc = RTHeapPageInit(&g_MemPageHeap, false /*fExec*/);
    850     if (RT_SUCCESS(rc))
    851     {
    852         rc = RTHeapPageInit(&g_MemExecHeap, true /*fExec*/);
    853         if (RT_SUCCESS(rc))
    854             return rc;
    855         RTHeapPageDelete(&g_MemPageHeap);
    856     }
    857     return rc;
    858 }
    859 
    860 
    861 /**
    862  * Allocates memory from the specified heap.
    863  *
    864  * @returns Address of the allocated memory.
    865  * @param   cb                  The number of bytes to allocate.
    866  * @param   pszTag              The tag.
    867  * @param   fFlags              RTMEMPAGEALLOC_F_XXX.
    868  * @param   pHeap               The heap to use.
    869  */
    870 static void *rtMemPageAllocInner(size_t cb, const char *pszTag, uint32_t fFlags, PRTHEAPPAGE pHeap)
    871 {
    872     /*
    873      * Validate & adjust the input.
    874      */
    875     Assert(cb > 0);
    876     NOREF(pszTag);
    877     cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    878 
    879     /*
    880      * If the allocation is relatively large, we use mmap/VirtualAlloc/DosAllocMem directly.
    881      */
    882     void *pv = NULL; /* shut up gcc */
    883     if (cb >= RTMEMPAGE_NATIVE_THRESHOLD)
    884     {
    885         int rc = rtMemPageNativeAlloc(cb, fFlags, &pv);
    886         if (RT_SUCCESS(rc))
    887         {
    888             AssertPtr(pv);
    889 
    890             if (fFlags)
    891                 rtMemPageApplyFlags(pv, cb, fFlags);
    892         }
    893         else
    894             pv = NULL;
    895     }
    896     else
    897     {
    898         int rc = RTOnce(&g_MemPageHeapInitOnce, rtMemPageInitOnce, NULL);
    899         if (RT_SUCCESS(rc))
    900             rc = RTHeapPageAlloc(pHeap, cb >> PAGE_SHIFT, pszTag, fFlags, &pv);
    901         if (RT_FAILURE(rc))
    902             pv = NULL;
    903     }
    904 
    905     return pv;
    906 }
    907 
    908 
    909 RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
    910 {
    911     return rtMemPageAllocInner(cb, pszTag, 0, &g_MemPageHeap);
    912 }
    913 
    914 
    915 RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
    916 {
    917     return rtMemPageAllocInner(cb, pszTag, RTMEMPAGEALLOC_F_ZERO, &g_MemPageHeap);
    918 }
    919 
    920 
    921 RTDECL(void *) RTMemPageAllocExTag(size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
    922 {
    923     AssertReturn(!(fFlags & ~RTMEMPAGEALLOC_F_VALID_MASK), NULL);
    924     return rtMemPageAllocInner(cb, pszTag, fFlags,
    925                                !(fFlags & RTMEMPAGEALLOC_F_EXECUTABLE) ? &g_MemPageHeap : &g_MemExecHeap);
    926 }
    927 
    928 
    929 RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW_DEF
    930 {
    931     /*
    932      * Validate & adjust the input.
    933      */
    934     if (!pv)
    935         return;
    936     AssertPtr(pv);
    937     Assert(cb > 0);
    938     Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
    939     cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    940 
    941     /*
    942      * If the allocation is relatively large, we used mmap/VirtualAlloc/DosAllocMem directly.
    943      */
    944     if (cb >= RTMEMPAGE_NATIVE_THRESHOLD)
    945         rtMemPageNativeFree(pv, cb);
    946     else
    947     {
    948         int rc = RTHeapPageFree(&g_MemPageHeap, pv, cb >> PAGE_SHIFT);
    949         if (rc == VERR_NOT_FOUND)
    950             rc = RTHeapPageFree(&g_MemExecHeap, pv, cb >> PAGE_SHIFT);
    951         AssertRC(rc);
    952     }
    953 }
    954 
  • trunk/src/VBox/Runtime/r3/win/RTMemProtect-win.cpp

    r101161 r101162  
    11/* $Id$ */
    22/** @file
    3  * IPRT - Memory Allocation, Windows.
     3 * IPRT - RTMemProtect, Windows.
    44 */
    55
     
    3939*   Header Files                                                                                                                 *
    4040*********************************************************************************************************************************/
    41 #ifdef IPRT_NO_CRT
    42 # define USE_VIRTUAL_ALLOC
    43 #endif
    4441#define LOG_GROUP RTLOGGROUP_MEM
    4542#include <iprt/win/windows.h>
    4643
    47 #include <iprt/alloc.h>
     44#include <iprt/mem.h>
    4845#include <iprt/assert.h>
    4946#include <iprt/param.h>
    50 #include <iprt/string.h>
    5147#include <iprt/errcore.h>
    5248
    53 #ifndef USE_VIRTUAL_ALLOC
    54 # include <malloc.h>
    55 #endif
    56 
    57 
    58 /** @todo merge the page alloc code with the heap-based mmap stuff as
    59  *        _aligned_malloc just isn't well suited for exec. */
    60 
    61 
    62 RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
    63 {
    64     RT_NOREF_PV(pszTag);
    65 
    66 #ifdef USE_VIRTUAL_ALLOC
    67     void *pv = VirtualAlloc(NULL, RT_ALIGN_Z(cb, PAGE_SIZE), MEM_COMMIT, PAGE_READWRITE);
    68 #else
    69     void *pv = _aligned_malloc(RT_ALIGN_Z(cb, PAGE_SIZE), PAGE_SIZE);
    70 #endif
    71     AssertMsg(pv, ("cb=%d lasterr=%d\n", cb, GetLastError()));
    72     return pv;
    73 }
    74 
    75 
    76 RTDECL(void *) RTMemPageAllocExTag(size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
    77 {
    78     size_t const cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    79     RT_NOREF_PV(pszTag);
    80     AssertReturn(!(fFlags & ~RTMEMPAGEALLOC_F_VALID_MASK), NULL);
    81 
    82 #ifdef USE_VIRTUAL_ALLOC
    83     void *pv = VirtualAlloc(NULL, cbAligned, MEM_COMMIT,
    84                             !(fFlags & RTMEMPAGEALLOC_F_EXECUTABLE) ? PAGE_READWRITE : PAGE_EXECUTE_READWRITE);
    85 #else
    86     void *pv = _aligned_malloc(cbAligned, PAGE_SIZE);
    87 #endif
    88     AssertMsgReturn(pv, ("cb=%d lasterr=%d\n", cb, GetLastError()), NULL);
    89 
    90 #ifndef USE_VIRTUAL_ALLOC
    91     if (fFlags & RTMEMPAGEALLOC_F_EXECUTABLE)
    92     {
    93         DWORD      fIgn = 0;
    94         BOOL const fRc  = VirtualProtect(pv, cbAligned, PAGE_EXECUTE_READWRITE, &fIgn);
    95         AssertMsgReturnStmt(fRc, ("%p LB %#zx: %#u\n", pv, cbAligned, GetLastError()), _aligned_free(pv), NULL);
    96     }
    97 #endif
    98 
    99     if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
    100     {
    101         /** @todo check why we get ERROR_WORKING_SET_QUOTA here. */
    102         BOOL const fOkay = VirtualLock(pv, cbAligned);
    103         AssertMsg(fOkay || GetLastError() == ERROR_WORKING_SET_QUOTA, ("pv=%p cb=%d lasterr=%d\n", pv, cb, GetLastError()));
    104         NOREF(fOkay);
    105     }
    106 
    107     if (fFlags & RTMEMPAGEALLOC_F_ZERO)
    108         RT_BZERO(pv, cbAligned);
    109 
    110     return pv;
    111 }
    112 
    113 
    114 RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
    115 {
    116     RT_NOREF_PV(pszTag);
    117 
    118 #ifdef USE_VIRTUAL_ALLOC
    119     void *pv = VirtualAlloc(NULL, RT_ALIGN_Z(cb, PAGE_SIZE), MEM_COMMIT, PAGE_READWRITE);
    120 #else
    121     void *pv = _aligned_malloc(RT_ALIGN_Z(cb, PAGE_SIZE), PAGE_SIZE);
    122 #endif
    123     if (pv)
    124     {
    125         memset(pv, 0, RT_ALIGN_Z(cb, PAGE_SIZE));
    126         return pv;
    127     }
    128     AssertMsgFailed(("cb=%d lasterr=%d\n", cb, GetLastError()));
    129     return NULL;
    130 }
    131 
    132 
    133 RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW_DEF
    134 {
    135     RT_NOREF_PV(cb);
    136 
    137     if (pv)
    138     {
    139 #ifdef USE_VIRTUAL_ALLOC
    140         if (!VirtualFree(pv, 0, MEM_RELEASE))
    141             AssertMsgFailed(("pv=%p lasterr=%d\n", pv, GetLastError()));
    142 #else
    143         /** @todo The exec version of this doesn't really work well... */
    144         MEMORY_BASIC_INFORMATION MemInfo = { NULL };
    145         SIZE_T cbRet = VirtualQuery(pv, &MemInfo, sizeof(MemInfo));
    146         Assert(cbRet > 0);
    147         if (cbRet > 0 && MemInfo.Protect == PAGE_EXECUTE_READWRITE)
    148         {
    149             DWORD      fIgn = 0;
    150             BOOL const fRc  = VirtualProtect(pv, cb, PAGE_READWRITE, &fIgn);
    151             Assert(fRc); RT_NOREF(fRc);
    152         }
    153         _aligned_free(pv);
    154 #endif
    155     }
    156 }
    15749
    15850
  • trunk/src/VBox/Runtime/r3/win/mempage-native-win.cpp

    • Property svn:mergeinfo set to (toggle deleted branches)
      /branches/VBox-3.0/src/VBox/Runtime/r3/win/alloc-win.cpp58652,​70973
      /branches/VBox-3.2/src/VBox/Runtime/r3/win/alloc-win.cpp66309,​66318
      /branches/VBox-4.0/src/VBox/Runtime/r3/win/alloc-win.cpp70873
      /branches/VBox-4.1/src/VBox/Runtime/r3/win/alloc-win.cpp74233,​78414,​78691,​81841,​82127,​85941,​85944-85947,​85949-85950,​85953,​86701,​86728,​87009
      /branches/VBox-4.2/src/VBox/Runtime/r3/win/alloc-win.cpp86229-86230,​86234,​86529,​91503-91504,​91506-91508,​91510,​91514-91515,​91521,​108112,​108114,​108127
      /branches/VBox-4.3/src/VBox/Runtime/r3/win/alloc-win.cpp89714,​91223,​93628-93629,​94066,​94839,​94897,​95154,​95164,​95167,​95295,​95338,​95353-95354,​95356,​95367,​95451,​95475,​95477,​95480,​95507,​95640,​95659,​95661,​95663,​98913-98914
      /branches/VBox-4.3/trunk/src/VBox/Runtime/r3/win/alloc-win.cpp91223
      /branches/VBox-5.0/src/VBox/Runtime/r3/win/alloc-win.cpp104938,​104943,​104950,​104987-104988,​104990,​106453
      /branches/VBox-5.1/src/VBox/Runtime/r3/win/alloc-win.cpp112367,​116543,​116550,​116568,​116573
      /branches/VBox-5.2/src/VBox/Runtime/r3/win/alloc-win.cpp119536,​120083,​120099,​120213,​120221,​120239,​123597-123598,​123600-123601,​123755,​124263,​124273,​124277-124279,​124284-124286,​124288-124290,​125768,​125779-125780,​125812,​127158-127159,​127162-127167,​127180
      /branches/VBox-6.0/src/VBox/Runtime/r3/win/alloc-win.cpp130474-130475,​130477,​130479,​131352
      /branches/VBox-6.1/src/VBox/Runtime/r3/win/alloc-win.cpp141521,​141567-141568,​141588-141590,​141592-141595,​141652,​141920,​158257-158259
      /branches/VBox-7.0/src/VBox/Runtime/r3/win/alloc-win.cpp156229,​156768
      /branches/aeichner/vbox-chromium-cleanup/src/VBox/Runtime/r3/win/alloc-win.cpp129818-129851,​129853-129861,​129871-129872,​129876,​129880,​129882,​130013-130015,​130094-130095
      /branches/andy/draganddrop/src/VBox/Runtime/r3/win/alloc-win.cpp90781-91268
      /branches/andy/guestctrl20/src/VBox/Runtime/r3/win/alloc-win.cpp78916,​78930
      /branches/andy/pdmaudio/src/VBox/Runtime/r3/win/alloc-win.cpp94582,​94641,​94654,​94688,​94778,​94783,​94816,​95197,​95215-95216,​95250,​95279,​95505-95506,​95543,​95694,​96323,​96470-96471,​96582,​96587,​96802-96803,​96817,​96904,​96967,​96999,​97020-97021,​97025,​97050,​97099
      /branches/bird/hardenedwindows/src/VBox/Runtime/r3/win/alloc-win.cpp92692-94610
      /branches/dsen/gui/src/VBox/Runtime/r3/win/alloc-win.cpp79076-79078,​79089,​79109-79110,​79112-79113,​79127-79130,​79134,​79141,​79151,​79155,​79157-79159,​79193,​79197
      /branches/dsen/gui2/src/VBox/Runtime/r3/win/alloc-win.cpp79224,​79228,​79233,​79235,​79258,​79262-79263,​79273,​79341,​79345,​79354,​79357,​79387-79388,​79559-79569,​79572-79573,​79578,​79581-79582,​79590-79591,​79598-79599,​79602-79603,​79605-79606,​79632,​79635,​79637,​79644
      /branches/dsen/gui3/src/VBox/Runtime/r3/win/alloc-win.cpp79645-79692
      /branches/dsen/gui4/src/VBox/Runtime/r3/win/alloc-win.cpp155183-155185,​155187,​155198,​155200-155201,​155205,​155228,​155235,​155243,​155248,​155282,​155285,​155287-155288,​155311,​155316,​155336,​155342,​155344,​155437-155438,​155441,​155443,​155488,​155509-155513,​155526-155527,​155559,​155572,​155576-155577,​155592-155593
    r101154 r101162  
    11/* $Id$ */
    22/** @file
    3  * IPRT - Memory Allocation, Windows.
     3 * IPRT - rtMemPageNative*, Windows implementation.
    44 */
    55
    66/*
    7  * Copyright (C) 2006-2023 Oracle and/or its affiliates.
     7 * Copyright (C) 2023 Oracle and/or its affiliates.
    88 *
    99 * This file is part of VirtualBox base platform packages, as
     
    3939*   Header Files                                                                                                                 *
    4040*********************************************************************************************************************************/
    41 #ifdef IPRT_NO_CRT
    42 # define USE_VIRTUAL_ALLOC
    43 #endif
    44 #define LOG_GROUP RTLOGGROUP_MEM
    4541#include <iprt/win/windows.h>
    4642
    47 #include <iprt/alloc.h>
     43#include "internal/iprt.h"
     44#include <iprt/mem.h>
     45
    4846#include <iprt/assert.h>
     47#include <iprt/errcore.h>
    4948#include <iprt/param.h>
    5049#include <iprt/string.h>
    51 #include <iprt/errcore.h>
    52 
    53 #ifndef USE_VIRTUAL_ALLOC
    54 # include <malloc.h>
    55 #endif
     50#include "internal/mem.h"
    5651
    5752
    58 /** @todo merge the page alloc code with the heap-based mmap stuff as
    59  *        _aligned_malloc just isn't well suited for exec. */
    6053
    61 
    62 RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
     54DECLHIDDEN(int) rtMemPageNativeAlloc(size_t cb, uint32_t fFlags, void **ppvRet)
    6355{
    64     RT_NOREF_PV(pszTag);
    65 
    66 #ifdef USE_VIRTUAL_ALLOC
    67     void *pv = VirtualAlloc(NULL, RT_ALIGN_Z(cb, PAGE_SIZE), MEM_COMMIT, PAGE_READWRITE);
    68 #else
    69     void *pv = _aligned_malloc(RT_ALIGN_Z(cb, PAGE_SIZE), PAGE_SIZE);
    70 #endif
    71     AssertMsg(pv, ("cb=%d lasterr=%d\n", cb, GetLastError()));
    72     return pv;
     56    void *pv = VirtualAlloc(NULL, cb, MEM_COMMIT,
     57                            !(fFlags & RTMEMPAGEALLOC_F_EXECUTABLE) ? PAGE_READWRITE : PAGE_EXECUTE_READWRITE);
     58    *ppvRet = pv;
     59    if (RT_LIKELY(pv != NULL))
     60        return VINF_SUCCESS;
     61    return RTErrConvertFromWin32(GetLastError());
    7362}
    7463
    7564
    76 RTDECL(void *) RTMemPageAllocExTag(size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
     65DECLHIDDEN(int) rtMemPageNativeFree(void *pv, size_t cb)
    7766{
    78     size_t const cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    79     RT_NOREF_PV(pszTag);
    80     AssertReturn(!(fFlags & ~RTMEMPAGEALLOC_F_VALID_MASK), NULL);
     67    if (RT_LIKELY(VirtualFree(pv, 0, MEM_RELEASE)))
     68        return VINF_SUCCESS;
     69    int rc = RTErrConvertFromWin32(GetLastError());
     70    AssertMsgFailed(("rc=%d pv=%p cb=%#zx lasterr=%u\n", rc, pv, cb, GetLastError()));
     71    RT_NOREF(cb);
     72    return rc;
     73}
    8174
    82 #ifdef USE_VIRTUAL_ALLOC
    83     void *pv = VirtualAlloc(NULL, cbAligned, MEM_COMMIT,
    84                             !(fFlags & RTMEMPAGEALLOC_F_EXECUTABLE) ? PAGE_READWRITE : PAGE_EXECUTE_READWRITE);
    85 #else
    86     void *pv = _aligned_malloc(cbAligned, PAGE_SIZE);
    87 #endif
    88     AssertMsgReturn(pv, ("cb=%d lasterr=%d\n", cb, GetLastError()), NULL);
    8975
    90 #ifndef USE_VIRTUAL_ALLOC
    91     if (fFlags & RTMEMPAGEALLOC_F_EXECUTABLE)
    92     {
    93         DWORD      fIgn = 0;
    94         BOOL const fRc  = VirtualProtect(pv, cbAligned, PAGE_EXECUTE_READWRITE, &fIgn);
    95         AssertMsgReturnStmt(fRc, ("%p LB %#zx: %#u\n", pv, cbAligned, GetLastError()), _aligned_free(pv), NULL);
    96     }
    97 #endif
     76DECLHIDDEN(uint32_t) rtMemPageNativeApplyFlags(void *pv, size_t cb, uint32_t fFlags)
     77{
     78    uint32_t fRet = 0;
    9879
    9980    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
    10081    {
    10182        /** @todo check why we get ERROR_WORKING_SET_QUOTA here. */
    102         BOOL const fOkay = VirtualLock(pv, cbAligned);
     83        BOOL const fOkay = VirtualLock(pv, cb);
    10384        AssertMsg(fOkay || GetLastError() == ERROR_WORKING_SET_QUOTA, ("pv=%p cb=%d lasterr=%d\n", pv, cb, GetLastError()));
    104         NOREF(fOkay);
     85        if (fOkay)
     86            fRet |= RTMEMPAGEALLOC_F_ADVISE_LOCKED;
    10587    }
    10688
    107     if (fFlags & RTMEMPAGEALLOC_F_ZERO)
    108         RT_BZERO(pv, cbAligned);
     89    /** @todo Any way to apply RTMEMPAGEALLOC_F_ADVISE_NO_DUMP on windows? */
    10990
    110     return pv;
     91    return fRet;
    11192}
    11293
    11394
    114 RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
     95DECLHIDDEN(void) rtMemPageNativeRevertFlags(void *pv, size_t cb, uint32_t fFlags)
    11596{
    116     RT_NOREF_PV(pszTag);
    117 
    118 #ifdef USE_VIRTUAL_ALLOC
    119     void *pv = VirtualAlloc(NULL, RT_ALIGN_Z(cb, PAGE_SIZE), MEM_COMMIT, PAGE_READWRITE);
    120 #else
    121     void *pv = _aligned_malloc(RT_ALIGN_Z(cb, PAGE_SIZE), PAGE_SIZE);
    122 #endif
    123     if (pv)
     97    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
    12498    {
    125         memset(pv, 0, RT_ALIGN_Z(cb, PAGE_SIZE));
    126         return pv;
    127     }
    128     AssertMsgFailed(("cb=%d lasterr=%d\n", cb, GetLastError()));
    129     return NULL;
    130 }
    131 
    132 
    133 RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW_DEF
    134 {
    135     RT_NOREF_PV(cb);
    136 
    137     if (pv)
    138     {
    139 #ifdef USE_VIRTUAL_ALLOC
    140         if (!VirtualFree(pv, 0, MEM_RELEASE))
    141             AssertMsgFailed(("pv=%p lasterr=%d\n", pv, GetLastError()));
    142 #else
    143         /** @todo The exec version of this doesn't really work well... */
    144         MEMORY_BASIC_INFORMATION MemInfo = { NULL };
    145         SIZE_T cbRet = VirtualQuery(pv, &MemInfo, sizeof(MemInfo));
    146         Assert(cbRet > 0);
    147         if (cbRet > 0 && MemInfo.Protect == PAGE_EXECUTE_READWRITE)
    148         {
    149             DWORD      fIgn = 0;
    150             BOOL const fRc  = VirtualProtect(pv, cb, PAGE_READWRITE, &fIgn);
    151             Assert(fRc); RT_NOREF(fRc);
    152         }
    153         _aligned_free(pv);
    154 #endif
     99        /** @todo check why we get ERROR_NOT_LOCKED here... */
     100        BOOL const fOkay = VirtualUnlock(pv, cb);
     101        AssertMsg(fOkay || GetLastError() == ERROR_NOT_LOCKED, ("pv=%p cb=%d lasterr=%d\n", pv, cb, GetLastError()));
     102        RT_NOREF(fOkay);
    155103    }
    156104}
    157105
    158 
    159 RTDECL(int) RTMemProtect(void *pv, size_t cb, unsigned fProtect) RT_NO_THROW_DEF
    160 {
    161     /*
    162      * Validate input.
    163      */
    164     if (cb == 0)
    165     {
    166         AssertMsgFailed(("!cb\n"));
    167         return VERR_INVALID_PARAMETER;
    168     }
    169     if (fProtect & ~(RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC))
    170     {
    171         AssertMsgFailed(("fProtect=%#x\n", fProtect));
    172         return VERR_INVALID_PARAMETER;
    173     }
    174 
    175     /*
    176      * Convert the flags.
    177      */
    178     int fProt;
    179     Assert(!RTMEM_PROT_NONE);
    180     switch (fProtect & (RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC))
    181     {
    182         case RTMEM_PROT_NONE:
    183             fProt = PAGE_NOACCESS;
    184             break;
    185 
    186         case RTMEM_PROT_READ:
    187             fProt = PAGE_READONLY;
    188             break;
    189 
    190         case RTMEM_PROT_READ | RTMEM_PROT_WRITE:
    191             fProt = PAGE_READWRITE;
    192             break;
    193 
    194         case RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
    195             fProt = PAGE_EXECUTE_READWRITE;
    196             break;
    197 
    198         case RTMEM_PROT_READ | RTMEM_PROT_EXEC:
    199             fProt = PAGE_EXECUTE_READWRITE;
    200             break;
    201 
    202         case RTMEM_PROT_WRITE:
    203             fProt = PAGE_READWRITE;
    204             break;
    205 
    206         case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
    207             fProt = PAGE_EXECUTE_READWRITE;
    208             break;
    209 
    210         case RTMEM_PROT_EXEC:
    211             fProt = PAGE_EXECUTE_READWRITE;
    212             break;
    213 
    214         /* If the compiler had any brains it would warn about this case. */
    215         default:
    216             AssertMsgFailed(("fProtect=%#x\n", fProtect));
    217             return VERR_INTERNAL_ERROR;
    218     }
    219 
    220     /*
    221      * Align the request.
    222      */
    223     cb += (uintptr_t)pv & PAGE_OFFSET_MASK;
    224     pv = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
    225 
    226     /*
    227      * Change the page attributes.
    228      */
    229     DWORD fFlags = 0;
    230     if (VirtualProtect(pv, cb, fProt, &fFlags))
    231         return VINF_SUCCESS;
    232     return RTErrConvertFromWin32(GetLastError());
    233 }
    234 
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette