VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/posix/rtmempage-exec-mmap-heap-posix.cpp@ 78348

Last change on this file since 78348 was 78348, checked in by vboxsync, 6 years ago

IPRT/mem: Added RTMemPageAllocEx so we can try lock memory and try prevent it from being part of dumps/cores. [copy & paste fix]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 25.1 KB
Line 
1/* $Id: rtmempage-exec-mmap-heap-posix.cpp 78348 2019-04-29 13:12:56Z vboxsync $ */
2/** @file
3 * IPRT - RTMemPage*, POSIX with heap.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "internal/iprt.h"
32#include <iprt/mem.h>
33
34#include <iprt/asm.h>
35#include <iprt/assert.h>
36#include <iprt/avl.h>
37#include <iprt/critsect.h>
38#include <iprt/errcore.h>
39#include <iprt/once.h>
40#include <iprt/param.h>
41#include <iprt/string.h>
42#include "internal/mem.h"
43#include "../alloc-ef.h"
44
45#include <stdlib.h>
46#include <errno.h>
47#include <sys/mman.h>
48#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
49# define MAP_ANONYMOUS MAP_ANON
50#endif
51
52
53/*********************************************************************************************************************************
54* Defined Constants And Macros *
55*********************************************************************************************************************************/
56/** Threshold at which to we switch to simply calling mmap. */
57#define RTMEMPAGEPOSIX_MMAP_THRESHOLD _128K
58/** The size of a heap block (power of two) - in bytes. */
59#define RTMEMPAGEPOSIX_BLOCK_SIZE _2M
60AssertCompile(RTMEMPAGEPOSIX_BLOCK_SIZE == (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE) * PAGE_SIZE);
61/** The number of pages per heap block. */
62#define RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE)
63
64
65/*********************************************************************************************************************************
66* Structures and Typedefs *
67*********************************************************************************************************************************/
/** Pointer to a page heap block. */
typedef struct RTHEAPPAGEBLOCK *PRTHEAPPAGEBLOCK;

/**
 * A simple page heap.
 *
 * Pages are carved out of RTMEMPAGEPOSIX_BLOCK_SIZE (2MB) blocks kept in an
 * AVL tree keyed on their address range.  Accesses appear to be serialized by
 * CritSect (all public entry points take it).
 */
typedef struct RTHEAPPAGE
{
    /** Magic number (RTHEAPPAGE_MAGIC). */
    uint32_t            u32Magic;
    /** The number of pages in the heap (in BlockTree). */
    uint32_t            cHeapPages;
    /** The number of currently free pages. */
    uint32_t            cFreePages;
    /** Number of successful allocation calls. */
    uint32_t            cAllocCalls;
    /** Number of successful free calls. */
    uint32_t            cFreeCalls;
    /** The free call number at which we last tried to minimize the heap. */
    uint32_t            uLastMinimizeCall;
    /** Tree of heap blocks (RTHEAPPAGEBLOCK), keyed on the block address range. */
    AVLRPVTREE          BlockTree;
    /** Allocation hint no 1 (last freed). */
    PRTHEAPPAGEBLOCK    pHint1;
    /** Allocation hint no 2 (last alloc). */
    PRTHEAPPAGEBLOCK    pHint2;
    /** Critical section protecting the heap. */
    RTCRITSECT          CritSect;
    /** Set if the memory must be allocated with execute access. */
    bool                fExec;
} RTHEAPPAGE;
/** Value of RTHEAPPAGE::u32Magic for a valid heap. */
#define RTHEAPPAGE_MAGIC     UINT32_C(0xfeedface)
/** Pointer to a page heap. */
typedef RTHEAPPAGE *PRTHEAPPAGE;
102
103
104/**
105 * Describes a page heap block.
106 */
/**
 * Describes a page heap block.
 */
typedef struct RTHEAPPAGEBLOCK
{
    /** The AVL tree node core (void pointer range).  Key is the start address
     * of the block, KeyLast the last byte within it. */
    AVLRPVNODECORE      Core;
    /** Allocation bitmap.  Set bits mark allocated pages. */
    uint32_t            bmAlloc[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    /** Allocation boundary bitmap.  Set bits mark the start of
     * allocations. */
    uint32_t            bmFirst[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    /** The number of free pages. */
    uint32_t            cFreePages;
    /** Pointer back to the heap owning this block. */
    PRTHEAPPAGE         pHeap;
} RTHEAPPAGEBLOCK;
121
122
123/**
124 * Argument package for rtHeapPageAllocCallback.
125 */
/**
 * Argument package for rtHeapPageAllocCallback.
 */
typedef struct RTHEAPPAGEALLOCARGS
{
    /** The number of pages to allocate. */
    size_t      cPages;
    /** Out: the allocation address.  NULL until a block satisfies the
     * request, non-NULL on success. */
    void       *pvAlloc;
    /** RTMEMPAGEALLOC_F_XXX to apply to the allocation. */
    uint32_t    fFlags;
} RTHEAPPAGEALLOCARGS;
135
136
137/*********************************************************************************************************************************
138* Global Variables *
139*********************************************************************************************************************************/
140/** Initialize once structure. */
141static RTONCE g_MemPagePosixInitOnce = RTONCE_INITIALIZER;
142/** The page heap. */
143static RTHEAPPAGE g_MemPagePosixHeap;
144/** The exec page heap. */
145static RTHEAPPAGE g_MemExecPosixHeap;
146
147
148#ifdef RT_OS_OS2
149/*
150 * A quick mmap/munmap mockup for avoid duplicating lots of good code.
151 */
152# define INCL_BASE
153# include <os2.h>
154# undef MAP_PRIVATE
155# define MAP_PRIVATE 0
156# undef MAP_ANONYMOUS
157# define MAP_ANONYMOUS 0
158# undef MAP_FAILED
159# define MAP_FAILED (void *)-1
160# undef mmap
161# define mmap iprt_mmap
162# undef munmap
163# define munmap iprt_munmap
164
165static void *mmap(void *pvWhere, size_t cb, int fProt, int fFlags, int fd, off_t off)
166{
167 NOREF(pvWhere); NOREF(fd); NOREF(off);
168 void *pv = NULL;
169 ULONG fAlloc = OBJ_ANY | PAG_COMMIT;
170 if (fProt & PROT_EXEC)
171 fAlloc |= PAG_EXECUTE;
172 if (fProt & PROT_READ)
173 fAlloc |= PAG_READ;
174 if (fProt & PROT_WRITE)
175 fAlloc |= PAG_WRITE;
176 APIRET rc = DosAllocMem(&pv, cb, fAlloc);
177 if (rc == NO_ERROR)
178 return pv;
179 errno = ENOMEM;
180 return MAP_FAILED;
181}
182
183static int munmap(void *pv, size_t cb)
184{
185 APIRET rc = DosFreeMem(pv);
186 if (rc == NO_ERROR)
187 return 0;
188 errno = EINVAL;
189 return -1;
190}
191
192#endif
193
194/**
195 * Initializes the heap.
196 *
197 * @returns IPRT status code.
198 * @param pHeap The page heap to initialize.
199 * @param fExec Whether the heap memory should be marked as
200 * executable or not.
201 */
202int RTHeapPageInit(PRTHEAPPAGE pHeap, bool fExec)
203{
204 int rc = RTCritSectInitEx(&pHeap->CritSect,
205 RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
206 NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
207 if (RT_SUCCESS(rc))
208 {
209 pHeap->cHeapPages = 0;
210 pHeap->cFreePages = 0;
211 pHeap->cAllocCalls = 0;
212 pHeap->cFreeCalls = 0;
213 pHeap->uLastMinimizeCall = 0;
214 pHeap->BlockTree = NULL;
215 pHeap->fExec = fExec;
216 pHeap->u32Magic = RTHEAPPAGE_MAGIC;
217 }
218 return rc;
219}
220
221
/**
 * Deletes the heap and all the memory it tracks.
 *
 * @returns IPRT status code.
 * @param   pHeap   The page heap to delete.
 *
 * @note    Not implemented; always returns VERR_NOT_IMPLEMENTED.  The two
 *          global heaps in this file live for the lifetime of the process,
 *          so nothing currently needs this.
 */
int RTHeapPageDelete(PRTHEAPPAGE pHeap)
{
    NOREF(pHeap);
    return VERR_NOT_IMPLEMENTED;
}
233
234
235/**
236 * Applies flags to an allocation.
237 *
238 * @param pv The allocation.
239 * @param cb The size of the allocation (page aligned).
240 * @param fFlags RTMEMPAGEALLOC_F_XXX.
241 */
242DECLINLINE(void) rtMemPagePosixApplyFlags(void *pv, size_t cb, uint32_t fFlags)
243{
244#ifndef RT_OS_OS2
245 if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
246 {
247 int rc = mlock(pv, cb);
248 AssertMsg(rc == 0, ("mlock %p LB %#zx -> %d errno=%d\n", pv, cb, rc, errno));
249 NOREF(rc);
250 }
251
252# ifdef MADV_DONTDUMP
253 if (fFlags & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
254 {
255 int rc = madvise(pv, cb, MADV_DONTDUMP);
256 AssertMsg(rc == 0, ("madvice %p LB %#zx MADV_DONTDUMP -> %d errno=%d\n", pv, cb, rc, errno));
257 NOREF(rc);
258 }
259# endif
260#endif
261
262 if (fFlags & RTMEMPAGEALLOC_F_ZERO)
263 RT_BZERO(pv, cb);
264}
265
266
/**
 * Avoids some gotos in rtHeapPageAllocFromBlock.
 *
 * Completes an allocation whose pages have already been marked in bmAlloc:
 * records the allocation start, updates the statistics and hands out the
 * address.
 *
 * @returns VINF_SUCCESS.
 * @param   pBlock  The block.
 * @param   iPage   The page to start allocating at.
 * @param   cPages  The number of pages.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the allocation address.
 */
DECLINLINE(int) rtHeapPageAllocFromBlockSuccess(PRTHEAPPAGEBLOCK pBlock, uint32_t iPage, size_t cPages, uint32_t fFlags, void **ppv)
{
    PRTHEAPPAGE pHeap = pBlock->pHeap;

    /* Mark the start of the allocation so RTHeapPageFree can validate it. */
    ASMBitSet(&pBlock->bmFirst[0], iPage);
    pBlock->cFreePages -= cPages;
    pHeap->cFreePages  -= cPages;
    /* Keep pHint2 pointing at the block with the most free pages seen so far
       (heuristic; the hint is reset when blocks are unmapped). */
    if (!pHeap->pHint2 || pHeap->pHint2->cFreePages < pBlock->cFreePages)
        pHeap->pHint2 = pBlock;
    pHeap->cAllocCalls++;

    /* Translate page index to address within the block. */
    void *pv = (uint8_t *)pBlock->Core.Key + (iPage << PAGE_SHIFT);
    *ppv = pv;

    if (fFlags)
        rtMemPagePosixApplyFlags(pv, cPages << PAGE_SHIFT, fFlags);

    return VINF_SUCCESS;
}
296
297
298/**
299 * Checks if a page range is free in the specified block.
300 *
301 * @returns @c true if the range is free, @c false if not.
302 * @param pBlock The block.
303 * @param iFirst The first page to check.
304 * @param cPages The number of pages to check.
305 */
306DECLINLINE(bool) rtHeapPageIsPageRangeFree(PRTHEAPPAGEBLOCK pBlock, uint32_t iFirst, uint32_t cPages)
307{
308 uint32_t i = iFirst + cPages;
309 while (i-- > iFirst)
310 {
311 if (ASMBitTest(&pBlock->bmAlloc[0], i))
312 return false;
313 Assert(!ASMBitTest(&pBlock->bmFirst[0], i));
314 }
315 return true;
316}
317
318
/**
 * Tries to allocate a chunk of pages from a heap block.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if the allocation failed.
 * @param   pBlock  The block to allocate from.
 * @param   cPages  The size of the allocation.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the allocation address on success.
 */
DECLINLINE(int) rtHeapPageAllocFromBlock(PRTHEAPPAGEBLOCK pBlock, size_t cPages, uint32_t fFlags, void **ppv)
{
    if (pBlock->cFreePages >= cPages)
    {
        /* cFreePages > 0, so there must be at least one clear bit. */
        int iPage = ASMBitFirstClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT);
        Assert(iPage >= 0);

        /* special case: single page. */
        if (cPages == 1)
        {
            ASMBitSet(&pBlock->bmAlloc[0], iPage);
            return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
        }

        /* Scan for a run of cPages clear bits, starting each probe at the
           beginning of a free stretch.  First-fit, linear. */
        while (   iPage >= 0
               && (unsigned)iPage <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - cPages)
        {
            /* iPage itself is known clear; check the cPages - 1 that follow. */
            if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, cPages - 1))
            {
                ASMBitSetRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
            }

            /* next: skip to the end of the current allocated stretch, then to
               the next clear bit after it. */
            iPage = ASMBitNextSet(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
            if (iPage < 0 || iPage >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - 1)
                break;
            iPage = ASMBitNextClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
        }
    }

    return VERR_NO_MEMORY;
}
362
363
364/**
365 * RTAvlrPVDoWithAll callback.
366 *
367 * @returns 0 to continue the enum, non-zero to quit it.
368 * @param pNode The node.
369 * @param pvUser The user argument.
370 */
371static DECLCALLBACK(int) rtHeapPageAllocCallback(PAVLRPVNODECORE pNode, void *pvUser)
372{
373 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
374 RTHEAPPAGEALLOCARGS *pArgs = (RTHEAPPAGEALLOCARGS *)pvUser;
375 int rc = rtHeapPageAllocFromBlock(pBlock, pArgs->cPages, pArgs->fFlags, &pArgs->pvAlloc);
376 return RT_SUCCESS(rc) ? 1 : 0;
377}
378
379
/**
 * Worker for RTHeapPageAlloc.
 *
 * @returns IPRT status code
 * @param   pHeap   The heap - locked by the caller; temporarily left while
 *                  calling mmap/munmap and entered again before returning.
 * @param   cPages  The page count.
 * @param   pszTag  The tag (currently unused).
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the address of the allocation
 *                  on success.
 */
static int rtHeapPageAllocLocked(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
{
    int rc;
    NOREF(pszTag);

    /*
     * Use the hints first.
     */
    if (pHeap->pHint1)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint1, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }
    if (pHeap->pHint2)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint2, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }

    /*
     * Search the heap for a block with enough free space.
     *
     * N.B. This search algorithm is not optimal at all. What (hopefully) saves
     *      it are the two hints above.
     */
    if (pHeap->cFreePages >= cPages)
    {
        RTHEAPPAGEALLOCARGS Args;
        Args.cPages  = cPages;
        Args.pvAlloc = NULL;
        Args.fFlags  = fFlags;
        RTAvlrPVDoWithAll(&pHeap->BlockTree, true /*fFromLeft*/, rtHeapPageAllocCallback, &Args);
        if (Args.pvAlloc)
        {
            *ppv = Args.pvAlloc;
            return VINF_SUCCESS;
        }
    }

    /*
     * Didn't find anything, so expand the heap with a new block.
     *
     * NOTE(review): the critsect is dropped around the mmap call below, so
     * another thread may add a block concurrently; that appears harmless
     * since our block is inserted regardless — confirm this is intentional.
     */
    RTCritSectLeave(&pHeap->CritSect);
    void *pvPages;
    pvPages = mmap(NULL, RTMEMPAGEPOSIX_BLOCK_SIZE,
                   PROT_READ | PROT_WRITE | (pHeap->fExec ? PROT_EXEC : 0),
                   MAP_PRIVATE | MAP_ANONYMOUS,
                   -1, 0);
    if (pvPages == MAP_FAILED)
    {
        RTCritSectEnter(&pHeap->CritSect);
        return RTErrConvertFromErrno(errno);
    }
    /** @todo Eliminate this rtMemBaseAlloc dependency! */
    PRTHEAPPAGEBLOCK pBlock;
#ifdef RTALLOC_REPLACE_MALLOC
    if (g_pfnOrgMalloc)
        pBlock = (PRTHEAPPAGEBLOCK)g_pfnOrgMalloc(sizeof(*pBlock));
    else
#endif
        pBlock = (PRTHEAPPAGEBLOCK)rtMemBaseAlloc(sizeof(*pBlock));
    if (!pBlock)
    {
        munmap(pvPages, RTMEMPAGEPOSIX_BLOCK_SIZE);
        RTCritSectEnter(&pHeap->CritSect);
        return VERR_NO_MEMORY;
    }

    /* All pages start out free; the bitmaps are all-zero. */
    RT_ZERO(*pBlock);
    pBlock->Core.Key     = pvPages;
    pBlock->Core.KeyLast = (uint8_t *)pvPages + RTMEMPAGEPOSIX_BLOCK_SIZE - 1;
    pBlock->cFreePages   = RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pBlock->pHeap        = pHeap;

    RTCritSectEnter(&pHeap->CritSect);

    bool fRc = RTAvlrPVInsert(&pHeap->BlockTree, &pBlock->Core); Assert(fRc); NOREF(fRc);
    pHeap->cFreePages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pHeap->cHeapPages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;

    /*
     * Grab memory from the new block (cannot fail).
     */
    rc = rtHeapPageAllocFromBlock(pBlock, cPages, fFlags, ppv);
    Assert(rc == VINF_SUCCESS);

    return rc;
}
482
483
484/**
485 * Allocates one or more pages off the heap.
486 *
487 * @returns IPRT status code.
488 * @param pHeap The page heap.
489 * @param cPages The number of pages to allocate.
490 * @param pszTag The allocation tag.
491 * @param fFlags RTMEMPAGEALLOC_F_XXX.
492 * @param ppv Where to return the pointer to the pages.
493 */
494int RTHeapPageAlloc(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
495{
496 /*
497 * Validate input.
498 */
499 AssertPtr(ppv);
500 *ppv = NULL;
501 AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
502 AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);
503 AssertMsgReturn(cPages < RTMEMPAGEPOSIX_BLOCK_SIZE, ("%#zx\n", cPages), VERR_OUT_OF_RANGE);
504
505 /*
506 * Grab the lock and call a worker with many returns.
507 */
508 int rc = RTCritSectEnter(&pHeap->CritSect);
509 if (RT_SUCCESS(rc))
510 {
511 rc = rtHeapPageAllocLocked(pHeap, cPages, pszTag, fFlags, ppv);
512 RTCritSectLeave(&pHeap->CritSect);
513 }
514
515 return rc;
516}
517
518
519/**
520 * RTAvlrPVDoWithAll callback.
521 *
522 * @returns 0 to continue the enum, non-zero to quit it.
523 * @param pNode The node.
524 * @param pvUser Pointer to a block pointer variable. For returning
525 * the address of the block to be freed.
526 */
527static DECLCALLBACK(int) rtHeapPageFindUnusedBlockCallback(PAVLRPVNODECORE pNode, void *pvUser)
528{
529 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
530 if (pBlock->cFreePages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT)
531 {
532 *(PRTHEAPPAGEBLOCK *)pvUser = pBlock;
533 return 1;
534 }
535 return 0;
536}
537
538
/**
 * Frees one or more pages previously allocated by RTHeapPageAlloc.
 * (The original header comment was a copy-paste of the alloc one.)
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_POINTER if pv/cPages do not describe an allocation
 *          made from this heap.
 * @param   pHeap   The page heap.
 * @param   pv      Pointer to what RTHeapPageAlloc returned.  NULL is OK.
 * @param   cPages  The number of pages that was allocated.
 */
int RTHeapPageFree(PRTHEAPPAGE pHeap, void *pv, size_t cPages)
{
    /*
     * Validate input.
     */
    if (!pv)
        return VINF_SUCCESS;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Grab the lock and look up the page.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)RTAvlrPVRangeGet(&pHeap->BlockTree, pv);
        if (pBlock)
        {
            /*
             * Validate the specified address range against the bitmaps.
             */
            uint32_t const iPage = (uint32_t)(((uintptr_t)pv - (uintptr_t)pBlock->Core.Key) >> PAGE_SHIFT);
            /* Check the range is within the block. */
            bool fOk = iPage + cPages <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
            /* Check that it's the start of an allocation. */
            fOk = fOk && ASMBitTest(&pBlock->bmFirst[0], iPage);
            /* Check that the range ends at an allocation boundary: either at
               the end of the block, at the start of the next allocation, or
               at a free page. */
            fOk = fOk && (   iPage + cPages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
                          || ASMBitTest(&pBlock->bmFirst[0], iPage + cPages)
                          || !ASMBitTest(&pBlock->bmAlloc[0], iPage + cPages));
            /* Check the interior pages: allocated but not allocation starts.
               NOTE(review): the last page (iLastPage) is not itself checked
               against bmAlloc - confirm this laxity is intentional. */
            uint32_t const iLastPage = iPage + cPages - 1;
            for (uint32_t i = iPage + 1; i < iLastPage && fOk; i++)
                fOk = ASMBitTest(&pBlock->bmAlloc[0], i)
                   && !ASMBitTest(&pBlock->bmFirst[0], i);
            if (fOk)
            {
                /*
                 * Free the memory.
                 */
                ASMBitClearRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                ASMBitClear(&pBlock->bmFirst[0], iPage);
                pBlock->cFreePages += cPages;
                pHeap->cFreePages  += cPages;
                pHeap->cFreeCalls++;
                /* Prefer the block with the most free pages as the next
                   allocation hint. */
                if (!pHeap->pHint1 || pHeap->pHint1->cFreePages < pBlock->cFreePages)
                    pHeap->pHint1 = pBlock;

                /** @todo Add bitmaps for tracking madvice and mlock so we can undo those. */

                /*
                 * Shrink the heap. Not very efficient because of the AVL tree.
                 * Only triggered when at least 3 blocks worth of pages are free,
                 * at least half the heap is free, and enough free calls have
                 * passed since the last minimization attempt.
                 */
                if (   pHeap->cFreePages >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT * 3
                    && pHeap->cFreePages >= pHeap->cHeapPages / 2 /* 50% free */
                    && pHeap->cFreeCalls - pHeap->uLastMinimizeCall > RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
                   )
                {
                    uint32_t cFreePageTarget = pHeap->cHeapPages / 4; /* 25% free */
                    while (pHeap->cFreePages > cFreePageTarget)
                    {
                        pHeap->uLastMinimizeCall = pHeap->cFreeCalls;

                        pBlock = NULL;
                        RTAvlrPVDoWithAll(&pHeap->BlockTree, false /*fFromLeft*/,
                                          rtHeapPageFindUnusedBlockCallback, &pBlock);
                        if (!pBlock)
                            break;

                        /* Unlink the block and drop the hints (they may point
                           at it), then release the lock for the munmap call. */
                        void *pv2 = RTAvlrPVRemove(&pHeap->BlockTree, pBlock->Core.Key); Assert(pv2); NOREF(pv2);
                        pHeap->cHeapPages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
                        pHeap->cFreePages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
                        pHeap->pHint1      = NULL;
                        pHeap->pHint2      = NULL;
                        RTCritSectLeave(&pHeap->CritSect);

                        munmap(pBlock->Core.Key, RTMEMPAGEPOSIX_BLOCK_SIZE);
                        pBlock->Core.Key = pBlock->Core.KeyLast = NULL;
                        pBlock->cFreePages = 0;
#ifdef RTALLOC_REPLACE_MALLOC
                        if (g_pfnOrgFree)
                            g_pfnOrgFree(pBlock);
                        else
#endif
                            rtMemBaseFree(pBlock);

                        RTCritSectEnter(&pHeap->CritSect);
                    }
                }
            }
            else
                rc = VERR_INVALID_POINTER;
        }
        else
            rc = VERR_INVALID_POINTER;

        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}
649
650
651/**
652 * Initializes the heap.
653 *
654 * @returns IPRT status code
655 * @param pvUser Unused.
656 */
657static DECLCALLBACK(int) rtMemPagePosixInitOnce(void *pvUser)
658{
659 NOREF(pvUser);
660 int rc = RTHeapPageInit(&g_MemPagePosixHeap, false /*fExec*/);
661 if (RT_SUCCESS(rc))
662 {
663 rc = RTHeapPageInit(&g_MemExecPosixHeap, true /*fExec*/);
664 if (RT_SUCCESS(rc))
665 return rc;
666 RTHeapPageDelete(&g_MemPagePosixHeap);
667 }
668 return rc;
669}
670
671
672/**
673 * Allocates memory from the specified heap.
674 *
675 * @returns Address of the allocated memory.
676 * @param cb The number of bytes to allocate.
677 * @param pszTag The tag.
678 * @param fFlags RTMEMPAGEALLOC_F_XXX.
679 * @param pHeap The heap to use.
680 */
681static void *rtMemPagePosixAlloc(size_t cb, const char *pszTag, uint32_t fFlags, PRTHEAPPAGE pHeap)
682{
683 /*
684 * Validate & adjust the input.
685 */
686 Assert(cb > 0);
687 NOREF(pszTag);
688 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
689
690 /*
691 * If the allocation is relatively large, we use mmap/munmap directly.
692 */
693 void *pv;
694 if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
695 {
696
697 pv = mmap(NULL, cb,
698 PROT_READ | PROT_WRITE | (pHeap == &g_MemExecPosixHeap ? PROT_EXEC : 0),
699 MAP_PRIVATE | MAP_ANONYMOUS,
700 -1, 0);
701 if (pv != MAP_FAILED)
702 {
703 AssertPtr(pv);
704
705 if (fFlags)
706 rtMemPagePosixApplyFlags(pv, cb, fFlags);
707 }
708 else
709 pv = NULL;
710 }
711 else
712 {
713 int rc = RTOnce(&g_MemPagePosixInitOnce, rtMemPagePosixInitOnce, NULL);
714 if (RT_SUCCESS(rc))
715 rc = RTHeapPageAlloc(pHeap, cb >> PAGE_SHIFT, pszTag, fFlags, &pv);
716 if (RT_FAILURE(rc))
717 pv = NULL;
718 }
719
720 return pv;
721}
722
723
724/**
725 * Free memory allocated by rtMemPagePosixAlloc.
726 *
727 * @param pv The address of the memory to free.
728 * @param cb The size.
729 * @param pHeap The heap.
730 */
731static void rtMemPagePosixFree(void *pv, size_t cb, PRTHEAPPAGE pHeap)
732{
733 /*
734 * Validate & adjust the input.
735 */
736 if (!pv)
737 return;
738 AssertPtr(pv);
739 Assert(cb > 0);
740 Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
741 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
742
743 /*
744 * If the allocation is relatively large, we use mmap/munmap directly.
745 */
746 if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
747 {
748 int rc = munmap(pv, cb);
749 AssertMsg(rc == 0, ("rc=%d pv=%p cb=%#zx\n", rc, pv, cb)); NOREF(rc);
750 }
751 else
752 {
753 int rc = RTHeapPageFree(pHeap, pv, cb >> PAGE_SHIFT);
754 AssertRC(rc);
755 }
756}
757
758
759
760
761
/** Allocates uninitialized page-aligned memory from the default page heap. */
RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPagePosixAlloc(cb, pszTag, 0, &g_MemPagePosixHeap);
}
766
767
/** Allocates zeroed page-aligned memory from the default page heap. */
RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPagePosixAlloc(cb, pszTag, RTMEMPAGEALLOC_F_ZERO, &g_MemPagePosixHeap);
}
772
773
/** Allocates page-aligned memory from the default page heap with
 *  RTMEMPAGEALLOC_F_XXX flags; rejects unknown flag bits with NULL. */
RTDECL(void *) RTMemPageAllocExTag(size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
{
    AssertReturn(!(fFlags & ~RTMEMPAGEALLOC_F_VALID_MASK), NULL);
    return rtMemPagePosixAlloc(cb, pszTag, fFlags, &g_MemPagePosixHeap);
}
779
780
781RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW_DEF
782{
783 return rtMemPagePosixFree(pv, cb, &g_MemPagePosixHeap);
784}
785
786
787
788
789
/** Allocates executable page-aligned memory from the exec page heap. */
RTDECL(void *) RTMemExecAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPagePosixAlloc(cb, pszTag, 0, &g_MemExecPosixHeap);
}
794
795
796RTDECL(void) RTMemExecFree(void *pv, size_t cb) RT_NO_THROW_DEF
797{
798 return rtMemPagePosixFree(pv, cb, &g_MemExecPosixHeap);
799}
800
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette