VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/alloc-ef.cpp@ 28688

Last change on this file since 28688 was 28434, checked in by vboxsync, 15 years ago

*: whitespace cleanups by scm and two manually picked nits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 21.3 KB
Line 
1/* $Id: alloc-ef.cpp 28434 2010-04-17 18:08:28Z vboxsync $ */
2/** @file
3 * IPRT - Memory Allocation, electric fence.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#include "alloc-ef.h"
36#include <iprt/mem.h>
37#include <iprt/log.h>
38#include <iprt/asm.h>
39#include <iprt/thread.h>
40#include <VBox/sup.h>
41#include <iprt/err.h>
42#include <errno.h>
43#include <stdio.h>
44#include <stdlib.h>
45
46#include <iprt/alloc.h>
47#include <iprt/assert.h>
48#include <iprt/param.h>
49#include <iprt/string.h>
50
51
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifdef RTALLOC_EFENCE_TRACE
/** Spinlock protecting all the blocks globals. */
static volatile uint32_t g_BlocksLock;
/** Tree tracking the allocations. */
static AVLPVTREE g_BlocksTree;
#ifdef RTALLOC_EFENCE_FREE_DELAYED
/** Head of the delayed blocks list. */
static volatile PRTMEMBLOCK g_pBlocksDelayHead;
/** Tail of the delayed blocks list. */
static volatile PRTMEMBLOCK g_pBlocksDelayTail;
/** Number of bytes in the delay list (includes fences). */
static volatile size_t g_cbBlocksDelay;
#endif
#endif
/** Array of addresses to watch for in free operations; freeing any of these
 * pointers triggers a panic in rtR3MemFree (debugging aid, presumably poked
 * in from a debugger -- there is no setter in this file). */
void *gapvRTMemFreeWatch[4] = {NULL, NULL, NULL, NULL};
/** Enable logging of all freed memory. */
bool gfRTMemFreeLog = false;
73
74
75/*******************************************************************************
76* Internal Functions *
77*******************************************************************************/
78/**
79 * Complains about something.
80 */
81static void rtmemComplain(const char *pszOp, const char *pszFormat, ...)
82{
83 va_list args;
84 fprintf(stderr, "RTMem error: %s: ", pszOp);
85 va_start(args, pszFormat);
86 vfprintf(stderr, pszFormat, args);
87 va_end(args);
88 RTAssertDoPanic();
89}
90
/**
 * Logs an allocation event.
 *
 * NOTE(review): the body is compiled out with '#if 0'; flip it to '#if 1' to
 * get stderr tracing of every allocation. Callers still evaluate the
 * arguments either way.
 */
DECLINLINE(void) rtmemLog(const char *pszOp, const char *pszFormat, ...)
{
#if 0
    va_list args;
    fprintf(stderr, "RTMem info: %s: ", pszOp);
    va_start(args, pszFormat);
    vfprintf(stderr, pszFormat, args);
    va_end(args);
#endif
}
104
105
106#ifdef RTALLOC_EFENCE_TRACE
107
108/**
109 * Aquires the lock.
110 */
111DECLINLINE(void) rtmemBlockLock(void)
112{
113 unsigned c = 0;
114 while (!ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
115 RTThreadSleep(((++c) >> 2) & 31);
116}
117
118
/**
 * Releases the spinlock protecting the block globals.
 * Caller must hold the lock (asserted).
 */
DECLINLINE(void) rtmemBlockUnlock(void)
{
    Assert(g_BlocksLock == 1);
    ASMAtomicXchgU32(&g_BlocksLock, 0);
}
127
128
129/**
130 * Creates a block.
131 */
132DECLINLINE(PRTMEMBLOCK) rtmemBlockCreate(RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
133 void *pvCaller, RT_SRC_POS_DECL)
134{
135 PRTMEMBLOCK pBlock = (PRTMEMBLOCK)malloc(sizeof(*pBlock));
136 if (pBlock)
137 {
138 pBlock->enmType = enmType;
139 pBlock->cbUnaligned = cbUnaligned;
140 pBlock->cbAligned = cbAligned;
141 pBlock->pvCaller = pvCaller;
142 pBlock->iLine = iLine;
143 pBlock->pszFile = pszFile;
144 pBlock->pszFunction = pszFunction;
145 }
146 return pBlock;
147}
148
149
/**
 * Frees a tracking record allocated by rtmemBlockCreate().
 */
DECLINLINE(void) rtmemBlockFree(PRTMEMBLOCK pBlock)
{
    free(pBlock);
}
157
158
159/**
160 * Insert a block from the tree.
161 */
162DECLINLINE(void) rtmemBlockInsert(PRTMEMBLOCK pBlock, void *pv)
163{
164 pBlock->Core.Key = pv;
165 rtmemBlockLock();
166 bool fRc = RTAvlPVInsert(&g_BlocksTree, &pBlock->Core);
167 rtmemBlockUnlock();
168 AssertRelease(fRc);
169}
170
171
172/**
173 * Remove a block from the tree and returns it to the caller.
174 */
175DECLINLINE(PRTMEMBLOCK) rtmemBlockRemove(void *pv)
176{
177 rtmemBlockLock();
178 PRTMEMBLOCK pBlock = (PRTMEMBLOCK)RTAvlPVRemove(&g_BlocksTree, pv);
179 rtmemBlockUnlock();
180 return pBlock;
181}
182
183/**
184 * Gets a block.
185 */
186DECLINLINE(PRTMEMBLOCK) rtmemBlockGet(void *pv)
187{
188 rtmemBlockLock();
189 PRTMEMBLOCK pBlock = (PRTMEMBLOCK)RTAvlPVGet(&g_BlocksTree, pv);
190 rtmemBlockUnlock();
191 return pBlock;
192}
193
194/**
195 * Dumps one allocation.
196 */
197static DECLCALLBACK(int) RTMemDumpOne(PAVLPVNODECORE pNode, void *pvUser)
198{
199 PRTMEMBLOCK pBlock = (PRTMEMBLOCK)pNode;
200 fprintf(stderr, "%p %08lx(+%02lx) %p\n",
201 pBlock->Core.Key,
202 (unsigned long)pBlock->cbUnaligned,
203 (unsigned long)(pBlock->cbAligned - pBlock->cbUnaligned),
204 pBlock->pvCaller);
205 return 0;
206}
207
208/**
209 * Dumps the allocated blocks.
210 * This is something which you should call from gdb.
211 */
212extern "C" void RTMemDump(void);
213void RTMemDump(void)
214{
215 fprintf(stderr, "address size(alg) caller\n");
216 RTAvlPVDoWithAll(&g_BlocksTree, true, RTMemDumpOne, NULL);
217}
218
219
220#ifdef RTALLOC_EFENCE_FREE_DELAYED
/**
 * Pushes a freed block onto the head of the delayed-free list.
 *
 * The AVL core's pLeft/pRight pointers are reused as list links here:
 * pLeft points towards the head of the list, pRight towards the tail.
 *
 * @param   pBlock  Tracking record of the block being freed.
 */
DECLINLINE(void) rtmemBlockDelayInsert(PRTMEMBLOCK pBlock)
{
    /* Full footprint of the allocation: page-rounded user area + fence. */
    size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
    pBlock->Core.pRight = NULL;
    pBlock->Core.pLeft = NULL;
    rtmemBlockLock();
    if (g_pBlocksDelayHead)
    {
        /* Non-empty list: link in front of the current head. */
        g_pBlocksDelayHead->Core.pLeft = (PAVLPVNODECORE)pBlock;
        pBlock->Core.pRight = (PAVLPVNODECORE)g_pBlocksDelayHead;
        g_pBlocksDelayHead = pBlock;
    }
    else
    {
        /* Empty list: the block becomes both head and tail. */
        g_pBlocksDelayTail = pBlock;
        g_pBlocksDelayHead = pBlock;
    }
    g_cbBlocksDelay += cbBlock;
    rtmemBlockUnlock();
}
244
/**
 * Pops the oldest block off the tail of the delayed-free list, but only
 * while the list holds more than RTALLOC_EFENCE_FREE_DELAYED bytes.
 *
 * @returns Block that should be freed for real now, or NULL if nothing
 *          needs freeing yet.
 */
DECLINLINE(PRTMEMBLOCK) rtmemBlockDelayRemove(void)
{
    PRTMEMBLOCK pBlock = NULL;
    rtmemBlockLock();
    if (g_cbBlocksDelay > RTALLOC_EFENCE_FREE_DELAYED)
    {
        pBlock = g_pBlocksDelayTail;
        if (pBlock)
        {
            /* Unlink from the tail; pLeft is the neighbour towards the head. */
            g_pBlocksDelayTail = (PRTMEMBLOCK)pBlock->Core.pLeft;
            if (pBlock->Core.pLeft)
                pBlock->Core.pLeft->pRight = NULL;
            else
                g_pBlocksDelayHead = NULL;
            g_cbBlocksDelay -= RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
        }
    }
    rtmemBlockUnlock();
    return pBlock;
}
268
269
270#endif /* DELAY */
271
272#endif /* RTALLOC_EFENCE_TRACE */
273
274
275/**
276 * Internal allocator.
277 */
278RTDECL(void *) rtR3MemAlloc(const char *pszOp, RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
279 void *pvCaller, RT_SRC_POS_DECL)
280{
281 /*
282 * Sanity.
283 */
284 if ( RT_ALIGN_Z(RTALLOC_EFENCE_SIZE, PAGE_SIZE) != RTALLOC_EFENCE_SIZE
285 && RTALLOC_EFENCE_SIZE <= 0)
286 {
287 rtmemComplain(pszOp, "Invalid E-fence size! %#x\n", RTALLOC_EFENCE_SIZE);
288 return NULL;
289 }
290 if (!cbUnaligned)
291 {
292#if 0
293 rtmemComplain(pszOp, "Request of ZERO bytes allocation!\n");
294 return NULL;
295#else
296 cbAligned = cbUnaligned = 1;
297#endif
298 }
299
300#ifndef RTALLOC_EFENCE_IN_FRONT
301 /* Alignment decreases fence accuracy, but this is at least partially
302 * counteracted by filling and checking the alignment padding. When the
303 * fence is in front then then no extra alignment is needed. */
304 cbAligned = RT_ALIGN_Z(cbAligned, RTALLOC_EFENCE_ALIGNMENT);
305#endif
306
307#ifdef RTALLOC_EFENCE_TRACE
308 /*
309 * Allocate the trace block.
310 */
311 PRTMEMBLOCK pBlock = rtmemBlockCreate(enmType, cbUnaligned, cbAligned, pvCaller, RT_SRC_POS_ARGS);
312 if (!pBlock)
313 {
314 rtmemComplain(pszOp, "Failed to allocate trace block!\n");
315 return NULL;
316 }
317#endif
318
319 /*
320 * Allocate a block with page alignment space + the size of the E-fence.
321 */
322 size_t cbBlock = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
323 void *pvBlock = RTMemPageAlloc(cbBlock);
324 if (pvBlock)
325 {
326 /*
327 * Calc the start of the fence and the user block
328 * and then change the page protection of the fence.
329 */
330#ifdef RTALLOC_EFENCE_IN_FRONT
331 void *pvEFence = pvBlock;
332 void *pv = (char *)pvEFence + RTALLOC_EFENCE_SIZE;
333# ifdef RTALLOC_EFENCE_NOMAN_FILLER
334 memset((char *)pv + cbUnaligned, RTALLOC_EFENCE_NOMAN_FILLER, cbBlock - RTALLOC_EFENCE_SIZE - cbUnaligned);
335# endif
336#else
337 void *pvEFence = (char *)pvBlock + (cbBlock - RTALLOC_EFENCE_SIZE);
338 void *pv = (char *)pvEFence - cbAligned;
339# ifdef RTALLOC_EFENCE_NOMAN_FILLER
340 memset(pvBlock, RTALLOC_EFENCE_NOMAN_FILLER, cbBlock - RTALLOC_EFENCE_SIZE - cbAligned);
341 memset((char *)pv + cbUnaligned, RTALLOC_EFENCE_NOMAN_FILLER, cbAligned - cbUnaligned);
342# endif
343#endif
344
345#ifdef RTALLOC_EFENCE_FENCE_FILLER
346 memset(pvEFence, RTALLOC_EFENCE_FENCE_FILLER, RTALLOC_EFENCE_SIZE);
347#endif
348 int rc = RTMemProtect(pvEFence, RTALLOC_EFENCE_SIZE, RTMEM_PROT_NONE);
349 if (!rc)
350 {
351#ifdef RTALLOC_EFENCE_TRACE
352 rtmemBlockInsert(pBlock, pv);
353#endif
354 if (enmType == RTMEMTYPE_RTMEMALLOCZ)
355 memset(pv, 0, cbUnaligned);
356#ifdef RTALLOC_EFENCE_FILLER
357 else
358 memset(pv, RTALLOC_EFENCE_FILLER, cbUnaligned);
359#endif
360
361 rtmemLog(pszOp, "returns %p (pvBlock=%p cbBlock=%#x pvEFence=%p cbUnaligned=%#x)\n", pv, pvBlock, cbBlock, pvEFence, cbUnaligned);
362 return pv;
363 }
364 rtmemComplain(pszOp, "RTMemProtect failed, pvEFence=%p size %d, rc=%d\n", pvEFence, RTALLOC_EFENCE_SIZE, rc);
365 RTMemPageFree(pvBlock, cbBlock);
366 }
367 else
368 rtmemComplain(pszOp, "Failed to allocated %lu (%lu) bytes.\n", (unsigned long)cbBlock, (unsigned long)cbUnaligned);
369
370#ifdef RTALLOC_EFENCE_TRACE
371 rtmemBlockFree(pBlock);
372#endif
373 return NULL;
374}
375
376
377/**
378 * Internal free.
379 */
380RTDECL(void) rtR3MemFree(const char *pszOp, RTMEMTYPE enmType, void *pv, void *pvCaller, RT_SRC_POS_DECL)
381{
382 /*
383 * Simple case.
384 */
385 if (!pv)
386 return;
387
388 /*
389 * Check watch points.
390 */
391 for (unsigned i = 0; i < RT_ELEMENTS(gapvRTMemFreeWatch); i++)
392 if (gapvRTMemFreeWatch[i] == pv)
393 RTAssertDoPanic();
394
395#ifdef RTALLOC_EFENCE_TRACE
396 /*
397 * Find the block.
398 */
399 PRTMEMBLOCK pBlock = rtmemBlockRemove(pv);
400 if (pBlock)
401 {
402 if (gfRTMemFreeLog)
403 RTLogPrintf("RTMem %s: pv=%p pvCaller=%p cbUnaligned=%#x\n", pszOp, pv, pvCaller, pBlock->cbUnaligned);
404
405# ifdef RTALLOC_EFENCE_NOMAN_FILLER
406 /*
407 * Check whether the no man's land is untouched.
408 */
409# ifdef RTALLOC_EFENCE_IN_FRONT
410 void *pvWrong = ASMMemIsAll8((char *)pv + pBlock->cbUnaligned,
411 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbUnaligned,
412 RTALLOC_EFENCE_NOMAN_FILLER);
413# else
414 /* Alignment must match allocation alignment in rtMemAlloc(). */
415 void *pvWrong = ASMMemIsAll8((char *)pv + pBlock->cbUnaligned,
416 pBlock->cbAligned - pBlock->cbUnaligned,
417 RTALLOC_EFENCE_NOMAN_FILLER);
418 if (pvWrong)
419 RTAssertDoPanic();
420 pvWrong = ASMMemIsAll8((void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK),
421 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbAligned,
422 RTALLOC_EFENCE_NOMAN_FILLER);
423# endif
424 if (pvWrong)
425 RTAssertDoPanic();
426# endif
427
428# ifdef RTALLOC_EFENCE_FREE_FILL
429 /*
430 * Fill the user part of the block.
431 */
432 memset(pv, RTALLOC_EFENCE_FREE_FILL, pBlock->cbUnaligned);
433# endif
434
435# if defined(RTALLOC_EFENCE_FREE_DELAYED) && RTALLOC_EFENCE_FREE_DELAYED > 0
436 /*
437 * We're doing delayed freeing.
438 * That means we'll expand the E-fence to cover the entire block.
439 */
440 int rc = RTMemProtect(pv, pBlock->cbAligned, RTMEM_PROT_NONE);
441 if (RT_SUCCESS(rc))
442 {
443 /*
444 * Insert it into the free list and process pending frees.
445 */
446 rtmemBlockDelayInsert(pBlock);
447 while ((pBlock = rtmemBlockDelayRemove()) != NULL)
448 {
449 pv = pBlock->Core.Key;
450# ifdef RTALLOC_EFENCE_IN_FRONT
451 void *pvBlock = (char *)pv - RTALLOC_EFENCE_SIZE;
452# else
453 void *pvBlock = (void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK);
454# endif
455 size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
456 rc = RTMemProtect(pvBlock, cbBlock, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
457 if (RT_SUCCESS(rc))
458 RTMemPageFree(pvBlock, RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE);
459 else
460 rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvBlock, cbBlock, rc);
461 rtmemBlockFree(pBlock);
462 }
463 }
464 else
465 rtmemComplain(pszOp, "Failed to expand the efence of pv=%p cb=%d, rc=%d.\n", pv, pBlock, rc);
466
467# else /* !RTALLOC_EFENCE_FREE_DELAYED */
468
469 /*
470 * Turn of the E-fence and free it.
471 */
472# ifdef RTALLOC_EFENCE_IN_FRONT
473 void *pvBlock = (char *)pv - RTALLOC_EFENCE_SIZE;
474 void *pvEFence = pvBlock;
475# else
476 void *pvBlock = (void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK);
477 void *pvEFence = (char *)pv + pBlock->cb;
478# endif
479 int rc = RTMemProtect(pvEFence, RTALLOC_EFENCE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
480 if (RT_SUCCESS(rc))
481 RTMemPageFree(pvBlock, RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE);
482 else
483 rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvEFence, RTALLOC_EFENCE_SIZE, rc);
484 rtmemBlockFree(pBlock);
485
486# endif /* !RTALLOC_EFENCE_FREE_DELAYED */
487 }
488 else
489 rtmemComplain(pszOp, "pv=%p not found! Incorrect free!\n", pv);
490
491#else /* !RTALLOC_EFENCE_TRACE */
492
493 /*
494 * We have no size tracking, so we're not doing any freeing because
495 * we cannot if the E-fence is after the block.
496 * Let's just expand the E-fence to the first page of the user bit
497 * since we know that it's around.
498 */
499 int rc = RTMemProtect((void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK), PAGE_SIZE, RTMEM_PROT_NONE);
500 if (RT_FAILURE(rc))
501 rtmemComplain(pszOp, "RTMemProtect(%p, PAGE_SIZE, RTMEM_PROT_NONE) -> %d\n", (void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK), rc);
502#endif /* !RTALLOC_EFENCE_TRACE */
503}
504
505
506/**
507 * Internal realloc.
508 */
509RTDECL(void *) rtR3MemRealloc(const char *pszOp, RTMEMTYPE enmType, void *pvOld, size_t cbNew, void *pvCaller, RT_SRC_POS_DECL)
510{
511 /*
512 * Allocate new and copy.
513 */
514 if (!pvOld)
515 return rtR3MemAlloc(pszOp, enmType, cbNew, cbNew, pvCaller, RT_SRC_POS_ARGS);
516 if (!cbNew)
517 {
518 rtR3MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, pvCaller, RT_SRC_POS_ARGS);
519 return NULL;
520 }
521
522#ifdef RTALLOC_EFENCE_TRACE
523
524 /*
525 * Get the block, allocate the new, copy the data, free the old one.
526 */
527 PRTMEMBLOCK pBlock = rtmemBlockGet(pvOld);
528 if (pBlock)
529 {
530 void *pvRet = rtR3MemAlloc(pszOp, enmType, cbNew, cbNew, pvCaller, RT_SRC_POS_ARGS);
531 if (pvRet)
532 {
533 memcpy(pvRet, pvOld, RT_MIN(cbNew, pBlock->cbUnaligned));
534 rtR3MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, pvCaller, RT_SRC_POS_ARGS);
535 }
536 return pvRet;
537 }
538 else
539 rtmemComplain(pszOp, "pvOld=%p was not found!\n", pvOld);
540 return NULL;
541
542#else /* !RTALLOC_EFENCE_TRACE */
543
544 rtmemComplain(pszOp, "Not supported if RTALLOC_EFENCE_TRACE isn't defined!\n");
545 return NULL;
546
547#endif /* !RTALLOC_EFENCE_TRACE */
548}
549
550
551
552
553RTDECL(void *) RTMemEfTmpAlloc(size_t cb, RT_SRC_POS_DECL) RT_NO_THROW
554{
555 return rtR3MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
556}
557
558
559RTDECL(void *) RTMemEfTmpAllocZ(size_t cb, RT_SRC_POS_DECL) RT_NO_THROW
560{
561 return rtR3MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOCZ, cb, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
562}
563
564
565RTDECL(void) RTMemEfTmpFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW
566{
567 if (pv)
568 rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), RT_SRC_POS_ARGS);
569}
570
571
572RTDECL(void *) RTMemEfAlloc(size_t cb, RT_SRC_POS_DECL) RT_NO_THROW
573{
574 return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
575}
576
577
578RTDECL(void *) RTMemEfAllocZ(size_t cb, RT_SRC_POS_DECL) RT_NO_THROW
579{
580 return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
581}
582
583
584RTDECL(void *) RTMemEfAllocVar(size_t cbUnaligned, RT_SRC_POS_DECL) RT_NO_THROW
585{
586 size_t cbAligned;
587 if (cbUnaligned >= 16)
588 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
589 else
590 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
591 return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, ASMReturnAddress(), RT_SRC_POS_ARGS);
592}
593
594
595RTDECL(void *) RTMemEfAllocZVar(size_t cbUnaligned, RT_SRC_POS_DECL) RT_NO_THROW
596{
597 size_t cbAligned;
598 if (cbUnaligned >= 16)
599 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
600 else
601 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
602 return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, ASMReturnAddress(), RT_SRC_POS_ARGS);
603}
604
605
606RTDECL(void *) RTMemEfRealloc(void *pvOld, size_t cbNew, RT_SRC_POS_DECL) RT_NO_THROW
607{
608 return rtR3MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, ASMReturnAddress(), RT_SRC_POS_ARGS);
609}
610
611
612RTDECL(void) RTMemEfFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW
613{
614 if (pv)
615 rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), RT_SRC_POS_ARGS);
616}
617
618
619RTDECL(void *) RTMemEfDup(const void *pvSrc, size_t cb, RT_SRC_POS_DECL) RT_NO_THROW
620{
621 void *pvDst = RTMemEfAlloc(cb, RT_SRC_POS_ARGS);
622 if (pvDst)
623 memcpy(pvDst, pvSrc, cb);
624 return pvDst;
625}
626
627
628RTDECL(void *) RTMemEfDupEx(const void *pvSrc, size_t cbSrc, size_t cbExtra, RT_SRC_POS_DECL) RT_NO_THROW
629{
630 void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, RT_SRC_POS_ARGS);
631 if (pvDst)
632 {
633 memcpy(pvDst, pvSrc, cbSrc);
634 memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
635 }
636 return pvDst;
637}
638
639
640
641
642/*
643 *
644 * The NP (no position) versions.
645 *
646 */
647
648
649
650RTDECL(void *) RTMemEfTmpAllocNP(size_t cb) RT_NO_THROW
651{
652 return rtR3MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, ASMReturnAddress(), NULL, 0, NULL);
653}
654
655
656RTDECL(void *) RTMemEfTmpAllocZNP(size_t cb) RT_NO_THROW
657{
658 return rtR3MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, ASMReturnAddress(), NULL, 0, NULL);
659}
660
661
662RTDECL(void) RTMemEfTmpFreeNP(void *pv) RT_NO_THROW
663{
664 if (pv)
665 rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), NULL, 0, NULL);
666}
667
668
669RTDECL(void *) RTMemEfAllocNP(size_t cb) RT_NO_THROW
670{
671 return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, ASMReturnAddress(), NULL, 0, NULL);
672}
673
674
675RTDECL(void *) RTMemEfAllocZNP(size_t cb) RT_NO_THROW
676{
677 return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, ASMReturnAddress(), NULL, 0, NULL);
678}
679
680
681RTDECL(void *) RTMemEfAllocVarNP(size_t cbUnaligned) RT_NO_THROW
682{
683 size_t cbAligned;
684 if (cbUnaligned >= 16)
685 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
686 else
687 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
688 return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, ASMReturnAddress(), NULL, 0, NULL);
689}
690
691
692RTDECL(void *) RTMemEfAllocZVarNP(size_t cbUnaligned) RT_NO_THROW
693{
694 size_t cbAligned;
695 if (cbUnaligned >= 16)
696 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
697 else
698 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
699 return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, ASMReturnAddress(), NULL, 0, NULL);
700}
701
702
703RTDECL(void *) RTMemEfReallocNP(void *pvOld, size_t cbNew) RT_NO_THROW
704{
705 return rtR3MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, ASMReturnAddress(), NULL, 0, NULL);
706}
707
708
709RTDECL(void) RTMemEfFreeNP(void *pv) RT_NO_THROW
710{
711 if (pv)
712 rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), NULL, 0, NULL);
713}
714
715
716RTDECL(void *) RTMemEfDupNP(const void *pvSrc, size_t cb) RT_NO_THROW
717{
718 void *pvDst = RTMemEfAlloc(cb, NULL, 0, NULL);
719 if (pvDst)
720 memcpy(pvDst, pvSrc, cb);
721 return pvDst;
722}
723
724
725RTDECL(void *) RTMemEfDupExNP(const void *pvSrc, size_t cbSrc, size_t cbExtra) RT_NO_THROW
726{
727 void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, NULL, 0, NULL);
728 if (pvDst)
729 {
730 memcpy(pvDst, pvSrc, cbSrc);
731 memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
732 }
733 return pvDst;
734}
735
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette