VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/alloc-ef.cpp @ 90330

Last change on this file since 90330 was 86296, checked in by vboxsync, 4 years ago

IPRT/alloc-ef.cpp: Added RTMemDumpFreed for use from gdb.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 33.4 KB
1/* $Id: alloc-ef.cpp 86296 2020-09-25 21:04:55Z vboxsync $ */
2/** @file
3 * IPRT - Memory Allocation, electric fence.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "alloc-ef.h"
32#include <iprt/mem.h>
33#include <iprt/log.h>
34#include <iprt/asm.h>
35#include <iprt/thread.h>
36#include <VBox/sup.h>
37#include <iprt/errcore.h>
38#include <errno.h>
39#include <stdio.h>
40#include <stdlib.h>
41
42#include <iprt/alloc.h>
43#include <iprt/assert.h>
44#include <iprt/param.h>
45#include <iprt/string.h>
46
47#ifdef RTALLOC_REPLACE_MALLOC
48# include <VBox/dis.h>
49# include <VBox/disopcode.h>
50# include <dlfcn.h>
51# ifdef RT_OS_DARWIN
52# include <malloc/malloc.h>
53# endif
54#endif
55
56
57/*********************************************************************************************************************************
58* Defined Constants And Macros *
59*********************************************************************************************************************************/
60#ifdef RTALLOC_REPLACE_MALLOC
61# define RTMEM_REPLACMENT_ALIGN(a_cb) ((a_cb) >= 16 ? RT_ALIGN_Z(a_cb, 16) \
62 : (a_cb) >= sizeof(uintptr_t) ? RT_ALIGN_Z(a_cb, sizeof(uintptr_t)) : (a_cb))
63#endif
64
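/* A rough worked example of what RTMEM_REPLACMENT_ALIGN evaluates to when
 * RTALLOC_REPLACE_MALLOC is defined, assuming RT_ALIGN_Z rounds its argument
 * up to the given power-of-two boundary and a 64-bit host where
 * sizeof(uintptr_t) == 8:
 *
 *      RTMEM_REPLACMENT_ALIGN(5)   ->  5    (below pointer size: unchanged)
 *      RTMEM_REPLACMENT_ALIGN(9)   -> 16    (rounded up to sizeof(uintptr_t))
 *      RTMEM_REPLACMENT_ALIGN(17)  -> 32    (rounded up to 16)
 */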
65
66/*********************************************************************************************************************************
67* Global Variables *
68*********************************************************************************************************************************/
69#ifdef RTALLOC_EFENCE_TRACE
70/** Spinlock protecting all the block-tracking globals. */
71static volatile uint32_t g_BlocksLock;
72/** Tree tracking the allocations. */
73static AVLPVTREE g_BlocksTree;
74# ifdef RTALLOC_EFENCE_FREE_DELAYED
75/** Head of the delayed block list. */
76static volatile PRTMEMBLOCK g_pBlocksDelayHead;
77/** Tail of the delayed block list. */
78static volatile PRTMEMBLOCK g_pBlocksDelayTail;
79/** Number of bytes in the delay list (includes fences). */
80static volatile size_t g_cbBlocksDelay;
81# endif /* RTALLOC_EFENCE_FREE_DELAYED */
82# ifdef RTALLOC_REPLACE_MALLOC
83/** @name For calling the real allocation API we've replaced.
84 * @{ */
85void * (*g_pfnOrgMalloc)(size_t);
86void * (*g_pfnOrgCalloc)(size_t, size_t);
87void * (*g_pfnOrgRealloc)(void *, size_t);
88void (*g_pfnOrgFree)(void *);
89size_t (*g_pfnOrgMallocSize)(void *);
90/** @} */
91# endif
92#endif /* RTALLOC_EFENCE_TRACE */
93/** Array of pointers to watch for; freeing any of them triggers a panic. */
94void *gapvRTMemFreeWatch[4] = {NULL, NULL, NULL, NULL};
95/** Enable logging of all freed memory. */
96bool gfRTMemFreeLog = false;
97
98
99/*********************************************************************************************************************************
100* Internal Functions *
101*********************************************************************************************************************************/
102#ifdef RTALLOC_REPLACE_MALLOC
103static void rtMemReplaceMallocAndFriends(void);
104#endif
105
106
107/**
108 * Complains about an allocation problem and panics.
109 */
110static void rtmemComplain(const char *pszOp, const char *pszFormat, ...)
111{
112 va_list args;
113 fprintf(stderr, "RTMem error: %s: ", pszOp);
114 va_start(args, pszFormat);
115 vfprintf(stderr, pszFormat, args);
116 va_end(args);
117 RTAssertDoPanic();
118}
119
120/**
121 * Log an event.
122 */
123DECLINLINE(void) rtmemLog(const char *pszOp, const char *pszFormat, ...)
124{
125#if 0
126 va_list args;
127 fprintf(stderr, "RTMem info: %s: ", pszOp);
128 va_start(args, pszFormat);
129 vfprintf(stderr, pszFormat, args);
130 va_end(args);
131#else
132 NOREF(pszOp); NOREF(pszFormat);
133#endif
134}
135
136
137#ifdef RTALLOC_EFENCE_TRACE
138
139/**
140 * Acquires the lock.
141 */
142DECLINLINE(void) rtmemBlockLock(void)
143{
144 unsigned c = 0;
145 while (!ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
146 RTThreadSleepNoLog(((++c) >> 2) & 31);
147}
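/* Note on the back-off above: the sleep length ((++c) >> 2) & 31 is 0ms for
 * the first few spins, then ramps up towards 31ms (wrapping back to 0 after
 * that), so briefly held locks are taken without sleeping while longer waits
 * back off progressively. */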
148
149
150/**
151 * Releases the lock.
152 */
153DECLINLINE(void) rtmemBlockUnlock(void)
154{
155 Assert(g_BlocksLock == 1);
156 ASMAtomicXchgU32(&g_BlocksLock, 0);
157}
158
159
160/**
161 * Creates a block.
162 */
163DECLINLINE(PRTMEMBLOCK) rtmemBlockCreate(RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
164 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
165{
166# ifdef RTALLOC_REPLACE_MALLOC
167 if (!g_pfnOrgMalloc)
168 rtMemReplaceMallocAndFriends();
169 PRTMEMBLOCK pBlock = (PRTMEMBLOCK)g_pfnOrgMalloc(sizeof(*pBlock));
170# else
171 PRTMEMBLOCK pBlock = (PRTMEMBLOCK)malloc(sizeof(*pBlock));
172# endif
173 if (pBlock)
174 {
175 pBlock->enmType = enmType;
176 pBlock->cbUnaligned = cbUnaligned;
177 pBlock->cbAligned = cbAligned;
178 pBlock->pszTag = pszTag;
179 pBlock->pvCaller = pvCaller;
180 pBlock->iLine = iLine;
181 pBlock->pszFile = pszFile;
182 pBlock->pszFunction = pszFunction;
183 }
184 return pBlock;
185}
186
187
188/**
189 * Frees a block.
190 */
191DECLINLINE(void) rtmemBlockFree(PRTMEMBLOCK pBlock)
192{
193# ifdef RTALLOC_REPLACE_MALLOC
194 g_pfnOrgFree(pBlock);
195# else
196 free(pBlock);
197# endif
198}
199
200
201/**
202 * Inserts a block into the tree.
203 */
204DECLINLINE(void) rtmemBlockInsert(PRTMEMBLOCK pBlock, void *pv)
205{
206 pBlock->Core.Key = pv;
207 rtmemBlockLock();
208 bool fRc = RTAvlPVInsert(&g_BlocksTree, &pBlock->Core);
209 rtmemBlockUnlock();
210 AssertRelease(fRc);
211}
212
213
214/**
215 * Removes a block from the tree and returns it to the caller.
216 */
217DECLINLINE(PRTMEMBLOCK) rtmemBlockRemove(void *pv)
218{
219 rtmemBlockLock();
220 PRTMEMBLOCK pBlock = (PRTMEMBLOCK)RTAvlPVRemove(&g_BlocksTree, pv);
221 rtmemBlockUnlock();
222 return pBlock;
223}
224
225/**
226 * Gets a block.
227 */
228DECLINLINE(PRTMEMBLOCK) rtmemBlockGet(void *pv)
229{
230 rtmemBlockLock();
231 PRTMEMBLOCK pBlock = (PRTMEMBLOCK)RTAvlPVGet(&g_BlocksTree, pv);
232 rtmemBlockUnlock();
233 return pBlock;
234}
235
236/**
237 * Dumps one allocation.
238 */
239static DECLCALLBACK(int) RTMemDumpOne(PAVLPVNODECORE pNode, void *pvUser)
240{
241 PRTMEMBLOCK pBlock = (PRTMEMBLOCK)pNode;
242 fprintf(stderr, "%p %08lx(+%02lx) %p\n",
243 pBlock->Core.Key,
244 (unsigned long)pBlock->cbUnaligned,
245 (unsigned long)(pBlock->cbAligned - pBlock->cbUnaligned),
246 pBlock->pvCaller);
247 NOREF(pvUser);
248 return 0;
249}
250
251/**
252 * Dumps the allocated blocks.
253 * This is something which you should call from gdb.
254 */
255extern "C" void RTMemDump(void);
256void RTMemDump(void)
257{
258 fprintf(stderr, "address size(alg) caller\n");
259 RTAvlPVDoWithAll(&g_BlocksTree, true, RTMemDumpOne, NULL);
260}
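/* Usage sketch: with the process stopped under gdb, the dump can be requested
 * directly from the debugger, e.g.:
 *
 *      (gdb) call RTMemDump()
 *
 * The listing is written to the inferior's stderr in the
 * "address size(alg) caller" format printed above. */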
261
262# ifdef RTALLOC_EFENCE_FREE_DELAYED
263
264/**
265 * Inserts a block into the delayed-free list.
266 */
267DECLINLINE(void) rtmemBlockDelayInsert(PRTMEMBLOCK pBlock)
268{
269 size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
270 pBlock->Core.pRight = NULL;
271 pBlock->Core.pLeft = NULL;
272 rtmemBlockLock();
273 if (g_pBlocksDelayHead)
274 {
275 g_pBlocksDelayHead->Core.pLeft = (PAVLPVNODECORE)pBlock;
276 pBlock->Core.pRight = (PAVLPVNODECORE)g_pBlocksDelayHead;
277 g_pBlocksDelayHead = pBlock;
278 }
279 else
280 {
281 g_pBlocksDelayTail = pBlock;
282 g_pBlocksDelayHead = pBlock;
283 }
284 g_cbBlocksDelay += cbBlock;
285 rtmemBlockUnlock();
286}
287
288/**
289 * Removes a delayed block.
290 */
291DECLINLINE(PRTMEMBLOCK) rtmemBlockDelayRemove(void)
292{
293 PRTMEMBLOCK pBlock = NULL;
294 rtmemBlockLock();
295 if (g_cbBlocksDelay > RTALLOC_EFENCE_FREE_DELAYED)
296 {
297 pBlock = g_pBlocksDelayTail;
298 if (pBlock)
299 {
300 g_pBlocksDelayTail = (PRTMEMBLOCK)pBlock->Core.pLeft;
301 if (pBlock->Core.pLeft)
302 pBlock->Core.pLeft->pRight = NULL;
303 else
304 g_pBlocksDelayHead = NULL;
305 g_cbBlocksDelay -= RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
306 }
307 }
308 rtmemBlockUnlock();
309 return pBlock;
310}
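/* How the two routines above fit together (a summary of the code, not extra
 * behaviour): freed blocks are pushed at the head of the list and only popped
 * from the tail once the total held back, g_cbBlocksDelay, exceeds
 * RTALLOC_EFENCE_FREE_DELAYED.  Until a block is popped its pages stay
 * protected, so late writes through a dangling pointer fault instead of
 * silently corrupting reused memory. */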
311
312
313/**
314 * Dumps the freed blocks.
315 * This is meant to be called from gdb.
316 */
317extern "C" void RTMemDumpFreed(void);
318void RTMemDumpFreed(void)
319{
320 fprintf(stderr, "address size(alg) caller\n");
321 for (PRTMEMBLOCK pCur = g_pBlocksDelayHead; pCur; pCur = (PRTMEMBLOCK)pCur->Core.pRight)
322 RTMemDumpOne(&pCur->Core, NULL);
323
324}
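/* Usage sketch, same idea as RTMemDump above:
 *
 *      (gdb) call RTMemDumpFreed()
 *
 * walks the delayed-free list instead of the live-allocation tree. */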
325
326# endif /* RTALLOC_EFENCE_FREE_DELAYED */
327
328#endif /* RTALLOC_EFENCE_TRACE */
329
330
331#if defined(RTALLOC_REPLACE_MALLOC) && defined(RTALLOC_EFENCE_TRACE)
332/*
333 *
334 * Replacing malloc, calloc, realloc, & free.
335 *
336 */
337
338/** Replacement for malloc. */
339static void *rtMemReplacementMalloc(size_t cb)
340{
341 size_t cbAligned = RTMEM_REPLACMENT_ALIGN(cb);
342 void *pv = rtR3MemAlloc("r-malloc", RTMEMTYPE_RTMEMALLOC, cb, cbAligned, "heap", ASMReturnAddress(), RT_SRC_POS);
343 if (!pv)
344 pv = g_pfnOrgMalloc(cb);
345 return pv;
346}
347
348/** Replacement for calloc. */
349static void *rtMemReplacementCalloc(size_t cbItem, size_t cItems)
350{
351 size_t cb = cbItem * cItems;
352 size_t cbAligned = RTMEM_REPLACMENT_ALIGN(cb);
353 void *pv = rtR3MemAlloc("r-calloc", RTMEMTYPE_RTMEMALLOCZ, cb, cbAligned, "heap", ASMReturnAddress(), RT_SRC_POS);
354 if (!pv)
355 pv = g_pfnOrgCalloc(cbItem, cItems);
356 return pv;
357}
358
359/** Replacement for realloc. */
360static void *rtMemReplacementRealloc(void *pvOld, size_t cbNew)
361{
362 if (pvOld)
363 {
364 /* We're not strict about where the memory was allocated. */
365 PRTMEMBLOCK pBlock = rtmemBlockGet(pvOld);
366 if (pBlock)
367 {
368 size_t cbAligned = RTMEM_REPLACMENT_ALIGN(cbNew);
369 return rtR3MemRealloc("r-realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbAligned, "heap", ASMReturnAddress(), RT_SRC_POS);
370 }
371 return g_pfnOrgRealloc(pvOld, cbNew);
372 }
373 return rtMemReplacementMalloc(cbNew);
374}
375
376/** Replacement for free(). */
377static void rtMemReplacementFree(void *pv)
378{
379 if (pv)
380 {
381 /* We're not strict about where the memory was allocated. */
382 PRTMEMBLOCK pBlock = rtmemBlockGet(pv);
383 if (pBlock)
384 rtR3MemFree("r-free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), RT_SRC_POS);
385 else
386 g_pfnOrgFree(pv);
387 }
388}
389
390# ifdef RT_OS_DARWIN
391/** Replacement for malloc_size. */
392static size_t rtMemReplacementMallocSize(void *pv)
393{
394 size_t cb;
395 if (pv)
396 {
397 /* We're not strict about where the memory was allocated. */
398 PRTMEMBLOCK pBlock = rtmemBlockGet(pv);
399 if (pBlock)
400 cb = pBlock->cbUnaligned;
401 else
402 cb = g_pfnOrgMallocSize(pv);
403 }
404 else
405 cb = 0;
406 return cb;
407}
408# endif
409
410
411static void rtMemReplaceMallocAndFriends(void)
412{
413 struct
414 {
415 const char *pszName;
416 PFNRT pfnReplacement;
417 PFNRT pfnOrg;
418 PFNRT *ppfnJumpBack;
419 } aApis[] =
420 {
421 { "free", (PFNRT)rtMemReplacementFree, (PFNRT)free, (PFNRT *)&g_pfnOrgFree },
422 { "realloc", (PFNRT)rtMemReplacementRealloc, (PFNRT)realloc, (PFNRT *)&g_pfnOrgRealloc },
423 { "calloc", (PFNRT)rtMemReplacementCalloc, (PFNRT)calloc, (PFNRT *)&g_pfnOrgCalloc },
424 { "malloc", (PFNRT)rtMemReplacementMalloc, (PFNRT)malloc, (PFNRT *)&g_pfnOrgMalloc },
425#ifdef RT_OS_DARWIN
426 { "malloc_size", (PFNRT)rtMemReplacementMallocSize, (PFNRT)malloc_size, (PFNRT *)&g_pfnOrgMallocSize },
427#endif
428 };
429
430 /*
431 * Initialize the jump backs to avoid recursively entering this function.
432 */
433 for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
434 *aApis[i].ppfnJumpBack = aApis[i].pfnOrg;
435
436 /*
437 * Give the user an option to skip replacing malloc.
438 */
439 if (getenv("IPRT_DONT_REPLACE_MALLOC"))
440 return;
441
442 /*
443 * Allocate a page for jump back code (we leak it).
444 */
445 uint8_t *pbExecPage = (uint8_t *)RTMemPageAlloc(PAGE_SIZE); AssertFatal(pbExecPage);
446 int rc = RTMemProtect(pbExecPage, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); AssertFatalRC(rc);
447
448 /*
449 * Do the ground work.
450 */
451 uint8_t *pb = pbExecPage;
452 for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
453 {
454 /* Resolve it. */
455 PFNRT pfnOrg = (PFNRT)(uintptr_t)dlsym(RTLD_DEFAULT, aApis[i].pszName);
456 if (pfnOrg)
457 aApis[i].pfnOrg = pfnOrg;
458 else
459 pfnOrg = aApis[i].pfnOrg;
460
461 /* Figure what we can replace and how much to duplicate in the jump back code. */
462# ifdef RT_ARCH_AMD64
463 uint32_t cbNeeded = 12;
464 DISCPUMODE const enmCpuMode = DISCPUMODE_64BIT;
465# elif defined(RT_ARCH_X86)
466 uint32_t const cbNeeded = 5;
467 DISCPUMODE const enmCpuMode = DISCPUMODE_32BIT;
468# else
469# error "Port me"
470# endif
471 uint32_t offJmpBack = 0;
472 uint32_t cbCopy = 0;
473 while (offJmpBack < cbNeeded)
474 {
475 DISCPUSTATE Dis;
476 uint32_t cbInstr = 1;
477 rc = DISInstr((void *)((uintptr_t)pfnOrg + offJmpBack), enmCpuMode, &Dis, &cbInstr); AssertFatalRC(rc);
478 AssertFatal(!(Dis.pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW)));
479# ifdef RT_ARCH_AMD64
480# ifdef RT_OS_DARWIN
481 /* Kludge for: cmp [malloc_def_zone_state], 1; jg 2; call _malloc_initialize; 2: */
482 DISQPVPARAMVAL Parm;
483 if ( Dis.ModRM.Bits.Mod == 0
484 && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */
485 && (Dis.Param2.fUse & (DISUSE_IMMEDIATE16_SX8 | DISUSE_IMMEDIATE32_SX8 | DISUSE_IMMEDIATE64_SX8))
486 && Dis.Param2.uValue == 1
487 && Dis.pCurInstr->uOpcode == OP_CMP)
488 {
489 cbCopy = offJmpBack;
490
491 offJmpBack += cbInstr;
492 rc = DISInstr((void *)((uintptr_t)pfnOrg + offJmpBack), enmCpuMode, &Dis, &cbInstr); AssertFatalRC(rc);
493 if ( Dis.pCurInstr->uOpcode == OP_JNBE
494 && Dis.Param1.uDisp.i8 == 5)
495 {
496 offJmpBack += cbInstr + 5;
497 AssertFatal(offJmpBack >= cbNeeded);
498 break;
499 }
500 }
501# endif
502 AssertFatal(!(Dis.ModRM.Bits.Mod == 0 && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */));
503# endif
504 offJmpBack += cbInstr;
505 }
506 if (!cbCopy)
507 cbCopy = offJmpBack;
508
509 /* Assemble the jump back. */
510 memcpy(pb, (void *)(uintptr_t)pfnOrg, cbCopy);
511 uint32_t off = cbCopy;
512# ifdef RT_ARCH_AMD64
513 pb[off++] = 0xff; /* jmp qword [$+8 wrt RIP] */
514 pb[off++] = 0x25;
515 *(uint32_t *)&pb[off] = 0;
516 off += 4;
517 *(uint64_t *)&pb[off] = (uintptr_t)pfnOrg + offJmpBack;
518 off += 8;
519 off = RT_ALIGN_32(off, 16);
520# elif defined(RT_ARCH_X86)
521 pb[off++] = 0xe9; /* jmp rel32 */
522 *(uint32_t *)&pb[off] = (uintptr_t)pfnOrg + offJmpBack - (uintptr_t)&pb[off + 4];
523 off += 4;
524 off = RT_ALIGN_32(off, 8);
525# else
526# error "Port me"
527# endif
528 *aApis[i].ppfnJumpBack = (PFNRT)(uintptr_t)pb;
529 pb += off;
530 }
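    /* Sketch of the jump-back stub assembled above for AMD64 (the x86 variant
     * uses a single jmp rel32 instead):
     *
     *      <cbCopy bytes copied from the original function entry>
     *      FF 25 00 00 00 00         ; jmp qword [rip+0]
     *      <8-byte address of pfnOrg + offJmpBack>
     *
     * i.e. the stub replays the overwritten prologue bytes and then jumps
     * back into the original function just past the patched area. */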
531
532 /*
533 * Modify the APIs.
534 */
535 for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
536 {
537 pb = (uint8_t *)(uintptr_t)aApis[i].pfnOrg;
538 rc = RTMemProtect(pb, 16, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); AssertFatalRC(rc);
539
540# ifdef RT_ARCH_AMD64
541 /* Assemble the entry-point patch: mov rax, <replacement>; jmp rax. */
542 *pb++ = 0x48; /* mov rax, qword */
543 *pb++ = 0xb8;
544 *(uint64_t *)pb = (uintptr_t)aApis[i].pfnReplacement;
545 pb += 8;
546 *pb++ = 0xff; /* jmp rax */
547 *pb++ = 0xe0;
548# elif defined(RT_ARCH_X86)
549 *pb++ = 0xe9; /* jmp rel32 */
550 *(uint32_t *)pb = (uintptr_t)aApis[i].pfnReplacement - (uintptr_t)&pb[4];
551# else
552# error "Port me"
553# endif
554 }
555}
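/* For reference, the patch written over each original entry point above is,
 * on AMD64 (12 bytes, matching cbNeeded):
 *
 *      48 B8 <8-byte replacement address>   ; mov rax, imm64
 *      FF E0                                ; jmp rax
 *
 * and on x86 a 5-byte E9 <rel32> jump to the replacement. */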
556
557#endif /* RTALLOC_REPLACE_MALLOC && RTALLOC_EFENCE_TRACE */
558
559
560/**
561 * Internal allocator.
562 */
563RTDECL(void *) rtR3MemAlloc(const char *pszOp, RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
564 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
565{
566 /*
567 * Sanity.
568 */
569 if ( RT_ALIGN_Z(RTALLOC_EFENCE_SIZE, PAGE_SIZE) != RTALLOC_EFENCE_SIZE
570 || RTALLOC_EFENCE_SIZE <= 0)
571 {
572 rtmemComplain(pszOp, "Invalid E-fence size! %#x\n", RTALLOC_EFENCE_SIZE);
573 return NULL;
574 }
575 if (!cbUnaligned)
576 {
577#if 0
578 rtmemComplain(pszOp, "Request of ZERO bytes allocation!\n");
579 return NULL;
580#else
581 cbAligned = cbUnaligned = 1;
582#endif
583 }
584
585#ifndef RTALLOC_EFENCE_IN_FRONT
586 /* Alignment decreases fence accuracy, but this is at least partially
587 * counteracted by filling and checking the alignment padding. When the
588 * fence is in front, no extra alignment is needed. */
589 cbAligned = RT_ALIGN_Z(cbAligned, RTALLOC_EFENCE_ALIGNMENT);
590#endif
591
592#ifdef RTALLOC_EFENCE_TRACE
593 /*
594 * Allocate the trace block.
595 */
596 PRTMEMBLOCK pBlock = rtmemBlockCreate(enmType, cbUnaligned, cbAligned, pszTag, pvCaller, RT_SRC_POS_ARGS);
597 if (!pBlock)
598 {
599 rtmemComplain(pszOp, "Failed to allocate trace block!\n");
600 return NULL;
601 }
602#endif
603
604 /*
605 * Allocate a block with page alignment space + the size of the E-fence.
606 */
607 size_t cbBlock = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
608 void *pvBlock = RTMemPageAlloc(cbBlock);
609 if (pvBlock)
610 {
611 /*
612 * Calc the start of the fence and the user block
613 * and then change the page protection of the fence.
614 */
615#ifdef RTALLOC_EFENCE_IN_FRONT
616 void *pvEFence = pvBlock;
617 void *pv = (char *)pvEFence + RTALLOC_EFENCE_SIZE;
618# ifdef RTALLOC_EFENCE_NOMAN_FILLER
619 memset((char *)pv + cbUnaligned, RTALLOC_EFENCE_NOMAN_FILLER, cbBlock - RTALLOC_EFENCE_SIZE - cbUnaligned);
620# endif
621#else
622 void *pvEFence = (char *)pvBlock + (cbBlock - RTALLOC_EFENCE_SIZE);
623 void *pv = (char *)pvEFence - cbAligned;
624# ifdef RTALLOC_EFENCE_NOMAN_FILLER
625 memset(pvBlock, RTALLOC_EFENCE_NOMAN_FILLER, cbBlock - RTALLOC_EFENCE_SIZE - cbAligned);
626 memset((char *)pv + cbUnaligned, RTALLOC_EFENCE_NOMAN_FILLER, cbAligned - cbUnaligned);
627# endif
628#endif
629
630#ifdef RTALLOC_EFENCE_FENCE_FILLER
631 memset(pvEFence, RTALLOC_EFENCE_FENCE_FILLER, RTALLOC_EFENCE_SIZE);
632#endif
633 int rc = RTMemProtect(pvEFence, RTALLOC_EFENCE_SIZE, RTMEM_PROT_NONE);
634 if (!rc)
635 {
636#ifdef RTALLOC_EFENCE_TRACE
637 rtmemBlockInsert(pBlock, pv);
638#endif
639 if (enmType == RTMEMTYPE_RTMEMALLOCZ)
640 memset(pv, 0, cbUnaligned);
641#ifdef RTALLOC_EFENCE_FILLER
642 else
643 memset(pv, RTALLOC_EFENCE_FILLER, cbUnaligned);
644#endif
645
646 rtmemLog(pszOp, "returns %p (pvBlock=%p cbBlock=%#x pvEFence=%p cbUnaligned=%#x)\n", pv, pvBlock, cbBlock, pvEFence, cbUnaligned);
647 return pv;
648 }
649 rtmemComplain(pszOp, "RTMemProtect failed, pvEFence=%p size %d, rc=%d\n", pvEFence, RTALLOC_EFENCE_SIZE, rc);
650 RTMemPageFree(pvBlock, cbBlock);
651 }
652 else
653 rtmemComplain(pszOp, "Failed to allocate %lu (%lu) bytes.\n", (unsigned long)cbBlock, (unsigned long)cbUnaligned);
654
655#ifdef RTALLOC_EFENCE_TRACE
656 rtmemBlockFree(pBlock);
657#endif
658 return NULL;
659}
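/* A rough picture of the block layout produced above in the default
 * configuration (fence behind the user data; RTALLOC_EFENCE_IN_FRONT simply
 * mirrors it so underruns fault instead of overruns):
 *
 *   pvBlock                                              pvBlock + cbBlock
 *   |<---- RT_ALIGN_Z(cbAligned, PAGE_SIZE) ---->|<- RTALLOC_EFENCE_SIZE ->|
 *   | padding (noman filler) |  user data (pv)   | fence, RTMEM_PROT_NONE  |
 *                                                ^-- pvEFence
 *
 * pv = pvEFence - cbAligned, so writing past the aligned size immediately
 * hits the protected fence page(s). */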
660
661
662/**
663 * Internal free.
664 */
665RTDECL(void) rtR3MemFree(const char *pszOp, RTMEMTYPE enmType, void *pv, size_t cbUser, void *pvCaller, RT_SRC_POS_DECL)
666{
667 NOREF(enmType); RT_SRC_POS_NOREF();
668
669 /*
670 * Simple case.
671 */
672 if (!pv)
673 return;
674
675 /*
676 * Check watch points.
677 */
678 for (unsigned i = 0; i < RT_ELEMENTS(gapvRTMemFreeWatch); i++)
679 if (gapvRTMemFreeWatch[i] == pv)
680 RTAssertDoPanic();
681
682#ifdef RTALLOC_EFENCE_TRACE
683 /*
684 * Find the block.
685 */
686 PRTMEMBLOCK pBlock = rtmemBlockRemove(pv);
687 if (pBlock)
688 {
689 if (gfRTMemFreeLog)
690 RTLogPrintf("RTMem %s: pv=%p pvCaller=%p cbUnaligned=%#x\n", pszOp, pv, pvCaller, pBlock->cbUnaligned);
691
692# ifdef RTALLOC_EFENCE_NOMAN_FILLER
693 /*
694 * Check whether the no man's land is untouched.
695 */
696# ifdef RTALLOC_EFENCE_IN_FRONT
697 void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
698 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbUnaligned,
699 RTALLOC_EFENCE_NOMAN_FILLER);
700# else
701 /* Alignment must match the allocation alignment used in rtR3MemAlloc(). */
702 void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
703 pBlock->cbAligned - pBlock->cbUnaligned,
704 RTALLOC_EFENCE_NOMAN_FILLER);
705 if (pvWrong)
706 RTAssertDoPanic();
707 pvWrong = ASMMemFirstMismatchingU8((void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK),
708 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbAligned,
709 RTALLOC_EFENCE_NOMAN_FILLER);
710# endif
711 if (pvWrong)
712 RTAssertDoPanic();
713# endif
714
715 /*
716 * Fill the user part of the block.
717 */
718 AssertMsg(enmType != RTMEMTYPE_RTMEMFREEZ || cbUser == pBlock->cbUnaligned,
719 ("cbUser=%#zx cbUnaligned=%#zx\n", cbUser, pBlock->cbUnaligned));
720 RT_NOREF(cbUser);
721 if (enmType == RTMEMTYPE_RTMEMFREEZ)
722 RT_BZERO(pv, pBlock->cbUnaligned);
723# ifdef RTALLOC_EFENCE_FREE_FILL
724 else
725 memset(pv, RTALLOC_EFENCE_FREE_FILL, pBlock->cbUnaligned);
726# endif
727
728# if defined(RTALLOC_EFENCE_FREE_DELAYED) && RTALLOC_EFENCE_FREE_DELAYED > 0
729 /*
730 * We're doing delayed freeing.
731 * That means we'll expand the E-fence to cover the entire block.
732 */
733 int rc = RTMemProtect(pv, pBlock->cbAligned, RTMEM_PROT_NONE);
734 if (RT_SUCCESS(rc))
735 {
736 /*
737 * Insert it into the free list and process pending frees.
738 */
739 rtmemBlockDelayInsert(pBlock);
740 while ((pBlock = rtmemBlockDelayRemove()) != NULL)
741 {
742 pv = pBlock->Core.Key;
743# ifdef RTALLOC_EFENCE_IN_FRONT
744 void *pvBlock = (char *)pv - RTALLOC_EFENCE_SIZE;
745# else
746 void *pvBlock = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
747# endif
748 size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
749 rc = RTMemProtect(pvBlock, cbBlock, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
750 if (RT_SUCCESS(rc))
751 RTMemPageFree(pvBlock, RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE);
752 else
753 rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvBlock, cbBlock, rc);
754 rtmemBlockFree(pBlock);
755 }
756 }
757 else
758 rtmemComplain(pszOp, "Failed to expand the efence of pv=%p cb=%#zx, rc=%d.\n", pv, pBlock->cbAligned, rc);
759
760# else /* !RTALLOC_EFENCE_FREE_DELAYED */
761
762 /*
763 * Turn off the E-fence and free it.
764 */
765# ifdef RTALLOC_EFENCE_IN_FRONT
766 void *pvBlock = (char *)pv - RTALLOC_EFENCE_SIZE;
767 void *pvEFence = pvBlock;
768# else
769 void *pvBlock = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
770 void *pvEFence = (char *)pv + pBlock->cbAligned;
771# endif
772 int rc = RTMemProtect(pvEFence, RTALLOC_EFENCE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
773 if (RT_SUCCESS(rc))
774 RTMemPageFree(pvBlock, RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE);
775 else
776 rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvEFence, RTALLOC_EFENCE_SIZE, rc);
777 rtmemBlockFree(pBlock);
778
779# endif /* !RTALLOC_EFENCE_FREE_DELAYED */
780 }
781 else
782 rtmemComplain(pszOp, "pv=%p not found! Incorrect free!\n", pv);
783
784#else /* !RTALLOC_EFENCE_TRACE */
785
786 /*
787 * Without block tracking we don't know the size, so we cannot free
788 * anything when the E-fence sits after the block.
789 * Instead, just expand the E-fence over the first page of the user
790 * data, which we know is there.
791 */
792 if (enmType == RTMEMTYPE_RTMEMFREEZ)
793 RT_BZERO(pv, cbUser);
794 int rc = RTMemProtect((void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK), PAGE_SIZE, RTMEM_PROT_NONE);
795 if (RT_FAILURE(rc))
796 rtmemComplain(pszOp, "RTMemProtect(%p, PAGE_SIZE, RTMEM_PROT_NONE) -> %d\n", (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK), rc);
797#endif /* !RTALLOC_EFENCE_TRACE */
798}
799
800
801/**
802 * Internal realloc.
803 */
804RTDECL(void *) rtR3MemRealloc(const char *pszOp, RTMEMTYPE enmType, void *pvOld, size_t cbNew,
805 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
806{
807 /*
808 * Allocate new and copy.
809 */
810 if (!pvOld)
811 return rtR3MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
812 if (!cbNew)
813 {
814 rtR3MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, 0, pvCaller, RT_SRC_POS_ARGS);
815 return NULL;
816 }
817
818#ifdef RTALLOC_EFENCE_TRACE
819
820 /*
821 * Get the block, allocate the new, copy the data, free the old one.
822 */
823 PRTMEMBLOCK pBlock = rtmemBlockGet(pvOld);
824 if (pBlock)
825 {
826 void *pvRet = rtR3MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
827 if (pvRet)
828 {
829 memcpy(pvRet, pvOld, RT_MIN(cbNew, pBlock->cbUnaligned));
830 rtR3MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, 0, pvCaller, RT_SRC_POS_ARGS);
831 }
832 return pvRet;
833 }
834 else
835 rtmemComplain(pszOp, "pvOld=%p was not found!\n", pvOld);
836 return NULL;
837
838#else /* !RTALLOC_EFENCE_TRACE */
839
840 rtmemComplain(pszOp, "Not supported if RTALLOC_EFENCE_TRACE isn't defined!\n");
841 return NULL;
842
843#endif /* !RTALLOC_EFENCE_TRACE */
844}
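/* Note on the realloc strategy above: the old block is never resized in
 * place; a new fenced block is allocated, RT_MIN(cbNew, old cbUnaligned)
 * bytes are copied over, and the old block is freed.  Callers therefore
 * always get a fresh fence around the new size, at the cost of a copy on
 * every realloc. */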
845
846
847
848
849RTDECL(void *) RTMemEfTmpAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
850{
851 return rtR3MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
852}
853
854
855RTDECL(void *) RTMemEfTmpAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
856{
857 return rtR3MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
858}
859
860
861RTDECL(void) RTMemEfTmpFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
862{
863 if (pv)
864 rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), RT_SRC_POS_ARGS);
865}
866
867
868RTDECL(void) RTMemEfTmpFreeZ(void *pv, size_t cb, RT_SRC_POS_DECL) RT_NO_THROW_DEF
869{
870 if (pv)
871 rtR3MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
872}
873
874
875RTDECL(void *) RTMemEfAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
876{
877 return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
878}
879
880
881RTDECL(void *) RTMemEfAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
882{
883 return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
884}
885
886
887RTDECL(void *) RTMemEfAllocVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
888{
889 size_t cbAligned;
890 if (cbUnaligned >= 16)
891 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
892 else
893 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
894 return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
895}
896
897
898RTDECL(void *) RTMemEfAllocZVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
899{
900 size_t cbAligned;
901 if (cbUnaligned >= 16)
902 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
903 else
904 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
905 return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
906}
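/* Worked example for the *Var variants above, assuming a 64-bit host where
 * sizeof(void *) == 8 and RT_ALIGN_Z rounds up to the given boundary:
 *
 *      cbUnaligned = 10  ->  cbAligned = 16   (rounded to pointer size)
 *      cbUnaligned = 24  ->  cbAligned = 32   (rounded to 16)
 *      cbUnaligned = 32  ->  cbAligned = 32   (already aligned)
 */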
907
908
909RTDECL(void *) RTMemEfRealloc(void *pvOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
910{
911 return rtR3MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
912}
913
914
915RTDECL(void *) RTMemEfReallocZ(void *pvOld, size_t cbOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
916{
917 void *pvDst = rtR3MemRealloc("ReallocZ", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
918 if (pvDst && cbNew > cbOld)
919 memset((uint8_t *)pvDst + cbOld, 0, cbNew - cbOld);
920 return pvDst;
921}
922
923
924RTDECL(void) RTMemEfFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
925{
926 if (pv)
927 rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), RT_SRC_POS_ARGS);
928}
929
930
931RTDECL(void) RTMemEfFreeZ(void *pv, size_t cb, RT_SRC_POS_DECL) RT_NO_THROW_DEF
932{
933 if (pv)
934 rtR3MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
935}
936
937
938RTDECL(void *) RTMemEfDup(const void *pvSrc, size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
939{
940 void *pvDst = RTMemEfAlloc(cb, pszTag, RT_SRC_POS_ARGS);
941 if (pvDst)
942 memcpy(pvDst, pvSrc, cb);
943 return pvDst;
944}
945
946
947RTDECL(void *) RTMemEfDupEx(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
948{
949 void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, RT_SRC_POS_ARGS);
950 if (pvDst)
951 {
952 memcpy(pvDst, pvSrc, cbSrc);
953 memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
954 }
955 return pvDst;
956}
957
958
959
960
961/*
962 *
963 * The NP (no position) versions.
964 *
965 */
966
967
968
969RTDECL(void *) RTMemEfTmpAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
970{
971 return rtR3MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
972}
973
974
975RTDECL(void *) RTMemEfTmpAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
976{
977 return rtR3MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
978}
979
980
981RTDECL(void) RTMemEfTmpFreeNP(void *pv) RT_NO_THROW_DEF
982{
983 if (pv)
984 rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), NULL, 0, NULL);
985}
986
987
988RTDECL(void) RTMemEfTmpFreeZNP(void *pv, size_t cb) RT_NO_THROW_DEF
989{
990 if (pv)
991 rtR3MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), NULL, 0, NULL);
992}
993
994
995RTDECL(void *) RTMemEfAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
996{
997 return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
998}
999
1000
1001RTDECL(void *) RTMemEfAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
1002{
1003 return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
1004}
1005
1006
1007RTDECL(void *) RTMemEfAllocVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
1008{
1009 size_t cbAligned;
1010 if (cbUnaligned >= 16)
1011 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
1012 else
1013 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
1014 return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
1015}
1016
1017
1018RTDECL(void *) RTMemEfAllocZVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
1019{
1020 size_t cbAligned;
1021 if (cbUnaligned >= 16)
1022 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
1023 else
1024 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
1025 return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
1026}
1027
1028
1029RTDECL(void *) RTMemEfReallocNP(void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
1030{
1031 return rtR3MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
1032}
1033
1034
1035RTDECL(void *) RTMemEfReallocZNP(void *pvOld, size_t cbOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
1036{
1037 void *pvDst = rtR3MemRealloc("ReallocZ", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
1038 if (pvDst && cbNew > cbOld)
1039 memset((uint8_t *)pvDst + cbOld, 0, cbNew - cbOld);
1040 return pvDst;
1041}
1042
1043
1044RTDECL(void) RTMemEfFreeNP(void *pv) RT_NO_THROW_DEF
1045{
1046 if (pv)
1047 rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), NULL, 0, NULL);
1048}
1049
1050
1051RTDECL(void) RTMemEfFreeZNP(void *pv, size_t cb) RT_NO_THROW_DEF
1052{
1053 if (pv)
1054 rtR3MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), NULL, 0, NULL);
1055}
1056
1057
1058RTDECL(void *) RTMemEfDupNP(const void *pvSrc, size_t cb, const char *pszTag) RT_NO_THROW_DEF
1059{
1060 void *pvDst = RTMemEfAlloc(cb, pszTag, NULL, 0, NULL);
1061 if (pvDst)
1062 memcpy(pvDst, pvSrc, cb);
1063 return pvDst;
1064}
1065
1066
1067RTDECL(void *) RTMemEfDupExNP(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag) RT_NO_THROW_DEF
1068{
1069 void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, NULL, 0, NULL);
1070 if (pvDst)
1071 {
1072 memcpy(pvDst, pvSrc, cbSrc);
1073 memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
1074 }
1075 return pvDst;
1076}
1077