VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/alloc-ef.cpp@ 98032

Last change on this file since 98032 was 97210, checked in by vboxsync, 2 years ago

IPRT/alloc-ef: Drop unused DISQPVPARAMVAL stack variable. The type is being discontinued.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 33.7 KB
Line 
1/* $Id: alloc-ef.cpp 97210 2022-10-18 14:41:48Z vboxsync $ */
2/** @file
3 * IPRT - Memory Allocation, electric fence.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#include "alloc-ef.h"
42#include <iprt/mem.h>
43#include <iprt/log.h>
44#include <iprt/asm.h>
45#include <iprt/thread.h>
46#include <VBox/sup.h>
47#include <iprt/errcore.h>
48#ifndef IPRT_NO_CRT
49# include <errno.h>
50# include <stdio.h>
51# include <stdlib.h>
52#endif
53
54#include <iprt/alloc.h>
55#include <iprt/assert.h>
56#include <iprt/param.h>
57#include <iprt/string.h>
58
59#ifdef RTALLOC_REPLACE_MALLOC
60# include <VBox/dis.h>
61# include <VBox/disopcode.h>
62# include <dlfcn.h>
63# ifdef RT_OS_DARWIN
64# include <malloc/malloc.h>
65# endif
66#endif
67
68
69/*********************************************************************************************************************************
70* Defined Constants And Macros *
71*********************************************************************************************************************************/
72#ifdef RTALLOC_REPLACE_MALLOC
73# define RTMEM_REPLACMENT_ALIGN(a_cb) ((a_cb) >= 16 ? RT_ALIGN_Z(a_cb, 16) \
74 : (a_cb) >= sizeof(uintptr_t) ? RT_ALIGN_Z(a_cb, sizeof(uintptr_t)) : (a_cb))
75#endif
76
77
78/*********************************************************************************************************************************
79* Global Variables *
80*********************************************************************************************************************************/
#ifdef RTALLOC_EFENCE_TRACE
/** Spinlock protecting all the block globals below. */
static volatile uint32_t g_BlocksLock;
/** Tree tracking the live allocations, keyed by user pointer. */
static AVLPVTREE g_BlocksTree;
# ifdef RTALLOC_EFENCE_FREE_DELAYED
/** Head of the delayed blocks list (newest entries). */
static volatile PRTMEMBLOCK g_pBlocksDelayHead;
/** Tail of the delayed blocks list (oldest entries, freed first). */
static volatile PRTMEMBLOCK g_pBlocksDelayTail;
/** Number of bytes in the delay list (includes fences). */
static volatile size_t g_cbBlocksDelay;
# endif /* RTALLOC_EFENCE_FREE_DELAYED */
# ifdef RTALLOC_REPLACE_MALLOC
/** @name For calling the real allocation API we've replaced.
 * @{ */
void * (*g_pfnOrgMalloc)(size_t);
void * (*g_pfnOrgCalloc)(size_t, size_t);
void * (*g_pfnOrgRealloc)(void *, size_t);
void (*g_pfnOrgFree)(void *);
size_t (*g_pfnOrgMallocSize)(void *);
/** @} */
# endif
#endif /* RTALLOC_EFENCE_TRACE */
/** Array of pointers to watch for; freeing any of them triggers a panic. */
void *gapvRTMemFreeWatch[4] = {NULL, NULL, NULL, NULL};
/** Enable logging of all freed memory. */
bool gfRTMemFreeLog = false;
109
110
111/*********************************************************************************************************************************
112* Internal Functions *
113*********************************************************************************************************************************/
114#ifdef RTALLOC_REPLACE_MALLOC
115static void rtMemReplaceMallocAndFriends(void);
116#endif
117
118
119/**
120 * Complains about something.
121 */
122static void rtmemComplain(const char *pszOp, const char *pszFormat, ...)
123{
124 va_list args;
125 fprintf(stderr, "RTMem error: %s: ", pszOp);
126 va_start(args, pszFormat);
127 vfprintf(stderr, pszFormat, args);
128 va_end(args);
129 RTAssertDoPanic();
130}
131
/**
 * Logs an allocator event.
 *
 * Compiled out by default; flip the #if 0 below to get the events written
 * to stderr.  Same calling convention as rtmemComplain.
 */
DECLINLINE(void) rtmemLog(const char *pszOp, const char *pszFormat, ...)
{
#if 0
    va_list args;
    fprintf(stderr, "RTMem info: %s: ", pszOp);
    va_start(args, pszFormat);
    vfprintf(stderr, pszFormat, args);
    va_end(args);
#else
    NOREF(pszOp); NOREF(pszFormat);
#endif
}
147
148
149#ifdef RTALLOC_EFENCE_TRACE
150
151/**
152 * Acquires the lock.
153 */
154DECLINLINE(void) rtmemBlockLock(void)
155{
156 unsigned c = 0;
157 while (!ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
158 RTThreadSleepNoLog(((++c) >> 2) & 31);
159}
160
161
/**
 * Releases the block-tracking spinlock.
 *
 * The lock word must be held (== 1) on entry; it is reset to 0 using an
 * atomic exchange (full barrier).
 */
DECLINLINE(void) rtmemBlockUnlock(void)
{
    Assert(g_BlocksLock == 1);
    ASMAtomicXchgU32(&g_BlocksLock, 0);
}
170
171
/**
 * Creates a tracker block describing one allocation.
 *
 * The block itself comes from the real heap (the saved original malloc when
 * RTALLOC_REPLACE_MALLOC is active), so it is not fenced.
 *
 * Note: RT_SRC_POS_DECL expands to the implicit iLine, pszFile and
 * pszFunction parameters recorded below.
 *
 * @returns The new block, or NULL if the underlying heap is exhausted.
 * @param   enmType         The allocation type.
 * @param   cbUnaligned     The exact size the caller requested.
 * @param   cbAligned       The requested size with alignment padding.
 * @param   pszTag          Allocation tag (statically allocated string, not copied).
 * @param   pvCaller        Return address of the external caller.
 */
DECLINLINE(PRTMEMBLOCK) rtmemBlockCreate(RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
                                         const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
{
# ifdef RTALLOC_REPLACE_MALLOC
    /* Lazily patch the CRT heap APIs the first time we need the original malloc. */
    if (!g_pfnOrgMalloc)
        rtMemReplaceMallocAndFriends();
    PRTMEMBLOCK pBlock = (PRTMEMBLOCK)g_pfnOrgMalloc(sizeof(*pBlock));
# else
    PRTMEMBLOCK pBlock = (PRTMEMBLOCK)malloc(sizeof(*pBlock));
# endif
    if (pBlock)
    {
        pBlock->enmType     = enmType;
        pBlock->cbUnaligned = cbUnaligned;
        pBlock->cbAligned   = cbAligned;
        pBlock->pszTag      = pszTag;
        pBlock->pvCaller    = pvCaller;
        pBlock->iLine       = iLine;
        pBlock->pszFile     = pszFile;
        pBlock->pszFunction = pszFunction;
    }
    return pBlock;
}
198
199
/**
 * Frees a tracker block, returning it to the same heap rtmemBlockCreate
 * took it from.
 */
DECLINLINE(void) rtmemBlockFree(PRTMEMBLOCK pBlock)
{
# ifdef RTALLOC_REPLACE_MALLOC
    g_pfnOrgFree(pBlock);
# else
    free(pBlock);
# endif
}
211
212
213/**
214 * Insert a block from the tree.
215 */
216DECLINLINE(void) rtmemBlockInsert(PRTMEMBLOCK pBlock, void *pv)
217{
218 pBlock->Core.Key = pv;
219 rtmemBlockLock();
220 bool fRc = RTAvlPVInsert(&g_BlocksTree, &pBlock->Core);
221 rtmemBlockUnlock();
222 AssertRelease(fRc);
223}
224
225
226/**
227 * Remove a block from the tree and returns it to the caller.
228 */
229DECLINLINE(PRTMEMBLOCK) rtmemBlockRemove(void *pv)
230{
231 rtmemBlockLock();
232 PRTMEMBLOCK pBlock = (PRTMEMBLOCK)RTAvlPVRemove(&g_BlocksTree, pv);
233 rtmemBlockUnlock();
234 return pBlock;
235}
236
237/**
238 * Gets a block.
239 */
240DECLINLINE(PRTMEMBLOCK) rtmemBlockGet(void *pv)
241{
242 rtmemBlockLock();
243 PRTMEMBLOCK pBlock = (PRTMEMBLOCK)RTAvlPVGet(&g_BlocksTree, pv);
244 rtmemBlockUnlock();
245 return pBlock;
246}
247
248/**
249 * Dumps one allocation.
250 */
251static DECLCALLBACK(int) RTMemDumpOne(PAVLPVNODECORE pNode, void *pvUser)
252{
253 PRTMEMBLOCK pBlock = (PRTMEMBLOCK)pNode;
254 fprintf(stderr, "%p %08lx(+%02lx) %p\n",
255 pBlock->Core.Key,
256 (unsigned long)pBlock->cbUnaligned,
257 (unsigned long)(pBlock->cbAligned - pBlock->cbUnaligned),
258 pBlock->pvCaller);
259 NOREF(pvUser);
260 return 0;
261}
262
263/**
264 * Dumps the allocated blocks.
265 * This is something which you should call from gdb.
266 */
267extern "C" void RTMemDump(void);
268void RTMemDump(void)
269{
270 fprintf(stderr, "address size(alg) caller\n");
271 RTAvlPVDoWithAll(&g_BlocksTree, true, RTMemDumpOne, NULL);
272}
273
274# ifdef RTALLOC_EFENCE_FREE_DELAYED
275
/**
 * Pushes a freed block onto the head of the delayed-free list.
 *
 * The list reuses the AVL node links: pRight points towards the tail (older
 * entries), pLeft towards the head (newer entries).  The running byte count
 * includes the page-alignment padding and the fence.
 */
DECLINLINE(void) rtmemBlockDelayInsert(PRTMEMBLOCK pBlock)
{
    size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
    pBlock->Core.pRight = NULL;
    pBlock->Core.pLeft = NULL;
    rtmemBlockLock();
    if (g_pBlocksDelayHead)
    {
        /* Non-empty: link in front of the current head. */
        g_pBlocksDelayHead->Core.pLeft = (PAVLPVNODECORE)pBlock;
        pBlock->Core.pRight = (PAVLPVNODECORE)g_pBlocksDelayHead;
        g_pBlocksDelayHead = pBlock;
    }
    else
    {
        /* Empty: the new block is both head and tail. */
        g_pBlocksDelayTail = pBlock;
        g_pBlocksDelayHead = pBlock;
    }
    g_cbBlocksDelay += cbBlock;
    rtmemBlockUnlock();
}
299
/**
 * Pops the oldest block off the delayed-free list, but only while the list
 * holds more than RTALLOC_EFENCE_FREE_DELAYED bytes.
 *
 * @returns Block that should now really be freed, or NULL to keep
 *          everything delayed for the time being.
 */
DECLINLINE(PRTMEMBLOCK) rtmemBlockDelayRemove(void)
{
    PRTMEMBLOCK pBlock = NULL;
    rtmemBlockLock();
    if (g_cbBlocksDelay > RTALLOC_EFENCE_FREE_DELAYED)
    {
        pBlock = g_pBlocksDelayTail;
        if (pBlock)
        {
            /* Unlink the tail; pLeft points towards the head (newer entries). */
            g_pBlocksDelayTail = (PRTMEMBLOCK)pBlock->Core.pLeft;
            if (pBlock->Core.pLeft)
                pBlock->Core.pLeft->pRight = NULL;
            else
                g_pBlocksDelayHead = NULL;
            g_cbBlocksDelay -= RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
        }
    }
    rtmemBlockUnlock();
    return pBlock;
}
323
324
325/**
326 * Dumps the freed blocks.
327 * This is something which you should call from gdb.
328 */
329extern "C" void RTMemDumpFreed(void);
330void RTMemDumpFreed(void)
331{
332 fprintf(stderr, "address size(alg) caller\n");
333 for (PRTMEMBLOCK pCur = g_pBlocksDelayHead; pCur; pCur = (PRTMEMBLOCK)pCur->Core.pRight)
334 RTMemDumpOne(&pCur->Core, NULL);
335
336}
337
338# endif /* RTALLOC_EFENCE_FREE_DELAYED */
339
340#endif /* RTALLOC_EFENCE_TRACE */
341
342
343#if defined(RTALLOC_REPLACE_MALLOC) && defined(RTALLOC_EFENCE_TRACE)
344/*
345 *
346 * Replacing malloc, calloc, realloc, & free.
347 *
348 */
349
350/** Replacement for malloc. */
351static void *rtMemReplacementMalloc(size_t cb)
352{
353 size_t cbAligned = RTMEM_REPLACMENT_ALIGN(cb);
354 void *pv = rtR3MemAlloc("r-malloc", RTMEMTYPE_RTMEMALLOC, cb, cbAligned, "heap", ASMReturnAddress(), RT_SRC_POS);
355 if (!pv)
356 pv = g_pfnOrgMalloc(cb);
357 return pv;
358}
359
360/** Replacement for calloc. */
361static void *rtMemReplacementCalloc(size_t cbItem, size_t cItems)
362{
363 size_t cb = cbItem * cItems;
364 size_t cbAligned = RTMEM_REPLACMENT_ALIGN(cb);
365 void *pv = rtR3MemAlloc("r-calloc", RTMEMTYPE_RTMEMALLOCZ, cb, cbAligned, "heap", ASMReturnAddress(), RT_SRC_POS);
366 if (!pv)
367 pv = g_pfnOrgCalloc(cbItem, cItems);
368 return pv;
369}
370
/** Replacement for realloc: tracked blocks go thru the fenced heap, anything
 *  else is passed to the original realloc.
 *
 *  NOTE(review): for tracked blocks the aligned size (cbAligned), not cbNew,
 *  is forwarded as the new size, so the exact unaligned length is lost here —
 *  confirm against rtR3MemRealloc whether this is intentional. */
static void *rtMemReplacementRealloc(void *pvOld, size_t cbNew)
{
    if (pvOld)
    {
        /* We're not strict about where the memory was allocated. */
        PRTMEMBLOCK pBlock = rtmemBlockGet(pvOld);
        if (pBlock)
        {
            size_t cbAligned = RTMEM_REPLACMENT_ALIGN(cbNew);
            return rtR3MemRealloc("r-realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbAligned, "heap", ASMReturnAddress(), RT_SRC_POS);
        }
        return g_pfnOrgRealloc(pvOld, cbNew);
    }
    /* realloc(NULL, cb) behaves like malloc(cb). */
    return rtMemReplacementMalloc(cbNew);
}
387
388/** Replacement for free(). */
389static void rtMemReplacementFree(void *pv)
390{
391 if (pv)
392 {
393 /* We're not strict about where the memory was allocated. */
394 PRTMEMBLOCK pBlock = rtmemBlockGet(pv);
395 if (pBlock)
396 rtR3MemFree("r-free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), RT_SRC_POS);
397 else
398 g_pfnOrgFree(pv);
399 }
400}
401
402# ifdef RT_OS_DARWIN
403/** Replacement for malloc. */
404static size_t rtMemReplacementMallocSize(void *pv)
405{
406 size_t cb;
407 if (pv)
408 {
409 /* We're not strict about where the memory was allocated. */
410 PRTMEMBLOCK pBlock = rtmemBlockGet(pv);
411 if (pBlock)
412 cb = pBlock->cbUnaligned;
413 else
414 cb = g_pfnOrgMallocSize(pv);
415 }
416 else
417 cb = 0;
418 return cb;
419}
420# endif
421
422
/**
 * Patches the process' malloc, calloc, realloc and free (and on darwin also
 * malloc_size) so they are serviced by the electric-fence heap.
 *
 * For each API the prologue is first copied out to a side "jump back" page
 * (so the original can still be invoked via g_pfnOrg*), then the API's first
 * instructions are overwritten with a jump to our replacement.
 */
static void rtMemReplaceMallocAndFriends(void)
{
    /* Table of APIs to patch and where to store the jump-back entry points. */
    struct
    {
        const char *pszName;
        PFNRT pfnReplacement;
        PFNRT pfnOrg;
        PFNRT *ppfnJumpBack;
    } aApis[] =
    {
        { "free", (PFNRT)rtMemReplacementFree, (PFNRT)free, (PFNRT *)&g_pfnOrgFree },
        { "realloc", (PFNRT)rtMemReplacementRealloc, (PFNRT)realloc, (PFNRT *)&g_pfnOrgRealloc },
        { "calloc", (PFNRT)rtMemReplacementCalloc, (PFNRT)calloc, (PFNRT *)&g_pfnOrgCalloc },
        { "malloc", (PFNRT)rtMemReplacementMalloc, (PFNRT)malloc, (PFNRT *)&g_pfnOrgMalloc },
#ifdef RT_OS_DARWIN
        { "malloc_size", (PFNRT)rtMemReplacementMallocSize, (PFNRT)malloc_size, (PFNRT *)&g_pfnOrgMallocSize },
#endif
    };

    /*
     * Initialize the jump backs to avoid recursivly entering this function.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
        *aApis[i].ppfnJumpBack = aApis[i].pfnOrg;

    /*
     * Give the user an option to skip replacing malloc.
     */
    if (getenv("IPRT_DONT_REPLACE_MALLOC"))
        return;

    /*
     * Allocate a page for jump back code (we leak it).
     */
    uint8_t *pbExecPage = (uint8_t *)RTMemPageAlloc(PAGE_SIZE); AssertFatal(pbExecPage);
    int rc = RTMemProtect(pbExecPage, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); AssertFatalRC(rc);

    /*
     * Do the ground work.
     */
    uint8_t *pb = pbExecPage;
    for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
    {
        /* Resolve it. */
        PFNRT pfnOrg = (PFNRT)(uintptr_t)dlsym(RTLD_DEFAULT, aApis[i].pszName);
        if (pfnOrg)
            aApis[i].pfnOrg = pfnOrg;
        else
            pfnOrg = aApis[i].pfnOrg;

        /* Figure what we can replace and how much to duplicate in the jump back code. */
# ifdef RT_ARCH_AMD64
        uint32_t cbNeeded = 12;  /* room for mov rax, imm64 + jmp rax */
        DISCPUMODE const enmCpuMode = DISCPUMODE_64BIT;
# elif defined(RT_ARCH_X86)
        uint32_t const cbNeeded = 5;  /* room for jmp rel32 */
        DISCPUMODE const enmCpuMode = DISCPUMODE_32BIT;
# else
# error "Port me"
# endif
        uint32_t offJmpBack = 0;
        uint32_t cbCopy = 0;
        /* Disassemble whole instructions until we've covered the patch area. */
        while (offJmpBack < cbNeeded)
        {
            DISCPUSTATE Dis;
            uint32_t cbInstr = 1;
            rc = DISInstr((void *)((uintptr_t)pfnOrg + offJmpBack), enmCpuMode, &Dis, &cbInstr); AssertFatalRC(rc);
            /* Control flow in the prologue cannot be relocated safely. */
            AssertFatal(!(Dis.pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW)));
# ifdef RT_ARCH_AMD64
# ifdef RT_OS_DARWIN
            /* Kludge for: cmp [malloc_def_zone_state], 1; jg 2; call _malloc_initialize; 2: */
            if (   Dis.ModRM.Bits.Mod == 0
                && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */
                && (Dis.Param2.fUse & (DISUSE_IMMEDIATE16_SX8 | DISUSE_IMMEDIATE32_SX8 | DISUSE_IMMEDIATE64_SX8))
                && Dis.Param2.uValue == 1
                && Dis.pCurInstr->uOpcode == OP_CMP)
            {
                cbCopy = offJmpBack;

                offJmpBack += cbInstr;
                rc = DISInstr((void *)((uintptr_t)pfnOrg + offJmpBack), enmCpuMode, &Dis, &cbInstr); AssertFatalRC(rc);
                if (   Dis.pCurInstr->uOpcode == OP_JNBE
                    && Dis.Param1.uDisp.i8 == 5)
                {
                    offJmpBack += cbInstr + 5;
                    AssertFatal(offJmpBack >= cbNeeded);
                    break;
                }
            }
# endif
            /* RIP-relative operands would be relocated incorrectly by memcpy. */
            AssertFatal(!(Dis.ModRM.Bits.Mod == 0 && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */));
# endif
            offJmpBack += cbInstr;
        }
        if (!cbCopy)
            cbCopy = offJmpBack;

        /* Assemble the jump back. */
        memcpy(pb, (void *)(uintptr_t)pfnOrg, cbCopy);
        uint32_t off = cbCopy;
# ifdef RT_ARCH_AMD64
        pb[off++] = 0xff; /* jmp qword [$+8 wrt RIP] */
        pb[off++] = 0x25;
        *(uint32_t *)&pb[off] = 0;
        off += 4;
        *(uint64_t *)&pb[off] = (uintptr_t)pfnOrg + offJmpBack;
        off += 8;
        off = RT_ALIGN_32(off, 16);
# elif defined(RT_ARCH_X86)
        pb[off++] = 0xe9; /* jmp rel32 */
        *(uint32_t *)&pb[off] = (uintptr_t)pfnOrg + offJmpBack - (uintptr_t)&pb[4];
        off += 4;
        off = RT_ALIGN_32(off, 8);
# else
# error "Port me"
# endif
        *aApis[i].ppfnJumpBack = (PFNRT)(uintptr_t)pb;
        pb += off;
    }

    /*
     * Modify the APIs.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
    {
        pb = (uint8_t *)(uintptr_t)aApis[i].pfnOrg;
        rc = RTMemProtect(pb, 16, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); AssertFatalRC(rc);

# ifdef RT_ARCH_AMD64
        /* Assemble the LdrLoadDll patch. */
        *pb++ = 0x48; /* mov rax, qword */
        *pb++ = 0xb8;
        *(uint64_t *)pb = (uintptr_t)aApis[i].pfnReplacement;
        pb += 8;
        *pb++ = 0xff; /* jmp rax */
        *pb++ = 0xe0;
# elif defined(RT_ARCH_X86)
        *pb++ = 0xe9; /* jmp rel32 */
        *(uint32_t *)pb = (uintptr_t)aApis[i].pfnReplacement - (uintptr_t)&pb[4];
# else
# error "Port me"
# endif
    }
}
567
568#endif /* RTALLOC_REPLACE_MALLOC && RTALLOC_EFENCE_TRACE */
569
570
571/**
572 * Internal allocator.
573 */
574RTDECL(void *) rtR3MemAlloc(const char *pszOp, RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
575 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
576{
577 /*
578 * Sanity.
579 */
580 if ( RT_ALIGN_Z(RTALLOC_EFENCE_SIZE, PAGE_SIZE) != RTALLOC_EFENCE_SIZE
581 && RTALLOC_EFENCE_SIZE <= 0)
582 {
583 rtmemComplain(pszOp, "Invalid E-fence size! %#x\n", RTALLOC_EFENCE_SIZE);
584 return NULL;
585 }
586 if (!cbUnaligned)
587 {
588#if 0
589 rtmemComplain(pszOp, "Request of ZERO bytes allocation!\n");
590 return NULL;
591#else
592 cbAligned = cbUnaligned = 1;
593#endif
594 }
595
596#ifndef RTALLOC_EFENCE_IN_FRONT
597 /* Alignment decreases fence accuracy, but this is at least partially
598 * counteracted by filling and checking the alignment padding. When the
599 * fence is in front then then no extra alignment is needed. */
600 cbAligned = RT_ALIGN_Z(cbAligned, RTALLOC_EFENCE_ALIGNMENT);
601#endif
602
603#ifdef RTALLOC_EFENCE_TRACE
604 /*
605 * Allocate the trace block.
606 */
607 PRTMEMBLOCK pBlock = rtmemBlockCreate(enmType, cbUnaligned, cbAligned, pszTag, pvCaller, RT_SRC_POS_ARGS);
608 if (!pBlock)
609 {
610 rtmemComplain(pszOp, "Failed to allocate trace block!\n");
611 return NULL;
612 }
613#endif
614
615 /*
616 * Allocate a block with page alignment space + the size of the E-fence.
617 */
618 size_t cbBlock = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
619 void *pvBlock = RTMemPageAlloc(cbBlock);
620 if (pvBlock)
621 {
622 /*
623 * Calc the start of the fence and the user block
624 * and then change the page protection of the fence.
625 */
626#ifdef RTALLOC_EFENCE_IN_FRONT
627 void *pvEFence = pvBlock;
628 void *pv = (char *)pvEFence + RTALLOC_EFENCE_SIZE;
629# ifdef RTALLOC_EFENCE_NOMAN_FILLER
630 memset((char *)pv + cbUnaligned, RTALLOC_EFENCE_NOMAN_FILLER, cbBlock - RTALLOC_EFENCE_SIZE - cbUnaligned);
631# endif
632#else
633 void *pvEFence = (char *)pvBlock + (cbBlock - RTALLOC_EFENCE_SIZE);
634 void *pv = (char *)pvEFence - cbAligned;
635# ifdef RTALLOC_EFENCE_NOMAN_FILLER
636 memset(pvBlock, RTALLOC_EFENCE_NOMAN_FILLER, cbBlock - RTALLOC_EFENCE_SIZE - cbAligned);
637 memset((char *)pv + cbUnaligned, RTALLOC_EFENCE_NOMAN_FILLER, cbAligned - cbUnaligned);
638# endif
639#endif
640
641#ifdef RTALLOC_EFENCE_FENCE_FILLER
642 memset(pvEFence, RTALLOC_EFENCE_FENCE_FILLER, RTALLOC_EFENCE_SIZE);
643#endif
644 int rc = RTMemProtect(pvEFence, RTALLOC_EFENCE_SIZE, RTMEM_PROT_NONE);
645 if (!rc)
646 {
647#ifdef RTALLOC_EFENCE_TRACE
648 rtmemBlockInsert(pBlock, pv);
649#endif
650 if (enmType == RTMEMTYPE_RTMEMALLOCZ)
651 memset(pv, 0, cbUnaligned);
652#ifdef RTALLOC_EFENCE_FILLER
653 else
654 memset(pv, RTALLOC_EFENCE_FILLER, cbUnaligned);
655#endif
656
657 rtmemLog(pszOp, "returns %p (pvBlock=%p cbBlock=%#x pvEFence=%p cbUnaligned=%#x)\n", pv, pvBlock, cbBlock, pvEFence, cbUnaligned);
658 return pv;
659 }
660 rtmemComplain(pszOp, "RTMemProtect failed, pvEFence=%p size %d, rc=%d\n", pvEFence, RTALLOC_EFENCE_SIZE, rc);
661 RTMemPageFree(pvBlock, cbBlock);
662 }
663 else
664 rtmemComplain(pszOp, "Failed to allocated %lu (%lu) bytes.\n", (unsigned long)cbBlock, (unsigned long)cbUnaligned);
665
666#ifdef RTALLOC_EFENCE_TRACE
667 rtmemBlockFree(pBlock);
668#endif
669 return NULL;
670}
671
672
673/**
674 * Internal free.
675 */
676RTDECL(void) rtR3MemFree(const char *pszOp, RTMEMTYPE enmType, void *pv, size_t cbUser, void *pvCaller, RT_SRC_POS_DECL)
677{
678 NOREF(enmType); RT_SRC_POS_NOREF();
679
680 /*
681 * Simple case.
682 */
683 if (!pv)
684 return;
685
686 /*
687 * Check watch points.
688 */
689 for (unsigned i = 0; i < RT_ELEMENTS(gapvRTMemFreeWatch); i++)
690 if (gapvRTMemFreeWatch[i] == pv)
691 RTAssertDoPanic();
692
693#ifdef RTALLOC_EFENCE_TRACE
694 /*
695 * Find the block.
696 */
697 PRTMEMBLOCK pBlock = rtmemBlockRemove(pv);
698 if (pBlock)
699 {
700 if (gfRTMemFreeLog)
701 RTLogPrintf("RTMem %s: pv=%p pvCaller=%p cbUnaligned=%#x\n", pszOp, pv, pvCaller, pBlock->cbUnaligned);
702
703# ifdef RTALLOC_EFENCE_NOMAN_FILLER
704 /*
705 * Check whether the no man's land is untouched.
706 */
707# ifdef RTALLOC_EFENCE_IN_FRONT
708 void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
709 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbUnaligned,
710 RTALLOC_EFENCE_NOMAN_FILLER);
711# else
712 /* Alignment must match allocation alignment in rtMemAlloc(). */
713 void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
714 pBlock->cbAligned - pBlock->cbUnaligned,
715 RTALLOC_EFENCE_NOMAN_FILLER);
716 if (pvWrong)
717 RTAssertDoPanic();
718 pvWrong = ASMMemFirstMismatchingU8((void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK),
719 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbAligned,
720 RTALLOC_EFENCE_NOMAN_FILLER);
721# endif
722 if (pvWrong)
723 RTAssertDoPanic();
724# endif
725
726 /*
727 * Fill the user part of the block.
728 */
729 AssertMsg(enmType != RTMEMTYPE_RTMEMFREEZ || cbUser == pBlock->cbUnaligned,
730 ("cbUser=%#zx cbUnaligned=%#zx\n", cbUser, pBlock->cbUnaligned));
731 RT_NOREF(cbUser);
732 if (enmType == RTMEMTYPE_RTMEMFREEZ)
733 RT_BZERO(pv, pBlock->cbUnaligned);
734# ifdef RTALLOC_EFENCE_FREE_FILL
735 else
736 memset(pv, RTALLOC_EFENCE_FREE_FILL, pBlock->cbUnaligned);
737# endif
738
739# if defined(RTALLOC_EFENCE_FREE_DELAYED) && RTALLOC_EFENCE_FREE_DELAYED > 0
740 /*
741 * We're doing delayed freeing.
742 * That means we'll expand the E-fence to cover the entire block.
743 */
744 int rc = RTMemProtect(pv, pBlock->cbAligned, RTMEM_PROT_NONE);
745 if (RT_SUCCESS(rc))
746 {
747 /*
748 * Insert it into the free list and process pending frees.
749 */
750 rtmemBlockDelayInsert(pBlock);
751 while ((pBlock = rtmemBlockDelayRemove()) != NULL)
752 {
753 pv = pBlock->Core.Key;
754# ifdef RTALLOC_EFENCE_IN_FRONT
755 void *pvBlock = (char *)pv - RTALLOC_EFENCE_SIZE;
756# else
757 void *pvBlock = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
758# endif
759 size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
760 rc = RTMemProtect(pvBlock, cbBlock, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
761 if (RT_SUCCESS(rc))
762 RTMemPageFree(pvBlock, RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE);
763 else
764 rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvBlock, cbBlock, rc);
765 rtmemBlockFree(pBlock);
766 }
767 }
768 else
769 rtmemComplain(pszOp, "Failed to expand the efence of pv=%p cb=%d, rc=%d.\n", pv, pBlock, rc);
770
771# else /* !RTALLOC_EFENCE_FREE_DELAYED */
772
773 /*
774 * Turn of the E-fence and free it.
775 */
776# ifdef RTALLOC_EFENCE_IN_FRONT
777 void *pvBlock = (char *)pv - RTALLOC_EFENCE_SIZE;
778 void *pvEFence = pvBlock;
779# else
780 void *pvBlock = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
781 void *pvEFence = (char *)pv + pBlock->cb;
782# endif
783 int rc = RTMemProtect(pvEFence, RTALLOC_EFENCE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
784 if (RT_SUCCESS(rc))
785 RTMemPageFree(pvBlock, RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE);
786 else
787 rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvEFence, RTALLOC_EFENCE_SIZE, rc);
788 rtmemBlockFree(pBlock);
789
790# endif /* !RTALLOC_EFENCE_FREE_DELAYED */
791 }
792 else
793 rtmemComplain(pszOp, "pv=%p not found! Incorrect free!\n", pv);
794
795#else /* !RTALLOC_EFENCE_TRACE */
796
797 /*
798 * We have no size tracking, so we're not doing any freeing because
799 * we cannot if the E-fence is after the block.
800 * Let's just expand the E-fence to the first page of the user bit
801 * since we know that it's around.
802 */
803 if (enmType == RTMEMTYPE_RTMEMFREEZ)
804 RT_BZERO(pv, cbUser);
805 int rc = RTMemProtect((void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK), PAGE_SIZE, RTMEM_PROT_NONE);
806 if (RT_FAILURE(rc))
807 rtmemComplain(pszOp, "RTMemProtect(%p, PAGE_SIZE, RTMEM_PROT_NONE) -> %d\n", (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK), rc);
808#endif /* !RTALLOC_EFENCE_TRACE */
809}
810
811
812/**
813 * Internal realloc.
814 */
815RTDECL(void *) rtR3MemRealloc(const char *pszOp, RTMEMTYPE enmType, void *pvOld, size_t cbNew,
816 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
817{
818 /*
819 * Allocate new and copy.
820 */
821 if (!pvOld)
822 return rtR3MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
823 if (!cbNew)
824 {
825 rtR3MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, 0, pvCaller, RT_SRC_POS_ARGS);
826 return NULL;
827 }
828
829#ifdef RTALLOC_EFENCE_TRACE
830
831 /*
832 * Get the block, allocate the new, copy the data, free the old one.
833 */
834 PRTMEMBLOCK pBlock = rtmemBlockGet(pvOld);
835 if (pBlock)
836 {
837 void *pvRet = rtR3MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
838 if (pvRet)
839 {
840 memcpy(pvRet, pvOld, RT_MIN(cbNew, pBlock->cbUnaligned));
841 rtR3MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, 0, pvCaller, RT_SRC_POS_ARGS);
842 }
843 return pvRet;
844 }
845 else
846 rtmemComplain(pszOp, "pvOld=%p was not found!\n", pvOld);
847 return NULL;
848
849#else /* !RTALLOC_EFENCE_TRACE */
850
851 rtmemComplain(pszOp, "Not supported if RTALLOC_EFENCE_TRACE isn't defined!\n");
852 return NULL;
853
854#endif /* !RTALLOC_EFENCE_TRACE */
855}
856
857
858
859
860RTDECL(void *) RTMemEfTmpAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
861{
862 return rtR3MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
863}
864
865
866RTDECL(void *) RTMemEfTmpAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
867{
868 return rtR3MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
869}
870
871
872RTDECL(void) RTMemEfTmpFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
873{
874 if (pv)
875 rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), RT_SRC_POS_ARGS);
876}
877
878
879RTDECL(void) RTMemEfTmpFreeZ(void *pv, size_t cb, RT_SRC_POS_DECL) RT_NO_THROW_DEF
880{
881 if (pv)
882 rtR3MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
883}
884
885
886RTDECL(void *) RTMemEfAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
887{
888 return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
889}
890
891
892RTDECL(void *) RTMemEfAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
893{
894 return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
895}
896
897
898RTDECL(void *) RTMemEfAllocVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
899{
900 size_t cbAligned;
901 if (cbUnaligned >= 16)
902 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
903 else
904 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
905 return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
906}
907
908
909RTDECL(void *) RTMemEfAllocZVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
910{
911 size_t cbAligned;
912 if (cbUnaligned >= 16)
913 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
914 else
915 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
916 return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
917}
918
919
920RTDECL(void *) RTMemEfRealloc(void *pvOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
921{
922 return rtR3MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
923}
924
925
926RTDECL(void *) RTMemEfReallocZ(void *pvOld, size_t cbOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
927{
928 void *pvDst = rtR3MemRealloc("ReallocZ", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
929 if (pvDst && cbNew > cbOld)
930 memset((uint8_t *)pvDst + cbOld, 0, cbNew - cbOld);
931 return pvDst;
932}
933
934
935RTDECL(void) RTMemEfFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
936{
937 if (pv)
938 rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), RT_SRC_POS_ARGS);
939}
940
941
942RTDECL(void) RTMemEfFreeZ(void *pv, size_t cb, RT_SRC_POS_DECL) RT_NO_THROW_DEF
943{
944 if (pv)
945 rtR3MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
946}
947
948
949RTDECL(void *) RTMemEfDup(const void *pvSrc, size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
950{
951 void *pvDst = RTMemEfAlloc(cb, pszTag, RT_SRC_POS_ARGS);
952 if (pvDst)
953 memcpy(pvDst, pvSrc, cb);
954 return pvDst;
955}
956
957
958RTDECL(void *) RTMemEfDupEx(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
959{
960 void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, RT_SRC_POS_ARGS);
961 if (pvDst)
962 {
963 memcpy(pvDst, pvSrc, cbSrc);
964 memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
965 }
966 return pvDst;
967}
968
969
970
971
972/*
973 *
974 * The NP (no position) versions.
975 *
976 */
977
978
979
980RTDECL(void *) RTMemEfTmpAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
981{
982 return rtR3MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
983}
984
985
986RTDECL(void *) RTMemEfTmpAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
987{
988 return rtR3MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
989}
990
991
992RTDECL(void) RTMemEfTmpFreeNP(void *pv) RT_NO_THROW_DEF
993{
994 if (pv)
995 rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), NULL, 0, NULL);
996}
997
998
999RTDECL(void) RTMemEfTmpFreeZNP(void *pv, size_t cb) RT_NO_THROW_DEF
1000{
1001 if (pv)
1002 rtR3MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), NULL, 0, NULL);
1003}
1004
1005
1006RTDECL(void *) RTMemEfAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
1007{
1008 return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
1009}
1010
1011
1012RTDECL(void *) RTMemEfAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
1013{
1014 return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
1015}
1016
1017
1018RTDECL(void *) RTMemEfAllocVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
1019{
1020 size_t cbAligned;
1021 if (cbUnaligned >= 16)
1022 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
1023 else
1024 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
1025 return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
1026}
1027
1028
1029RTDECL(void *) RTMemEfAllocZVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
1030{
1031 size_t cbAligned;
1032 if (cbUnaligned >= 16)
1033 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
1034 else
1035 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
1036 return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
1037}
1038
1039
1040RTDECL(void *) RTMemEfReallocNP(void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
1041{
1042 return rtR3MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
1043}
1044
1045
1046RTDECL(void *) RTMemEfReallocZNP(void *pvOld, size_t cbOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
1047{
1048 void *pvDst = rtR3MemRealloc("ReallocZ", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
1049 if (pvDst && cbNew > cbOld)
1050 memset((uint8_t *)pvDst + cbOld, 0, cbNew - cbOld);
1051 return pvDst;
1052}
1053
1054
1055RTDECL(void) RTMemEfFreeNP(void *pv) RT_NO_THROW_DEF
1056{
1057 if (pv)
1058 rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), NULL, 0, NULL);
1059}
1060
1061
1062RTDECL(void) RTMemEfFreeZNP(void *pv, size_t cb) RT_NO_THROW_DEF
1063{
1064 if (pv)
1065 rtR3MemFree("Free", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), NULL, 0, NULL);
1066}
1067
1068
1069RTDECL(void *) RTMemEfDupNP(const void *pvSrc, size_t cb, const char *pszTag) RT_NO_THROW_DEF
1070{
1071 void *pvDst = RTMemEfAlloc(cb, pszTag, NULL, 0, NULL);
1072 if (pvDst)
1073 memcpy(pvDst, pvSrc, cb);
1074 return pvDst;
1075}
1076
1077
1078RTDECL(void *) RTMemEfDupExNP(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag) RT_NO_THROW_DEF
1079{
1080 void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, NULL, 0, NULL);
1081 if (pvDst)
1082 {
1083 memcpy(pvDst, pvSrc, cbSrc);
1084 memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
1085 }
1086 return pvDst;
1087}
1088
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette