VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/alloc-ef-r0drv.cpp@103005

Last change on this file: r103005, checked in by vboxsync on 2024-01-23

iprt/asm.h,*: Split out the ASMMem* and related stuff into a separate header, asm-mem.h, so that we can get the RT_ASM_PAGE_SIZE stuff out of the way.

1/* $Id: alloc-ef-r0drv.cpp 103005 2024-01-23 23:55:58Z vboxsync $ */
2/** @file
3 * IPRT - Memory Allocation, electric fence for ring-0 drivers.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#define RTMEM_NO_WRAP_TO_EF_APIS
42#include "internal/iprt.h"
43#include <iprt/mem.h>
44
45#include <iprt/alloc.h>
46#include <iprt/asm-mem.h>
47#include <iprt/asm.h>
48#include <iprt/asm-amd64-x86.h>
49#include <iprt/assert.h>
50#include <iprt/errcore.h>
51#include <iprt/log.h>
52#include <iprt/memobj.h>
53#include <iprt/param.h>
54#include <iprt/string.h>
55#include <iprt/thread.h>
56
57#include "internal/mem.h"
58
59
60/*********************************************************************************************************************************
61* Defined Constants And Macros *
62*********************************************************************************************************************************/
63#if defined(DOXYGEN_RUNNING)
64# define RTR0MEM_EF_IN_FRONT
65#endif
66
67/** @def RTR0MEM_EF_SIZE
68 * The size of the fence. This must be page aligned.
69 */
70#define RTR0MEM_EF_SIZE PAGE_SIZE
71
72/** @def RTR0MEM_EF_ALIGNMENT
73 * The allocation alignment, power of two of course.
74 *
75 * Use this for working around misaligned sizes, usually stemming from
76 * allocating a string or something after the main structure. When you
77 * encounter this, please switch the allocation to RTMemAllocVar or RTMemAllocZVar.
78 */
79#if 0
80# define RTR0MEM_EF_ALIGNMENT (ARCH_BITS / 8)
81#else
82# define RTR0MEM_EF_ALIGNMENT 1
83#endif
84
85/** @def RTR0MEM_EF_IN_FRONT
86 * Define this to put the fence up in front of the block.
87 * The default (when this isn't defined) is to put it up after the block.
88 */
89//# define RTR0MEM_EF_IN_FRONT
90
91/** @def RTR0MEM_EF_FREE_DELAYED
92 * This define will enable free() delay and protection of the freed data
93 * while it's being delayed. The value of RTR0MEM_EF_FREE_DELAYED defines
94 * the byte threshold for the delayed blocks.
95 * Delayed blocks do not consume any physical memory, only virtual address space.
96 */
97#define RTR0MEM_EF_FREE_DELAYED (20 * _1M)
98
99/** @def RTR0MEM_EF_FREE_FILL
100 * This define will enable memset(,RTR0MEM_EF_FREE_FILL,)'ing the user memory
101 * in the block before freeing/decommitting it. This is useful in GDB since GDB
102 * appears to be able to read the content of the page even after it's been
103 * decommitted.
104 */
105#define RTR0MEM_EF_FREE_FILL 'f'
106
107/** @def RTR0MEM_EF_FILLER
108 * This define will enable memset(,RTR0MEM_EF_FILLER,)'ing the allocated
109 * memory when the API doesn't require it to be zero'd.
110 */
111#define RTR0MEM_EF_FILLER 0xef
112
113/** @def RTR0MEM_EF_NOMAN_FILLER
114 * This define will enable memset(,RTR0MEM_EF_NOMAN_FILLER,)'ing the
115 * unprotected but not allocated area of memory, the so called no man's land.
116 */
117#define RTR0MEM_EF_NOMAN_FILLER 0xaa
118
119/** @def RTR0MEM_EF_FENCE_FILLER
120 * This define will enable memset(,RTR0MEM_EF_FENCE_FILLER,)'ing the
121 * fence itself, as debuggers can usually read them.
122 */
123#define RTR0MEM_EF_FENCE_FILLER 0xcc
124
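/*
 * Editor's note -- illustrative sketch, not part of the original file: the
 * block layout produced by rtR0MemAlloc() below with the defaults above
 * (RTR0MEM_EF_SIZE = PAGE_SIZE, no man's land filler 0xaa).  In both variants
 * cbBlock = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE.
 *
 * Default (fence placed after the user block):
 *
 *   pvBlock                        pv                       pvEFence
 *   |                              |                        |
 *   +------------------------------+------------------------+-----------------+
 *   | no man's land (0xaa)         | user block, cbAligned  | fence page(s),  |
 *   |                              | (tail padding is 0xaa) | RTMEM_PROT_NONE |
 *   +------------------------------+------------------------+-----------------+
 *   Writes past the aligned block hit the fence page and fault immediately;
 *   overruns into the 0xaa padding are caught at free time by rtR0MemFree().
 *
 * With RTR0MEM_EF_IN_FRONT defined:
 *
 *   pvBlock == pvEFence       pv
 *   |                         |
 *   +-------------------------+--------------------------+---------------------+
 *   | fence, RTMEM_PROT_NONE  | user block, cbUnaligned  | no man's land, 0xaa |
 *   +-------------------------+--------------------------+---------------------+
 *   Here underruns (accesses before pv) fault; overruns land in the no man's
 *   land and are detected when the block is freed.
 */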
125
126/*********************************************************************************************************************************
127* Header Files *
128*********************************************************************************************************************************/
129#ifdef RT_OS_WINDOWS
130# include <iprt/win/windows.h>
131#elif !defined(RT_OS_FREEBSD)
132# include <sys/mman.h>
133#endif
134#include <iprt/avl.h>
135#include <iprt/thread.h>
136
137
138/*********************************************************************************************************************************
139* Structures and Typedefs *
140*********************************************************************************************************************************/
141/**
142 * Allocation types.
143 */
144typedef enum RTMEMTYPE
145{
146 RTMEMTYPE_RTMEMALLOC,
147 RTMEMTYPE_RTMEMALLOCZ,
148 RTMEMTYPE_RTMEMREALLOC,
149 RTMEMTYPE_RTMEMFREE,
150 RTMEMTYPE_RTMEMFREEZ,
151
152 RTMEMTYPE_NEW,
153 RTMEMTYPE_NEW_ARRAY,
154 RTMEMTYPE_DELETE,
155 RTMEMTYPE_DELETE_ARRAY
156} RTMEMTYPE;
157
158/**
159 * Node tracking a memory allocation.
160 */
161typedef struct RTR0MEMEFBLOCK
162{
163 /** AVL node core, the key is the user block pointer. */
164 AVLPVNODECORE Core;
165 /** Allocation type. */
166 RTMEMTYPE enmType;
167 /** The memory object. */
168 RTR0MEMOBJ hMemObj;
169 /** The unaligned size of the block. */
170 size_t cbUnaligned;
171 /** The aligned size of the block. */
172 size_t cbAligned;
173 /** The allocation tag (read-only string). */
174 const char *pszTag;
175 /** The return address of the allocator function. */
176 void *pvCaller;
177 /** Line number of the alloc call. */
178 unsigned iLine;
179 /** File from which the allocation was made. */
180 const char *pszFile;
181 /** Function from which the allocation was made. */
182 const char *pszFunction;
183} RTR0MEMEFBLOCK, *PRTR0MEMEFBLOCK;
184
185
186
187/*********************************************************************************************************************************
188* Global Variables *
189*********************************************************************************************************************************/
190/** Spinlock protecting all the block globals. */
191static volatile uint32_t g_BlocksLock;
192/** Tree tracking the allocations. */
193static AVLPVTREE g_BlocksTree;
194
195#ifdef RTR0MEM_EF_FREE_DELAYED
196/** Head of the delayed blocks. */
197static volatile PRTR0MEMEFBLOCK g_pBlocksDelayHead;
198/** Tail of the delayed blocks. */
199static volatile PRTR0MEMEFBLOCK g_pBlocksDelayTail;
200/** Number of bytes in the delay list (includes fences). */
201static volatile size_t g_cbBlocksDelay;
202#endif /* RTR0MEM_EF_FREE_DELAYED */
203
204/** Array of pointers to watch for when freeing. */
205void *gapvRTMemFreeWatch[4] = {NULL, NULL, NULL, NULL};
206/** Enable logging of all freed memory. */
207bool gfRTMemFreeLog = false;
208
209
210/*********************************************************************************************************************************
211* Internal Functions *
212*********************************************************************************************************************************/
213
214
215/**
216 * @callback_method_impl{FNRTSTROUTPUT}
217 */
218static DECLCALLBACK(size_t) rtR0MemEfWrite(void *pvArg, const char *pachChars, size_t cbChars)
219{
220 RT_NOREF1(pvArg);
221 if (cbChars)
222 {
223 RTLogWriteDebugger(pachChars, cbChars);
224 RTLogWriteStdOut(pachChars, cbChars);
225 RTLogWriteUser(pachChars, cbChars);
226 }
227 return cbChars;
228}
229
230
231/**
232 * Complains about something.
233 */
234static void rtR0MemComplain(const char *pszOp, const char *pszFormat, ...)
235{
236 va_list args;
237 RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "RTMem error: %s: ", pszOp);
238 va_start(args, pszFormat);
239 RTStrFormatV(rtR0MemEfWrite, NULL, NULL, NULL, pszFormat, args);
240 va_end(args);
241 RTAssertDoPanic();
242}
243
244/**
245 * Log an event.
246 */
247DECLINLINE(void) rtR0MemLog(const char *pszOp, const char *pszFormat, ...)
248{
249#if 0
250 va_list args;
251 RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "RTMem info: %s: ", pszOp);
252 va_start(args, pszFormat);
253 RTStrFormatV(rtR0MemEfWrite, NULL, NULL, NULL, pszFormat, args);
254 va_end(args);
255#else
256 NOREF(pszOp); NOREF(pszFormat);
257#endif
258}
259
260
261
262/**
263 * Acquires the lock.
264 */
265DECLINLINE(RTCCUINTREG) rtR0MemBlockLock(void)
266{
267 RTCCUINTREG uRet;
268 unsigned c = 0;
269 if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
270 {
271 for (;;)
272 {
273 uRet = ASMIntDisableFlags();
274 if (ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
275 break;
276 ASMSetFlags(uRet);
277 RTThreadSleepNoLog(((++c) >> 2) & 31);
278 }
279 }
280 else
281 {
282 for (;;)
283 {
284 uRet = ASMIntDisableFlags();
285 if (ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
286 break;
287 ASMSetFlags(uRet);
288 ASMNopPause();
289 if (++c & 3)
290 ASMNopPause();
291 }
292 }
293 return uRet;
294}
295
296
297/**
298 * Releases the lock.
299 */
300DECLINLINE(void) rtR0MemBlockUnlock(RTCCUINTREG fSavedIntFlags)
301{
302 Assert(g_BlocksLock == 1);
303 ASMAtomicXchgU32(&g_BlocksLock, 0);
304 ASMSetFlags(fSavedIntFlags);
305}
306
307
308/**
309 * Creates a block.
310 */
311DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockCreate(RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
312 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
313{
314 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTMemAlloc(sizeof(*pBlock));
315 if (pBlock)
316 {
317 pBlock->enmType = enmType;
318 pBlock->cbUnaligned = cbUnaligned;
319 pBlock->cbAligned = cbAligned;
320 pBlock->pszTag = pszTag;
321 pBlock->pvCaller = pvCaller;
322 pBlock->iLine = iLine;
323 pBlock->pszFile = pszFile;
324 pBlock->pszFunction = pszFunction;
325 }
326 return pBlock;
327}
328
329
330/**
331 * Frees a block.
332 */
333DECLINLINE(void) rtR0MemBlockFree(PRTR0MEMEFBLOCK pBlock)
334{
335 RTMemFree(pBlock);
336}
337
338
339/**
340 * Inserts a block into the tree.
341 */
342DECLINLINE(void) rtR0MemBlockInsert(PRTR0MEMEFBLOCK pBlock, void *pv, RTR0MEMOBJ hMemObj)
343{
344 pBlock->Core.Key = pv;
345 pBlock->hMemObj = hMemObj;
346 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
347 bool fRc = RTAvlPVInsert(&g_BlocksTree, &pBlock->Core);
348 rtR0MemBlockUnlock(fSavedIntFlags);
349 AssertRelease(fRc);
350}
351
352
353/**
354 * Removes a block from the tree and returns it to the caller.
355 */
356DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockRemove(void *pv)
357{
358 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
359 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTAvlPVRemove(&g_BlocksTree, pv);
360 rtR0MemBlockUnlock(fSavedIntFlags);
361 return pBlock;
362}
363
364
365/**
366 * Gets a block.
367 */
368DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockGet(void *pv)
369{
370 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
371 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTAvlPVGet(&g_BlocksTree, pv);
372 rtR0MemBlockUnlock(fSavedIntFlags);
373 return pBlock;
374}
375
376
377/**
378 * Dumps one allocation.
379 */
380static DECLCALLBACK(int) RTMemDumpOne(PAVLPVNODECORE pNode, void *pvUser)
381{
382 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)pNode;
383 RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "%p %08lx(+%02lx) %p\n",
384 pBlock->Core.Key,
385 (unsigned long)pBlock->cbUnaligned,
386 (unsigned long)(pBlock->cbAligned - pBlock->cbUnaligned),
387 pBlock->pvCaller);
388 NOREF(pvUser);
389 return 0;
390}
391
392
393/**
394 * Dumps the allocated blocks.
395 * This is something which you should call from gdb.
396 */
397RT_C_DECLS_BEGIN
398void RTMemDump(void);
399RT_C_DECLS_END
400
401void RTMemDump(void)
402{
403 RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "address size(alg) caller\n");
404 RTAvlPVDoWithAll(&g_BlocksTree, true, RTMemDumpOne, NULL);
405}
406
407#ifdef RTR0MEM_EF_FREE_DELAYED
408
409/**
410 * Insert a delayed block.
411 */
412DECLINLINE(void) rtR0MemBlockDelayInsert(PRTR0MEMEFBLOCK pBlock)
413{
414 size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
415 pBlock->Core.pRight = NULL;
416 pBlock->Core.pLeft = NULL;
417 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
418 if (g_pBlocksDelayHead)
419 {
420 g_pBlocksDelayHead->Core.pLeft = (PAVLPVNODECORE)pBlock;
421 pBlock->Core.pRight = (PAVLPVNODECORE)g_pBlocksDelayHead;
422 g_pBlocksDelayHead = pBlock;
423 }
424 else
425 {
426 g_pBlocksDelayTail = pBlock;
427 g_pBlocksDelayHead = pBlock;
428 }
429 g_cbBlocksDelay += cbBlock;
430 rtR0MemBlockUnlock(fSavedIntFlags);
431}
432
433/**
434 * Removes a delayed block.
435 */
436DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockDelayRemove(void)
437{
438 PRTR0MEMEFBLOCK pBlock = NULL;
439 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
440 if (g_cbBlocksDelay > RTR0MEM_EF_FREE_DELAYED)
441 {
442 pBlock = g_pBlocksDelayTail;
443 if (pBlock)
444 {
445 g_pBlocksDelayTail = (PRTR0MEMEFBLOCK)pBlock->Core.pLeft;
446 if (pBlock->Core.pLeft)
447 pBlock->Core.pLeft->pRight = NULL;
448 else
449 g_pBlocksDelayHead = NULL;
450 g_cbBlocksDelay -= RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
451 }
452 }
453 rtR0MemBlockUnlock(fSavedIntFlags);
454 return pBlock;
455}
456
457#endif /* RTR0MEM_EF_FREE_DELAYED */
458
459
460static void rtR0MemFreeBlock(PRTR0MEMEFBLOCK pBlock, const char *pszOp)
461{
462 void *pv = pBlock->Core.Key;
463# ifdef RTR0MEM_EF_IN_FRONT
464 void *pvBlock = (char *)pv - RTR0MEM_EF_SIZE;
465# else
466 void *pvBlock = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
467# endif
468 size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
469
470 int rc = RTR0MemObjProtect(pBlock->hMemObj, 0 /*offSub*/, RT_ALIGN_Z(cbBlock, PAGE_SIZE), RTMEM_PROT_READ | RTMEM_PROT_WRITE);
471 if (RT_FAILURE(rc))
472 rtR0MemComplain(pszOp, "RTR0MemObjProtect([%p], 0, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %Rrc\n",
473 pvBlock, cbBlock, rc);
474
475 rc = RTR0MemObjFree(pBlock->hMemObj, true /*fFreeMappings*/);
476 if (RT_FAILURE(rc))
477 rtR0MemComplain(pszOp, "RTR0MemObjFree([%p LB %#x]) -> %Rrc\n", pvBlock, cbBlock, rc);
478 pBlock->hMemObj = NIL_RTR0MEMOBJ;
479
480 rtR0MemBlockFree(pBlock);
481}
482
483
484/**
485 * Initialization call; we shouldn't fail here.
486 */
487void rtR0MemEfInit(void)
488{
489
490}
491
492/**
493 * @callback_method_impl{AVLPVCALLBACK}
494 */
495static DECLCALLBACK(int) rtR0MemEfDestroyBlock(PAVLPVNODECORE pNode, void *pvUser)
496{
497 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)pNode;
498
499 /* Note! pszFile and pszFunction may be invalid at this point. */
500 rtR0MemComplain("rtR0MemEfDestroyBlock", "Leaking %zu bytes at %p (iLine=%u pvCaller=%p)\n",
501 pBlock->cbAligned, pBlock->Core.Key, pBlock->iLine, pBlock->pvCaller);
502
503 rtR0MemFreeBlock(pBlock, "rtR0MemEfDestroyBlock");
504
505 NOREF(pvUser);
506 return VINF_SUCCESS;
507}
508
509
510/**
511 * Termination call.
512 *
513 * Will check and free memory.
514 */
515void rtR0MemEfTerm(void)
516{
517#ifdef RTR0MEM_EF_FREE_DELAYED
518 /*
519 * Release delayed frees.
520 */
521 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
522 for (;;)
523 {
524 PRTR0MEMEFBLOCK pBlock = g_pBlocksDelayTail;
525 if (pBlock)
526 {
527 g_pBlocksDelayTail = (PRTR0MEMEFBLOCK)pBlock->Core.pLeft;
528 if (pBlock->Core.pLeft)
529 pBlock->Core.pLeft->pRight = NULL;
530 else
531 g_pBlocksDelayHead = NULL;
532 rtR0MemBlockUnlock(fSavedIntFlags);
533
534 rtR0MemFreeBlock(pBlock, "rtR0MemEfTerm");
535
536 rtR0MemBlockLock();
537 }
538 else
539 break;
540 }
541 g_cbBlocksDelay = 0;
542 rtR0MemBlockUnlock(fSavedIntFlags);
543#endif
544
545 /*
546 * Complain about leaks. Then release them.
547 */
548 RTAvlPVDestroy(&g_BlocksTree, rtR0MemEfDestroyBlock, NULL);
549}
550
551
552/**
553 * Internal allocator.
554 */
555static void * rtR0MemAlloc(const char *pszOp, RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
556 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
557{
558 /*
559 * Sanity.
560 */
561 if ( RT_ALIGN_Z(RTR0MEM_EF_SIZE, PAGE_SIZE) != RTR0MEM_EF_SIZE
562 || RTR0MEM_EF_SIZE <= 0)
563 {
564 rtR0MemComplain(pszOp, "Invalid E-fence size! %#x\n", RTR0MEM_EF_SIZE);
565 return NULL;
566 }
567 if (!cbUnaligned)
568 {
569#if 1
570 rtR0MemComplain(pszOp, "Request of ZERO bytes allocation!\n");
571 return NULL;
572#else
573 cbAligned = cbUnaligned = 1;
574#endif
575 }
576
577#ifndef RTR0MEM_EF_IN_FRONT
578 /* Alignment decreases fence accuracy, but this is at least partially
579 * counteracted by filling and checking the alignment padding. When the
580 * fence is in front, no extra alignment is needed. */
581 cbAligned = RT_ALIGN_Z(cbAligned, RTR0MEM_EF_ALIGNMENT);
582#endif
583
584 /*
585 * Allocate the trace block.
586 */
587 PRTR0MEMEFBLOCK pBlock = rtR0MemBlockCreate(enmType, cbUnaligned, cbAligned, pszTag, pvCaller, RT_SRC_POS_ARGS);
588 if (!pBlock)
589 {
590 rtR0MemComplain(pszOp, "Failed to allocate trace block!\n");
591 return NULL;
592 }
593
594 /*
595 * Allocate a block with page alignment space + the size of the E-fence.
596 */
597 void *pvBlock = NULL;
598 RTR0MEMOBJ hMemObj;
599 size_t cbBlock = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
600 int rc = RTR0MemObjAllocPage(&hMemObj, cbBlock, false /*fExecutable*/);
601 if (RT_SUCCESS(rc))
602 pvBlock = RTR0MemObjAddress(hMemObj);
603 if (pvBlock)
604 {
605 /*
606 * Calc the start of the fence and the user block
607 * and then change the page protection of the fence.
608 */
609#ifdef RTR0MEM_EF_IN_FRONT
610 void *pvEFence = pvBlock;
611 void *pv = (char *)pvEFence + RTR0MEM_EF_SIZE;
612# ifdef RTR0MEM_EF_NOMAN_FILLER
613 memset((char *)pv + cbUnaligned, RTR0MEM_EF_NOMAN_FILLER, cbBlock - RTR0MEM_EF_SIZE - cbUnaligned);
614# endif
615#else
616 void *pvEFence = (char *)pvBlock + (cbBlock - RTR0MEM_EF_SIZE);
617 void *pv = (char *)pvEFence - cbAligned;
618# ifdef RTR0MEM_EF_NOMAN_FILLER
619 memset(pvBlock, RTR0MEM_EF_NOMAN_FILLER, cbBlock - RTR0MEM_EF_SIZE - cbAligned);
620 memset((char *)pv + cbUnaligned, RTR0MEM_EF_NOMAN_FILLER, cbAligned - cbUnaligned);
621# endif
622#endif
623
624#ifdef RTR0MEM_EF_FENCE_FILLER
625 memset(pvEFence, RTR0MEM_EF_FENCE_FILLER, RTR0MEM_EF_SIZE);
626#endif
627 rc = RTR0MemObjProtect(hMemObj, (uint8_t *)pvEFence - (uint8_t *)pvBlock, RTR0MEM_EF_SIZE, RTMEM_PROT_NONE);
628 if (!rc)
629 {
630 rtR0MemBlockInsert(pBlock, pv, hMemObj);
631 if (enmType == RTMEMTYPE_RTMEMALLOCZ)
632 memset(pv, 0, cbUnaligned);
633#ifdef RTR0MEM_EF_FILLER
634 else
635 memset(pv, RTR0MEM_EF_FILLER, cbUnaligned);
636#endif
637
638 rtR0MemLog(pszOp, "returns %p (pvBlock=%p cbBlock=%#x pvEFence=%p cbUnaligned=%#x)\n", pv, pvBlock, cbBlock, pvEFence, cbUnaligned);
639 return pv;
640 }
641 rtR0MemComplain(pszOp, "RTR0MemObjProtect failed, pvEFence=%p size %d, rc=%d\n", pvEFence, RTR0MEM_EF_SIZE, rc);
642 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
643 }
644 else
645 {
646 rtR0MemComplain(pszOp, "Failed to allocate %zu (%zu) bytes (rc=%Rrc).\n", cbBlock, cbUnaligned, rc);
647 if (RT_SUCCESS(rc))
648 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
649 }
650
651 rtR0MemBlockFree(pBlock);
652 return NULL;
653}
654
655
656/**
657 * Internal free.
658 */
659static void rtR0MemFree(const char *pszOp, RTMEMTYPE enmType, void *pv, size_t cbUser, void *pvCaller, RT_SRC_POS_DECL)
660{
661 NOREF(enmType); RT_SRC_POS_NOREF();
662
663 /*
664 * Simple case.
665 */
666 if (!pv)
667 return;
668
669 /*
670 * Check watch points.
671 */
672 for (unsigned i = 0; i < RT_ELEMENTS(gapvRTMemFreeWatch); i++)
673 if (gapvRTMemFreeWatch[i] == pv)
674 RTAssertDoPanic();
675
676 /*
677 * Find the block.
678 */
679 PRTR0MEMEFBLOCK pBlock = rtR0MemBlockRemove(pv);
680 if (pBlock)
681 {
682 if (gfRTMemFreeLog)
683 RTLogPrintf("RTMem %s: pv=%p pvCaller=%p cbUnaligned=%#x\n", pszOp, pv, pvCaller, pBlock->cbUnaligned);
684
685#ifdef RTR0MEM_EF_NOMAN_FILLER
686 /*
687 * Check whether the no man's land is untouched.
688 */
689# ifdef RTR0MEM_EF_IN_FRONT
690 void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
691 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbUnaligned,
692 RTR0MEM_EF_NOMAN_FILLER);
693# else
694 /* Alignment must match the allocation alignment in rtR0MemAlloc(). */
695 void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
696 pBlock->cbAligned - pBlock->cbUnaligned,
697 RTR0MEM_EF_NOMAN_FILLER);
698 if (pvWrong)
699 RTAssertDoPanic();
700 pvWrong = ASMMemFirstMismatchingU8((void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK),
701 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbAligned,
702 RTR0MEM_EF_NOMAN_FILLER);
703# endif
704 if (pvWrong)
705 RTAssertDoPanic();
706#endif
707
708 /*
709 * Fill the user part of the block.
710 */
711 AssertMsg(enmType != RTMEMTYPE_RTMEMFREEZ || cbUser == pBlock->cbUnaligned,
712 ("cbUser=%#zx cbUnaligned=%#zx\n", cbUser, pBlock->cbUnaligned));
713 RT_NOREF(cbUser);
714 if (enmType == RTMEMTYPE_RTMEMFREEZ)
715 RT_BZERO(pv, pBlock->cbUnaligned);
716#ifdef RTR0MEM_EF_FREE_FILL
717 else
718 memset(pv, RTR0MEM_EF_FREE_FILL, pBlock->cbUnaligned);
719#endif
720
721#if defined(RTR0MEM_EF_FREE_DELAYED) && RTR0MEM_EF_FREE_DELAYED > 0
722 /*
723 * We're doing delayed freeing.
724 * That means we'll expand the E-fence to cover the entire block.
725 */
726 int rc = RTR0MemObjProtect(pBlock->hMemObj,
727# ifdef RTR0MEM_EF_IN_FRONT
728 RTR0MEM_EF_SIZE,
729# else
730 0 /*offSub*/,
731# endif
732 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE),
733 RTMEM_PROT_NONE);
734 if (RT_SUCCESS(rc))
735 {
736 /*
737 * Insert it into the free list and process pending frees.
738 */
739 rtR0MemBlockDelayInsert(pBlock);
740 while ((pBlock = rtR0MemBlockDelayRemove()) != NULL)
741 rtR0MemFreeBlock(pBlock, pszOp);
742 }
743 else
744 rtR0MemComplain(pszOp, "Failed to expand the efence of pv=%p cb=%zu, rc=%d.\n", pv, pBlock->cbAligned, rc);
745
746#else /* !RTR0MEM_EF_FREE_DELAYED */
747 rtR0MemFreeBlock(pBlock, pszOp);
748#endif /* !RTR0MEM_EF_FREE_DELAYED */
749 }
750 else
751 rtR0MemComplain(pszOp, "pv=%p not found! Incorrect free!\n", pv);
752}
753
754
755/**
756 * Internal realloc.
757 */
758static void *rtR0MemRealloc(const char *pszOp, RTMEMTYPE enmType, void *pvOld, size_t cbNew,
759 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
760{
761 /*
762 * Allocate new and copy.
763 */
764 if (!pvOld)
765 return rtR0MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
766 if (!cbNew)
767 {
768 rtR0MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, 0, pvCaller, RT_SRC_POS_ARGS);
769 return NULL;
770 }
771
772 /*
773 * Get the block, allocate the new, copy the data, free the old one.
774 */
775 PRTR0MEMEFBLOCK pBlock = rtR0MemBlockGet(pvOld);
776 if (pBlock)
777 {
778 void *pvRet = rtR0MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
779 if (pvRet)
780 {
781 memcpy(pvRet, pvOld, RT_MIN(cbNew, pBlock->cbUnaligned));
782 rtR0MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, 0, pvCaller, RT_SRC_POS_ARGS);
783 }
784 return pvRet;
785 }
786 rtR0MemComplain(pszOp, "pvOld=%p was not found!\n", pvOld);
787 return NULL;
788}
789
790
791
792
793RTDECL(void *) RTMemEfTmpAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
794{
795 return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
796}
797
798
799RTDECL(void *) RTMemEfTmpAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
800{
801 return rtR0MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
802}
803
804
805RTDECL(void) RTMemEfTmpFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
806{
807 if (pv)
808 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), RT_SRC_POS_ARGS);
809}
810
811
812RTDECL(void) RTMemEfTmpFreeZ(void *pv, size_t cb, RT_SRC_POS_DECL) RT_NO_THROW_DEF
813{
814 if (pv)
815 rtR0MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
816}
817
818
819RTDECL(void *) RTMemEfAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
820{
821 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
822}
823
824
825RTDECL(void *) RTMemEfAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
826{
827 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
828}
829
830
831RTDECL(void *) RTMemEfAllocVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
832{
833 size_t cbAligned;
834 if (cbUnaligned >= 16)
835 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
836 else
837 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
838 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
839}
840
841
842RTDECL(void *) RTMemEfAllocZVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
843{
844 size_t cbAligned;
845 if (cbUnaligned >= 16)
846 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
847 else
848 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
849 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
850}
851
852
853RTDECL(void *) RTMemEfRealloc(void *pvOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
854{
855 return rtR0MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
856}
857
858RTDECL(void *) RTMemEfReallocZ(void *pvOld, size_t cbOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
859{
860 void *pvDst = rtR0MemRealloc("ReallocZ", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
861 if (pvDst && cbNew > cbOld)
862 memset((uint8_t *)pvDst + cbOld, 0, cbNew - cbOld);
863 return pvDst;
864}
865
866
867RTDECL(void) RTMemEfFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
868{
869 if (pv)
870 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), RT_SRC_POS_ARGS);
871}
872
873
874RTDECL(void) RTMemEfFreeZ(void *pv, size_t cb, RT_SRC_POS_DECL) RT_NO_THROW_DEF
875{
876 if (pv)
877 rtR0MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
878}
879
880
881RTDECL(void *) RTMemEfDup(const void *pvSrc, size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
882{
883 void *pvDst = RTMemEfAlloc(cb, pszTag, RT_SRC_POS_ARGS);
884 if (pvDst)
885 memcpy(pvDst, pvSrc, cb);
886 return pvDst;
887}
888
889
890RTDECL(void *) RTMemEfDupEx(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
891{
892 void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, RT_SRC_POS_ARGS);
893 if (pvDst)
894 {
895 memcpy(pvDst, pvSrc, cbSrc);
896 memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
897 }
898 return pvDst;
899}
900
901
902
903
904/*
905 *
906 * The NP (no position) versions.
907 *
908 */
909
910
911
912RTDECL(void *) RTMemEfTmpAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
913{
914 return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
915}
916
917
918RTDECL(void *) RTMemEfTmpAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
919{
920 return rtR0MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
921}
922
923
924RTDECL(void) RTMemEfTmpFreeNP(void *pv) RT_NO_THROW_DEF
925{
926 if (pv)
927 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), NULL, 0, NULL);
928}
929
930
931RTDECL(void) RTMemEfTmpFreeZNP(void *pv, size_t cb) RT_NO_THROW_DEF
932{
933 if (pv)
934 rtR0MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), NULL, 0, NULL);
935}
936
937
938RTDECL(void *) RTMemEfAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
939{
940 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
941}
942
943
944RTDECL(void *) RTMemEfAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
945{
946 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
947}
948
949
950RTDECL(void *) RTMemEfAllocVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
951{
952 size_t cbAligned;
953 if (cbUnaligned >= 16)
954 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
955 else
956 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
957 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
958}
959
960
961RTDECL(void *) RTMemEfAllocZVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
962{
963 size_t cbAligned;
964 if (cbUnaligned >= 16)
965 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
966 else
967 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
968 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
969}
970
971
972RTDECL(void *) RTMemEfReallocNP(void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
973{
974 return rtR0MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
975}
976
977
978RTDECL(void *) RTMemEfReallocZNP(void *pvOld, size_t cbOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
979{
980 void *pvDst = rtR0MemRealloc("ReallocZ", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
981 if (pvDst && cbNew > cbOld)
982 memset((uint8_t *)pvDst + cbOld, 0, cbNew - cbOld);
983 return pvDst;
984}
985
986
987RTDECL(void) RTMemEfFreeNP(void *pv) RT_NO_THROW_DEF
988{
989 if (pv)
990 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), NULL, 0, NULL);
991}
992
993
994RTDECL(void) RTMemEfFreeZNP(void *pv, size_t cb) RT_NO_THROW_DEF
995{
996 if (pv)
997 rtR0MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), NULL, 0, NULL);
998}
999
1000
1001RTDECL(void *) RTMemEfDupNP(const void *pvSrc, size_t cb, const char *pszTag) RT_NO_THROW_DEF
1002{
1003 void *pvDst = RTMemEfAlloc(cb, pszTag, NULL, 0, NULL);
1004 if (pvDst)
1005 memcpy(pvDst, pvSrc, cb);
1006 return pvDst;
1007}
1008
1009
1010RTDECL(void *) RTMemEfDupExNP(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag) RT_NO_THROW_DEF
1011{
1012 void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, NULL, 0, NULL);
1013 if (pvDst)
1014 {
1015 memcpy(pvDst, pvSrc, cbSrc);
1016 memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
1017 }
1018 return pvDst;
1019}
1020
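/*
 * Editor's note -- a minimal, hypothetical usage sketch, not part of the
 * original file.  The tag string and sizes are made up; only the function
 * names and RT_SRC_POS come from the APIs above.
 *
 *   void *pv = RTMemEfAlloc(64, "example/tag", RT_SRC_POS);
 *   if (pv)
 *   {
 *       memset(pv, 0, 64);        // OK: stays within the requested size
 *       // ((char *)pv)[64] = 1;  // With the fence after the block (the default
 *                                 // above) and RTR0MEM_EF_ALIGNMENT == 1, this
 *                                 // byte lies in the RTMEM_PROT_NONE fence page
 *                                 // and the write faults immediately.
 *       RTMemEfFree(pv, RT_SRC_POS);
 *   }
 *
 * Code normally doesn't call these functions directly: when the electric-fence
 * wrapping is active, iprt/mem.h maps the regular RTMemAlloc/RTMemFree APIs
 * onto them (this file opts out of that mapping for itself via the
 * RTMEM_NO_WRAP_TO_EF_APIS define at the top).
 */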