VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/alloc-ef-r0drv.cpp@98187

Last change on this file since 98187 was 98103, checked in by vboxsync, 23 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 31.2 KB
/* $Id: alloc-ef-r0drv.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
/** @file
 * IPRT - Memory Allocation, electric fence for ring-0 drivers.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define RTMEM_NO_WRAP_TO_EF_APIS
#include "internal/iprt.h"
#include <iprt/mem.h>

#include <iprt/alloc.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#include <iprt/errcore.h>
#include <iprt/log.h>
#include <iprt/memobj.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include <iprt/thread.h>

#include "internal/mem.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#if defined(DOXYGEN_RUNNING)
# define RTR0MEM_EF_IN_FRONT
#endif

/** @def RTR0MEM_EF_SIZE
 * The size of the fence. This must be page aligned.
 */
#define RTR0MEM_EF_SIZE             PAGE_SIZE

/** @def RTR0MEM_EF_ALIGNMENT
 * The allocation alignment, power of two of course.
 *
 * Use this for working around misaligned sizes, usually stemming from
 * allocating a string or something after the main structure. When you
 * encounter this, please fix the allocation to RTMemAllocVar or RTMemAllocZVar.
 */
#if 0
# define RTR0MEM_EF_ALIGNMENT       (ARCH_BITS / 8)
#else
# define RTR0MEM_EF_ALIGNMENT       1
#endif

/** @def RTR0MEM_EF_IN_FRONT
 * Define this to put the fence up in front of the block.
 * The default (when this isn't defined) is to put it up after the block.
 */
//# define RTR0MEM_EF_IN_FRONT

/** @def RTR0MEM_EF_FREE_DELAYED
 * This define will enable free() delay and protection of the freed data
 * while it's being delayed. The value of RTR0MEM_EF_FREE_DELAYED defines
 * the threshold of the delayed blocks.
 * Delayed blocks do not consume any physical memory, only virtual address space.
 */
#define RTR0MEM_EF_FREE_DELAYED     (20 * _1M)

/** @def RTR0MEM_EF_FREE_FILL
 * This define will enable memset(,RTR0MEM_EF_FREE_FILL,)'ing the user memory
 * in the block before freeing/decommitting it. This is useful in GDB since GDB
 * appears to be able to read the content of the page even after it's been
 * decommitted.
 */
#define RTR0MEM_EF_FREE_FILL        'f'

/** @def RTR0MEM_EF_FILLER
 * This define will enable memset(,RTR0MEM_EF_FILLER,)'ing the allocated
 * memory when the API doesn't require it to be zero'd.
 */
#define RTR0MEM_EF_FILLER           0xef

/** @def RTR0MEM_EF_NOMAN_FILLER
 * This define will enable memset(,RTR0MEM_EF_NOMAN_FILLER,)'ing the
 * unprotected but not allocated area of memory, the so called no man's land.
 */
#define RTR0MEM_EF_NOMAN_FILLER     0xaa

/** @def RTR0MEM_EF_FENCE_FILLER
 * This define will enable memset(,RTR0MEM_EF_FENCE_FILLER,)'ing the
 * fence itself, as debuggers can usually read them.
 */
#define RTR0MEM_EF_FENCE_FILLER     0xcc


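/*
 * Illustrative layout example (assuming a 4 KiB PAGE_SIZE, the default
 * fence-after-the-block placement and RTR0MEM_EF_ALIGNMENT = 1): a 100 byte
 * request is carved out by rtR0MemAlloc() below roughly as
 *
 *      cbBlock  = RT_ALIGN_Z(100, 4096) + 4096 = 8192 bytes
 *      pvEFence = pvBlock + 4096                (one page set to RTMEM_PROT_NONE)
 *      pv       = pvEFence - 100 = pvBlock + 3996   (returned to the caller)
 *
 * so the first write past pv + 99 hits the protected fence page and traps,
 * while the 3996 bytes below pv are no man's land filled with
 * RTR0MEM_EF_NOMAN_FILLER and checked again when the block is freed.
 */
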
/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#ifdef RT_OS_WINDOWS
# include <iprt/win/windows.h>
#elif !defined(RT_OS_FREEBSD)
# include <sys/mman.h>
#endif
#include <iprt/avl.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * Allocation types.
 */
typedef enum RTMEMTYPE
{
    RTMEMTYPE_RTMEMALLOC,
    RTMEMTYPE_RTMEMALLOCZ,
    RTMEMTYPE_RTMEMREALLOC,
    RTMEMTYPE_RTMEMFREE,
    RTMEMTYPE_RTMEMFREEZ,

    RTMEMTYPE_NEW,
    RTMEMTYPE_NEW_ARRAY,
    RTMEMTYPE_DELETE,
    RTMEMTYPE_DELETE_ARRAY
} RTMEMTYPE;

/**
 * Node tracking a memory allocation.
 */
typedef struct RTR0MEMEFBLOCK
{
    /** AVL node core, the key is the user block pointer. */
    AVLPVNODECORE   Core;
    /** Allocation type. */
    RTMEMTYPE       enmType;
    /** The memory object. */
    RTR0MEMOBJ      hMemObj;
    /** The unaligned size of the block. */
    size_t          cbUnaligned;
    /** The aligned size of the block. */
    size_t          cbAligned;
    /** The allocation tag (read-only string). */
    const char     *pszTag;
    /** The return address of the allocator function. */
    void           *pvCaller;
    /** Line number of the alloc call. */
    unsigned        iLine;
    /** File from which the allocation was made. */
    const char     *pszFile;
    /** Function from which the allocation was made. */
    const char     *pszFunction;
} RTR0MEMEFBLOCK, *PRTR0MEMEFBLOCK;



/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Spinlock protecting all the block globals. */
static volatile uint32_t        g_BlocksLock;
/** Tree tracking the allocations. */
static AVLPVTREE                g_BlocksTree;

#ifdef RTR0MEM_EF_FREE_DELAYED
/** Head of the delayed blocks. */
static volatile PRTR0MEMEFBLOCK g_pBlocksDelayHead;
/** Tail of the delayed blocks. */
static volatile PRTR0MEMEFBLOCK g_pBlocksDelayTail;
/** Number of bytes in the delay list (includes fences). */
static volatile size_t          g_cbBlocksDelay;
#endif /* RTR0MEM_EF_FREE_DELAYED */

/** Array of pointers the free functions watch for. */
void *gapvRTMemFreeWatch[4] = {NULL, NULL, NULL, NULL};
/** Enable logging of all freed memory. */
bool  gfRTMemFreeLog = false;


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/


/**
 * @callback_method_impl{FNRTSTROUTPUT}
 */
static DECLCALLBACK(size_t) rtR0MemEfWrite(void *pvArg, const char *pachChars, size_t cbChars)
{
    RT_NOREF1(pvArg);
    if (cbChars)
    {
        RTLogWriteDebugger(pachChars, cbChars);
        RTLogWriteStdOut(pachChars, cbChars);
        RTLogWriteUser(pachChars, cbChars);
    }
    return cbChars;
}


/**
 * Complains about something.
 */
static void rtR0MemComplain(const char *pszOp, const char *pszFormat, ...)
{
    va_list args;
    RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "RTMem error: %s: ", pszOp);
    va_start(args, pszFormat);
    RTStrFormatV(rtR0MemEfWrite, NULL, NULL, NULL, pszFormat, args);
    va_end(args);
    RTAssertDoPanic();
}

/**
 * Log an event.
 */
DECLINLINE(void) rtR0MemLog(const char *pszOp, const char *pszFormat, ...)
{
#if 0
    va_list args;
    RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "RTMem info: %s: ", pszOp);
    va_start(args, pszFormat);
    RTStrFormatV(rtR0MemEfWrite, NULL, NULL, NULL, pszFormat, args);
    va_end(args);
#else
    NOREF(pszOp); NOREF(pszFormat);
#endif
}



/**
 * Acquires the lock.
 */
DECLINLINE(RTCCUINTREG) rtR0MemBlockLock(void)
{
    RTCCUINTREG uRet;
    unsigned    c = 0;
    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
    {
        for (;;)
        {
            uRet = ASMIntDisableFlags();
            if (ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
                break;
            ASMSetFlags(uRet);
            RTThreadSleepNoLog(((++c) >> 2) & 31);
        }
    }
    else
    {
        for (;;)
        {
            uRet = ASMIntDisableFlags();
            if (ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
                break;
            ASMSetFlags(uRet);
            ASMNopPause();
            if (++c & 3)
                ASMNopPause();
        }
    }
    return uRet;
}


/**
 * Releases the lock.
 */
DECLINLINE(void) rtR0MemBlockUnlock(RTCCUINTREG fSavedIntFlags)
{
    Assert(g_BlocksLock == 1);
    ASMAtomicXchgU32(&g_BlocksLock, 0);
    ASMSetFlags(fSavedIntFlags);
}


/**
 * Creates a block.
 */
DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockCreate(RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
                                               const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
{
    PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTMemAlloc(sizeof(*pBlock));
    if (pBlock)
    {
        pBlock->enmType     = enmType;
        pBlock->cbUnaligned = cbUnaligned;
        pBlock->cbAligned   = cbAligned;
        pBlock->pszTag      = pszTag;
        pBlock->pvCaller    = pvCaller;
        pBlock->iLine       = iLine;
        pBlock->pszFile     = pszFile;
        pBlock->pszFunction = pszFunction;
    }
    return pBlock;
}


/**
 * Frees a block.
 */
DECLINLINE(void) rtR0MemBlockFree(PRTR0MEMEFBLOCK pBlock)
{
    RTMemFree(pBlock);
}


/**
 * Inserts a block into the tree.
 */
DECLINLINE(void) rtR0MemBlockInsert(PRTR0MEMEFBLOCK pBlock, void *pv, RTR0MEMOBJ hMemObj)
{
    pBlock->Core.Key = pv;
    pBlock->hMemObj  = hMemObj;
    RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
    bool fRc = RTAvlPVInsert(&g_BlocksTree, &pBlock->Core);
    rtR0MemBlockUnlock(fSavedIntFlags);
    AssertRelease(fRc);
}


/**
 * Removes a block from the tree and returns it to the caller.
 */
DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockRemove(void *pv)
{
    RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
    PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTAvlPVRemove(&g_BlocksTree, pv);
    rtR0MemBlockUnlock(fSavedIntFlags);
    return pBlock;
}


/**
 * Gets a block.
 */
DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockGet(void *pv)
{
    RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
    PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTAvlPVGet(&g_BlocksTree, pv);
    rtR0MemBlockUnlock(fSavedIntFlags);
    return pBlock;
}


/**
 * Dumps one allocation.
 */
static DECLCALLBACK(int) RTMemDumpOne(PAVLPVNODECORE pNode, void *pvUser)
{
    PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)pNode;
    RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "%p %08lx(+%02lx) %p\n",
                pBlock->Core.Key,
                (unsigned long)pBlock->cbUnaligned,
                (unsigned long)(pBlock->cbAligned - pBlock->cbUnaligned),
                pBlock->pvCaller);
    NOREF(pvUser);
    return 0;
}


/**
 * Dumps the allocated blocks.
 * This is something which you should call from gdb.
 */
RT_C_DECLS_BEGIN
void RTMemDump(void);
RT_C_DECLS_END

void RTMemDump(void)
{
    RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "address size(alg) caller\n");
    RTAvlPVDoWithAll(&g_BlocksTree, true, RTMemDumpOne, NULL);
}

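/*
 * Example of invoking the dumper from a debugger (assuming gdb is attached to
 * whatever is running this code; other debuggers have equivalent commands):
 *
 *      (gdb) call RTMemDump()
 */
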
#ifdef RTR0MEM_EF_FREE_DELAYED

/**
 * Insert a delayed block.
 */
DECLINLINE(void) rtR0MemBlockDelayInsert(PRTR0MEMEFBLOCK pBlock)
{
    size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
    pBlock->Core.pRight = NULL;
    pBlock->Core.pLeft  = NULL;
    RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
    if (g_pBlocksDelayHead)
    {
        g_pBlocksDelayHead->Core.pLeft = (PAVLPVNODECORE)pBlock;
        pBlock->Core.pRight = (PAVLPVNODECORE)g_pBlocksDelayHead;
        g_pBlocksDelayHead = pBlock;
    }
    else
    {
        g_pBlocksDelayTail = pBlock;
        g_pBlocksDelayHead = pBlock;
    }
    g_cbBlocksDelay += cbBlock;
    rtR0MemBlockUnlock(fSavedIntFlags);
}

/**
 * Removes a delayed block.
 */
DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockDelayRemove(void)
{
    PRTR0MEMEFBLOCK pBlock = NULL;
    RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
    if (g_cbBlocksDelay > RTR0MEM_EF_FREE_DELAYED)
    {
        pBlock = g_pBlocksDelayTail;
        if (pBlock)
        {
            g_pBlocksDelayTail = (PRTR0MEMEFBLOCK)pBlock->Core.pLeft;
            if (pBlock->Core.pLeft)
                pBlock->Core.pLeft->pRight = NULL;
            else
                g_pBlocksDelayHead = NULL;
            g_cbBlocksDelay -= RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
        }
    }
    rtR0MemBlockUnlock(fSavedIntFlags);
    return pBlock;
}

#endif /* RTR0MEM_EF_FREE_DELAYED */


static void rtR0MemFreeBlock(PRTR0MEMEFBLOCK pBlock, const char *pszOp)
{
    void *pv = pBlock->Core.Key;
# ifdef RTR0MEM_EF_IN_FRONT
    void *pvBlock = (char *)pv - RTR0MEM_EF_SIZE;
# else
    void *pvBlock = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
# endif
    size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;

    int rc = RTR0MemObjProtect(pBlock->hMemObj, 0 /*offSub*/, RT_ALIGN_Z(cbBlock, PAGE_SIZE), RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    if (RT_FAILURE(rc))
        rtR0MemComplain(pszOp, "RTR0MemObjProtect([%p], 0, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %Rrc\n",
                        pvBlock, cbBlock, rc);

    rc = RTR0MemObjFree(pBlock->hMemObj, true /*fFreeMappings*/);
    if (RT_FAILURE(rc))
        rtR0MemComplain(pszOp, "RTR0MemObjFree([%p LB %#x]) -> %Rrc\n", pvBlock, cbBlock, rc);
    pBlock->hMemObj = NIL_RTR0MEMOBJ;

    rtR0MemBlockFree(pBlock);
}


/**
 * Initialize call, we shouldn't fail here.
 */
void rtR0MemEfInit(void)
{

}

/**
 * @callback_method_impl{AVLPVCALLBACK}
 */
static DECLCALLBACK(int) rtR0MemEfDestroyBlock(PAVLPVNODECORE pNode, void *pvUser)
{
    PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)pNode;

    /* Note! pszFile and pszFunction may be invalid at this point. */
    rtR0MemComplain("rtR0MemEfDestroyBlock", "Leaking %zu bytes at %p (iLine=%u pvCaller=%p)\n",
                    pBlock->cbAligned, pBlock->Core.Key, pBlock->iLine, pBlock->pvCaller);

    rtR0MemFreeBlock(pBlock, "rtR0MemEfDestroyBlock");

    NOREF(pvUser);
    return VINF_SUCCESS;
}


/**
 * Termination call.
 *
 * Will check and free memory.
 */
void rtR0MemEfTerm(void)
{
#ifdef RTR0MEM_EF_FREE_DELAYED
    /*
     * Release delayed frees.
     */
    RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
    for (;;)
    {
        PRTR0MEMEFBLOCK pBlock = g_pBlocksDelayTail;
        if (pBlock)
        {
            g_pBlocksDelayTail = (PRTR0MEMEFBLOCK)pBlock->Core.pLeft;
            if (pBlock->Core.pLeft)
                pBlock->Core.pLeft->pRight = NULL;
            else
                g_pBlocksDelayHead = NULL;
            rtR0MemBlockUnlock(fSavedIntFlags);

            rtR0MemFreeBlock(pBlock, "rtR0MemEfTerm");

            rtR0MemBlockLock();
        }
        else
            break;
    }
    g_cbBlocksDelay = 0;
    rtR0MemBlockUnlock(fSavedIntFlags);
#endif

    /*
     * Complain about leaks. Then release them.
     */
    RTAvlPVDestroy(&g_BlocksTree, rtR0MemEfDestroyBlock, NULL);
}


/**
 * Internal allocator.
 */
static void * rtR0MemAlloc(const char *pszOp, RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
                           const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
{
    /*
     * Sanity.
     */
    if (    RT_ALIGN_Z(RTR0MEM_EF_SIZE, PAGE_SIZE) != RTR0MEM_EF_SIZE
        ||  RTR0MEM_EF_SIZE <= 0)
    {
        rtR0MemComplain(pszOp, "Invalid E-fence size! %#x\n", RTR0MEM_EF_SIZE);
        return NULL;
    }
    if (!cbUnaligned)
    {
#if 1
        rtR0MemComplain(pszOp, "Request of ZERO bytes allocation!\n");
        return NULL;
#else
        cbAligned = cbUnaligned = 1;
#endif
    }

#ifndef RTR0MEM_EF_IN_FRONT
    /* Alignment decreases fence accuracy, but this is at least partially
     * counteracted by filling and checking the alignment padding. When the
     * fence is in front, no extra alignment is needed. */
    cbAligned = RT_ALIGN_Z(cbAligned, RTR0MEM_EF_ALIGNMENT);
#endif

    /*
     * Allocate the trace block.
     */
    PRTR0MEMEFBLOCK pBlock = rtR0MemBlockCreate(enmType, cbUnaligned, cbAligned, pszTag, pvCaller, RT_SRC_POS_ARGS);
    if (!pBlock)
    {
        rtR0MemComplain(pszOp, "Failed to allocate trace block!\n");
        return NULL;
    }

    /*
     * Allocate a block with page alignment space + the size of the E-fence.
     */
    void       *pvBlock = NULL;
    RTR0MEMOBJ  hMemObj;
    size_t      cbBlock = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
    int rc = RTR0MemObjAllocPage(&hMemObj, cbBlock, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
        pvBlock = RTR0MemObjAddress(hMemObj);
    if (pvBlock)
    {
        /*
         * Calc the start of the fence and the user block
         * and then change the page protection of the fence.
         */
#ifdef RTR0MEM_EF_IN_FRONT
        void *pvEFence = pvBlock;
        void *pv       = (char *)pvEFence + RTR0MEM_EF_SIZE;
# ifdef RTR0MEM_EF_NOMAN_FILLER
        memset((char *)pv + cbUnaligned, RTR0MEM_EF_NOMAN_FILLER, cbBlock - RTR0MEM_EF_SIZE - cbUnaligned);
# endif
#else
        void *pvEFence = (char *)pvBlock + (cbBlock - RTR0MEM_EF_SIZE);
        void *pv       = (char *)pvEFence - cbAligned;
# ifdef RTR0MEM_EF_NOMAN_FILLER
        memset(pvBlock, RTR0MEM_EF_NOMAN_FILLER, cbBlock - RTR0MEM_EF_SIZE - cbAligned);
        memset((char *)pv + cbUnaligned, RTR0MEM_EF_NOMAN_FILLER, cbAligned - cbUnaligned);
# endif
#endif

#ifdef RTR0MEM_EF_FENCE_FILLER
        memset(pvEFence, RTR0MEM_EF_FENCE_FILLER, RTR0MEM_EF_SIZE);
#endif
        rc = RTR0MemObjProtect(hMemObj, (uint8_t *)pvEFence - (uint8_t *)pvBlock, RTR0MEM_EF_SIZE, RTMEM_PROT_NONE);
        if (!rc)
        {
            rtR0MemBlockInsert(pBlock, pv, hMemObj);
            if (enmType == RTMEMTYPE_RTMEMALLOCZ)
                memset(pv, 0, cbUnaligned);
#ifdef RTR0MEM_EF_FILLER
            else
                memset(pv, RTR0MEM_EF_FILLER, cbUnaligned);
#endif

            rtR0MemLog(pszOp, "returns %p (pvBlock=%p cbBlock=%#x pvEFence=%p cbUnaligned=%#x)\n", pv, pvBlock, cbBlock, pvEFence, cbUnaligned);
            return pv;
        }
        rtR0MemComplain(pszOp, "RTR0MemObjProtect failed, pvEFence=%p size %d, rc=%d\n", pvEFence, RTR0MEM_EF_SIZE, rc);
        RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
    }
    else
    {
        rtR0MemComplain(pszOp, "Failed to allocate %zu (%zu) bytes (rc=%Rrc).\n", cbBlock, cbUnaligned, rc);
        if (RT_SUCCESS(rc))
            RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
    }

    rtR0MemBlockFree(pBlock);
    return NULL;
}


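/*
 * Minimal sketch of what the fence catches (hypothetical caller code, assuming
 * the default configuration above: fence after the block, alignment 1):
 *
 *      uint8_t *pb = (uint8_t *)RTMemEfAlloc(16, "example-tag", RT_SRC_POS);
 *      if (pb)
 *      {
 *          pb[15] = 0xff;      // fine, last byte of the allocation
 *          pb[16] = 0xff;      // first byte of the RTMEM_PROT_NONE fence page -> trap
 *          RTMemEfFree(pb, RT_SRC_POS);
 *      }
 */
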
/**
 * Internal free.
 */
static void rtR0MemFree(const char *pszOp, RTMEMTYPE enmType, void *pv, size_t cbUser, void *pvCaller, RT_SRC_POS_DECL)
{
    NOREF(enmType); RT_SRC_POS_NOREF();

    /*
     * Simple case.
     */
    if (!pv)
        return;

    /*
     * Check watch points.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(gapvRTMemFreeWatch); i++)
        if (gapvRTMemFreeWatch[i] == pv)
            RTAssertDoPanic();

    /*
     * Find the block.
     */
    PRTR0MEMEFBLOCK pBlock = rtR0MemBlockRemove(pv);
    if (pBlock)
    {
        if (gfRTMemFreeLog)
            RTLogPrintf("RTMem %s: pv=%p pvCaller=%p cbUnaligned=%#x\n", pszOp, pv, pvCaller, pBlock->cbUnaligned);

#ifdef RTR0MEM_EF_NOMAN_FILLER
        /*
         * Check whether the no man's land is untouched.
         */
# ifdef RTR0MEM_EF_IN_FRONT
        void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
                                                 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbUnaligned,
                                                 RTR0MEM_EF_NOMAN_FILLER);
# else
        /* Alignment must match allocation alignment in rtR0MemAlloc(). */
        void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
                                                 pBlock->cbAligned - pBlock->cbUnaligned,
                                                 RTR0MEM_EF_NOMAN_FILLER);
        if (pvWrong)
            RTAssertDoPanic();
        pvWrong = ASMMemFirstMismatchingU8((void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK),
                                           RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbAligned,
                                           RTR0MEM_EF_NOMAN_FILLER);
# endif
        if (pvWrong)
            RTAssertDoPanic();
#endif

        /*
         * Fill the user part of the block.
         */
        AssertMsg(enmType != RTMEMTYPE_RTMEMFREEZ || cbUser == pBlock->cbUnaligned,
                  ("cbUser=%#zx cbUnaligned=%#zx\n", cbUser, pBlock->cbUnaligned));
        RT_NOREF(cbUser);
        if (enmType == RTMEMTYPE_RTMEMFREEZ)
            RT_BZERO(pv, pBlock->cbUnaligned);
#ifdef RTR0MEM_EF_FREE_FILL
        else
            memset(pv, RTR0MEM_EF_FREE_FILL, pBlock->cbUnaligned);
#endif

#if defined(RTR0MEM_EF_FREE_DELAYED) && RTR0MEM_EF_FREE_DELAYED > 0
        /*
         * We're doing delayed freeing.
         * That means we'll expand the E-fence to cover the entire block.
         */
        int rc = RTR0MemObjProtect(pBlock->hMemObj,
# ifdef RTR0MEM_EF_IN_FRONT
                                   RTR0MEM_EF_SIZE,
# else
                                   0 /*offSub*/,
# endif
                                   RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE),
                                   RTMEM_PROT_NONE);
        if (RT_SUCCESS(rc))
        {
            /*
             * Insert it into the free list and process pending frees.
             */
            rtR0MemBlockDelayInsert(pBlock);
            while ((pBlock = rtR0MemBlockDelayRemove()) != NULL)
                rtR0MemFreeBlock(pBlock, pszOp);
        }
        else
            rtR0MemComplain(pszOp, "Failed to expand the efence of pv=%p cb=%#zx, rc=%d.\n", pv, pBlock->cbAligned, rc);

#else  /* !RTR0MEM_EF_FREE_DELAYED */
        rtR0MemFreeBlock(pBlock, pszOp);
#endif /* !RTR0MEM_EF_FREE_DELAYED */
    }
    else
        rtR0MemComplain(pszOp, "pv=%p not found! Incorrect free!\n", pv);
}


/**
 * Internal realloc.
 */
static void *rtR0MemRealloc(const char *pszOp, RTMEMTYPE enmType, void *pvOld, size_t cbNew,
                            const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
{
    /*
     * Allocate new and copy.
     */
    if (!pvOld)
        return rtR0MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
    if (!cbNew)
    {
        rtR0MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, 0, pvCaller, RT_SRC_POS_ARGS);
        return NULL;
    }

    /*
     * Get the block, allocate the new, copy the data, free the old one.
     */
    PRTR0MEMEFBLOCK pBlock = rtR0MemBlockGet(pvOld);
    if (pBlock)
    {
        void *pvRet = rtR0MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
        if (pvRet)
        {
            memcpy(pvRet, pvOld, RT_MIN(cbNew, pBlock->cbUnaligned));
            rtR0MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, 0, pvCaller, RT_SRC_POS_ARGS);
        }
        return pvRet;
    }
    rtR0MemComplain(pszOp, "pvOld=%p was not found!\n", pvOld);
    return NULL;
}




RTDECL(void *) RTMemEfTmpAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


RTDECL(void *) RTMemEfTmpAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


RTDECL(void) RTMemEfTmpFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    if (pv)
        rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


RTDECL(void) RTMemEfTmpFreeZ(void *pv, size_t cb, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    if (pv)
        rtR0MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


RTDECL(void *) RTMemEfAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


RTDECL(void *) RTMemEfAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


RTDECL(void *) RTMemEfAllocVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    size_t cbAligned;
    if (cbUnaligned >= 16)
        cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
    else
        cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
    return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


RTDECL(void *) RTMemEfAllocZVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    size_t cbAligned;
    if (cbUnaligned >= 16)
        cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
    else
        cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
    return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


RTDECL(void *) RTMemEfRealloc(void *pvOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    return rtR0MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
}

RTDECL(void *) RTMemEfReallocZ(void *pvOld, size_t cbOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    void *pvDst = rtR0MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
    if (pvDst && cbNew > cbOld)
        memset((uint8_t *)pvDst + cbOld, 0, cbNew - cbOld);
    return pvDst;
}


RTDECL(void) RTMemEfFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    if (pv)
        rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


RTDECL(void) RTMemEfFreeZ(void *pv, size_t cb, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    if (pv)
        rtR0MemFree("Free", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
}


RTDECL(void *) RTMemEfDup(const void *pvSrc, size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    void *pvDst = RTMemEfAlloc(cb, pszTag, RT_SRC_POS_ARGS);
    if (pvDst)
        memcpy(pvDst, pvSrc, cb);
    return pvDst;
}


RTDECL(void *) RTMemEfDupEx(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
{
    void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, RT_SRC_POS_ARGS);
    if (pvDst)
    {
        memcpy(pvDst, pvSrc, cbSrc);
        memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
    }
    return pvDst;
}



/*
 *
 * The NP (no position) versions.
 *
 */


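/*
 * These behave like the RT_SRC_POS variants above except that no source
 * position is recorded (NULL file, line 0, NULL function), only the caller's
 * return address. Usage sketch (hypothetical caller code; MYSTRUCT and the
 * tag string are made-up examples):
 *
 *      void *pv1 = RTMemEfAllocZNP(sizeof(MYSTRUCT), "example-tag");           // no position
 *      void *pv2 = RTMemEfAllocZ(sizeof(MYSTRUCT), "example-tag", RT_SRC_POS); // with position
 *      RTMemEfFreeNP(pv1);
 *      RTMemEfFree(pv2, RT_SRC_POS);
 */
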
RTDECL(void *) RTMemEfTmpAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
}


RTDECL(void *) RTMemEfTmpAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtR0MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
}


RTDECL(void) RTMemEfTmpFreeNP(void *pv) RT_NO_THROW_DEF
{
    if (pv)
        rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), NULL, 0, NULL);
}


RTDECL(void) RTMemEfTmpFreeZNP(void *pv, size_t cb) RT_NO_THROW_DEF
{
    if (pv)
        rtR0MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), NULL, 0, NULL);
}


RTDECL(void *) RTMemEfAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
}


RTDECL(void *) RTMemEfAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
}


RTDECL(void *) RTMemEfAllocVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
{
    size_t cbAligned;
    if (cbUnaligned >= 16)
        cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
    else
        cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
    return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
}


RTDECL(void *) RTMemEfAllocZVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
{
    size_t cbAligned;
    if (cbUnaligned >= 16)
        cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
    else
        cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
    return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
}


RTDECL(void *) RTMemEfReallocNP(void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
{
    return rtR0MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
}


RTDECL(void *) RTMemEfReallocZNP(void *pvOld, size_t cbOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
{
    void *pvDst = rtR0MemRealloc("ReallocZ", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
    if (pvDst && cbNew > cbOld)
        memset((uint8_t *)pvDst + cbOld, 0, cbNew - cbOld);
    return pvDst;
}


RTDECL(void) RTMemEfFreeNP(void *pv) RT_NO_THROW_DEF
{
    if (pv)
        rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), NULL, 0, NULL);
}


RTDECL(void) RTMemEfFreeZNP(void *pv, size_t cb) RT_NO_THROW_DEF
{
    if (pv)
        rtR0MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), NULL, 0, NULL);
}


RTDECL(void *) RTMemEfDupNP(const void *pvSrc, size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    void *pvDst = RTMemEfAlloc(cb, pszTag, NULL, 0, NULL);
    if (pvDst)
        memcpy(pvDst, pvSrc, cb);
    return pvDst;
}


RTDECL(void *) RTMemEfDupExNP(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag) RT_NO_THROW_DEF
{
    void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, NULL, 0, NULL);
    if (pvDst)
    {
        memcpy(pvDst, pvSrc, cbSrc);
        memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
    }
    return pvDst;
}
