VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/alloc-ef-r0drv.cpp@ 60067

Last change on this file since 60067 was 59747, checked in by vboxsync, 9 years ago

iprt/asm.h: Cleaned up the ASMMemIsAll8/U32 mess and implemented the former in assembly. (Found inverted usage due to bad naming in copyUtf8Block, but it is fortunately an unused method.) Replaces the complicated ASMBitFirstSet based scanning in RTSgBufIsZero with a simple call to the new ASMMemIsZero function.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 28.6 KB
Line 
1/* $Id: alloc-ef-r0drv.cpp 59747 2016-02-19 23:18:18Z vboxsync $ */
2/** @file
3 * IPRT - Memory Allocation, electric fence for ring-0 drivers.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define RTMEM_NO_WRAP_TO_EF_APIS
32#include "internal/iprt.h"
33#include <iprt/mem.h>
34
35#include <iprt/alloc.h>
36#include <iprt/asm.h>
37#include <iprt/asm-amd64-x86.h>
38#include <iprt/assert.h>
39#include <iprt/err.h>
40#include <iprt/log.h>
41#include <iprt/memobj.h>
42#include <iprt/param.h>
43#include <iprt/string.h>
44#include <iprt/thread.h>
45
46#include "internal/mem.h"
47
48
49/*******************************************************************************
50* Defined Constants And Macros *
51*******************************************************************************/
52#if defined(DOXYGEN_RUNNING)
53# define RTR0MEM_EF_IN_FRONT
54#endif
55
56/** @def RTR0MEM_EF_SIZE
57 * The size of the fence. This must be page aligned.
58 */
59#define RTR0MEM_EF_SIZE PAGE_SIZE
60
61/** @def RTR0MEM_EF_ALIGNMENT
62 * The allocation alignment, power of two of course.
63 *
64 * Use this for working around misaligned sizes, usually stemming from
65 * allocating a string or something after the main structure. When you
66 * encounter this, please fix the allocation to RTMemAllocVar or RTMemAllocZVar.
67 */
68#if 0
69# define RTR0MEM_EF_ALIGNMENT (ARCH_BITS / 8)
70#else
71# define RTR0MEM_EF_ALIGNMENT 1
72#endif
73
74/** @def RTR0MEM_EF_IN_FRONT
75 * Define this to put the fence up in front of the block.
 * The default (when this isn't defined) is to put it up after the block.
77 */
78//# define RTR0MEM_EF_IN_FRONT
79
80/** @def RTR0MEM_EF_FREE_DELAYED
81 * This define will enable free() delay and protection of the freed data
82 * while it's being delayed. The value of RTR0MEM_EF_FREE_DELAYED defines
83 * the threshold of the delayed blocks.
84 * Delayed blocks does not consume any physical memory, only virtual address space.
85 */
86#define RTR0MEM_EF_FREE_DELAYED (20 * _1M)
87
88/** @def RTR0MEM_EF_FREE_FILL
89 * This define will enable memset(,RTR0MEM_EF_FREE_FILL,)'ing the user memory
90 * in the block before freeing/decommitting it. This is useful in GDB since GDB
91 * appears to be able to read the content of the page even after it's been
92 * decommitted.
93 */
94#define RTR0MEM_EF_FREE_FILL 'f'
95
96/** @def RTR0MEM_EF_FILLER
97 * This define will enable memset(,RTR0MEM_EF_FILLER,)'ing the allocated
98 * memory when the API doesn't require it to be zero'd.
99 */
100#define RTR0MEM_EF_FILLER 0xef
101
102/** @def RTR0MEM_EF_NOMAN_FILLER
103 * This define will enable memset(,RTR0MEM_EF_NOMAN_FILLER,)'ing the
104 * unprotected but not allocated area of memory, the so called no man's land.
105 */
106#define RTR0MEM_EF_NOMAN_FILLER 0xaa
107
108/** @def RTR0MEM_EF_FENCE_FILLER
109 * This define will enable memset(,RTR0MEM_EF_FENCE_FILLER,)'ing the
110 * fence itself, as debuggers can usually read them.
111 */
112#define RTR0MEM_EF_FENCE_FILLER 0xcc
113
114
115/*******************************************************************************
116* Header Files *
117*******************************************************************************/
118#ifdef RT_OS_WINDOWS
119# include <Windows.h>
120#else
121# include <sys/mman.h>
122#endif
123#include <iprt/avl.h>
124#include <iprt/thread.h>
125
126
127/*******************************************************************************
128* Structures and Typedefs *
129*******************************************************************************/
/**
 * Allocation types.
 *
 * Records which API created a block; rtR0MemAlloc() zeroes the user data
 * only for RTMEMTYPE_RTMEMALLOCZ.
 */
typedef enum RTMEMTYPE
{
    /** RTMemAlloc(). */
    RTMEMTYPE_RTMEMALLOC,
    /** RTMemAllocZ() - zero initialized. */
    RTMEMTYPE_RTMEMALLOCZ,
    /** RTMemRealloc(). */
    RTMEMTYPE_RTMEMREALLOC,
    /** RTMemFree(). */
    RTMEMTYPE_RTMEMFREE,

    /** Operator new (presumably; only the alloc/free/realloc types are used
     *  in this file — confirm against the C++ wrappers). */
    RTMEMTYPE_NEW,
    /** Operator new[]. */
    RTMEMTYPE_NEW_ARRAY,
    /** Operator delete. */
    RTMEMTYPE_DELETE,
    /** Operator delete[]. */
    RTMEMTYPE_DELETE_ARRAY
} RTMEMTYPE;
145
/**
 * Node tracking a memory allocation.
 *
 * One of these exists per live (or delay-freed) allocation.  It lives in a
 * separate RTMemAlloc'ed chunk, not inside the fenced block itself.
 */
typedef struct RTR0MEMEFBLOCK
{
    /** Avl node code, key is the user block pointer. */
    AVLPVNODECORE Core;
    /** Allocation type. */
    RTMEMTYPE enmType;
    /** The memory object backing the user block and its fence page. */
    RTR0MEMOBJ hMemObj;
    /** The unaligned (requested) size of the block. */
    size_t cbUnaligned;
    /** The aligned size of the block (>= cbUnaligned). */
    size_t cbAligned;
    /** The allocation tag (read-only string). */
    const char *pszTag;
    /** The return address of the allocator function. */
    void *pvCaller;
    /** Line number of the alloc call. */
    unsigned iLine;
    /** Source file the allocation was made from. */
    const char *pszFile;
    /** Function the allocation was made from. */
    const char *pszFunction;
} RTR0MEMEFBLOCK, *PRTR0MEMEFBLOCK;
172
173
174
175/*********************************************************************************************************************************
176* Global Variables *
177*********************************************************************************************************************************/
/** Spinlock (hand-rolled, interrupt-disabling; see rtR0MemBlockLock)
 *  protecting all the block globals below. */
static volatile uint32_t g_BlocksLock;
/** Tree tracking the allocations, keyed by user block pointer. */
static AVLPVTREE g_BlocksTree;

#ifdef RTR0MEM_EF_FREE_DELAYED
/** Head of the delayed blocks list (newest).  Note: the original comment
 *  said "Tail" here, which was a copy/paste slip. */
static volatile PRTR0MEMEFBLOCK g_pBlocksDelayHead;
/** Tail of the delayed blocks list (oldest, freed first). */
static volatile PRTR0MEMEFBLOCK g_pBlocksDelayTail;
/** Number of bytes in the delay list (includes fences). */
static volatile size_t g_cbBlocksDelay;
#endif /* RTR0MEM_EF_FREE_DELAYED */

/** Array of pointers rtR0MemFree watches for; panics when one is freed. */
void *gapvRTMemFreeWatch[4] = {NULL, NULL, NULL, NULL};
/** Enable logging of all freed memory. */
bool gfRTMemFreeLog = false;
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201
202
203/**
204 * @callback_method_impl{FNRTSTROUTPUT}
205 */
206static DECLCALLBACK(size_t) rtR0MemEfWrite(void *pvArg, const char *pachChars, size_t cbChars)
207{
208 if (cbChars)
209 {
210 RTLogWriteDebugger(pachChars, cbChars);
211 RTLogWriteStdOut(pachChars, cbChars);
212 RTLogWriteUser(pachChars, cbChars);
213 }
214 return cbChars;
215}
216
217
/**
 * Complains about something.
 *
 * Writes "RTMem error: <op>: <details>" via rtR0MemEfWrite and then panics.
 *
 * @param   pszOp       Name of the operation the complaint concerns.
 * @param   pszFormat   IPRT format string for the details.
 * @param   ...         Format arguments.
 */
static void rtR0MemComplain(const char *pszOp, const char *pszFormat, ...)
{
    va_list args;
    RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "RTMem error: %s: ", pszOp);
    va_start(args, pszFormat);
    RTStrFormatV(rtR0MemEfWrite, NULL, NULL, NULL, pszFormat, args);
    va_end(args);
    RTAssertDoPanic();
}
230
231/**
232 * Log an event.
233 */
234DECLINLINE(void) rtR0MemLog(const char *pszOp, const char *pszFormat, ...)
235{
236#if 0
237 va_list args;
238 RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "RTMem info: %s: ", pszOp);
239 va_start(args, pszFormat);
240 RTStrFormatV(rtR0MemEfWrite, NULL, NULL, NULL, pszFormat, args);
241 va_end(args);
242#else
243 NOREF(pszOp); NOREF(pszFormat);
244#endif
245}
246
247
248
/**
 * Acquires the lock.
 *
 * Disables interrupts and spins on g_BlocksLock.  When preemption is
 * enabled the back-off re-enables interrupts and sleeps briefly; when it
 * is not (so sleeping is off the table) it busy-waits with pause hints.
 *
 * @returns The saved interrupt flags, to be handed to rtR0MemBlockUnlock().
 */
DECLINLINE(RTCCUINTREG) rtR0MemBlockLock(void)
{
    RTCCUINTREG uRet;
    unsigned c = 0;
    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
    {
        for (;;)
        {
            uRet = ASMIntDisableFlags();
            if (ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
                break;
            ASMSetFlags(uRet);
            /* Progressive back-off: (c/4) & 31 grows slowly, capped at 31. */
            RTThreadSleepNoLog(((++c) >> 2) & 31);
        }
    }
    else
    {
        for (;;)
        {
            uRet = ASMIntDisableFlags();
            if (ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
                break;
            ASMSetFlags(uRet);
            /* Pause hints only; cannot sleep with preemption disabled. */
            ASMNopPause();
            if (++c & 3)
                ASMNopPause();
        }
    }
    return uRet;
}
282
283
/**
 * Releases the lock and restores the interrupt state.
 *
 * @param   fSavedIntFlags  The flags returned by the matching
 *                          rtR0MemBlockLock() call.
 */
DECLINLINE(void) rtR0MemBlockUnlock(RTCCUINTREG fSavedIntFlags)
{
    Assert(g_BlocksLock == 1);
    ASMAtomicXchgU32(&g_BlocksLock, 0);
    ASMSetFlags(fSavedIntFlags);
}
293
294
295/**
296 * Creates a block.
297 */
298DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockCreate(RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
299 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
300{
301 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTMemAlloc(sizeof(*pBlock));
302 if (pBlock)
303 {
304 pBlock->enmType = enmType;
305 pBlock->cbUnaligned = cbUnaligned;
306 pBlock->cbAligned = cbAligned;
307 pBlock->pszTag = pszTag;
308 pBlock->pvCaller = pvCaller;
309 pBlock->iLine = iLine;
310 pBlock->pszFile = pszFile;
311 pBlock->pszFunction = pszFunction;
312 }
313 return pBlock;
314}
315
316
/**
 * Frees a tracker node (not the user memory it tracked).
 */
DECLINLINE(void) rtR0MemBlockFree(PRTR0MEMEFBLOCK pBlock)
{
    RTMemFree(pBlock);
}
324
325
/**
 * Inserts a block into the tracking tree, keyed by the user pointer.
 *
 * @param   pBlock      The tracker node to insert.
 * @param   pv          The user block pointer (becomes the AVL key).
 * @param   hMemObj     The backing memory object.
 */
DECLINLINE(void) rtR0MemBlockInsert(PRTR0MEMEFBLOCK pBlock, void *pv, RTR0MEMOBJ hMemObj)
{
    pBlock->Core.Key = pv;
    pBlock->hMemObj = hMemObj;
    RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
    bool fRc = RTAvlPVInsert(&g_BlocksTree, &pBlock->Core);
    rtR0MemBlockUnlock(fSavedIntFlags);
    /* A duplicate key would mean two live blocks at one address - fatal. */
    AssertRelease(fRc);
}
338
339
340/**
341 * Remove a block from the tree and returns it to the caller.
342 */
343DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockRemove(void *pv)
344{
345 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
346 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTAvlPVRemove(&g_BlocksTree, pv);
347 rtR0MemBlockUnlock(fSavedIntFlags);
348 return pBlock;
349}
350
351
352/**
353 * Gets a block.
354 */
355DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockGet(void *pv)
356{
357 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
358 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTAvlPVGet(&g_BlocksTree, pv);
359 rtR0MemBlockUnlock(fSavedIntFlags);
360 return pBlock;
361}
362
363
364/**
365 * Dumps one allocation.
366 */
367static DECLCALLBACK(int) RTMemDumpOne(PAVLPVNODECORE pNode, void *pvUser)
368{
369 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)pNode;
370 RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "%p %08lx(+%02lx) %p\n",
371 pBlock->Core.Key,
372 (unsigned long)pBlock->cbUnaligned,
373 (unsigned long)(pBlock->cbAligned - pBlock->cbUnaligned),
374 pBlock->pvCaller);
375 NOREF(pvUser);
376 return 0;
377}
378
379
380/**
381 * Dumps the allocated blocks.
382 * This is something which you should call from gdb.
383 */
384extern "C" void RTMemDump(void);
385void RTMemDump(void)
386{
387 RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "address size(alg) caller\n");
388 RTAvlPVDoWithAll(&g_BlocksTree, true, RTMemDumpOne, NULL);
389}
390
391#ifdef RTR0MEM_EF_FREE_DELAYED
392
/**
 * Inserts a freed block at the head of the delayed-free list.
 *
 * The AVL node core's pLeft/pRight members are reused as doubly-linked list
 * links here: pRight points towards the tail (older entries), pLeft towards
 * the head.  g_cbBlocksDelay is grown by the full block size incl. fence.
 */
DECLINLINE(void) rtR0MemBlockDelayInsert(PRTR0MEMEFBLOCK pBlock)
{
    size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
    pBlock->Core.pRight = NULL;
    pBlock->Core.pLeft = NULL;
    RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
    if (g_pBlocksDelayHead)
    {
        g_pBlocksDelayHead->Core.pLeft = (PAVLPVNODECORE)pBlock;
        pBlock->Core.pRight = (PAVLPVNODECORE)g_pBlocksDelayHead;
        g_pBlocksDelayHead = pBlock;
    }
    else
    {
        /* Empty list: block becomes both head and tail. */
        g_pBlocksDelayTail = pBlock;
        g_pBlocksDelayHead = pBlock;
    }
    g_cbBlocksDelay += cbBlock;
    rtR0MemBlockUnlock(fSavedIntFlags);
}
416
/**
 * Removes the oldest delayed block once the list exceeds its size budget.
 *
 * @returns A block the caller must actually free, or NULL while the list
 *          total is still within RTR0MEM_EF_FREE_DELAYED (or empty).
 */
DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockDelayRemove(void)
{
    PRTR0MEMEFBLOCK pBlock = NULL;
    RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
    if (g_cbBlocksDelay > RTR0MEM_EF_FREE_DELAYED)
    {
        /* Unlink the tail (oldest) entry. */
        pBlock = g_pBlocksDelayTail;
        if (pBlock)
        {
            g_pBlocksDelayTail = (PRTR0MEMEFBLOCK)pBlock->Core.pLeft;
            if (pBlock->Core.pLeft)
                pBlock->Core.pLeft->pRight = NULL;
            else
                g_pBlocksDelayHead = NULL;
            g_cbBlocksDelay -= RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
        }
    }
    rtR0MemBlockUnlock(fSavedIntFlags);
    return pBlock;
}
440
441#endif /* RTR0MEM_EF_FREE_DELAYED */
442
443
/**
 * Worker that actually releases a block: restores page protection, frees
 * the backing memory object and then the tracker node.
 *
 * @param   pBlock  The block to release; the node itself is freed too.
 * @param   pszOp   Operation name used in complaints.
 */
static void rtR0MemFreeBlock(PRTR0MEMEFBLOCK pBlock, const char *pszOp)
{
    void *pv = pBlock->Core.Key;
# ifdef RTR0MEM_EF_IN_FRONT
    void *pvBlock = (char *)pv - RTR0MEM_EF_SIZE;
# else
    /* Fence after the block: the allocation starts at the page boundary. */
    void *pvBlock = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
# endif
    size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;

    /* Make everything read/write again (the fence - or, for delay-freed
       blocks, the whole object - is PROT_NONE).  cbBlock is already page
       aligned, so the inner RT_ALIGN_Z is a no-op kept for safety. */
    int rc = RTR0MemObjProtect(pBlock->hMemObj, 0 /*offSub*/, RT_ALIGN_Z(cbBlock, PAGE_SIZE), RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    if (RT_FAILURE(rc))
        rtR0MemComplain(pszOp, "RTR0MemObjProtect([%p], 0, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %Rrc\n",
                        pvBlock, cbBlock, rc);

    rc = RTR0MemObjFree(pBlock->hMemObj, true /*fFreeMappings*/);
    if (RT_FAILURE(rc))
        rtR0MemComplain(pszOp, "RTR0MemObjFree([%p LB %#x]) -> %Rrc\n", pvBlock, cbBlock, rc);
    pBlock->hMemObj = NIL_RTR0MEMOBJ;

    rtR0MemBlockFree(pBlock);
}
466
467
/**
 * Initialize call, we shouldn't fail here.
 *
 * All the globals are statically initialized, so there is nothing to do.
 */
void rtR0MemEfInit(void)
{
    /* Nothing to set up. */
}
475
/**
 * @callback_method_impl{AVLPVCALLBACK}
 *
 * Called from rtR0MemEfTerm for each block still in the tree: complains
 * about the leak and then frees the block.
 */
static DECLCALLBACK(int) rtR0MemEfDestroyBlock(PAVLPVNODECORE pNode, void *pvUser)
{
    PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)pNode;

    /* Note! pszFile and pszFunction may be invalid at this point. */
    rtR0MemComplain("rtR0MemEfDestroyBlock", "Leaking %zu bytes at %p (iLine=%u pvCaller=%p)\n",
                    pBlock->cbAligned, pBlock->Core.Key, pBlock->iLine, pBlock->pvCaller);

    rtR0MemFreeBlock(pBlock, "rtR0MemEfDestroyBlock");

    NOREF(pvUser);
    return VINF_SUCCESS;
}
492
493
494/**
495 * Termination call.
496 *
497 * Will check and free memory.
498 */
499void rtR0MemEfTerm(void)
500{
501#ifdef RTR0MEM_EF_FREE_DELAYED
502 /*
503 * Release delayed frees.
504 */
505 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
506 for (;;)
507 {
508 PRTR0MEMEFBLOCK pBlock = g_pBlocksDelayTail;
509 if (pBlock)
510 {
511 g_pBlocksDelayTail = (PRTR0MEMEFBLOCK)pBlock->Core.pLeft;
512 if (pBlock->Core.pLeft)
513 pBlock->Core.pLeft->pRight = NULL;
514 else
515 g_pBlocksDelayHead = NULL;
516 rtR0MemBlockUnlock(fSavedIntFlags);
517
518 rtR0MemFreeBlock(pBlock, "rtR0MemEfTerm");
519
520 rtR0MemBlockLock();
521 }
522 else
523 break;
524 }
525 g_cbBlocksDelay = 0;
526 rtR0MemBlockUnlock(fSavedIntFlags);
527#endif
528
529 /*
530 * Complain about leaks. Then release them.
531 */
532 RTAvlPVDestroy(&g_BlocksTree, rtR0MemEfDestroyBlock, NULL);
533}
534
535
536/**
537 * Internal allocator.
538 */
539static void * rtR0MemAlloc(const char *pszOp, RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
540 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
541{
542 /*
543 * Sanity.
544 */
545 if ( RT_ALIGN_Z(RTR0MEM_EF_SIZE, PAGE_SIZE) != RTR0MEM_EF_SIZE
546 && RTR0MEM_EF_SIZE <= 0)
547 {
548 rtR0MemComplain(pszOp, "Invalid E-fence size! %#x\n", RTR0MEM_EF_SIZE);
549 return NULL;
550 }
551 if (!cbUnaligned)
552 {
553#if 1
554 rtR0MemComplain(pszOp, "Request of ZERO bytes allocation!\n");
555 return NULL;
556#else
557 cbAligned = cbUnaligned = 1;
558#endif
559 }
560
561#ifndef RTR0MEM_EF_IN_FRONT
562 /* Alignment decreases fence accuracy, but this is at least partially
563 * counteracted by filling and checking the alignment padding. When the
564 * fence is in front then then no extra alignment is needed. */
565 cbAligned = RT_ALIGN_Z(cbAligned, RTR0MEM_EF_ALIGNMENT);
566#endif
567
568 /*
569 * Allocate the trace block.
570 */
571 PRTR0MEMEFBLOCK pBlock = rtR0MemBlockCreate(enmType, cbUnaligned, cbAligned, pszTag, pvCaller, RT_SRC_POS_ARGS);
572 if (!pBlock)
573 {
574 rtR0MemComplain(pszOp, "Failed to allocate trace block!\n");
575 return NULL;
576 }
577
578 /*
579 * Allocate a block with page alignment space + the size of the E-fence.
580 */
581 void *pvBlock = NULL;
582 RTR0MEMOBJ hMemObj;
583 size_t cbBlock = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
584 int rc = RTR0MemObjAllocPage(&hMemObj, cbBlock, false /*fExecutable*/);
585 if (RT_SUCCESS(rc))
586 pvBlock = RTR0MemObjAddress(hMemObj);
587 if (pvBlock)
588 {
589 /*
590 * Calc the start of the fence and the user block
591 * and then change the page protection of the fence.
592 */
593#ifdef RTR0MEM_EF_IN_FRONT
594 void *pvEFence = pvBlock;
595 void *pv = (char *)pvEFence + RTR0MEM_EF_SIZE;
596# ifdef RTR0MEM_EF_NOMAN_FILLER
597 memset((char *)pv + cbUnaligned, RTR0MEM_EF_NOMAN_FILLER, cbBlock - RTR0MEM_EF_SIZE - cbUnaligned);
598# endif
599#else
600 void *pvEFence = (char *)pvBlock + (cbBlock - RTR0MEM_EF_SIZE);
601 void *pv = (char *)pvEFence - cbAligned;
602# ifdef RTR0MEM_EF_NOMAN_FILLER
603 memset(pvBlock, RTR0MEM_EF_NOMAN_FILLER, cbBlock - RTR0MEM_EF_SIZE - cbAligned);
604 memset((char *)pv + cbUnaligned, RTR0MEM_EF_NOMAN_FILLER, cbAligned - cbUnaligned);
605# endif
606#endif
607
608#ifdef RTR0MEM_EF_FENCE_FILLER
609 memset(pvEFence, RTR0MEM_EF_FENCE_FILLER, RTR0MEM_EF_SIZE);
610#endif
611 rc = RTR0MemObjProtect(hMemObj, (uint8_t *)pvEFence - (uint8_t *)pvBlock, RTR0MEM_EF_SIZE, RTMEM_PROT_NONE);
612 if (!rc)
613 {
614 rtR0MemBlockInsert(pBlock, pv, hMemObj);
615 if (enmType == RTMEMTYPE_RTMEMALLOCZ)
616 memset(pv, 0, cbUnaligned);
617#ifdef RTR0MEM_EF_FILLER
618 else
619 memset(pv, RTR0MEM_EF_FILLER, cbUnaligned);
620#endif
621
622 rtR0MemLog(pszOp, "returns %p (pvBlock=%p cbBlock=%#x pvEFence=%p cbUnaligned=%#x)\n", pv, pvBlock, cbBlock, pvEFence, cbUnaligned);
623 return pv;
624 }
625 rtR0MemComplain(pszOp, "RTMemProtect failed, pvEFence=%p size %d, rc=%d\n", pvEFence, RTR0MEM_EF_SIZE, rc);
626 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
627 }
628 else
629 {
630 rtR0MemComplain(pszOp, "Failed to allocated %zu (%zu) bytes (rc=%Rrc).\n", cbBlock, cbUnaligned, rc);
631 if (RT_SUCCESS(rc))
632 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
633 }
634
635 rtR0MemBlockFree(pBlock);
636 return NULL;
637}
638
639
640/**
641 * Internal free.
642 */
643static void rtR0MemFree(const char *pszOp, RTMEMTYPE enmType, void *pv, void *pvCaller, RT_SRC_POS_DECL)
644{
645 NOREF(enmType); RT_SRC_POS_NOREF();
646
647 /*
648 * Simple case.
649 */
650 if (!pv)
651 return;
652
653 /*
654 * Check watch points.
655 */
656 for (unsigned i = 0; i < RT_ELEMENTS(gapvRTMemFreeWatch); i++)
657 if (gapvRTMemFreeWatch[i] == pv)
658 RTAssertDoPanic();
659
660 /*
661 * Find the block.
662 */
663 PRTR0MEMEFBLOCK pBlock = rtR0MemBlockRemove(pv);
664 if (pBlock)
665 {
666 if (gfRTMemFreeLog)
667 RTLogPrintf("RTMem %s: pv=%p pvCaller=%p cbUnaligned=%#x\n", pszOp, pv, pvCaller, pBlock->cbUnaligned);
668
669#ifdef RTR0MEM_EF_NOMAN_FILLER
670 /*
671 * Check whether the no man's land is untouched.
672 */
673# ifdef RTR0MEM_EF_IN_FRONT
674 void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
675 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbUnaligned,
676 RTR0MEM_EF_NOMAN_FILLER);
677# else
678 /* Alignment must match allocation alignment in rtMemAlloc(). */
679 void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
680 pBlock->cbAligned - pBlock->cbUnaligned,
681 RTR0MEM_EF_NOMAN_FILLER);
682 if (pvWrong)
683 RTAssertDoPanic();
684 pvWrong = ASMMemFirstMismatchingU8((void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK),
685 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbAligned,
686 RTR0MEM_EF_NOMAN_FILLER);
687# endif
688 if (pvWrong)
689 RTAssertDoPanic();
690#endif
691
692#ifdef RTR0MEM_EF_FREE_FILL
693 /*
694 * Fill the user part of the block.
695 */
696 memset(pv, RTR0MEM_EF_FREE_FILL, pBlock->cbUnaligned);
697#endif
698
699#if defined(RTR0MEM_EF_FREE_DELAYED) && RTR0MEM_EF_FREE_DELAYED > 0
700 /*
701 * We're doing delayed freeing.
702 * That means we'll expand the E-fence to cover the entire block.
703 */
704 int rc = RTR0MemObjProtect(pBlock->hMemObj,
705# ifdef RTR0MEM_EF_IN_FRONT
706 RTR0MEM_EF_SIZE,
707# else
708 0 /*offSub*/,
709# endif
710 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE),
711 RTMEM_PROT_NONE);
712 if (RT_SUCCESS(rc))
713 {
714 /*
715 * Insert it into the free list and process pending frees.
716 */
717 rtR0MemBlockDelayInsert(pBlock);
718 while ((pBlock = rtR0MemBlockDelayRemove()) != NULL)
719 rtR0MemFreeBlock(pBlock, pszOp);
720 }
721 else
722 rtR0MemComplain(pszOp, "Failed to expand the efence of pv=%p cb=%d, rc=%d.\n", pv, pBlock, rc);
723
724#else /* !RTR0MEM_EF_FREE_DELAYED */
725 rtR0MemFreeBlock(pBlock, pszOp);
726#endif /* !RTR0MEM_EF_FREE_DELAYED */
727 }
728 else
729 rtR0MemComplain(pszOp, "pv=%p not found! Incorrect free!\n", pv);
730}
731
732
/**
 * Internal realloc.
 *
 * Implemented as allocate-new + copy + free-old, since every block lives in
 * its own fenced memory object and cannot grow in place.
 *
 * @returns The new user pointer; NULL on allocation failure (old block kept
 *          intact) or when @a cbNew is zero (old block freed).
 */
static void *rtR0MemRealloc(const char *pszOp, RTMEMTYPE enmType, void *pvOld, size_t cbNew,
                            const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
{
    /*
     * Allocate new and copy.
     */
    if (!pvOld)
        return rtR0MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
    if (!cbNew)
    {
        rtR0MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, pvCaller, RT_SRC_POS_ARGS);
        return NULL;
    }

    /*
     * Get the block, allocate the new, copy the data, free the old one.
     */
    PRTR0MEMEFBLOCK pBlock = rtR0MemBlockGet(pvOld);
    if (pBlock)
    {
        void *pvRet = rtR0MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
        if (pvRet)
        {
            /* Copy only the originally requested bytes. */
            memcpy(pvRet, pvOld, RT_MIN(cbNew, pBlock->cbUnaligned));
            rtR0MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, pvCaller, RT_SRC_POS_ARGS);
        }
        return pvRet;
    }
    rtR0MemComplain(pszOp, "pvOld=%p was not found!\n", pvOld);
    return NULL;
}
767
768
769
770
771RTDECL(void *) RTMemEfTmpAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
772{
773 return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
774}
775
776
777RTDECL(void *) RTMemEfTmpAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
778{
779 return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
780}
781
782
783RTDECL(void) RTMemEfTmpFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
784{
785 if (pv)
786 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), RT_SRC_POS_ARGS);
787}
788
789
790RTDECL(void *) RTMemEfAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
791{
792 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
793}
794
795
796RTDECL(void *) RTMemEfAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
797{
798 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
799}
800
801
802RTDECL(void *) RTMemEfAllocVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
803{
804 size_t cbAligned;
805 if (cbUnaligned >= 16)
806 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
807 else
808 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
809 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
810}
811
812
813RTDECL(void *) RTMemEfAllocZVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
814{
815 size_t cbAligned;
816 if (cbUnaligned >= 16)
817 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
818 else
819 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
820 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
821}
822
823
824RTDECL(void *) RTMemEfRealloc(void *pvOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
825{
826 return rtR0MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
827}
828
829
830RTDECL(void) RTMemEfFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
831{
832 if (pv)
833 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), RT_SRC_POS_ARGS);
834}
835
836
837RTDECL(void *) RTMemEfDup(const void *pvSrc, size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
838{
839 void *pvDst = RTMemEfAlloc(cb, pszTag, RT_SRC_POS_ARGS);
840 if (pvDst)
841 memcpy(pvDst, pvSrc, cb);
842 return pvDst;
843}
844
845
846RTDECL(void *) RTMemEfDupEx(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
847{
848 void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, RT_SRC_POS_ARGS);
849 if (pvDst)
850 {
851 memcpy(pvDst, pvSrc, cbSrc);
852 memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
853 }
854 return pvDst;
855}
856
857
858
859
860/*
861 *
862 * The NP (no position) versions.
863 *
864 */
865
866
867
868RTDECL(void *) RTMemEfTmpAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
869{
870 return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
871}
872
873
874RTDECL(void *) RTMemEfTmpAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
875{
876 return rtR0MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
877}
878
879
880RTDECL(void) RTMemEfTmpFreeNP(void *pv) RT_NO_THROW_DEF
881{
882 if (pv)
883 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), NULL, 0, NULL);
884}
885
886
887RTDECL(void *) RTMemEfAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
888{
889 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
890}
891
892
893RTDECL(void *) RTMemEfAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
894{
895 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
896}
897
898
899RTDECL(void *) RTMemEfAllocVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
900{
901 size_t cbAligned;
902 if (cbUnaligned >= 16)
903 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
904 else
905 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
906 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
907}
908
909
910RTDECL(void *) RTMemEfAllocZVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
911{
912 size_t cbAligned;
913 if (cbUnaligned >= 16)
914 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
915 else
916 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
917 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
918}
919
920
921RTDECL(void *) RTMemEfReallocNP(void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
922{
923 return rtR0MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
924}
925
926
927RTDECL(void) RTMemEfFreeNP(void *pv) RT_NO_THROW_DEF
928{
929 if (pv)
930 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), NULL, 0, NULL);
931}
932
933
934RTDECL(void *) RTMemEfDupNP(const void *pvSrc, size_t cb, const char *pszTag) RT_NO_THROW_DEF
935{
936 void *pvDst = RTMemEfAlloc(cb, pszTag, NULL, 0, NULL);
937 if (pvDst)
938 memcpy(pvDst, pvSrc, cb);
939 return pvDst;
940}
941
942
943RTDECL(void *) RTMemEfDupExNP(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag) RT_NO_THROW_DEF
944{
945 void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, NULL, 0, NULL);
946 if (pvDst)
947 {
948 memcpy(pvDst, pvSrc, cbSrc);
949 memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
950 }
951 return pvDst;
952}
953
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette