VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/memsafer-r3.cpp@ 103416

Last change on this file since 103416 was 100442, checked in by vboxsync, 16 months ago

IPRT,OpenSSL: Support ECDSA for verification purposes when IPRT links with OpenSSL. This required quite a bit of cleanup, so not entirely no-risk. bugref:10479 ticketref:21621

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.9 KB
Line 
1/* $Id: memsafer-r3.cpp 100442 2023-07-08 11:10:51Z vboxsync $ */
2/** @file
3 * IPRT - Memory Allocate for Sensitive Data, generic heap-based implementation.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#include "internal/iprt.h"
42#include <iprt/memsafer.h>
43
44#include <iprt/asm.h>
45#include <iprt/assert.h>
46#include <iprt/avl.h>
47#include <iprt/critsect.h>
48#include <iprt/err.h>
49#include <iprt/mem.h>
50#include <iprt/once.h>
51#include <iprt/rand.h>
52#include <iprt/param.h>
53#include <iprt/string.h>
54#include <iprt/system.h>
55#ifdef IN_SUP_R3
56# include <VBox/sup.h>
57#endif
58
59
60/*********************************************************************************************************************************
61* Defined Constants And Macros *
62*********************************************************************************************************************************/
63/** Allocation size alignment (power of two). */
64#define RTMEMSAFER_ALIGN 16
65
66
67/*********************************************************************************************************************************
68* Structures and Typedefs *
69*********************************************************************************************************************************/
/**
 * Allocators.
 *
 * Records which backend provided the pages for an allocation so that
 * RTMemSaferFree knows how to unprotect and free them again.
 */
typedef enum RTMEMSAFERALLOCATOR
{
    /** Invalid method. */
    RTMEMSAFERALLOCATOR_INVALID = 0,
    /** Allocated via RTMemPageAlloc (pageable fallback). */
    RTMEMSAFERALLOCATOR_RTMEMPAGE,
    /** Allocated via SUPR3PageAllocEx (support driver, tried first). */
    RTMEMSAFERALLOCATOR_SUPR3
} RTMEMSAFERALLOCATOR;
82
/**
 * Tracking node (lives on normal heap).
 *
 * One node exists per safer allocation; nodes are kept in the global AVL
 * tree (g_pMemSaferTree) keyed by the scrambled user pointer.
 */
typedef struct RTMEMSAFERNODE
{
    /** Node core.
     * The core key is a scrambled pointer to the user memory. */
    AVLPVNODECORE           Core;
    /** The allocation flags (RTMEMSAFER_F_XXX). */
    uint32_t                fFlags;
    /** The offset into the allocation of the user memory.
     * Randomized at allocation time to make the data start less predictable. */
    uint32_t                offUser;
    /** The requested allocation size. */
    size_t                  cbUser;
    /** The allocation size in pages, this includes the two guard pages. */
    uint32_t                cPages;
    /** The allocator used for this node. */
    RTMEMSAFERALLOCATOR     enmAllocator;
    /** XOR scrambler value for memory.
     * Zero until the first RTMemSaferScramble call picks a random value. */
    uintptr_t               uScramblerXor;
} RTMEMSAFERNODE;
/** Pointer to an allocation tracking node. */
typedef RTMEMSAFERNODE *PRTMEMSAFERNODE;
106
107
/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Init once structure for this module (lazily set up by the allocation APIs). */
static RTONCE       g_MemSaferOnce = RTONCE_INITIALIZER;
/** Critical section protecting the allocation tree (g_pMemSaferTree). */
static RTCRITSECTRW g_MemSaferCritSect;
/** Tree of allocation tracking nodes, keyed by scrambled user pointer. */
static AVLPVTREE    g_pMemSaferTree;
/** XOR scrambler value for pointers (tree keys); set once from RTRandU64. */
static uintptr_t    g_uMemSaferPtrScramblerXor;
/** Pointer rotate shift count (0..ARCH_BITS-1); set once from RTRandU32Ex. */
static uintptr_t    g_cMemSaferPtrScramblerRotate;
121
122
123/**
124 * @callback_method_impl{FNRTONCE, Inits globals.}
125 */
126static DECLCALLBACK(int32_t) rtMemSaferOnceInit(void *pvUserIgnore)
127{
128 RT_NOREF_PV(pvUserIgnore);
129
130 g_uMemSaferPtrScramblerXor = (uintptr_t)RTRandU64();
131 g_cMemSaferPtrScramblerRotate = RTRandU32Ex(0, ARCH_BITS - 1);
132 return RTCritSectRwInit(&g_MemSaferCritSect);
133}
134
135
136/**
137 * @callback_method_impl{PFNRTONCECLEANUP, Cleans up globals.}
138 */
139static DECLCALLBACK(void) rtMemSaferOnceTerm(void *pvUser, bool fLazyCleanUpOk)
140{
141 RT_NOREF_PV(pvUser);
142
143 if (!fLazyCleanUpOk)
144 {
145 RTCritSectRwDelete(&g_MemSaferCritSect);
146 Assert(!g_pMemSaferTree);
147 }
148}
149
150
151
152DECLINLINE(void *) rtMemSaferScramblePointer(void *pvUser)
153{
154 uintptr_t uPtr = (uintptr_t)pvUser;
155 uPtr ^= g_uMemSaferPtrScramblerXor;
156#if ARCH_BITS == 64
157 uPtr = ASMRotateRightU64(uPtr, g_cMemSaferPtrScramblerRotate);
158#elif ARCH_BITS == 32
159 uPtr = ASMRotateRightU32(uPtr, g_cMemSaferPtrScramblerRotate);
160#else
161# error "Unsupported/missing ARCH_BITS."
162#endif
163 return (void *)uPtr;
164}
165
166
167/**
168 * Inserts a tracking node into the tree.
169 *
170 * @param pThis The allocation tracking node to insert.
171 */
172static void rtMemSaferNodeInsert(PRTMEMSAFERNODE pThis)
173{
174 RTCritSectRwEnterExcl(&g_MemSaferCritSect);
175 pThis->Core.Key = rtMemSaferScramblePointer(pThis->Core.Key);
176 bool fRc = RTAvlPVInsert(&g_pMemSaferTree, &pThis->Core);
177 RTCritSectRwLeaveExcl(&g_MemSaferCritSect);
178 Assert(fRc); NOREF(fRc);
179}
180
181
182/**
183 * Finds a tracking node into the tree.
184 *
185 * @returns The allocation tracking node for @a pvUser. NULL if not found.
186 * @param pvUser The user pointer to the allocation.
187 */
188static PRTMEMSAFERNODE rtMemSaferNodeLookup(void *pvUser)
189{
190 void *pvKey = rtMemSaferScramblePointer(pvUser);
191 RTCritSectRwEnterShared(&g_MemSaferCritSect);
192 PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTAvlPVGet(&g_pMemSaferTree, pvKey);
193 RTCritSectRwLeaveShared(&g_MemSaferCritSect);
194 return pThis;
195}
196
197
198/**
199 * Removes a tracking node from the tree.
200 *
201 * @returns The allocation tracking node for @a pvUser. NULL if not found.
202 * @param pvUser The user pointer to the allocation.
203 */
204static PRTMEMSAFERNODE rtMemSaferNodeRemove(void *pvUser)
205{
206 void *pvKey = rtMemSaferScramblePointer(pvUser);
207 RTCritSectRwEnterExcl(&g_MemSaferCritSect);
208 PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTAvlPVRemove(&g_pMemSaferTree, pvKey);
209 RTCritSectRwLeaveExcl(&g_MemSaferCritSect);
210 return pThis;
211}
212
213
214RTDECL(int) RTMemSaferScramble(void *pv, size_t cb)
215{
216 PRTMEMSAFERNODE pThis = rtMemSaferNodeLookup(pv);
217 AssertReturn(pThis, VERR_INVALID_POINTER);
218 AssertMsgReturn(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser), VERR_INVALID_PARAMETER);
219
220 /* First time we get a new xor value. */
221 if (!pThis->uScramblerXor)
222 pThis->uScramblerXor = (uintptr_t)RTRandU64();
223
224 /* Note! This isn't supposed to be safe, just less obvious. */
225 uintptr_t *pu = (uintptr_t *)pv;
226 cb = RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN);
227 while (cb > 0)
228 {
229 *pu ^= pThis->uScramblerXor;
230 pu++;
231 cb -= sizeof(*pu);
232 }
233
234 return VINF_SUCCESS;
235}
236RT_EXPORT_SYMBOL(RTMemSaferScramble);
237
238
239RTDECL(int) RTMemSaferUnscramble(void *pv, size_t cb)
240{
241 PRTMEMSAFERNODE pThis = rtMemSaferNodeLookup(pv);
242 AssertReturn(pThis, VERR_INVALID_POINTER);
243 AssertMsgReturn(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser), VERR_INVALID_PARAMETER);
244
245 /* Note! This isn't supposed to be safe, just less obvious. */
246 uintptr_t *pu = (uintptr_t *)pv;
247 cb = RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN);
248 while (cb > 0)
249 {
250 *pu ^= pThis->uScramblerXor;
251 pu++;
252 cb -= sizeof(*pu);
253 }
254
255 return VINF_SUCCESS;
256}
257RT_EXPORT_SYMBOL(RTMemSaferUnscramble);
258
259
260/**
261 * Initializes the pages.
262 *
263 * Fills the memory with random bytes in order to make it less obvious where the
264 * secret data starts and ends. We also zero the user memory in case the
265 * allocator does not do this.
266 *
267 * @param pThis The allocation tracer node. The Core.Key member
268 * will be set.
269 * @param pvPages The pages to initialize.
270 */
271static void rtMemSaferInitializePages(PRTMEMSAFERNODE pThis, void *pvPages)
272{
273 uint32_t const cbPage = RTSystemGetPageSize();
274 RTRandBytes(pvPages, cbPage + pThis->offUser);
275
276 uint8_t *pbUser = (uint8_t *)pvPages + cbPage + pThis->offUser;
277 pThis->Core.Key = pbUser;
278 RT_BZERO(pbUser, pThis->cbUser); /* paranoia */
279
280 RTRandBytes(pbUser + pThis->cbUser, (size_t)pThis->cPages * cbPage - cbPage - pThis->offUser - pThis->cbUser);
281}
282
283
/**
 * Allocates and initializes pages from the support driver.
 *
 * Where supported, the first and last page of the allocation are turned into
 * inaccessible guard pages via SUPR3PageProtect.
 *
 * @returns VBox status code. VERR_NOT_SUPPORTED when not built with SUP
 *          support (IN_SUP_R3 undefined).
 * @param   pThis   The allocator node. Core.Key will be set on successful
 *                  return (unscrambled).
 */
static int rtMemSaferSupR3AllocPages(PRTMEMSAFERNODE pThis)
{
#ifdef IN_SUP_R3
    /*
     * Try allocate the memory.
     */
    void *pvPages;
    int rc = SUPR3PageAllocEx(pThis->cPages, 0 /* fFlags */, &pvPages, NULL /* pR0Ptr */, NULL /* paPages */);
    if (RT_SUCCESS(rc))
    {
        /* Random-fill guard/padding areas, zero the user area and set Core.Key. */
        rtMemSaferInitializePages(pThis, pvPages);

        /*
         * On darwin we cannot allocate pages without an R0 mapping and
         * SUPR3PageAllocEx falls back to another method which is incompatible with
         * the way SUPR3PageProtect works. Ignore changing the protection of the guard
         * pages.
         */
#ifdef RT_OS_DARWIN
        return VINF_SUCCESS;
#else
        /*
         * Configure the guard pages.
         * SUPR3PageProtect isn't supported on all hosts, we ignore that.
         */
        uint32_t const cbPage = RTSystemGetPageSize();
        rc = SUPR3PageProtect(pvPages, NIL_RTR0PTR, 0, cbPage, RTMEM_PROT_NONE);
        if (RT_SUCCESS(rc))
        {
            /* The last page is the tail guard page. */
            rc = SUPR3PageProtect(pvPages, NIL_RTR0PTR, (pThis->cPages - 1) * cbPage, cbPage, RTMEM_PROT_NONE);
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;
            /* Tail guard failed: undo the head guard so the pages can be freed. */
            SUPR3PageProtect(pvPages, NIL_RTR0PTR, 0, cbPage, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        }
        else if (rc == VERR_NOT_SUPPORTED)
            return VINF_SUCCESS;

        /* failed. */
        int rc2 = SUPR3PageFreeEx(pvPages, pThis->cPages); AssertRC(rc2);
#endif
    }
    return rc;

#else  /* !IN_SUP_R3 */
    RT_NOREF_PV(pThis);
    return VERR_NOT_SUPPORTED;
#endif /* !IN_SUP_R3 */
}
339
340
341/**
342 * Allocates and initializes pages using the IPRT page allocator API.
343 *
344 * @returns VBox status code.
345 * @param pThis The allocator node. Core.Key will be set on successful
346 * return (unscrambled).
347 */
348static int rtMemSaferMemAllocPages(PRTMEMSAFERNODE pThis)
349{
350 /*
351 * Try allocate the memory.
352 */
353 uint32_t const cbPage = RTSystemGetPageSize();
354 int rc = VINF_SUCCESS;
355 void *pvPages = RTMemPageAllocEx((size_t)pThis->cPages * cbPage,
356 RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP | RTMEMPAGEALLOC_F_ZERO);
357 if (pvPages)
358 {
359 rtMemSaferInitializePages(pThis, pvPages);
360
361 /*
362 * Configure the guard pages.
363 */
364 rc = RTMemProtect(pvPages, cbPage, RTMEM_PROT_NONE);
365 if (RT_SUCCESS(rc))
366 {
367 rc = RTMemProtect((uint8_t *)pvPages + (size_t)(pThis->cPages - 1U) * cbPage, cbPage, RTMEM_PROT_NONE);
368 if (RT_SUCCESS(rc))
369 return VINF_SUCCESS;
370 rc = RTMemProtect(pvPages, cbPage, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
371 }
372
373 /* failed. */
374 RTMemPageFree(pvPages, (size_t)pThis->cPages * cbPage);
375 }
376 else
377 rc = VERR_NO_PAGE_MEMORY;
378
379 return rc;
380}
381
382
/**
 * Allocates zeroed memory for sensitive data.
 *
 * The allocation is surrounded by guard pages, placed at a random 16-byte
 * aligned page offset, and tracked in a global tree so it can be scrambled,
 * queried and freed later.
 *
 * @returns VBox status code.
 * @param   ppvNew  Where to return the user pointer on success (set to NULL
 *                  on entry).
 * @param   cb      Number of bytes to allocate (max ~32 MB).
 * @param   fFlags  RTMEMSAFER_F_XXX flags.
 * @param   pszTag  Allocation tag (currently unused).
 */
RTDECL(int) RTMemSaferAllocZExTag(void **ppvNew, size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
{
    RT_NOREF_PV(pszTag);

    /*
     * Validate input.
     */
    AssertPtrReturn(ppvNew, VERR_INVALID_PARAMETER);
    *ppvNew = NULL;
    AssertReturn(cb, VERR_INVALID_PARAMETER);
    uint32_t const cbPage = RTSystemGetPageSize();
    AssertReturn(cb <= 32U*_1M - cbPage * 3U, VERR_ALLOCATION_TOO_BIG); /* Max 32 MB minus padding and guard pages. */
    AssertReturn(!(fFlags & ~RTMEMSAFER_F_VALID_MASK), VERR_INVALID_FLAGS);

    /*
     * Initialize globals.
     */
    int rc = RTOnceEx(&g_MemSaferOnce, rtMemSaferOnceInit, rtMemSaferOnceTerm, NULL);
    if (RT_SUCCESS(rc))
    {
        /*
         * Allocate a tracker node first.
         */
        PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTMemAllocZ(sizeof(RTMEMSAFERNODE));
        if (pThis)
        {
            /*
             * Prepare the allocation.
             */
            pThis->cbUser  = cb;
            /* Random in-page offset (16-byte aligned) so the data start is less predictable. */
            pThis->offUser = (RTRandU32Ex(0, 128) * RTMEMSAFER_ALIGN) & RTSystemGetPageOffsetMask();

            size_t cbNeeded = pThis->offUser + pThis->cbUser;
            cbNeeded = RT_ALIGN_Z(cbNeeded, cbPage);

            pThis->cPages = (uint32_t)(cbNeeded / cbPage) + 2; /* +2 for guard pages */

            /*
             * Try allocate the memory, using the best allocator by default and
             * falling back on the less safe one.
             */
            rc = rtMemSaferSupR3AllocPages(pThis);
            if (RT_SUCCESS(rc))
                pThis->enmAllocator = RTMEMSAFERALLOCATOR_SUPR3;
            else if (!(fFlags & RTMEMSAFER_F_REQUIRE_NOT_PAGABLE))
            {
                rc = rtMemSaferMemAllocPages(pThis);
                if (RT_SUCCESS(rc))
                    pThis->enmAllocator = RTMEMSAFERALLOCATOR_RTMEMPAGE;
            }
            if (RT_SUCCESS(rc))
            {
                /*
                 * Insert the node.  Grab the user pointer first since insertion
                 * scrambles Core.Key.
                 */
                *ppvNew = pThis->Core.Key;
                rtMemSaferNodeInsert(pThis); /* (Scrambles Core.Key) */
                return VINF_SUCCESS;
            }

            RTMemFree(pThis);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    return rc;
}
RT_EXPORT_SYMBOL(RTMemSaferAllocZExTag);
451
452
/**
 * Frees a safer allocation, wiping the user memory first.
 *
 * @param   pv      The user pointer returned by RTMemSaferAllocZExTag.
 *                  NULL is ignored.
 * @param   cb      The allocation size; 0 means "look it up" (for the
 *                  OpenSSL integration which doesn't track sizes).
 */
RTDECL(void) RTMemSaferFree(void *pv, size_t cb) RT_NO_THROW_DEF
{
    if (pv)
    {
        PRTMEMSAFERNODE pThis = rtMemSaferNodeRemove(pv);
        AssertReturnVoid(pThis);
        if (cb == 0) /* for openssl use */
            cb = pThis->cbUser;
        else
            AssertMsg(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser));

        /*
         * Wipe the user memory first.
         */
        RTMemWipeThoroughly(pv, RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN), 3);

        /*
         * Free the pages.  pv points offUser bytes into the first page after
         * the head guard page; step back to the start of the allocation and
         * lift the guard-page protection before freeing.
         */
        uint32_t const cbPage = RTSystemGetPageSize();
        uint8_t *pbPages = (uint8_t *)pv - pThis->offUser - cbPage;
        size_t   cbPages = (size_t)pThis->cPages * cbPage;
        switch (pThis->enmAllocator)
        {
#ifdef IN_SUP_R3
            case RTMEMSAFERALLOCATOR_SUPR3:
                SUPR3PageProtect(pbPages, NIL_RTR0PTR, 0, cbPage, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                SUPR3PageProtect(pbPages, NIL_RTR0PTR, (uint32_t)(cbPages - cbPage), cbPage, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                SUPR3PageFreeEx(pbPages, pThis->cPages);
                break;
#endif
            case RTMEMSAFERALLOCATOR_RTMEMPAGE:
                RTMemProtect(pbPages, cbPage, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                RTMemProtect(pbPages + cbPages - cbPage, cbPage, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                RTMemPageFree(pbPages, cbPages);
                break;

            default:
                AssertFailed();
        }

        /*
         * Free the tracking node after scrubbing it (paranoia).
         */
        pThis->Core.Key = NULL;
        pThis->offUser  = 0;
        pThis->cbUser   = 0;
        RTMemFree(pThis);
    }
    else
        Assert(cb == 0);
}
RT_EXPORT_SYMBOL(RTMemSaferFree);
506
507
508RTDECL(size_t) RTMemSaferGetSize(void *pv) RT_NO_THROW_DEF
509{
510 size_t cbRet = 0;
511 if (pv)
512 {
513 /*
514 * We use this API for testing whether pv is a safer allocation or not,
515 * so we may be called before the allocators. Thus, it's prudent to
516 * make sure initialization has taken place before attempting to enter
517 * the critical section and such.
518 */
519 int rc = RTOnceEx(&g_MemSaferOnce, rtMemSaferOnceInit, rtMemSaferOnceTerm, NULL);
520 if (RT_SUCCESS(rc))
521 {
522 void *pvKey = rtMemSaferScramblePointer(pv);
523 RTCritSectRwEnterShared(&g_MemSaferCritSect);
524 PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTAvlPVGet(&g_pMemSaferTree, pvKey);
525 if (pThis)
526 cbRet = pThis->cbUser;
527 RTCritSectRwLeaveShared(&g_MemSaferCritSect);
528 }
529 }
530 return cbRet;
531}
532RT_EXPORT_SYMBOL(RTMemSaferGetSize);
533
534
535/**
536 * The simplest reallocation method: allocate new block, copy over the data,
537 * free old block.
538 */
539static int rtMemSaferReallocSimpler(size_t cbOld, void *pvOld, size_t cbNew, void **ppvNew, uint32_t fFlags, const char *pszTag)
540{
541 void *pvNew;
542 int rc = RTMemSaferAllocZExTag(&pvNew, cbNew, fFlags, pszTag);
543 if (RT_SUCCESS(rc))
544 {
545 memcpy(pvNew, pvOld, RT_MIN(cbNew, cbOld));
546 RTMemSaferFree(pvOld, cbOld);
547 *ppvNew = pvNew;
548 }
549 return rc;
550}
551
552
/**
 * Reallocates a safer allocation.
 *
 * cbNew == 0 frees; cbOld == 0 allocates.  Growing is done in place when the
 * existing pages have room (possibly shifting the data towards the page
 * start), otherwise via allocate-copy-free.  Changing flags from non-zero to
 * anything else is rejected.
 *
 * @returns VBox status code.
 */
RTDECL(int) RTMemSaferReallocZExTag(size_t cbOld, void *pvOld, size_t cbNew, void **ppvNew, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
{
    int rc;
    /* Real realloc. */
    if (cbNew && cbOld)
    {
        PRTMEMSAFERNODE pThis = rtMemSaferNodeLookup(pvOld);
        AssertReturn(pThis, VERR_INVALID_POINTER);
        AssertMsgStmt(cbOld == pThis->cbUser, ("cbOld=%#zx != %#zx\n", cbOld, pThis->cbUser), cbOld = pThis->cbUser);

        if (pThis->fFlags == fFlags)
        {
            if (cbNew > cbOld)
            {
                /*
                 * Is there enough room for us to grow in place?
                 */
                size_t cbMax = (size_t)(pThis->cPages - 2) * RTSystemGetPageSize();
                if (cbNew <= cbMax)
                {
                    size_t const cbAdded = (cbNew - cbOld);
                    size_t const cbAfter = cbMax - pThis->offUser - cbOld;
                    if (cbAfter >= cbAdded)
                    {
                        /*
                         * Sufficient space after the current allocation.
                         */
                        uint8_t *pbNewSpace = (uint8_t *)pvOld + cbOld;
                        RT_BZERO(pbNewSpace, cbAdded);
                        *ppvNew = pvOld;
                    }
                    else
                    {
                        /*
                         * Have to move the allocation to make enough room at the
                         * end. In order to make it a little less predictable and
                         * maybe avoid a relocation or two in the next call, halve
                         * the page offset until it fits.
                         */
                        AssertReturn(rtMemSaferNodeRemove(pvOld) == pThis, VERR_INTERNAL_ERROR_3);
                        uint32_t offNewUser = pThis->offUser;
                        do
                            offNewUser = offNewUser / 2;
                        while ((pThis->offUser - offNewUser) + cbAfter < cbAdded);
                        offNewUser &= ~(RTMEMSAFER_ALIGN - 1U);

                        /* Shift the data towards the page start, zero the added
                           tail, and wipe any bytes left behind by the move. */
                        uint32_t const cbMove = pThis->offUser - offNewUser;
                        uint8_t *pbNew = (uint8_t *)pvOld - cbMove;
                        memmove(pbNew, pvOld, cbOld);

                        RT_BZERO(pbNew + cbOld, cbAdded);
                        if (cbMove > cbAdded)
                            RTMemWipeThoroughly(pbNew + cbNew, cbMove - cbAdded, 3);

                        pThis->offUser  = offNewUser;
                        pThis->Core.Key = pbNew;
                        *ppvNew = pbNew;

                        /* Re-insert under the new (scrambled) key. */
                        rtMemSaferNodeInsert(pThis);
                    }
                    Assert(((uintptr_t)*ppvNew & RTSystemGetPageOffsetMask()) == pThis->offUser);
                    pThis->cbUser = cbNew;
                    rc = VINF_SUCCESS;
                }
                else
                {
                    /*
                     * Not enough space, allocate a new block and copy over the data.
                     */
                    rc = rtMemSaferReallocSimpler(cbOld, pvOld, cbNew, ppvNew, fFlags, pszTag);
                }
            }
            else
            {
                /*
                 * Shrinking the allocation, just wipe the memory that is no longer
                 * being used.
                 */
                if (cbNew != cbOld)
                {
                    uint8_t *pbAbandond = (uint8_t *)pvOld + cbNew;
                    RTMemWipeThoroughly(pbAbandond, cbOld - cbNew, 3);
                }
                pThis->cbUser = cbNew;
                *ppvNew = pvOld;
                rc = VINF_SUCCESS;
            }
        }
        else if (!pThis->fFlags)
        {
            /*
             * New flags added. Allocate a new block and copy over the old one.
             */
            rc = rtMemSaferReallocSimpler(cbOld, pvOld, cbNew, ppvNew, fFlags, pszTag);
        }
        else
        {
            /* Incompatible flags. */
            AssertMsgFailed(("fFlags=%#x old=%#x\n", fFlags, pThis->fFlags));
            rc = VERR_INVALID_FLAGS;
        }
    }
    /*
     * First allocation. Pass it on.
     */
    else if (!cbOld)
    {
        Assert(pvOld == NULL);
        rc = RTMemSaferAllocZExTag(ppvNew, cbNew, fFlags, pszTag);
    }
    /*
     * Free operation. Pass it on.
     */
    else
    {
        RTMemSaferFree(pvOld, cbOld);
        *ppvNew = NULL;
        rc = VINF_SUCCESS;
    }
    return rc;
}
RT_EXPORT_SYMBOL(RTMemSaferReallocZExTag);
675
676
677RTDECL(void *) RTMemSaferAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
678{
679 void *pvNew = NULL;
680 int rc = RTMemSaferAllocZExTag(&pvNew, cb, 0 /*fFlags*/, pszTag);
681 if (RT_SUCCESS(rc))
682 return pvNew;
683 return NULL;
684}
685RT_EXPORT_SYMBOL(RTMemSaferAllocZTag);
686
687
688RTDECL(void *) RTMemSaferReallocZTag(size_t cbOld, void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
689{
690 void *pvNew = NULL;
691 int rc = RTMemSaferReallocZExTag(cbOld, pvOld, cbNew, &pvNew, 0 /*fFlags*/, pszTag);
692 if (RT_SUCCESS(rc))
693 return pvNew;
694 return NULL;
695}
696RT_EXPORT_SYMBOL(RTMemSaferReallocZTag);
697
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette