VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/MMAllHyper.cpp@ 62338

Last change on this file since 62338 was 59747, checked in by vboxsync, 9 years ago

iprt/asm.h: Cleaned up the ASMMemIsAll8/U32 mess and implemented the former in assembly. (Found inverted usage due to bad naming in copyUtf8Block, but it is fortunately an unused method.) Replaces the complicated ASMBitFirstSet based scanning in RTSgBufIsZero with a simple call to the new ASMMemIsZero function.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 49.4 KB
1/* $Id: MMAllHyper.cpp 59747 2016-02-19 23:18:18Z vboxsync $ */
2/** @file
3 * MM - Memory Manager - Hypervisor Memory Area, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_MM_HYPER_HEAP
23#include <VBox/vmm/mm.h>
24#include <VBox/vmm/stam.h>
25#include "MMInternal.h"
26#include <VBox/vmm/vm.h>
27
28#include <VBox/err.h>
29#include <VBox/param.h>
30#include <iprt/assert.h>
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/string.h>
34
35
36/*********************************************************************************************************************************
37* Defined Constants And Macros *
38*********************************************************************************************************************************/
39#define ASSERT_L(u1, u2) AssertMsg((u1) < (u2), ("u1=%#x u2=%#x\n", u1, u2))
40#define ASSERT_LE(u1, u2) AssertMsg((u1) <= (u2), ("u1=%#x u2=%#x\n", u1, u2))
41#define ASSERT_GE(u1, u2) AssertMsg((u1) >= (u2), ("u1=%#x u2=%#x\n", u1, u2))
42#define ASSERT_ALIGN(u1) AssertMsg(!((u1) & (MMHYPER_HEAP_ALIGN_MIN - 1)), ("u1=%#x (%d)\n", u1, u1))
43
44#define ASSERT_OFFPREV(pHeap, pChunk) \
45 do { Assert(MMHYPERCHUNK_GET_OFFPREV(pChunk) <= 0); \
46 Assert(MMHYPERCHUNK_GET_OFFPREV(pChunk) >= (intptr_t)(pHeap)->CTX_SUFF(pbHeap) - (intptr_t)(pChunk)); \
47 AssertMsg( MMHYPERCHUNK_GET_OFFPREV(pChunk) != 0 \
48 || (uint8_t *)(pChunk) == (pHeap)->CTX_SUFF(pbHeap), \
49 ("pChunk=%p pvHyperHeap=%p\n", (pChunk), (pHeap)->CTX_SUFF(pbHeap))); \
50 } while (0)
51
52#define ASSERT_OFFNEXT(pHeap, pChunk) \
53 do { ASSERT_ALIGN((pChunk)->offNext); \
54 ASSERT_L((pChunk)->offNext, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
55 } while (0)
56
57#define ASSERT_OFFHEAP(pHeap, pChunk) \
58 do { Assert((pChunk)->offHeap); \
59 AssertMsg((PMMHYPERHEAP)((pChunk)->offHeap + (uintptr_t)pChunk) == (pHeap), \
60 ("offHeap=%RX32 pChunk=%p pHeap=%p\n", (pChunk)->offHeap, (pChunk), (pHeap))); \
61 Assert((pHeap)->u32Magic == MMHYPERHEAP_MAGIC); \
62 } while (0)
63
64#ifdef VBOX_WITH_STATISTICS
65#define ASSERT_OFFSTAT(pHeap, pChunk) \
66 do { if (MMHYPERCHUNK_ISFREE(pChunk)) \
67 Assert(!(pChunk)->offStat); \
68 else if ((pChunk)->offStat) \
69 { \
70 Assert((pChunk)->offStat); \
71 AssertMsg(!((pChunk)->offStat & (MMHYPER_HEAP_ALIGN_MIN - 1)), ("offStat=%RX32\n", (pChunk)->offStat)); \
72 uintptr_t uPtr = (uintptr_t)(pChunk)->offStat + (uintptr_t)pChunk; NOREF(uPtr); \
73 AssertMsg(uPtr - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) < (pHeap)->offPageAligned, \
74 ("%p - %p < %RX32\n", uPtr, (pHeap)->CTX_SUFF(pbHeap), (pHeap)->offPageAligned)); \
75 } \
76 } while (0)
77#else
78#define ASSERT_OFFSTAT(pHeap, pChunk) \
79 do { Assert(!(pChunk)->offStat); \
80 } while (0)
81#endif
82
83#define ASSERT_CHUNK(pHeap, pChunk) \
84 do { ASSERT_OFFNEXT(pHeap, pChunk); \
85 ASSERT_OFFPREV(pHeap, pChunk); \
86 ASSERT_OFFHEAP(pHeap, pChunk); \
87 ASSERT_OFFSTAT(pHeap, pChunk); \
88 } while (0)
89#define ASSERT_CHUNK_USED(pHeap, pChunk) \
90 do { ASSERT_OFFNEXT(pHeap, pChunk); \
91 ASSERT_OFFPREV(pHeap, pChunk); \
92 Assert(MMHYPERCHUNK_ISUSED(pChunk)); \
93 } while (0)
94
95#define ASSERT_FREE_OFFPREV(pHeap, pChunk) \
96 do { ASSERT_ALIGN((pChunk)->offPrev); \
97 ASSERT_GE(((pChunk)->offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)), (intptr_t)(pHeap)->CTX_SUFF(pbHeap) - (intptr_t)(pChunk)); \
98 Assert((pChunk)->offPrev != MMHYPERCHUNK_GET_OFFPREV(&(pChunk)->core) || !(pChunk)->offPrev); \
99 AssertMsg( (pChunk)->offPrev \
100 || (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) == (pHeap)->offFreeHead, \
101 ("pChunk=%p offChunk=%#x offFreeHead=%#x\n", (pChunk), (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap),\
102 (pHeap)->offFreeHead)); \
103 } while (0)
104
105#define ASSERT_FREE_OFFNEXT(pHeap, pChunk) \
106 do { ASSERT_ALIGN((pChunk)->offNext); \
107 ASSERT_L((pChunk)->offNext, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
108 Assert((pChunk)->offNext != (pChunk)->core.offNext || !(pChunk)->offNext); \
109 AssertMsg( (pChunk)->offNext \
110 || (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) == (pHeap)->offFreeTail, \
111 ("pChunk=%p offChunk=%#x offFreeTail=%#x\n", (pChunk), (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap), \
112 (pHeap)->offFreeTail)); \
113 } while (0)
114
115#define ASSERT_FREE_CB(pHeap, pChunk) \
116 do { ASSERT_ALIGN((pChunk)->cb); \
117 Assert((pChunk)->cb > 0); \
118 if ((pChunk)->core.offNext) \
119 AssertMsg((pChunk)->cb == ((pChunk)->core.offNext - sizeof(MMHYPERCHUNK)), \
120 ("cb=%d offNext=%d\n", (pChunk)->cb, (pChunk)->core.offNext)); \
121 else \
122 ASSERT_LE((pChunk)->cb, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
123 } while (0)
124
125#define ASSERT_CHUNK_FREE(pHeap, pChunk) \
126 do { ASSERT_CHUNK(pHeap, &(pChunk)->core); \
127 Assert(MMHYPERCHUNK_ISFREE(pChunk)); \
128 ASSERT_FREE_OFFNEXT(pHeap, pChunk); \
129 ASSERT_FREE_OFFPREV(pHeap, pChunk); \
130 ASSERT_FREE_CB(pHeap, pChunk); \
131 } while (0)
132
133
134/*********************************************************************************************************************************
135* Internal Functions *
136*********************************************************************************************************************************/
137static PMMHYPERCHUNK mmHyperAllocChunk(PMMHYPERHEAP pHeap, uint32_t cb, unsigned uAlignment);
138static void *mmHyperAllocPages(PMMHYPERHEAP pHeap, uint32_t cb);
139#ifdef VBOX_WITH_STATISTICS
140static PMMHYPERSTAT mmHyperStat(PMMHYPERHEAP pHeap, MMTAG enmTag);
141#ifdef IN_RING3
142static void mmR3HyperStatRegisterOne(PVM pVM, PMMHYPERSTAT pStat);
143#endif
144#endif
145static int mmHyperFree(PMMHYPERHEAP pHeap, PMMHYPERCHUNK pChunk);
146#ifdef MMHYPER_HEAP_STRICT
147static void mmHyperHeapCheck(PMMHYPERHEAP pHeap);
148#endif
149
150
151
152/**
153 * Locks the hypervisor heap.
154 * This might call back to Ring-3 in order to deal with lock contention in GC and R0.
155 *
156 * @param pVM The cross context VM structure.
157 */
158static int mmHyperLock(PVM pVM)
159{
160 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
161
162#ifdef IN_RING3
163 if (!PDMCritSectIsInitialized(&pHeap->Lock))
164 return VINF_SUCCESS; /* early init */
165#else
166 Assert(PDMCritSectIsInitialized(&pHeap->Lock));
167#endif
168 int rc = PDMCritSectEnter(&pHeap->Lock, VERR_SEM_BUSY);
169#if defined(IN_RC) || defined(IN_RING0)
170 if (rc == VERR_SEM_BUSY)
171 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_MMHYPER_LOCK, 0);
172#endif
173 AssertRC(rc);
174 return rc;
175}
176
177
178/**
179 * Unlocks the hypervisor heap.
180 *
181 * @param pVM The cross context VM structure.
182 */
183static void mmHyperUnlock(PVM pVM)
184{
185 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
186
187#ifdef IN_RING3
188 if (!PDMCritSectIsInitialized(&pHeap->Lock))
189 return; /* early init */
190#endif
191 Assert(PDMCritSectIsInitialized(&pHeap->Lock));
192 PDMCritSectLeave(&pHeap->Lock);
193}
194
195/**
196 * Allocates memory in the Hypervisor (RC VMM) area.
197 * The returned memory is of course zeroed.
198 *
199 * @returns VBox status code.
200 * @param pVM The cross context VM structure.
201 * @param cb Number of bytes to allocate.
202 * @param uAlignment Required memory alignment in bytes.
203 * Values are 0,8,16,32,64 and PAGE_SIZE.
204 * 0 -> default alignment, i.e. 8 bytes.
205 * @param enmTag The statistics tag.
206 * @param ppv Where to store the address to the allocated
207 * memory.
208 */
209static int mmHyperAllocInternal(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
210{
211 AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
212
213 /*
214 * Validate input and adjust it to reasonable values.
215 */
216 if (!uAlignment || uAlignment < MMHYPER_HEAP_ALIGN_MIN)
217 uAlignment = MMHYPER_HEAP_ALIGN_MIN;
218 uint32_t cbAligned;
219 switch (uAlignment)
220 {
221 case 8:
222 case 16:
223 case 32:
224 case 64:
225 cbAligned = RT_ALIGN_32(cb, MMHYPER_HEAP_ALIGN_MIN);
226 if (!cbAligned || cbAligned < cb)
227 {
228 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
229 AssertMsgFailed(("Nice try.\n"));
230 return VERR_INVALID_PARAMETER;
231 }
232 break;
233
234 case PAGE_SIZE:
235 AssertMsg(RT_ALIGN_32(cb, PAGE_SIZE) == cb, ("The size isn't page aligned. (cb=%#x)\n", cb));
236 cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
237 if (!cbAligned)
238 {
239 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
240 AssertMsgFailed(("Nice try.\n"));
241 return VERR_INVALID_PARAMETER;
242 }
243 break;
244
245 default:
246 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
247 AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
248 return VERR_INVALID_PARAMETER;
249 }
250
251
252 /*
253 * Get the heap and statistics record.
254 */
255 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
256#ifdef VBOX_WITH_STATISTICS
257 PMMHYPERSTAT pStat = mmHyperStat(pHeap, enmTag);
258 if (!pStat)
259 {
260 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
261 AssertMsgFailed(("Failed to allocate statistics!\n"));
262 return VERR_MM_HYPER_NO_MEMORY;
263 }
264#else
265 NOREF(enmTag);
266#endif
267 if (uAlignment < PAGE_SIZE)
268 {
269 /*
270 * Allocate a chunk.
271 */
272 PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, cbAligned, uAlignment);
273 if (pChunk)
274 {
275#ifdef VBOX_WITH_STATISTICS
276 const uint32_t cbChunk = pChunk->offNext
277 ? pChunk->offNext
278 : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
279 pStat->cbAllocated += (uint32_t)cbChunk;
280 pStat->cbCurAllocated += (uint32_t)cbChunk;
281 if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
282 pStat->cbMaxAllocated = pStat->cbCurAllocated;
283 pStat->cAllocations++;
284 pChunk->offStat = (uintptr_t)pStat - (uintptr_t)pChunk;
285#else
286 pChunk->offStat = 0;
287#endif
288 void *pv = pChunk + 1;
289 *ppv = pv;
290 ASMMemZero32(pv, cbAligned);
291 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv));
292 return VINF_SUCCESS;
293 }
294 }
295 else
296 {
297 /*
298 * Allocate page aligned memory.
299 */
300 void *pv = mmHyperAllocPages(pHeap, cbAligned);
301 if (pv)
302 {
303#ifdef VBOX_WITH_STATISTICS
304 pStat->cbAllocated += cbAligned;
305 pStat->cbCurAllocated += cbAligned;
306 if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
307 pStat->cbMaxAllocated = pStat->cbCurAllocated;
308 pStat->cAllocations++;
309#endif
310 *ppv = pv;
311 /* ASMMemZero32(pv, cbAligned); - not required since memory is alloc-only and SUPR3PageAlloc zeros it. */
312 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv));
313 return VINF_SUCCESS;
314 }
315 }
316
317#ifdef VBOX_WITH_STATISTICS
318 pStat->cAllocations++;
319 pStat->cFailures++;
320#endif
321 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
322 AssertMsgFailed(("Failed to allocate %d bytes!\n", cb));
323 return VERR_MM_HYPER_NO_MEMORY;
324}
325
326
327/**
328 * Wrapper for mmHyperAllocInternal
329 */
330VMMDECL(int) MMHyperAlloc(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
331{
332 int rc = mmHyperLock(pVM);
333 AssertRCReturn(rc, rc);
334
335 LogFlow(("MMHyperAlloc %x align=%x tag=%s\n", cb, uAlignment, mmGetTagName(enmTag)));
336
337 rc = mmHyperAllocInternal(pVM, cb, uAlignment, enmTag, ppv);
338
339 mmHyperUnlock(pVM);
340 return rc;
341}
342
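/* Editor's usage sketch (not part of the original file): a caller allocates a zeroed,
 * aligned block and releases it again with MMHyperFree(). MM_TAG_MM is assumed here as
 * an example statistics tag.
 *
 *     void *pv;
 *     int rc = MMHyperAlloc(pVM, 64, 16, MM_TAG_MM, &pv);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // pv points to 64 zeroed bytes with 16 byte alignment.
 *         MMHyperFree(pVM, pv);
 *     }
 */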
343
344/**
345 * Duplicates a block of memory.
346 *
347 * @returns VBox status code.
348 * @param pVM The cross context VM structure.
349 * @param pvSrc The source memory block to copy from.
350 * @param cb Size of the source memory block.
351 * @param uAlignment Required memory alignment in bytes.
352 * Values are 0,8,16,32,64 and PAGE_SIZE.
353 * 0 -> default alignment, i.e. 8 bytes.
354 * @param enmTag The statistics tag.
355 * @param ppv Where to store the address to the allocated
356 * memory.
357 */
358VMMDECL(int) MMHyperDupMem(PVM pVM, const void *pvSrc, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
359{
360 int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
361 if (RT_SUCCESS(rc))
362 memcpy(*ppv, pvSrc, cb);
363 return rc;
364}
365
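/* Editor's usage sketch (illustrative): duplicating an existing structure into the hyper
 * heap using the default 8 byte alignment; MYSTRUCT, Original and MM_TAG_MM are assumed
 * names, not identifiers from this file.
 *
 *     MYSTRUCT *pCopy;
 *     int rc = MMHyperDupMem(pVM, &Original, sizeof(Original), 0, MM_TAG_MM, (void **)&pCopy);
 */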
366
367/**
368 * Allocates a chunk of memory from the specified heap.
369 * The caller validates the parameters of this request.
370 *
371 * @returns Pointer to the allocated chunk.
372 * @returns NULL on failure.
373 * @param pHeap The heap.
374 * @param cb Size of the memory block to allocate.
375 * @param uAlignment The alignment specifications for the allocated block.
376 * @internal
377 */
378static PMMHYPERCHUNK mmHyperAllocChunk(PMMHYPERHEAP pHeap, uint32_t cb, unsigned uAlignment)
379{
380 Log3(("mmHyperAllocChunk: Enter cb=%#x uAlignment=%#x\n", cb, uAlignment));
381#ifdef MMHYPER_HEAP_STRICT
382 mmHyperHeapCheck(pHeap);
383#endif
384#ifdef MMHYPER_HEAP_STRICT_FENCE
385 uint32_t cbFence = RT_MAX(MMHYPER_HEAP_STRICT_FENCE_SIZE, uAlignment);
386 cb += cbFence;
387#endif
388
389 /*
390 * Check if there are any free chunks. (NIL_OFFSET use/not-use forces this check)
391 */
392 if (pHeap->offFreeHead == NIL_OFFSET)
393 return NULL;
394
395 /*
396 * Small alignments - from the front of the heap.
397 *
398 * Must split off free chunks at the end to prevent messing up the
399 * last free node, from the top of which the page aligned memory is taken.
400 */
401 PMMHYPERCHUNK pRet = NULL;
402 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeHead);
403 while (pFree)
404 {
405 ASSERT_CHUNK_FREE(pHeap, pFree);
406 if (pFree->cb >= cb)
407 {
408 unsigned offAlign = (uintptr_t)(&pFree->core + 1) & (uAlignment - 1);
409 if (offAlign)
410 offAlign = uAlignment - offAlign;
411 if (!offAlign || pFree->cb - offAlign >= cb)
412 {
413 Log3(("mmHyperAllocChunk: Using pFree=%p pFree->cb=%d offAlign=%d\n", pFree, pFree->cb, offAlign));
414
415 /*
416 * Adjust the node in front.
417 * Because of multiple alignments we need to special case allocation of the first block.
418 */
419 if (offAlign)
420 {
421 MMHYPERCHUNKFREE Free = *pFree;
422 if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
423 {
424 /* just add a bit of memory to it. */
425 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&Free.core));
426 pPrev->core.offNext += offAlign;
427 AssertMsg(!MMHYPERCHUNK_ISFREE(&pPrev->core), ("Impossible!\n"));
428 Log3(("mmHyperAllocChunk: Added %d bytes to %p\n", offAlign, pPrev));
429 }
430 else
431 {
432 /* make new head node, mark it USED for simplicity. */
433 PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)pHeap->CTX_SUFF(pbHeap);
434 Assert(pPrev == &pFree->core);
435 pPrev->offPrev = 0;
436 MMHYPERCHUNK_SET_TYPE(pPrev, MMHYPERCHUNK_FLAGS_USED);
437 pPrev->offNext = offAlign;
438 Log3(("mmHyperAllocChunk: Created new first node of %d bytes\n", offAlign));
439
440 }
441 Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - offAlign, -(int)offAlign));
442 pHeap->cbFree -= offAlign;
443
444 /* Recreate the pFree node, adjusting everything... */
445 pFree = (PMMHYPERCHUNKFREE)((char *)pFree + offAlign);
446 *pFree = Free;
447
448 pFree->cb -= offAlign;
449 if (pFree->core.offNext)
450 {
451 pFree->core.offNext -= offAlign;
452 PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pFree + pFree->core.offNext);
453 MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pFree->core.offNext);
454 ASSERT_CHUNK(pHeap, pNext);
455 }
456 if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
457 MMHYPERCHUNK_SET_OFFPREV(&pFree->core, MMHYPERCHUNK_GET_OFFPREV(&pFree->core) - offAlign);
458
459 if (pFree->offNext)
460 {
461 pFree->offNext -= offAlign;
462 PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
463 pNext->offPrev = -(int32_t)pFree->offNext;
464 ASSERT_CHUNK_FREE(pHeap, pNext);
465 }
466 else
467 pHeap->offFreeTail += offAlign;
468 if (pFree->offPrev)
469 {
470 pFree->offPrev -= offAlign;
471 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
472 pPrev->offNext = -pFree->offPrev;
473 ASSERT_CHUNK_FREE(pHeap, pPrev);
474 }
475 else
476 pHeap->offFreeHead += offAlign;
477 pFree->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pFree;
478 pFree->core.offStat = 0;
479 ASSERT_CHUNK_FREE(pHeap, pFree);
480 Log3(("mmHyperAllocChunk: Realigned pFree=%p\n", pFree));
481 }
482
483 /*
484 * Split off a new FREE chunk?
485 */
486 if (pFree->cb >= cb + RT_ALIGN(sizeof(MMHYPERCHUNKFREE), MMHYPER_HEAP_ALIGN_MIN))
487 {
488 /*
489 * Move the FREE chunk up to make room for the new USED chunk.
490 */
491 const int off = cb + sizeof(MMHYPERCHUNK);
492 PMMHYPERCHUNKFREE pNew = (PMMHYPERCHUNKFREE)((char *)&pFree->core + off);
493 *pNew = *pFree;
494 pNew->cb -= off;
495 if (pNew->core.offNext)
496 {
497 pNew->core.offNext -= off;
498 PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pNew + pNew->core.offNext);
499 MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pNew->core.offNext);
500 ASSERT_CHUNK(pHeap, pNext);
501 }
502 pNew->core.offPrev = -off;
503 MMHYPERCHUNK_SET_TYPE(pNew, MMHYPERCHUNK_FLAGS_FREE);
504
505 if (pNew->offNext)
506 {
507 pNew->offNext -= off;
508 PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offNext);
509 pNext->offPrev = -(int32_t)pNew->offNext;
510 ASSERT_CHUNK_FREE(pHeap, pNext);
511 }
512 else
513 pHeap->offFreeTail += off;
514 if (pNew->offPrev)
515 {
516 pNew->offPrev -= off;
517 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offPrev);
518 pPrev->offNext = -pNew->offPrev;
519 ASSERT_CHUNK_FREE(pHeap, pPrev);
520 }
521 else
522 pHeap->offFreeHead += off;
523 pNew->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pNew;
524 pNew->core.offStat = 0;
525 ASSERT_CHUNK_FREE(pHeap, pNew);
526
527 /*
528 * Update the old FREE node making it a USED node.
529 */
530 pFree->core.offNext = off;
531 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);
532
533
534 Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
535 pHeap->cbFree - (cb + sizeof(MMHYPERCHUNK)), -(int)(cb + sizeof(MMHYPERCHUNK))));
536 pHeap->cbFree -= (uint32_t)(cb + sizeof(MMHYPERCHUNK));
537 pRet = &pFree->core;
538 ASSERT_CHUNK(pHeap, &pFree->core);
539 Log3(("mmHyperAllocChunk: Created free chunk pNew=%p cb=%d\n", pNew, pNew->cb));
540 }
541 else
542 {
543 /*
544 * Link out of free list.
545 */
546 if (pFree->offNext)
547 {
548 PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
549 if (pFree->offPrev)
550 {
551 pNext->offPrev += pFree->offPrev;
552 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
553 pPrev->offNext += pFree->offNext;
554 ASSERT_CHUNK_FREE(pHeap, pPrev);
555 }
556 else
557 {
558 pHeap->offFreeHead += pFree->offNext;
559 pNext->offPrev = 0;
560 }
561 ASSERT_CHUNK_FREE(pHeap, pNext);
562 }
563 else
564 {
565 if (pFree->offPrev)
566 {
567 pHeap->offFreeTail += pFree->offPrev;
568 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
569 pPrev->offNext = 0;
570 ASSERT_CHUNK_FREE(pHeap, pPrev);
571 }
572 else
573 {
574 pHeap->offFreeHead = NIL_OFFSET;
575 pHeap->offFreeTail = NIL_OFFSET;
576 }
577 }
578
579 Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
580 pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
581 pHeap->cbFree -= pFree->cb;
582 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);
583 pRet = &pFree->core;
584 ASSERT_CHUNK(pHeap, &pFree->core);
585 Log3(("mmHyperAllocChunk: Converted free chunk %p to used chunk.\n", pFree));
586 }
587 Log3(("mmHyperAllocChunk: Returning %p\n", pRet));
588 break;
589 }
590 }
591
592 /* next */
593 pFree = pFree->offNext ? (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext) : NULL;
594 }
595
596#ifdef MMHYPER_HEAP_STRICT_FENCE
597 uint32_t *pu32End = (uint32_t *)((uint8_t *)(pRet + 1) + cb);
598 uint32_t *pu32EndReal = pRet->offNext
599 ? (uint32_t *)((uint8_t *)pRet + pRet->offNext)
600 : (uint32_t *)(pHeap->CTX_SUFF(pbHeap) + pHeap->cbHeap);
601 cbFence += (uintptr_t)pu32EndReal - (uintptr_t)pu32End; Assert(!(cbFence & 0x3));
602 ASMMemFill32((uint8_t *)pu32EndReal - cbFence, cbFence, MMHYPER_HEAP_STRICT_FENCE_U32);
603 pu32EndReal[-1] = cbFence;
604#endif
605#ifdef MMHYPER_HEAP_STRICT
606 mmHyperHeapCheck(pHeap);
607#endif
608 return pRet;
609}
610
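/* Editor's note on the chunk layout used above (derived from this function and from
 * mmHyperFreeInternal below): a used chunk is an MMHYPERCHUNK header immediately followed
 * by the caller's payload, while a free chunk reuses the payload area for the larger
 * MMHYPERCHUNKFREE bookkeeping.
 *
 *     void         *pv   = pChunk + 1;            // what the allocator hands out
 *     PMMHYPERCHUNK pHdr = (PMMHYPERCHUNK)pv - 1; // how the free path finds the header
 */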
611
612/**
613 * Allocates one or more pages of memory from the specified heap.
614 * The caller validates the parameters of this request.
615 *
616 * @returns Pointer to the allocated chunk.
617 * @returns NULL on failure.
618 * @param pHeap The heap.
619 * @param cb Size of the memory block to allocate.
620 * @internal
621 */
622static void *mmHyperAllocPages(PMMHYPERHEAP pHeap, uint32_t cb)
623{
624 Log3(("mmHyperAllocPages: Enter cb=%#x\n", cb));
625
626#ifdef MMHYPER_HEAP_STRICT
627 mmHyperHeapCheck(pHeap);
628#endif
629
630 /*
631 * Check if there are any free chunks. (NIL_OFFSET use/not-use forces this check)
632 */
633 if (pHeap->offFreeHead == NIL_OFFSET)
634 return NULL;
635
636 /*
637 * Page aligned chunks.
638 *
639 * Page aligned chunks can only be allocated from the last FREE chunk.
640 * This is for reasons of simplicity and fragmentation. Page aligned memory
641 * must also be allocated in page aligned sizes. Page aligned memory cannot
642 * be freed either.
643 *
644 * So, for this to work, the last FREE chunk needs to end on a page aligned
645 * boundary.
646 */
647 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeTail);
648 ASSERT_CHUNK_FREE(pHeap, pFree);
649 if ( (((uintptr_t)(&pFree->core + 1) + pFree->cb) & (PAGE_OFFSET_MASK - 1))
650 || pFree->cb + sizeof(MMHYPERCHUNK) < cb)
651 {
652 Log3(("mmHyperAllocPages: Not enough/no page aligned memory!\n"));
653 return NULL;
654 }
655
656 void *pvRet;
657 if (pFree->cb > cb)
658 {
659 /*
660 * Simple, just cut the top of the free node and return it.
661 */
662 pFree->cb -= cb;
663 pvRet = (char *)(&pFree->core + 1) + pFree->cb;
664 AssertMsg(RT_ALIGN_P(pvRet, PAGE_SIZE) == pvRet, ("pvRet=%p cb=%#x pFree=%p pFree->cb=%#x\n", pvRet, cb, pFree, pFree->cb));
665 Log3(("mmHyperAllocPages: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - cb, -(int)cb));
666 pHeap->cbFree -= cb;
667 ASSERT_CHUNK_FREE(pHeap, pFree);
668 Log3(("mmHyperAllocPages: Allocated from pFree=%p new pFree->cb=%d\n", pFree, pFree->cb));
669 }
670 else
671 {
672 /*
673 * Unlink the FREE node.
674 */
675 pvRet = (char *)(&pFree->core + 1) + pFree->cb - cb;
676 Log3(("mmHyperAllocPages: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
677 pHeap->cbFree -= pFree->cb;
678
679 /* A scrap of spare memory (unlikely)? Add it to the previous chunk. */
680 if (pvRet != (void *)pFree)
681 {
682 AssertMsg(MMHYPERCHUNK_GET_OFFPREV(&pFree->core), ("How the *beep* did someone manage to allocate up all the heap with page aligned memory?!?\n"));
683 PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&pFree->core));
684 pPrev->offNext += (uintptr_t)pvRet - (uintptr_t)pFree;
685 AssertMsg(!MMHYPERCHUNK_ISFREE(pPrev), ("Free bug?\n"));
686#ifdef VBOX_WITH_STATISTICS
687 PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pPrev + pPrev->offStat);
688 pStat->cbAllocated += (uintptr_t)pvRet - (uintptr_t)pFree;
689 pStat->cbCurAllocated += (uintptr_t)pvRet - (uintptr_t)pFree;
690#endif
691 Log3(("mmHyperAllocPages: Added %d to %p (page align)\n", (uintptr_t)pvRet - (uintptr_t)pFree, pFree));
692 }
693
694 /* unlink from FREE chain. */
695 if (pFree->offPrev)
696 {
697 pHeap->offFreeTail += pFree->offPrev;
698 ((PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev))->offNext = 0;
699 }
700 else
701 {
702 pHeap->offFreeTail = NIL_OFFSET;
703 pHeap->offFreeHead = NIL_OFFSET;
704 }
705 Log3(("mmHyperAllocPages: Unlinked pFree=%p\n", pFree));
706 }
707 pHeap->offPageAligned = (uintptr_t)pvRet - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
708 Log3(("mmHyperAllocPages: Returning %p (page aligned)\n", pvRet));
709
710#ifdef MMHYPER_HEAP_STRICT
711 mmHyperHeapCheck(pHeap);
712#endif
713 return pvRet;
714}
715
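/* Editor's note: the page aligned path above is reached through MMHyperAlloc() with
 * uAlignment == PAGE_SIZE and a page multiple size, e.g. (MM_TAG_MM assumed as the tag):
 *
 *     int rc = MMHyperAlloc(pVM, 2 * PAGE_SIZE, PAGE_SIZE, MM_TAG_MM, &pv);
 *
 * Such blocks are carved off the top of the last free chunk and can never be freed again.
 */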
716#ifdef VBOX_WITH_STATISTICS
717
718/**
719 * Get the statistic record for a tag.
720 *
721 * @returns Pointer to a stat record.
722 * @returns NULL on failure.
723 * @param pHeap The heap.
724 * @param enmTag The tag.
725 */
726static PMMHYPERSTAT mmHyperStat(PMMHYPERHEAP pHeap, MMTAG enmTag)
727{
728 /* Try to look it up first. */
729 PMMHYPERSTAT pStat = (PMMHYPERSTAT)RTAvloGCPhysGet(&pHeap->HyperHeapStatTree, enmTag);
730 if (!pStat)
731 {
732 /* Try to allocate a new one. */
733 PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, RT_ALIGN(sizeof(*pStat), MMHYPER_HEAP_ALIGN_MIN), MMHYPER_HEAP_ALIGN_MIN);
734 if (!pChunk)
735 return NULL;
736 pStat = (PMMHYPERSTAT)(pChunk + 1);
737 pChunk->offStat = (uintptr_t)pStat - (uintptr_t)pChunk;
738
739 ASMMemZero32(pStat, sizeof(*pStat));
740 pStat->Core.Key = enmTag;
741 RTAvloGCPhysInsert(&pHeap->HyperHeapStatTree, &pStat->Core);
742 }
743 if (!pStat->fRegistered)
744 {
745# ifdef IN_RING3
746 mmR3HyperStatRegisterOne(pHeap->pVMR3, pStat);
747# else
748 /** @todo schedule a R3 action. */
749# endif
750 }
751 return pStat;
752}
753
754
755# ifdef IN_RING3
756/**
757 * Registers statistics with STAM.
758 *
759 */
760static void mmR3HyperStatRegisterOne(PVM pVM, PMMHYPERSTAT pStat)
761{
762 if (pStat->fRegistered)
763 return;
764 const char *pszTag = mmGetTagName((MMTAG)pStat->Core.Key);
765 STAMR3RegisterF(pVM, &pStat->cbCurAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Number of bytes currently allocated.", "/MM/HyperHeap/%s", pszTag);
766 STAMR3RegisterF(pVM, &pStat->cAllocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of alloc calls.", "/MM/HyperHeap/%s/cAllocations", pszTag);
767 STAMR3RegisterF(pVM, &pStat->cFrees, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of free calls.", "/MM/HyperHeap/%s/cFrees", pszTag);
768 STAMR3RegisterF(pVM, &pStat->cFailures, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of failures.", "/MM/HyperHeap/%s/cFailures", pszTag);
769 STAMR3RegisterF(pVM, &pStat->cbAllocated, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of allocated bytes.", "/MM/HyperHeap/%s/cbAllocated", pszTag);
770 STAMR3RegisterF(pVM, &pStat->cbFreed, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of freed bytes.", "/MM/HyperHeap/%s/cbFreed", pszTag);
771 STAMR3RegisterF(pVM, &pStat->cbMaxAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Max number of bytes allocated at the same time.","/MM/HyperHeap/%s/cbMaxAllocated", pszTag);
772 pStat->fRegistered = true;
773}
774# endif /* IN_RING3 */
775
776#endif /* VBOX_WITH_STATISTICS */
777
778
779/**
780 * Free memory allocated using MMHyperAlloc().
781 * The caller validates the parameters of this request.
782 *
783 * @returns VBox status code.
784 * @param pVM The cross context VM structure.
785 * @param pv The memory to free.
786 * @remark Try to avoid freeing hyper memory.
787 */
788static int mmHyperFreeInternal(PVM pVM, void *pv)
789{
790 Log2(("MMHyperFree: pv=%p\n", pv));
791 if (!pv)
792 return VINF_SUCCESS;
793 AssertMsgReturn(RT_ALIGN_P(pv, MMHYPER_HEAP_ALIGN_MIN) == pv,
794 ("Invalid pointer %p!\n", pv),
795 VERR_INVALID_POINTER);
796
797 /*
798 * Get the heap and stats.
799 * Validate the chunk at the same time.
800 */
801 PMMHYPERCHUNK pChunk = (PMMHYPERCHUNK)((PMMHYPERCHUNK)pv - 1);
802
803 AssertMsgReturn( (uintptr_t)pChunk + pChunk->offNext >= (uintptr_t)pChunk
804 || RT_ALIGN_32(pChunk->offNext, MMHYPER_HEAP_ALIGN_MIN) != pChunk->offNext,
805 ("%p: offNext=%#RX32\n", pv, pChunk->offNext),
806 VERR_INVALID_POINTER);
807
808 AssertMsgReturn(MMHYPERCHUNK_ISUSED(pChunk),
809 ("%p: Not used!\n", pv),
810 VERR_INVALID_POINTER);
811
812 int32_t offPrev = MMHYPERCHUNK_GET_OFFPREV(pChunk);
813 AssertMsgReturn( (uintptr_t)pChunk + offPrev <= (uintptr_t)pChunk
814 && !((uint32_t)-offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)),
815 ("%p: offPrev=%#RX32!\n", pv, offPrev),
816 VERR_INVALID_POINTER);
817
818 /* statistics */
819#ifdef VBOX_WITH_STATISTICS
820 PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pChunk + pChunk->offStat);
821 AssertMsgReturn( RT_ALIGN_P(pStat, MMHYPER_HEAP_ALIGN_MIN) == (void *)pStat
822 && pChunk->offStat,
823 ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
824 VERR_INVALID_POINTER);
825#else
826 AssertMsgReturn(!pChunk->offStat,
827 ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
828 VERR_INVALID_POINTER);
829#endif
830
831 /* The heap structure. */
832 PMMHYPERHEAP pHeap = (PMMHYPERHEAP)((uintptr_t)pChunk + pChunk->offHeap);
833 AssertMsgReturn( !((uintptr_t)pHeap & PAGE_OFFSET_MASK)
834 && pChunk->offHeap,
835 ("%p: pHeap=%#x offHeap=%RX32\n", pv, pHeap->u32Magic, pChunk->offHeap),
836 VERR_INVALID_POINTER);
837
838 AssertMsgReturn(pHeap->u32Magic == MMHYPERHEAP_MAGIC,
839 ("%p: u32Magic=%#x\n", pv, pHeap->u32Magic),
840 VERR_INVALID_POINTER);
841 Assert(pHeap == pVM->mm.s.CTX_SUFF(pHyperHeap)); NOREF(pVM);
842
843 /* Some more verifications using additional info from pHeap. */
844 AssertMsgReturn((uintptr_t)pChunk + offPrev >= (uintptr_t)pHeap->CTX_SUFF(pbHeap),
845 ("%p: offPrev=%#RX32!\n", pv, offPrev),
846 VERR_INVALID_POINTER);
847
848 AssertMsgReturn(pChunk->offNext < pHeap->cbHeap,
849 ("%p: offNext=%#RX32!\n", pv, pChunk->offNext),
850 VERR_INVALID_POINTER);
851
852 AssertMsgReturn( (uintptr_t)pv - (uintptr_t)pHeap->CTX_SUFF(pbHeap) <= pHeap->offPageAligned,
853 ("Invalid pointer %p! (heap: %p-%p)\n", pv, pHeap->CTX_SUFF(pbHeap),
854 (char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned),
855 VERR_INVALID_POINTER);
856
857#ifdef MMHYPER_HEAP_STRICT
858 mmHyperHeapCheck(pHeap);
859#endif
860
861#if defined(VBOX_WITH_STATISTICS) || defined(MMHYPER_HEAP_FREE_POISON)
862 /* calc block size. */
863 const uint32_t cbChunk = pChunk->offNext
864 ? pChunk->offNext
865 : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
866#endif
867#ifdef MMHYPER_HEAP_FREE_POISON
868 /* poison the block */
869 memset(pChunk + 1, MMHYPER_HEAP_FREE_POISON, cbChunk - sizeof(*pChunk));
870#endif
871
872#ifdef MMHYPER_HEAP_FREE_DELAY
873# ifdef MMHYPER_HEAP_FREE_POISON
874 /*
875 * Check poison.
876 */
877 unsigned i = RT_ELEMENTS(pHeap->aDelayedFrees);
878 while (i-- > 0)
879 if (pHeap->aDelayedFrees[i].offChunk)
880 {
881 PMMHYPERCHUNK pCur = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[i].offChunk);
882 const size_t cb = pCur->offNext
883 ? pCur->offNext - sizeof(*pCur)
884 : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pCur - sizeof(*pCur);
885 uint8_t *pab = (uint8_t *)(pCur + 1);
886 for (unsigned off = 0; off < cb; off++)
887 AssertReleaseMsg(pab[off] == 0xCB,
888 ("caller=%RTptr cb=%#zx off=%#x: %.*Rhxs\n",
889 pHeap->aDelayedFrees[i].uCaller, cb, off, RT_MIN(cb - off, 32), &pab[off]));
890 }
891# endif /* MMHYPER_HEAP_FREE_POISON */
892
893 /*
894 * Delayed freeing.
895 */
896 int rc = VINF_SUCCESS;
897 if (pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk)
898 {
899 PMMHYPERCHUNK pChunkFree = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk);
900 rc = mmHyperFree(pHeap, pChunkFree);
901 }
902 pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk = (uintptr_t)pChunk - (uintptr_t)pHeap;
903 pHeap->aDelayedFrees[pHeap->iDelayedFree].uCaller = (uintptr_t)ASMReturnAddress();
904 pHeap->iDelayedFree = (pHeap->iDelayedFree + 1) % RT_ELEMENTS(pHeap->aDelayedFrees);
905
906#else /* !MMHYPER_HEAP_FREE_DELAY */
907 /*
908 * Call the worker.
909 */
910 int rc = mmHyperFree(pHeap, pChunk);
911#endif /* !MMHYPER_HEAP_FREE_DELAY */
912
913 /*
914 * Update statistics.
915 */
916#ifdef VBOX_WITH_STATISTICS
917 pStat->cFrees++;
918 if (RT_SUCCESS(rc))
919 {
920 pStat->cbFreed += cbChunk;
921 pStat->cbCurAllocated -= cbChunk;
922 }
923 else
924 pStat->cFailures++;
925#endif
926
927 return rc;
928}
929
930
931/**
932 * Wrapper for mmHyperFreeInternal
933 */
934VMMDECL(int) MMHyperFree(PVM pVM, void *pv)
935{
936 int rc;
937
938 rc = mmHyperLock(pVM);
939 AssertRCReturn(rc, rc);
940
941 LogFlow(("MMHyperFree %p\n", pv));
942
943 rc = mmHyperFreeInternal(pVM, pv);
944
945 mmHyperUnlock(pVM);
946 return rc;
947}
948
949
950/**
951 * Frees a memory chunk.
952 *
953 * @returns VBox status code.
954 * @param pHeap The heap.
955 * @param pChunk The memory chunk to free.
956 */
957static int mmHyperFree(PMMHYPERHEAP pHeap, PMMHYPERCHUNK pChunk)
958{
959 Log3(("mmHyperFree: Enter pHeap=%p pChunk=%p\n", pHeap, pChunk));
960 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pChunk;
961
962 /*
963 * Insert into the free list (which is sorted on address).
964 *
965 * We'll search towards the end of the heap to locate the
966 * closest FREE chunk.
967 */
968 PMMHYPERCHUNKFREE pLeft = NULL;
969 PMMHYPERCHUNKFREE pRight = NULL;
970 if (pHeap->offFreeTail != NIL_OFFSET)
971 {
972 if (pFree->core.offNext)
973 {
974 pRight = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->core.offNext);
975 ASSERT_CHUNK(pHeap, &pRight->core);
976 while (!MMHYPERCHUNK_ISFREE(&pRight->core))
977 {
978 if (!pRight->core.offNext)
979 {
980 pRight = NULL;
981 break;
982 }
983 pRight = (PMMHYPERCHUNKFREE)((char *)pRight + pRight->core.offNext);
984 ASSERT_CHUNK(pHeap, &pRight->core);
985 }
986 }
987 if (!pRight)
988 pRight = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeTail); /** @todo this can't be correct! 'pLeft = .. ; else' I think */
989 if (pRight)
990 {
991 ASSERT_CHUNK_FREE(pHeap, pRight);
992 if (pRight->offPrev)
993 {
994 pLeft = (PMMHYPERCHUNKFREE)((char *)pRight + pRight->offPrev);
995 ASSERT_CHUNK_FREE(pHeap, pLeft);
996 }
997 }
998 }
999 if (pLeft == pFree)
1000 {
1001 AssertMsgFailed(("Freed twice! pv=%p (pChunk=%p)\n", pChunk + 1, pChunk));
1002 return VERR_INVALID_POINTER;
1003 }
1004 pChunk->offStat = 0;
1005
1006 /*
1007 * Head free chunk list?
1008 */
1009 if (!pLeft)
1010 {
1011 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
1012 pFree->offPrev = 0;
1013 pHeap->offFreeHead = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
1014 if (pRight)
1015 {
1016 pFree->offNext = (uintptr_t)pRight - (uintptr_t)pFree;
1017 pRight->offPrev = -(int32_t)pFree->offNext;
1018 }
1019 else
1020 {
1021 pFree->offNext = 0;
1022 pHeap->offFreeTail = pHeap->offFreeHead;
1023 }
1024 Log3(("mmHyperFree: Inserted %p at head of free chain.\n", pFree));
1025 }
1026 else
1027 {
1028 /*
1029 * Can we merge with left hand free chunk?
1030 */
1031 if ((char *)pLeft + pLeft->core.offNext == (char *)pFree)
1032 {
1033 if (pFree->core.offNext)
1034 {
1035 pLeft->core.offNext = pLeft->core.offNext + pFree->core.offNext;
1036 MMHYPERCHUNK_SET_OFFPREV(((PMMHYPERCHUNK)((char *)pLeft + pLeft->core.offNext)), -(int32_t)pLeft->core.offNext);
1037 }
1038 else
1039 pLeft->core.offNext = 0;
1040 pFree = pLeft;
1041 Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pLeft->cb, -(int32_t)pLeft->cb));
1042 pHeap->cbFree -= pLeft->cb;
1043 Log3(("mmHyperFree: Merging %p into %p (cb=%d).\n", pFree, pLeft, pLeft->cb));
1044 }
1045 /*
1046 * No, just link it into the free list then.
1047 */
1048 else
1049 {
1050 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
1051 pFree->offPrev = (uintptr_t)pLeft - (uintptr_t)pFree;
1052 pLeft->offNext = -pFree->offPrev;
1053 if (pRight)
1054 {
1055 pFree->offNext = (uintptr_t)pRight - (uintptr_t)pFree;
1056 pRight->offPrev = -(int32_t)pFree->offNext;
1057 }
1058 else
1059 {
1060 pFree->offNext = 0;
1061 pHeap->offFreeTail = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
1062 }
1063 Log3(("mmHyperFree: Inserted %p after %p in free list.\n", pFree, pLeft));
1064 }
1065 }
1066
1067 /*
1068 * Can we merge with right hand free chunk?
1069 */
1070 if (pRight && (char *)pRight == (char *)pFree + pFree->core.offNext)
1071 {
1072 /* core */
1073 if (pRight->core.offNext)
1074 {
1075 pFree->core.offNext += pRight->core.offNext;
1076 PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pFree + pFree->core.offNext);
1077 MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pFree->core.offNext);
1078 ASSERT_CHUNK(pHeap, pNext);
1079 }
1080 else
1081 pFree->core.offNext = 0;
1082
1083 /* free */
1084 if (pRight->offNext)
1085 {
1086 pFree->offNext += pRight->offNext;
1087 ((PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext))->offPrev = -(int32_t)pFree->offNext;
1088 }
1089 else
1090 {
1091 pFree->offNext = 0;
1092 pHeap->offFreeTail = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
1093 }
1094 Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pRight->cb, -(int32_t)pRight->cb));
1095 pHeap->cbFree -= pRight->cb;
1096 Log3(("mmHyperFree: Merged %p (cb=%d) into %p.\n", pRight, pRight->cb, pFree));
1097 }
1098
1099 /* calculate the size. */
1100 if (pFree->core.offNext)
1101 pFree->cb = pFree->core.offNext - sizeof(MMHYPERCHUNK);
1102 else
1103 pFree->cb = pHeap->offPageAligned - ((uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap)) - sizeof(MMHYPERCHUNK);
1104 Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree + pFree->cb, pFree->cb));
1105 pHeap->cbFree += pFree->cb;
1106 ASSERT_CHUNK_FREE(pHeap, pFree);
1107
1108#ifdef MMHYPER_HEAP_STRICT
1109 mmHyperHeapCheck(pHeap);
1110#endif
1111 return VINF_SUCCESS;
1112}
1113
1114
1115#if defined(DEBUG) || defined(MMHYPER_HEAP_STRICT_FENCE)
1116/**
1117 * Dumps a heap chunk to the log.
1118 *
1119 * @param pHeap Pointer to the heap.
1120 * @param pCur Pointer to the chunk.
1121 */
1122static void mmHyperHeapDumpOne(PMMHYPERHEAP pHeap, PMMHYPERCHUNKFREE pCur)
1123{
1124 if (MMHYPERCHUNK_ISUSED(&pCur->core))
1125 {
1126 if (pCur->core.offStat)
1127 {
1128 PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pCur + pCur->core.offStat);
1129 const char *pszSelf = pCur->core.offStat == sizeof(MMHYPERCHUNK) ? " stat record" : "";
1130#ifdef IN_RING3
1131 Log(("%p %06x USED offNext=%06x offPrev=-%06x %s%s\n",
1132 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1133 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
1134 mmGetTagName((MMTAG)pStat->Core.Key), pszSelf));
1135#else
1136 Log(("%p %06x USED offNext=%06x offPrev=-%06x %d%s\n",
1137 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1138 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
1139 (MMTAG)pStat->Core.Key, pszSelf));
1140#endif
1141 NOREF(pStat); NOREF(pszSelf);
1142 }
1143 else
1144 Log(("%p %06x USED offNext=%06x offPrev=-%06x\n",
1145 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1146 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core)));
1147 }
1148 else
1149 Log(("%p %06x FREE offNext=%06x offPrev=-%06x : cb=%06x offNext=%06x offPrev=-%06x\n",
1150 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1151 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core), pCur->cb, pCur->offNext, pCur->offPrev));
1152}
1153#endif /* DEBUG || MMHYPER_HEAP_STRICT_FENCE */
1154
1155
1156#ifdef MMHYPER_HEAP_STRICT
1157/**
1158 * Internal consistency check.
1159 */
1160static void mmHyperHeapCheck(PMMHYPERHEAP pHeap)
1161{
1162 PMMHYPERCHUNKFREE pPrev = NULL;
1163 PMMHYPERCHUNKFREE pCur = (PMMHYPERCHUNKFREE)pHeap->CTX_SUFF(pbHeap);
1164 for (;;)
1165 {
1166 if (MMHYPERCHUNK_ISUSED(&pCur->core))
1167 ASSERT_CHUNK_USED(pHeap, &pCur->core);
1168 else
1169 ASSERT_CHUNK_FREE(pHeap, pCur);
1170 if (pPrev)
1171 AssertMsg((int32_t)pPrev->core.offNext == -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
1172 ("pPrev->core.offNext=%d offPrev=%d\n", pPrev->core.offNext, MMHYPERCHUNK_GET_OFFPREV(&pCur->core)));
1173
1174# ifdef MMHYPER_HEAP_STRICT_FENCE
1175 uint32_t off = (uint8_t *)pCur - pHeap->CTX_SUFF(pbHeap);
1176 if ( MMHYPERCHUNK_ISUSED(&pCur->core)
1177 && off < pHeap->offPageAligned)
1178 {
1179 uint32_t cbCur = pCur->core.offNext
1180 ? pCur->core.offNext
1181 : pHeap->cbHeap - off;
1182 uint32_t *pu32End = ((uint32_t *)((uint8_t *)pCur + cbCur));
1183 uint32_t cbFence = pu32End[-1];
1184 if (RT_UNLIKELY( cbFence >= cbCur - sizeof(*pCur)
1185 || cbFence < MMHYPER_HEAP_STRICT_FENCE_SIZE))
1186 {
1187 mmHyperHeapDumpOne(pHeap, pCur);
1188 Assert(cbFence < cbCur - sizeof(*pCur));
1189 Assert(cbFence >= MMHYPER_HEAP_STRICT_FENCE_SIZE);
1190 }
1191
1192 uint32_t *pu32Bad = ASMMemFirstMismatchingU32((uint8_t *)pu32End - cbFence, cbFence - sizeof(uint32_t), MMHYPER_HEAP_STRICT_FENCE_U32);
1193 if (RT_UNLIKELY(pu32Bad))
1194 {
1195 mmHyperHeapDumpOne(pHeap, pCur);
1196 Assert(!pu32Bad);
1197 }
1198 }
1199# endif
1200
1201 /* next */
1202 if (!pCur->core.offNext)
1203 break;
1204 pPrev = pCur;
1205 pCur = (PMMHYPERCHUNKFREE)((char *)pCur + pCur->core.offNext);
1206 }
1207}
1208#endif
1209
1210
1211/**
1212 * Performs consistency checks on the heap if MMHYPER_HEAP_STRICT was
1213 * defined at build time.
1214 *
1215 * @param pVM The cross context VM structure.
1216 */
1217VMMDECL(void) MMHyperHeapCheck(PVM pVM)
1218{
1219#ifdef MMHYPER_HEAP_STRICT
1220 int rc;
1221
1222 rc = mmHyperLock(pVM);
1223 AssertRC(rc);
1224 mmHyperHeapCheck(pVM->mm.s.CTX_SUFF(pHyperHeap));
1225 mmHyperUnlock(pVM);
1226#else
1227 NOREF(pVM);
1228#endif
1229}
1230
1231
1232#ifdef DEBUG
1233/**
1234 * Dumps the hypervisor heap to Log.
1235 * @param pVM The cross context VM structure.
1236 */
1237VMMDECL(void) MMHyperHeapDump(PVM pVM)
1238{
1239 Log(("MMHyperHeapDump: *** heap dump - start ***\n"));
1240 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
1241 PMMHYPERCHUNKFREE pCur = (PMMHYPERCHUNKFREE)pHeap->CTX_SUFF(pbHeap);
1242 for (;;)
1243 {
1244 mmHyperHeapDumpOne(pHeap, pCur);
1245
1246 /* next */
1247 if (!pCur->core.offNext)
1248 break;
1249 pCur = (PMMHYPERCHUNKFREE)((char *)pCur + pCur->core.offNext);
1250 }
1251 Log(("MMHyperHeapDump: *** heap dump - end ***\n"));
1252}
1253#endif
1254
1255
1256/**
1257 * Query the amount of free memory in the hypervisor heap.
1258 *
1259 * @returns Number of free bytes in the hypervisor heap.
1260 */
1261VMMDECL(size_t) MMHyperHeapGetFreeSize(PVM pVM)
1262{
1263 return pVM->mm.s.CTX_SUFF(pHyperHeap)->cbFree;
1264}
1265
1266
1267/**
1268 * Query the size of the hypervisor heap.
1269 *
1270 * @returns The size of the hypervisor heap in bytes.
1271 */
1272VMMDECL(size_t) MMHyperHeapGetSize(PVM pVM)
1273{
1274 return pVM->mm.s.CTX_SUFF(pHyperHeap)->cbHeap;
1275}
1276
1277
1278/**
1279 * Converts a context neutral heap offset into a pointer.
1280 *
1281 * @returns Pointer to hyper heap data.
1282 * @param pVM The cross context VM structure.
1283 * @param offHeap The hyper heap offset.
1284 */
1285VMMDECL(void *) MMHyperHeapOffsetToPtr(PVM pVM, uint32_t offHeap)
1286{
1287 Assert(offHeap - MMYPERHEAP_HDR_SIZE <= pVM->mm.s.CTX_SUFF(pHyperHeap)->cbHeap);
1288 return (uint8_t *)pVM->mm.s.CTX_SUFF(pHyperHeap) + offHeap;
1289}
1290
1291
1292/**
1293 * Converts a context specific heap pointer into a neutral heap offset.
1294 *
1295 * @returns Heap offset.
1296 * @param pVM The cross context VM structure.
1297 * @param pv Pointer to the heap data.
1298 */
1299VMMDECL(uint32_t) MMHyperHeapPtrToOffset(PVM pVM, void *pv)
1300{
1301 size_t offHeap = (uint8_t *)pv - (uint8_t *)pVM->mm.s.CTX_SUFF(pHyperHeap);
1302 Assert(offHeap - MMYPERHEAP_HDR_SIZE <= pVM->mm.s.CTX_SUFF(pHyperHeap)->cbHeap);
1303 return (uint32_t)offHeap;
1304}
1305
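/* Editor's usage sketch: the offset form is context neutral, so within any one context
 * the two conversions above round-trip exactly.
 *
 *     uint32_t offHeap = MMHyperHeapPtrToOffset(pVM, pv);
 *     void    *pv2     = MMHyperHeapOffsetToPtr(pVM, offHeap);
 *     Assert(pv2 == pv);
 */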
1306
1307/**
1308 * Query the address and size of the hypervisor memory area.
1309 *
1310 * @returns Base address of the hypervisor area.
1311 * @param pVM The cross context VM structure.
1312 * @param pcb Where to store the size of the hypervisor area. (out)
1313 */
1314VMMDECL(RTGCPTR) MMHyperGetArea(PVM pVM, size_t *pcb)
1315{
1316 if (pcb)
1317 *pcb = pVM->mm.s.cbHyperArea;
1318 return pVM->mm.s.pvHyperAreaGC;
1319}
1320
1321
1322/**
1323 * Checks if an address is within the hypervisor memory area.
1324 *
1325 * @returns true if inside.
1326 * @returns false if outside.
1327 * @param pVM The cross context VM structure.
1328 * @param GCPtr The pointer to check.
1329 */
1330VMMDECL(bool) MMHyperIsInsideArea(PVM pVM, RTGCPTR GCPtr)
1331{
1332 return (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea;
1333}
1334