VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/MMAllHyper.cpp@93554

Last change on this file since 93554 was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page size doesn't need to be the same any more. Some work left to do in the page pool code. bugref:9898

1/* $Id: MMAllHyper.cpp 93554 2022-02-02 22:57:02Z vboxsync $ */
2/** @file
3 * MM - Memory Manager - Hypervisor Memory Area, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_MM_HYPER_HEAP
23#include <VBox/vmm/mm.h>
24#include <VBox/vmm/stam.h>
25#include "MMInternal.h"
26#include <VBox/vmm/vmcc.h>
27
28#include <VBox/err.h>
29#include <VBox/param.h>
30#include <iprt/assert.h>
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/string.h>
34
35
36/*********************************************************************************************************************************
37* Defined Constants And Macros *
38*********************************************************************************************************************************/
39#define ASSERT_L(u1, u2) AssertMsg((u1) < (u2), ("u1=%#x u2=%#x\n", u1, u2))
40#define ASSERT_LE(u1, u2) AssertMsg((u1) <= (u2), ("u1=%#x u2=%#x\n", u1, u2))
41#define ASSERT_GE(u1, u2) AssertMsg((u1) >= (u2), ("u1=%#x u2=%#x\n", u1, u2))
42#define ASSERT_ALIGN(u1) AssertMsg(!((u1) & (MMHYPER_HEAP_ALIGN_MIN - 1)), ("u1=%#x (%d)\n", u1, u1))
43
44#define ASSERT_OFFPREV(pHeap, pChunk) \
45 do { Assert(MMHYPERCHUNK_GET_OFFPREV(pChunk) <= 0); \
46 Assert(MMHYPERCHUNK_GET_OFFPREV(pChunk) >= (intptr_t)(pHeap)->CTX_SUFF(pbHeap) - (intptr_t)(pChunk)); \
47 AssertMsg( MMHYPERCHUNK_GET_OFFPREV(pChunk) != 0 \
48 || (uint8_t *)(pChunk) == (pHeap)->CTX_SUFF(pbHeap), \
49 ("pChunk=%p pvHyperHeap=%p\n", (pChunk), (pHeap)->CTX_SUFF(pbHeap))); \
50 } while (0)
51
52#define ASSERT_OFFNEXT(pHeap, pChunk) \
53 do { ASSERT_ALIGN((pChunk)->offNext); \
54 ASSERT_L((pChunk)->offNext, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
55 } while (0)
56
57#define ASSERT_OFFHEAP(pHeap, pChunk) \
58 do { Assert((pChunk)->offHeap); \
59 AssertMsg((PMMHYPERHEAP)((pChunk)->offHeap + (uintptr_t)pChunk) == (pHeap), \
60 ("offHeap=%RX32 pChunk=%p pHeap=%p\n", (pChunk)->offHeap, (pChunk), (pHeap))); \
61 Assert((pHeap)->u32Magic == MMHYPERHEAP_MAGIC); \
62 } while (0)
63
64#ifdef VBOX_WITH_STATISTICS
65#define ASSERT_OFFSTAT(pHeap, pChunk) \
66 do { if (MMHYPERCHUNK_ISFREE(pChunk)) \
67 Assert(!(pChunk)->offStat); \
68 else if ((pChunk)->offStat) \
69 { \
70 Assert((pChunk)->offStat); \
71 AssertMsg(!((pChunk)->offStat & (MMHYPER_HEAP_ALIGN_MIN - 1)), ("offStat=%RX32\n", (pChunk)->offStat)); \
72 uintptr_t uPtr = (uintptr_t)(pChunk)->offStat + (uintptr_t)pChunk; NOREF(uPtr); \
73 AssertMsg(uPtr - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) < (pHeap)->offPageAligned, \
74 ("%p - %p < %RX32\n", uPtr, (pHeap)->CTX_SUFF(pbHeap), (pHeap)->offPageAligned)); \
75 } \
76 } while (0)
77#else
78#define ASSERT_OFFSTAT(pHeap, pChunk) \
79 do { Assert(!(pChunk)->offStat); \
80 } while (0)
81#endif
82
83#define ASSERT_CHUNK(pHeap, pChunk) \
84 do { ASSERT_OFFNEXT(pHeap, pChunk); \
85 ASSERT_OFFPREV(pHeap, pChunk); \
86 ASSERT_OFFHEAP(pHeap, pChunk); \
87 ASSERT_OFFSTAT(pHeap, pChunk); \
88 } while (0)
89#define ASSERT_CHUNK_USED(pHeap, pChunk) \
90 do { ASSERT_OFFNEXT(pHeap, pChunk); \
91 ASSERT_OFFPREV(pHeap, pChunk); \
92 Assert(MMHYPERCHUNK_ISUSED(pChunk)); \
93 } while (0)
94
95#define ASSERT_FREE_OFFPREV(pHeap, pChunk) \
96 do { ASSERT_ALIGN((pChunk)->offPrev); \
97 ASSERT_GE(((pChunk)->offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)), (intptr_t)(pHeap)->CTX_SUFF(pbHeap) - (intptr_t)(pChunk)); \
98 Assert((pChunk)->offPrev != MMHYPERCHUNK_GET_OFFPREV(&(pChunk)->core) || !(pChunk)->offPrev); \
99 AssertMsg( (pChunk)->offPrev \
100 || (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) == (pHeap)->offFreeHead, \
101 ("pChunk=%p offChunk=%#x offFreeHead=%#x\n", (pChunk), (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap),\
102 (pHeap)->offFreeHead)); \
103 } while (0)
104
105#define ASSERT_FREE_OFFNEXT(pHeap, pChunk) \
106 do { ASSERT_ALIGN((pChunk)->offNext); \
107 ASSERT_L((pChunk)->offNext, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
108 Assert((pChunk)->offNext != (pChunk)->core.offNext || !(pChunk)->offNext); \
109 AssertMsg( (pChunk)->offNext \
110 || (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) == (pHeap)->offFreeTail, \
111 ("pChunk=%p offChunk=%#x offFreeTail=%#x\n", (pChunk), (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap), \
112 (pHeap)->offFreeTail)); \
113 } while (0)
114
115#define ASSERT_FREE_CB(pHeap, pChunk) \
116 do { ASSERT_ALIGN((pChunk)->cb); \
117 Assert((pChunk)->cb > 0); \
118 if ((pChunk)->core.offNext) \
119 AssertMsg((pChunk)->cb == ((pChunk)->core.offNext - sizeof(MMHYPERCHUNK)), \
120 ("cb=%d offNext=%d\n", (pChunk)->cb, (pChunk)->core.offNext)); \
121 else \
122 ASSERT_LE((pChunk)->cb, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
123 } while (0)
124
125#define ASSERT_CHUNK_FREE(pHeap, pChunk) \
126 do { ASSERT_CHUNK(pHeap, &(pChunk)->core); \
127 Assert(MMHYPERCHUNK_ISFREE(pChunk)); \
128 ASSERT_FREE_OFFNEXT(pHeap, pChunk); \
129 ASSERT_FREE_OFFPREV(pHeap, pChunk); \
130 ASSERT_FREE_CB(pHeap, pChunk); \
131 } while (0)
132
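
/*
 * Note added for clarity (not part of the original file): the assertions above
 * check the invariants of the heap layout.  Every allocation is preceded by an
 * MMHYPERCHUNK header whose offNext/offPrev fields are byte offsets to the
 * neighbouring chunks (the chunk type flags are encoded together with offPrev,
 * hence the GET/SET accessors), while offHeap points to the owning heap
 * structure and offStat to the per-tag statistics record.  Free chunks carry an
 * additional free-list offNext/offPrev pair plus the payload size in cb; see
 * MMInternal.h for the structure definitions.
 */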
133
134/*********************************************************************************************************************************
135* Internal Functions *
136*********************************************************************************************************************************/
137static PMMHYPERCHUNK mmHyperAllocChunk(PMMHYPERHEAP pHeap, uint32_t cb, unsigned uAlignment);
138static void *mmHyperAllocPages(PMMHYPERHEAP pHeap, uint32_t cb);
139#ifdef VBOX_WITH_STATISTICS
140static PMMHYPERSTAT mmHyperStat(PMMHYPERHEAP pHeap, MMTAG enmTag);
141#ifdef IN_RING3
142static void mmR3HyperStatRegisterOne(PVM pVM, PMMHYPERSTAT pStat);
143#endif
144#endif
145static int mmHyperFree(PMMHYPERHEAP pHeap, PMMHYPERCHUNK pChunk);
146#ifdef MMHYPER_HEAP_STRICT
147static void mmHyperHeapCheck(PMMHYPERHEAP pHeap);
148#endif
149
150
151
152/**
153 * Locks the hypervisor heap.
154 * This might call back to Ring-3 in order to deal with lock contention in GC and R3.
155 *
156 * @param pVM The cross context VM structure.
157 */
158static int mmHyperLock(PVMCC pVM)
159{
160 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
161
162#ifdef IN_RING3
163 if (!PDMCritSectIsInitialized(&pHeap->Lock))
164 return VINF_SUCCESS; /* early init */
165#else
166 Assert(PDMCritSectIsInitialized(&pHeap->Lock));
167#endif
168 int rc = PDMCritSectEnter(pVM, &pHeap->Lock, VINF_SUCCESS);
169 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pHeap->Lock, rc);
170 return rc;
171}
172
173
174/**
175 * Unlocks the hypervisor heap.
176 *
177 * @param pVM The cross context VM structure.
178 */
179static void mmHyperUnlock(PVMCC pVM)
180{
181 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
182
183#ifdef IN_RING3
184 if (!PDMCritSectIsInitialized(&pHeap->Lock))
185 return; /* early init */
186#endif
187 Assert(PDMCritSectIsInitialized(&pHeap->Lock));
188 PDMCritSectLeave(pVM, &pHeap->Lock);
189}
190
191/**
192 * Allocates memory in the Hypervisor (RC VMM) area.
193 * The returned memory is of course zeroed.
194 *
195 * @returns VBox status code.
196 * @param pVM The cross context VM structure.
197 * @param cb Number of bytes to allocate.
198 * @param uAlignment Required memory alignment in bytes.
199 * Values are 0,8,16,32,64 and GUEST_PAGE_SIZE. 0 ->
200 * default alignment, i.e. 8 bytes.
201 * @param enmTag The statistics tag.
202 * @param ppv Where to store the address to the allocated
203 * memory.
204 */
205static int mmHyperAllocInternal(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
206{
207 AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
208
209 /*
210 * Validate input and adjust it to reasonable values.
211 */
212 if (!uAlignment || uAlignment < MMHYPER_HEAP_ALIGN_MIN)
213 uAlignment = MMHYPER_HEAP_ALIGN_MIN;
214 uint32_t cbAligned;
215 switch (uAlignment)
216 {
217 case 8:
218 case 16:
219 case 32:
220 case 64:
221 cbAligned = RT_ALIGN_32(cb, MMHYPER_HEAP_ALIGN_MIN);
222 if (!cbAligned || cbAligned < cb)
223 {
224 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
225 AssertMsgFailed(("Nice try.\n"));
226 return VERR_INVALID_PARAMETER;
227 }
228 break;
229
230 case GUEST_PAGE_SIZE:
231 AssertMsg(RT_ALIGN_32(cb, GUEST_PAGE_SIZE) == cb, ("The size isn't page aligned. (cb=%#x)\n", cb));
232 cbAligned = RT_ALIGN_32(cb, GUEST_PAGE_SIZE);
233 if (!cbAligned)
234 {
235 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
236 AssertMsgFailed(("Nice try.\n"));
237 return VERR_INVALID_PARAMETER;
238 }
239 break;
240
241 default:
242 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
243 AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
244 return VERR_INVALID_PARAMETER;
245 }
246
247
248 /*
249 * Get heap and statistics.
250 */
251 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
252#ifdef VBOX_WITH_STATISTICS
253 PMMHYPERSTAT pStat = mmHyperStat(pHeap, enmTag);
254 if (!pStat)
255 {
256 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
257 AssertMsgFailed(("Failed to allocate statistics!\n"));
258 return VERR_MM_HYPER_NO_MEMORY;
259 }
260#else
261 NOREF(enmTag);
262#endif
263 if (uAlignment < GUEST_PAGE_SIZE)
264 {
265 /*
266 * Allocate a chunk.
267 */
268 PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, cbAligned, uAlignment);
269 if (pChunk)
270 {
271#ifdef VBOX_WITH_STATISTICS
272 const uint32_t cbChunk = pChunk->offNext
273 ? pChunk->offNext
274 : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
275 pStat->cbAllocated += (uint32_t)cbChunk;
276 pStat->cbCurAllocated += (uint32_t)cbChunk;
277 if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
278 pStat->cbMaxAllocated = pStat->cbCurAllocated;
279 pStat->cAllocations++;
280 pChunk->offStat = (uintptr_t)pStat - (uintptr_t)pChunk;
281#else
282 pChunk->offStat = 0;
283#endif
284 void *pv = pChunk + 1;
285 *ppv = pv;
286 ASMMemZero32(pv, cbAligned);
287 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv));
288 return VINF_SUCCESS;
289 }
290 }
291 else
292 {
293 /*
294 * Allocate page aligned memory.
295 */
296 void *pv = mmHyperAllocPages(pHeap, cbAligned);
297 if (pv)
298 {
299#ifdef VBOX_WITH_STATISTICS
300 pStat->cbAllocated += cbAligned;
301 pStat->cbCurAllocated += cbAligned;
302 if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
303 pStat->cbMaxAllocated = pStat->cbCurAllocated;
304 pStat->cAllocations++;
305#endif
306 *ppv = pv;
307 /* ASMMemZero32(pv, cbAligned); - not required since memory is alloc-only and SUPR3PageAlloc zeros it. */
308 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv));
309 return VINF_SUCCESS;
310 }
311 }
312
313#ifdef VBOX_WITH_STATISTICS
314 pStat->cAllocations++;
315 pStat->cFailures++;
316#endif
317 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
318 AssertMsgFailed(("Failed to allocate %d bytes!\n", cb));
319 return VERR_MM_HYPER_NO_MEMORY;
320}
321
322
323/**
324 * Wrapper for mmHyperAllocInternal
325 */
326VMMDECL(int) MMHyperAlloc(PVMCC pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
327{
328 int rc = mmHyperLock(pVM);
329 AssertRCReturn(rc, rc);
330
331 LogFlow(("MMHyperAlloc %x align=%x tag=%s\n", cb, uAlignment, mmGetTagName(enmTag)));
332
333 rc = mmHyperAllocInternal(pVM, cb, uAlignment, enmTag, ppv);
334
335 mmHyperUnlock(pVM);
336 return rc;
337}
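
/*
 * Illustrative usage sketch added for clarity; it is not part of the original
 * file, and the size, alignment and tag below are arbitrary example values.
 * It shows how a caller uses MMHyperAlloc()/MMHyperFree() as documented above.
 */
#if 0
static int mmHyperAllocUsageExample(PVMCC pVM)
{
    void *pv = NULL;
    /* 0 = default alignment (8 bytes); the returned memory is zeroed. */
    int rc = MMHyperAlloc(pVM, 256 /*cb*/, 0 /*uAlignment*/, MM_TAG_MM, &pv);
    if (RT_SUCCESS(rc))
    {
        /* ... use pv ... */
        rc = MMHyperFree(pVM, pv); /* possible, but freeing hyper memory is discouraged. */
    }
    return rc;
}
#endif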
338
339
340#if 0
341/**
342 * Duplicates a block of memory.
343 *
344 * @returns VBox status code.
345 * @param pVM The cross context VM structure.
346 * @param pvSrc The source memory block to copy from.
347 * @param cb Size of the source memory block.
348 * @param uAlignment Required memory alignment in bytes.
349 * Values are 0,8,16,32,64 and GUEST_PAGE_SIZE. 0 ->
350 * default alignment, i.e. 8 bytes.
351 * @param enmTag The statistics tag.
352 * @param ppv Where to store the address to the allocated
353 * memory.
354 */
355VMMDECL(int) MMHyperDupMem(PVMCC pVM, const void *pvSrc, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
356{
357 int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
358 if (RT_SUCCESS(rc))
359 memcpy(*ppv, pvSrc, cb);
360 return rc;
361}
362#endif
363
364
365/**
366 * Allocates a chunk of memory from the specified heap.
367 * The caller validates the parameters of this request.
368 *
369 * @returns Pointer to the allocated chunk.
370 * @returns NULL on failure.
371 * @param pHeap The heap.
372 * @param cb Size of the memory block to allocate.
373 * @param uAlignment The alignment specifications for the allocated block.
374 * @internal
375 */
376static PMMHYPERCHUNK mmHyperAllocChunk(PMMHYPERHEAP pHeap, uint32_t cb, unsigned uAlignment)
377{
378 Log3(("mmHyperAllocChunk: Enter cb=%#x uAlignment=%#x\n", cb, uAlignment));
379#ifdef MMHYPER_HEAP_STRICT
380 mmHyperHeapCheck(pHeap);
381#endif
382#ifdef MMHYPER_HEAP_STRICT_FENCE
383 uint32_t cbFence = RT_MAX(MMHYPER_HEAP_STRICT_FENCE_SIZE, uAlignment);
384 cb += cbFence;
385#endif
386
387 /*
388 * Check if there are any free chunks. (NIL_OFFSET use/not-use forces this check)
389 */
390 if (pHeap->offFreeHead == NIL_OFFSET)
391 return NULL;
392
393 /*
394 * Small alignments - from the front of the heap.
395 *
396 * Must split off free chunks at the end to prevent messing up the
397 * last free node which we take the page aligned memory from the top of.
398 */
399 PMMHYPERCHUNK pRet = NULL;
400 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeHead);
401 while (pFree)
402 {
403 ASSERT_CHUNK_FREE(pHeap, pFree);
404 if (pFree->cb >= cb)
405 {
406 unsigned offAlign = (uintptr_t)(&pFree->core + 1) & (uAlignment - 1);
407 if (offAlign)
408 offAlign = uAlignment - offAlign;
409 if (!offAlign || pFree->cb - offAlign >= cb)
410 {
411 Log3(("mmHyperAllocChunk: Using pFree=%p pFree->cb=%d offAlign=%d\n", pFree, pFree->cb, offAlign));
412
413 /*
414 * Adjust the node in front.
415 * Because of multiple alignments we need to special case allocation of the first block.
416 */
417 if (offAlign)
418 {
419 MMHYPERCHUNKFREE Free = *pFree;
420 if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
421 {
422 /* just add a bit of memory to it. */
423 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&Free.core));
424 pPrev->core.offNext += offAlign;
425 AssertMsg(!MMHYPERCHUNK_ISFREE(&pPrev->core), ("Impossible!\n"));
426 Log3(("mmHyperAllocChunk: Added %d bytes to %p\n", offAlign, pPrev));
427 }
428 else
429 {
430 /* make new head node, mark it USED for simplicity. */
431 PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)pHeap->CTX_SUFF(pbHeap);
432 Assert(pPrev == &pFree->core);
433 pPrev->offPrev = 0;
434 MMHYPERCHUNK_SET_TYPE(pPrev, MMHYPERCHUNK_FLAGS_USED);
435 pPrev->offNext = offAlign;
436 Log3(("mmHyperAllocChunk: Created new first node of %d bytes\n", offAlign));
437
438 }
439 Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - offAlign, -(int)offAlign));
440 pHeap->cbFree -= offAlign;
441
442 /* Recreate pFree node and adjusting everything... */
443 pFree = (PMMHYPERCHUNKFREE)((char *)pFree + offAlign);
444 *pFree = Free;
445
446 pFree->cb -= offAlign;
447 if (pFree->core.offNext)
448 {
449 pFree->core.offNext -= offAlign;
450 PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pFree + pFree->core.offNext);
451 MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pFree->core.offNext);
452 ASSERT_CHUNK(pHeap, pNext);
453 }
454 if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
455 MMHYPERCHUNK_SET_OFFPREV(&pFree->core, MMHYPERCHUNK_GET_OFFPREV(&pFree->core) - offAlign);
456
457 if (pFree->offNext)
458 {
459 pFree->offNext -= offAlign;
460 PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
461 pNext->offPrev = -(int32_t)pFree->offNext;
462 ASSERT_CHUNK_FREE(pHeap, pNext);
463 }
464 else
465 pHeap->offFreeTail += offAlign;
466 if (pFree->offPrev)
467 {
468 pFree->offPrev -= offAlign;
469 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
470 pPrev->offNext = -pFree->offPrev;
471 ASSERT_CHUNK_FREE(pHeap, pPrev);
472 }
473 else
474 pHeap->offFreeHead += offAlign;
475 pFree->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pFree;
476 pFree->core.offStat = 0;
477 ASSERT_CHUNK_FREE(pHeap, pFree);
478 Log3(("mmHyperAllocChunk: Realigned pFree=%p\n", pFree));
479 }
480
481 /*
482 * Split off a new FREE chunk?
483 */
484 if (pFree->cb >= cb + RT_ALIGN(sizeof(MMHYPERCHUNKFREE), MMHYPER_HEAP_ALIGN_MIN))
485 {
486 /*
487 * Move the FREE chunk up to make room for the new USED chunk.
488 */
489 const int off = cb + sizeof(MMHYPERCHUNK);
490 PMMHYPERCHUNKFREE pNew = (PMMHYPERCHUNKFREE)((char *)&pFree->core + off);
491 *pNew = *pFree;
492 pNew->cb -= off;
493 if (pNew->core.offNext)
494 {
495 pNew->core.offNext -= off;
496 PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pNew + pNew->core.offNext);
497 MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pNew->core.offNext);
498 ASSERT_CHUNK(pHeap, pNext);
499 }
500 pNew->core.offPrev = -off;
501 MMHYPERCHUNK_SET_TYPE(pNew, MMHYPERCHUNK_FLAGS_FREE);
502
503 if (pNew->offNext)
504 {
505 pNew->offNext -= off;
506 PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offNext);
507 pNext->offPrev = -(int32_t)pNew->offNext;
508 ASSERT_CHUNK_FREE(pHeap, pNext);
509 }
510 else
511 pHeap->offFreeTail += off;
512 if (pNew->offPrev)
513 {
514 pNew->offPrev -= off;
515 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offPrev);
516 pPrev->offNext = -pNew->offPrev;
517 ASSERT_CHUNK_FREE(pHeap, pPrev);
518 }
519 else
520 pHeap->offFreeHead += off;
521 pNew->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pNew;
522 pNew->core.offStat = 0;
523 ASSERT_CHUNK_FREE(pHeap, pNew);
524
525 /*
526 * Update the old FREE node making it a USED node.
527 */
528 pFree->core.offNext = off;
529 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);
530
531
532 Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
533 pHeap->cbFree - (cb + sizeof(MMHYPERCHUNK)), -(int)(cb + sizeof(MMHYPERCHUNK))));
534 pHeap->cbFree -= (uint32_t)(cb + sizeof(MMHYPERCHUNK));
535 pRet = &pFree->core;
536 ASSERT_CHUNK(pHeap, &pFree->core);
537 Log3(("mmHyperAllocChunk: Created free chunk pNew=%p cb=%d\n", pNew, pNew->cb));
538 }
539 else
540 {
541 /*
542 * Link out of free list.
543 */
544 if (pFree->offNext)
545 {
546 PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
547 if (pFree->offPrev)
548 {
549 pNext->offPrev += pFree->offPrev;
550 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
551 pPrev->offNext += pFree->offNext;
552 ASSERT_CHUNK_FREE(pHeap, pPrev);
553 }
554 else
555 {
556 pHeap->offFreeHead += pFree->offNext;
557 pNext->offPrev = 0;
558 }
559 ASSERT_CHUNK_FREE(pHeap, pNext);
560 }
561 else
562 {
563 if (pFree->offPrev)
564 {
565 pHeap->offFreeTail += pFree->offPrev;
566 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
567 pPrev->offNext = 0;
568 ASSERT_CHUNK_FREE(pHeap, pPrev);
569 }
570 else
571 {
572 pHeap->offFreeHead = NIL_OFFSET;
573 pHeap->offFreeTail = NIL_OFFSET;
574 }
575 }
576
577 Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
578 pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
579 pHeap->cbFree -= pFree->cb;
580 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);
581 pRet = &pFree->core;
582 ASSERT_CHUNK(pHeap, &pFree->core);
583 Log3(("mmHyperAllocChunk: Converted free chunk %p to used chunk.\n", pFree));
584 }
585 Log3(("mmHyperAllocChunk: Returning %p\n", pRet));
586 break;
587 }
588 }
589
590 /* next */
591 pFree = pFree->offNext ? (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext) : NULL;
592 }
593
594#ifdef MMHYPER_HEAP_STRICT_FENCE
595 uint32_t *pu32End = (uint32_t *)((uint8_t *)(pRet + 1) + cb);
596 uint32_t *pu32EndReal = pRet->offNext
597 ? (uint32_t *)((uint8_t *)pRet + pRet->offNext)
598 : (uint32_t *)(pHeap->CTX_SUFF(pbHeap) + pHeap->cbHeap);
599 cbFence += (uintptr_t)pu32EndReal - (uintptr_t)pu32End; Assert(!(cbFence & 0x3));
600 ASMMemFill32((uint8_t *)pu32EndReal - cbFence, cbFence, MMHYPER_HEAP_STRICT_FENCE_U32);
601 pu32EndReal[-1] = cbFence;
602#endif
603#ifdef MMHYPER_HEAP_STRICT
604 mmHyperHeapCheck(pHeap);
605#endif
606 return pRet;
607}
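
/*
 * Illustrative sketch added for clarity, not part of the original file: the
 * heap links chunks by byte offsets relative to the chunk itself (offNext,
 * offPrev) rather than by pointers, so the same heap image is usable in all
 * contexts.  A minimal forward walk of the chunk chain would look roughly
 * like this (MMHyperHeapDump() further down does the same in more detail):
 */
#if 0
static void mmHyperWalkChunksExample(PMMHYPERHEAP pHeap)
{
    PMMHYPERCHUNK pCur = (PMMHYPERCHUNK)pHeap->CTX_SUFF(pbHeap);
    for (;;)
    {
        Log(("chunk %p: %s offNext=%#x\n", pCur,
             MMHYPERCHUNK_ISFREE(pCur) ? "FREE" : "USED", pCur->offNext));
        if (!pCur->offNext) /* offNext == 0 marks the last chunk. */
            break;
        pCur = (PMMHYPERCHUNK)((char *)pCur + pCur->offNext);
    }
}
#endif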
608
609
610/**
611 * Allocates one or more pages of memory from the specified heap.
612 * The caller validates the parameters of this request.
613 *
614 * @returns Pointer to the allocated chunk.
615 * @returns NULL on failure.
616 * @param pHeap The heap.
617 * @param cb Size of the memory block to allocate.
618 * @internal
619 */
620static void *mmHyperAllocPages(PMMHYPERHEAP pHeap, uint32_t cb)
621{
622 Log3(("mmHyperAllocPages: Enter cb=%#x\n", cb));
623
624#ifdef MMHYPER_HEAP_STRICT
625 mmHyperHeapCheck(pHeap);
626#endif
627
628 /*
629 * Check if there are any free chunks. (NIL_OFFSET use/not-use forces this check)
630 */
631 if (pHeap->offFreeHead == NIL_OFFSET)
632 return NULL;
633
634 /*
635 * Page aligned chunks.
636 *
637 * Page aligned chunks can only be allocated from the last FREE chunk.
638 * This is for reasons of simplicity and fragmentation. Page aligned memory
639 * must also be allocated in page aligned sizes. Page aligned memory cannot
640 * be freed either.
641 *
642 * So, for this to work, the last FREE chunk needs to end on a page aligned
643 * boundary.
644 */
645 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeTail);
646 ASSERT_CHUNK_FREE(pHeap, pFree);
647 if ( (((uintptr_t)(&pFree->core + 1) + pFree->cb) & (GUEST_PAGE_OFFSET_MASK - 1))
648 || pFree->cb + sizeof(MMHYPERCHUNK) < cb)
649 {
650 Log3(("mmHyperAllocPages: Not enough/no page aligned memory!\n"));
651 return NULL;
652 }
653
654 void *pvRet;
655 if (pFree->cb > cb)
656 {
657 /*
658 * Simple, just cut the top of the free node and return it.
659 */
660 pFree->cb -= cb;
661 pvRet = (char *)(&pFree->core + 1) + pFree->cb;
662 AssertMsg(RT_ALIGN_P(pvRet, GUEST_PAGE_SIZE) == pvRet, ("pvRet=%p cb=%#x pFree=%p pFree->cb=%#x\n", pvRet, cb, pFree, pFree->cb));
663 Log3(("mmHyperAllocPages: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - cb, -(int)cb));
664 pHeap->cbFree -= cb;
665 ASSERT_CHUNK_FREE(pHeap, pFree);
666 Log3(("mmHyperAllocPages: Allocated from pFree=%p new pFree->cb=%d\n", pFree, pFree->cb));
667 }
668 else
669 {
670 /*
671 * Unlink the FREE node.
672 */
673 pvRet = (char *)(&pFree->core + 1) + pFree->cb - cb;
674 Log3(("mmHyperAllocPages: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
675 pHeap->cbFree -= pFree->cb;
676
677 /* a scrap of spare memory (unlikely)? add it to the previous chunk. */
678 if (pvRet != (void *)pFree)
679 {
680 AssertMsg(MMHYPERCHUNK_GET_OFFPREV(&pFree->core), ("How the *beep* did someone manage to allocate all of the heap with page aligned memory?!?\n"));
681 PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&pFree->core));
682 pPrev->offNext += (uintptr_t)pvRet - (uintptr_t)pFree;
683 AssertMsg(!MMHYPERCHUNK_ISFREE(pPrev), ("Free bug?\n"));
684#ifdef VBOX_WITH_STATISTICS
685 PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pPrev + pPrev->offStat);
686 pStat->cbAllocated += (uintptr_t)pvRet - (uintptr_t)pFree;
687 pStat->cbCurAllocated += (uintptr_t)pvRet - (uintptr_t)pFree;
688#endif
689 Log3(("mmHyperAllocPages: Added %d to %p (page align)\n", (uintptr_t)pvRet - (uintptr_t)pFree, pFree));
690 }
691
692 /* unlink from FREE chain. */
693 if (pFree->offPrev)
694 {
695 pHeap->offFreeTail += pFree->offPrev;
696 ((PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev))->offNext = 0;
697 }
698 else
699 {
700 pHeap->offFreeTail = NIL_OFFSET;
701 pHeap->offFreeHead = NIL_OFFSET;
702 }
703 Log3(("mmHyperAllocPages: Unlinked pFree=%p\n", pFree));
704 }
705 pHeap->offPageAligned = (uintptr_t)pvRet - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
706 Log3(("mmHyperAllocPages: Returning %p (page aligned)\n", pvRet));
707
708#ifdef MMHYPER_HEAP_STRICT
709 mmHyperHeapCheck(pHeap);
710#endif
711 return pvRet;
712}
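
/*
 * Illustrative sketch added for clarity, not part of the original file: page
 * aligned memory is requested through MMHyperAlloc() with uAlignment set to
 * GUEST_PAGE_SIZE and a page aligned size.  As explained above it is carved
 * from the top of the last FREE chunk and can never be freed again, so such
 * allocations are effectively permanent.  The tag below is just an example.
 */
#if 0
static int mmHyperAllocPagesUsageExample(PVMCC pVM)
{
    void *pvPages = NULL;
    int rc = MMHyperAlloc(pVM, 2 * GUEST_PAGE_SIZE, GUEST_PAGE_SIZE, MM_TAG_MM, &pvPages);
    /* On success pvPages is page aligned and zeroed. */
    return rc;
}
#endif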
713
714#ifdef VBOX_WITH_STATISTICS
715
716/**
717 * Get the statistic record for a tag.
718 *
719 * @returns Pointer to a stat record.
720 * @returns NULL on failure.
721 * @param pHeap The heap.
722 * @param enmTag The tag.
723 */
724static PMMHYPERSTAT mmHyperStat(PMMHYPERHEAP pHeap, MMTAG enmTag)
725{
726 /* try to look it up first. */
727 PMMHYPERSTAT pStat = (PMMHYPERSTAT)RTAvloGCPhysGet(&pHeap->HyperHeapStatTree, enmTag);
728 if (!pStat)
729 {
730 /* try to allocate a new one */
731 PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, RT_ALIGN(sizeof(*pStat), MMHYPER_HEAP_ALIGN_MIN), MMHYPER_HEAP_ALIGN_MIN);
732 if (!pChunk)
733 return NULL;
734 pStat = (PMMHYPERSTAT)(pChunk + 1);
735 pChunk->offStat = (uintptr_t)pStat - (uintptr_t)pChunk;
736
737 ASMMemZero32(pStat, sizeof(*pStat));
738 pStat->Core.Key = enmTag;
739 RTAvloGCPhysInsert(&pHeap->HyperHeapStatTree, &pStat->Core);
740 }
741 if (!pStat->fRegistered)
742 {
743# ifdef IN_RING3
744 mmR3HyperStatRegisterOne(pHeap->pVMR3, pStat);
745# else
746 /** @todo schedule a R3 action. */
747# endif
748 }
749 return pStat;
750}
751
752
753# ifdef IN_RING3
754/**
755 * Registers statistics with STAM.
756 *
757 */
758static void mmR3HyperStatRegisterOne(PVM pVM, PMMHYPERSTAT pStat)
759{
760 if (pStat->fRegistered)
761 return;
762 const char *pszTag = mmGetTagName((MMTAG)pStat->Core.Key);
763 STAMR3RegisterF(pVM, &pStat->cbCurAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Number of bytes currently allocated.", "/MM/HyperHeap/%s", pszTag);
764 STAMR3RegisterF(pVM, &pStat->cAllocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of alloc calls.", "/MM/HyperHeap/%s/cAllocations", pszTag);
765 STAMR3RegisterF(pVM, &pStat->cFrees, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of free calls.", "/MM/HyperHeap/%s/cFrees", pszTag);
766 STAMR3RegisterF(pVM, &pStat->cFailures, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of failures.", "/MM/HyperHeap/%s/cFailures", pszTag);
767 STAMR3RegisterF(pVM, &pStat->cbAllocated, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of allocated bytes.", "/MM/HyperHeap/%s/cbAllocated", pszTag);
768 STAMR3RegisterF(pVM, &pStat->cbFreed, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of freed bytes.", "/MM/HyperHeap/%s/cbFreed", pszTag);
769 STAMR3RegisterF(pVM, &pStat->cbMaxAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Max number of bytes allocated at the same time.","/MM/HyperHeap/%s/cbMaxAllocated", pszTag);
770 pStat->fRegistered = true;
771}
772# endif /* IN_RING3 */
773
774#endif /* VBOX_WITH_STATISTICS */
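
/*
 * Note added for clarity (not in the original file): with the registration
 * above, each allocation tag surfaces in STAM under "/MM/HyperHeap/<tag>"
 * with sub-leaves such as ".../cAllocations", ".../cFrees", ".../cbAllocated"
 * and ".../cbMaxAllocated", matching the STAMR3RegisterF() calls.
 */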
775
776
777/**
778 * Free memory allocated using MMHyperAlloc().
779 * The caller validates the parameters of this request.
780 *
781 * @returns VBox status code.
782 * @param pVM The cross context VM structure.
783 * @param pv The memory to free.
784 * @remark Try to avoid freeing hyper memory.
785 */
786static int mmHyperFreeInternal(PVM pVM, void *pv)
787{
788 Log2(("MMHyperFree: pv=%p\n", pv));
789 if (!pv)
790 return VINF_SUCCESS;
791 AssertMsgReturn(RT_ALIGN_P(pv, MMHYPER_HEAP_ALIGN_MIN) == pv,
792 ("Invalid pointer %p!\n", pv),
793 VERR_INVALID_POINTER);
794
795 /*
796 * Get the heap and stats.
797 * Validate the chunk at the same time.
798 */
799 PMMHYPERCHUNK pChunk = (PMMHYPERCHUNK)((PMMHYPERCHUNK)pv - 1);
800
801 AssertMsgReturn( (uintptr_t)pChunk + pChunk->offNext >= (uintptr_t)pChunk
802 || RT_ALIGN_32(pChunk->offNext, MMHYPER_HEAP_ALIGN_MIN) != pChunk->offNext,
803 ("%p: offNext=%#RX32\n", pv, pChunk->offNext),
804 VERR_INVALID_POINTER);
805
806 AssertMsgReturn(MMHYPERCHUNK_ISUSED(pChunk),
807 ("%p: Not used!\n", pv),
808 VERR_INVALID_POINTER);
809
810 int32_t offPrev = MMHYPERCHUNK_GET_OFFPREV(pChunk);
811 AssertMsgReturn( (uintptr_t)pChunk + offPrev <= (uintptr_t)pChunk
812 && !((uint32_t)-offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)),
813 ("%p: offPrev=%#RX32!\n", pv, offPrev),
814 VERR_INVALID_POINTER);
815
816 /* statistics */
817#ifdef VBOX_WITH_STATISTICS
818 PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pChunk + pChunk->offStat);
819 AssertMsgReturn( RT_ALIGN_P(pStat, MMHYPER_HEAP_ALIGN_MIN) == (void *)pStat
820 && pChunk->offStat,
821 ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
822 VERR_INVALID_POINTER);
823#else
824 AssertMsgReturn(!pChunk->offStat,
825 ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
826 VERR_INVALID_POINTER);
827#endif
828
829 /* The heap structure. */
830 PMMHYPERHEAP pHeap = (PMMHYPERHEAP)((uintptr_t)pChunk + pChunk->offHeap);
831 AssertMsgReturn( !((uintptr_t)pHeap & GUEST_PAGE_OFFSET_MASK)
832 && pChunk->offHeap,
833 ("%p: pHeap=%#x offHeap=%RX32\n", pv, pHeap->u32Magic, pChunk->offHeap),
834 VERR_INVALID_POINTER);
835
836 AssertMsgReturn(pHeap->u32Magic == MMHYPERHEAP_MAGIC,
837 ("%p: u32Magic=%#x\n", pv, pHeap->u32Magic),
838 VERR_INVALID_POINTER);
839 Assert(pHeap == pVM->mm.s.CTX_SUFF(pHyperHeap)); NOREF(pVM);
840
841 /* Some more verifications using additional info from pHeap. */
842 AssertMsgReturn((uintptr_t)pChunk + offPrev >= (uintptr_t)pHeap->CTX_SUFF(pbHeap),
843 ("%p: offPrev=%#RX32!\n", pv, offPrev),
844 VERR_INVALID_POINTER);
845
846 AssertMsgReturn(pChunk->offNext < pHeap->cbHeap,
847 ("%p: offNext=%#RX32!\n", pv, pChunk->offNext),
848 VERR_INVALID_POINTER);
849
850 AssertMsgReturn( (uintptr_t)pv - (uintptr_t)pHeap->CTX_SUFF(pbHeap) <= pHeap->offPageAligned,
851 ("Invalid pointer %p! (heap: %p-%p)\n", pv, pHeap->CTX_SUFF(pbHeap),
852 (char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned),
853 VERR_INVALID_POINTER);
854
855#ifdef MMHYPER_HEAP_STRICT
856 mmHyperHeapCheck(pHeap);
857#endif
858
859#if defined(VBOX_WITH_STATISTICS) || defined(MMHYPER_HEAP_FREE_POISON)
860 /* calc block size. */
861 const uint32_t cbChunk = pChunk->offNext
862 ? pChunk->offNext
863 : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
864#endif
865#ifdef MMHYPER_HEAP_FREE_POISON
866 /* poison the block */
867 memset(pChunk + 1, MMHYPER_HEAP_FREE_POISON, cbChunk - sizeof(*pChunk));
868#endif
869
870#ifdef MMHYPER_HEAP_FREE_DELAY
871# ifdef MMHYPER_HEAP_FREE_POISON
872 /*
873 * Check poison.
874 */
875 unsigned i = RT_ELEMENTS(pHeap->aDelayedFrees);
876 while (i-- > 0)
877 if (pHeap->aDelayedFrees[i].offChunk)
878 {
879 PMMHYPERCHUNK pCur = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[i].offChunk);
880 const size_t cb = pCur->offNext
881 ? pCur->offNext - sizeof(*pCur)
882 : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pCur - sizeof(*pCur);
883 uint8_t *pab = (uint8_t *)(pCur + 1);
884 for (unsigned off = 0; off < cb; off++)
885 AssertReleaseMsg(pab[off] == 0xCB,
886 ("caller=%RTptr cb=%#zx off=%#x: %.*Rhxs\n",
887 pHeap->aDelayedFrees[i].uCaller, cb, off, RT_MIN(cb - off, 32), &pab[off]));
888 }
889# endif /* MMHYPER_HEAP_FREE_POISON */
890
891 /*
892 * Delayed freeing.
893 */
894 int rc = VINF_SUCCESS;
895 if (pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk)
896 {
897 PMMHYPERCHUNK pChunkFree = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk);
898 rc = mmHyperFree(pHeap, pChunkFree);
899 }
900 pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk = (uintptr_t)pChunk - (uintptr_t)pHeap;
901 pHeap->aDelayedFrees[pHeap->iDelayedFree].uCaller = (uintptr_t)ASMReturnAddress();
902 pHeap->iDelayedFree = (pHeap->iDelayedFree + 1) % RT_ELEMENTS(pHeap->aDelayedFrees);
903
904#else /* !MMHYPER_HEAP_FREE_POISON */
905 /*
906 * Call the worker.
907 */
908 int rc = mmHyperFree(pHeap, pChunk);
909#endif /* !MMHYPER_HEAP_FREE_POISON */
910
911 /*
912 * Update statistics.
913 */
914#ifdef VBOX_WITH_STATISTICS
915 pStat->cFrees++;
916 if (RT_SUCCESS(rc))
917 {
918 pStat->cbFreed += cbChunk;
919 pStat->cbCurAllocated -= cbChunk;
920 }
921 else
922 pStat->cFailures++;
923#endif
924
925 return rc;
926}
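
/*
 * Note added for clarity (not in the original file): the conditional blocks
 * above are compile-time debugging aids.  MMHYPER_HEAP_FREE_POISON fills the
 * payload of freed chunks with a poison byte, and MMHYPER_HEAP_FREE_DELAY
 * parks each freed chunk in the small aDelayedFrees ring so the poison can be
 * re-verified later, which helps catch writes to freed hyper heap memory.
 */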
927
928
929/**
930 * Wrapper for mmHyperFreeInternal
931 */
932VMMDECL(int) MMHyperFree(PVMCC pVM, void *pv)
933{
934 int rc = mmHyperLock(pVM);
935 AssertRCReturn(rc, rc);
936
937 LogFlow(("MMHyperFree %p\n", pv));
938
939 rc = mmHyperFreeInternal(pVM, pv);
940
941 mmHyperUnlock(pVM);
942 return rc;
943}
944
945
946/**
947 * Frees a memory chunk.
948 *
949 * @returns VBox status code.
950 * @param pHeap The heap.
951 * @param pChunk The memory chunk to free.
952 */
953static int mmHyperFree(PMMHYPERHEAP pHeap, PMMHYPERCHUNK pChunk)
954{
955 Log3(("mmHyperFree: Enter pHeap=%p pChunk=%p\n", pHeap, pChunk));
956 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pChunk;
957
958 /*
959 * Insert into the free list (which is sorted on address).
960 *
961 * We'll search towards the end of the heap to locate the
962 * closest FREE chunk.
963 */
964 PMMHYPERCHUNKFREE pLeft = NULL;
965 PMMHYPERCHUNKFREE pRight = NULL;
966 if (pHeap->offFreeTail != NIL_OFFSET)
967 {
968 if (pFree->core.offNext)
969 {
970 pRight = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->core.offNext);
971 ASSERT_CHUNK(pHeap, &pRight->core);
972 while (!MMHYPERCHUNK_ISFREE(&pRight->core))
973 {
974 if (!pRight->core.offNext)
975 {
976 pRight = NULL;
977 break;
978 }
979 pRight = (PMMHYPERCHUNKFREE)((char *)pRight + pRight->core.offNext);
980 ASSERT_CHUNK(pHeap, &pRight->core);
981 }
982 }
983 if (!pRight)
984 pRight = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeTail); /** @todo this can't be correct! 'pLeft = .. ; else' I think */
985 if (pRight)
986 {
987 ASSERT_CHUNK_FREE(pHeap, pRight);
988 if (pRight->offPrev)
989 {
990 pLeft = (PMMHYPERCHUNKFREE)((char *)pRight + pRight->offPrev);
991 ASSERT_CHUNK_FREE(pHeap, pLeft);
992 }
993 }
994 }
995 if (pLeft == pFree)
996 {
997 AssertMsgFailed(("Freed twice! pv=%p (pChunk=%p)\n", pChunk + 1, pChunk));
998 return VERR_INVALID_POINTER;
999 }
1000 pChunk->offStat = 0;
1001
1002 /*
1003 * Head free chunk list?
1004 */
1005 if (!pLeft)
1006 {
1007 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
1008 pFree->offPrev = 0;
1009 pHeap->offFreeHead = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
1010 if (pRight)
1011 {
1012 pFree->offNext = (uintptr_t)pRight - (uintptr_t)pFree;
1013 pRight->offPrev = -(int32_t)pFree->offNext;
1014 }
1015 else
1016 {
1017 pFree->offNext = 0;
1018 pHeap->offFreeTail = pHeap->offFreeHead;
1019 }
1020 Log3(("mmHyperFree: Inserted %p at head of free chain.\n", pFree));
1021 }
1022 else
1023 {
1024 /*
1025 * Can we merge with left hand free chunk?
1026 */
1027 if ((char *)pLeft + pLeft->core.offNext == (char *)pFree)
1028 {
1029 if (pFree->core.offNext)
1030 {
1031 pLeft->core.offNext = pLeft->core.offNext + pFree->core.offNext;
1032 MMHYPERCHUNK_SET_OFFPREV(((PMMHYPERCHUNK)((char *)pLeft + pLeft->core.offNext)), -(int32_t)pLeft->core.offNext);
1033 }
1034 else
1035 pLeft->core.offNext = 0;
1036 pFree = pLeft;
1037 Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pLeft->cb, -(int32_t)pLeft->cb));
1038 pHeap->cbFree -= pLeft->cb;
1039 Log3(("mmHyperFree: Merging %p into %p (cb=%d).\n", pFree, pLeft, pLeft->cb));
1040 }
1041 /*
1042 * No, just link it into the free list then.
1043 */
1044 else
1045 {
1046 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
1047 pFree->offPrev = (uintptr_t)pLeft - (uintptr_t)pFree;
1048 pLeft->offNext = -pFree->offPrev;
1049 if (pRight)
1050 {
1051 pFree->offNext = (uintptr_t)pRight - (uintptr_t)pFree;
1052 pRight->offPrev = -(int32_t)pFree->offNext;
1053 }
1054 else
1055 {
1056 pFree->offNext = 0;
1057 pHeap->offFreeTail = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
1058 }
1059 Log3(("mmHyperFree: Inserted %p after %p in free list.\n", pFree, pLeft));
1060 }
1061 }
1062
1063 /*
1064 * Can we merge with right hand free chunk?
1065 */
1066 if (pRight && (char *)pRight == (char *)pFree + pFree->core.offNext)
1067 {
1068 /* core */
1069 if (pRight->core.offNext)
1070 {
1071 pFree->core.offNext += pRight->core.offNext;
1072 PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pFree + pFree->core.offNext);
1073 MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pFree->core.offNext);
1074 ASSERT_CHUNK(pHeap, pNext);
1075 }
1076 else
1077 pFree->core.offNext = 0;
1078
1079 /* free */
1080 if (pRight->offNext)
1081 {
1082 pFree->offNext += pRight->offNext;
1083 ((PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext))->offPrev = -(int32_t)pFree->offNext;
1084 }
1085 else
1086 {
1087 pFree->offNext = 0;
1088 pHeap->offFreeTail = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
1089 }
1090 Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pRight->cb, -(int32_t)pRight->cb));
1091 pHeap->cbFree -= pRight->cb;
1092 Log3(("mmHyperFree: Merged %p (cb=%d) into %p.\n", pRight, pRight->cb, pFree));
1093 }
1094
1095 /* calculate the size. */
1096 if (pFree->core.offNext)
1097 pFree->cb = pFree->core.offNext - sizeof(MMHYPERCHUNK);
1098 else
1099 pFree->cb = pHeap->offPageAligned - ((uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap)) - sizeof(MMHYPERCHUNK);
1100 Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree + pFree->cb, pFree->cb));
1101 pHeap->cbFree += pFree->cb;
1102 ASSERT_CHUNK_FREE(pHeap, pFree);
1103
1104#ifdef MMHYPER_HEAP_STRICT
1105 mmHyperHeapCheck(pHeap);
1106#endif
1107 return VINF_SUCCESS;
1108}
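
/*
 * Illustration added for clarity (not in the original file): mmHyperFree()
 * keeps the free list sorted by address and coalesces with both neighbours
 * when they are free, e.g.
 *
 *      before:  [FREE pLeft][USED chunk being freed][FREE pRight][USED ...]
 *      after :  [FREE           one merged chunk               ][USED ...]
 *
 * The merged chunk's cb is recomputed from core.offNext, or from
 * offPageAligned when it is the last chunk on the heap.
 */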
1109
1110
1111#if defined(DEBUG) || defined(MMHYPER_HEAP_STRICT_FENCE)
1112/**
1113 * Dumps a heap chunk to the log.
1114 *
1115 * @param pHeap Pointer to the heap.
1116 * @param pCur Pointer to the chunk.
1117 */
1118static void mmHyperHeapDumpOne(PMMHYPERHEAP pHeap, PMMHYPERCHUNKFREE pCur)
1119{
1120 if (MMHYPERCHUNK_ISUSED(&pCur->core))
1121 {
1122 if (pCur->core.offStat)
1123 {
1124 PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pCur + pCur->core.offStat);
1125 const char *pszSelf = pCur->core.offStat == sizeof(MMHYPERCHUNK) ? " stat record" : "";
1126#ifdef IN_RING3
1127 Log(("%p %06x USED offNext=%06x offPrev=-%06x %s%s\n",
1128 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1129 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
1130 mmGetTagName((MMTAG)pStat->Core.Key), pszSelf));
1131#else
1132 Log(("%p %06x USED offNext=%06x offPrev=-%06x %d%s\n",
1133 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1134 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
1135 (MMTAG)pStat->Core.Key, pszSelf));
1136#endif
1137 NOREF(pStat); NOREF(pszSelf);
1138 }
1139 else
1140 Log(("%p %06x USED offNext=%06x offPrev=-%06x\n",
1141 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1142 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core)));
1143 }
1144 else
1145 Log(("%p %06x FREE offNext=%06x offPrev=-%06x : cb=%06x offNext=%06x offPrev=-%06x\n",
1146 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1147 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core), pCur->cb, pCur->offNext, pCur->offPrev));
1148}
1149#endif /* DEBUG || MMHYPER_HEAP_STRICT */
1150
1151
1152#ifdef MMHYPER_HEAP_STRICT
1153/**
1154 * Internal consistency check.
1155 */
1156static void mmHyperHeapCheck(PMMHYPERHEAP pHeap)
1157{
1158 PMMHYPERCHUNKFREE pPrev = NULL;
1159 PMMHYPERCHUNKFREE pCur = (PMMHYPERCHUNKFREE)pHeap->CTX_SUFF(pbHeap);
1160 for (;;)
1161 {
1162 if (MMHYPERCHUNK_ISUSED(&pCur->core))
1163 ASSERT_CHUNK_USED(pHeap, &pCur->core);
1164 else
1165 ASSERT_CHUNK_FREE(pHeap, pCur);
1166 if (pPrev)
1167 AssertMsg((int32_t)pPrev->core.offNext == -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
1168 ("pPrev->core.offNext=%d offPrev=%d\n", pPrev->core.offNext, MMHYPERCHUNK_GET_OFFPREV(&pCur->core)));
1169
1170# ifdef MMHYPER_HEAP_STRICT_FENCE
1171 uint32_t off = (uint8_t *)pCur - pHeap->CTX_SUFF(pbHeap);
1172 if ( MMHYPERCHUNK_ISUSED(&pCur->core)
1173 && off < pHeap->offPageAligned)
1174 {
1175 uint32_t cbCur = pCur->core.offNext
1176 ? pCur->core.offNext
1177 : pHeap->cbHeap - off;
1178 uint32_t *pu32End = ((uint32_t *)((uint8_t *)pCur + cbCur));
1179 uint32_t cbFence = pu32End[-1];
1180 if (RT_UNLIKELY( cbFence >= cbCur - sizeof(*pCur)
1181 || cbFence < MMHYPER_HEAP_STRICT_FENCE_SIZE))
1182 {
1183 mmHyperHeapDumpOne(pHeap, pCur);
1184 Assert(cbFence < cbCur - sizeof(*pCur));
1185 Assert(cbFence >= MMHYPER_HEAP_STRICT_FENCE_SIZE);
1186 }
1187
1188 uint32_t *pu32Bad = ASMMemFirstMismatchingU32((uint8_t *)pu32End - cbFence, cbFence - sizeof(uint32_t), MMHYPER_HEAP_STRICT_FENCE_U32);
1189 if (RT_UNLIKELY(pu32Bad))
1190 {
1191 mmHyperHeapDumpOne(pHeap, pCur);
1192 Assert(!pu32Bad);
1193 }
1194 }
1195# endif
1196
1197 /* next */
1198 if (!pCur->core.offNext)
1199 break;
1200 pPrev = pCur;
1201 pCur = (PMMHYPERCHUNKFREE)((char *)pCur + pCur->core.offNext);
1202 }
1203}
1204#endif
1205
1206
1207/**
1208 * Performs consistency checks on the heap if MMHYPER_HEAP_STRICT was
1209 * defined at build time.
1210 *
1211 * @param pVM The cross context VM structure.
1212 */
1213VMMDECL(void) MMHyperHeapCheck(PVMCC pVM)
1214{
1215#ifdef MMHYPER_HEAP_STRICT
1216 int rc = mmHyperLock(pVM);
1217 AssertRC(rc);
1218 mmHyperHeapCheck(pVM->mm.s.CTX_SUFF(pHyperHeap));
1219 mmHyperUnlock(pVM);
1220#else
1221 NOREF(pVM);
1222#endif
1223}
1224
1225
1226#ifdef DEBUG
1227/**
1228 * Dumps the hypervisor heap to Log.
1229 * @param pVM The cross context VM structure.
1230 */
1231VMMDECL(void) MMHyperHeapDump(PVM pVM)
1232{
1233 Log(("MMHyperHeapDump: *** heap dump - start ***\n"));
1234 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
1235 PMMHYPERCHUNKFREE pCur = (PMMHYPERCHUNKFREE)pHeap->CTX_SUFF(pbHeap);
1236 for (;;)
1237 {
1238 mmHyperHeapDumpOne(pHeap, pCur);
1239
1240 /* next */
1241 if (!pCur->core.offNext)
1242 break;
1243 pCur = (PMMHYPERCHUNKFREE)((char *)pCur + pCur->core.offNext);
1244 }
1245 Log(("MMHyperHeapDump: *** heap dump - end ***\n"));
1246}
1247#endif
1248
1249
1250/**
1251 * Query the amount of free memory in the hypervisor heap.
1252 *
1253 * @returns Number of free bytes in the hypervisor heap.
1254 */
1255VMMDECL(size_t) MMHyperHeapGetFreeSize(PVM pVM)
1256{
1257 return pVM->mm.s.CTX_SUFF(pHyperHeap)->cbFree;
1258}
1259
1260
1261/**
1262 * Query the size of the hypervisor heap.
1263 *
1264 * @returns The size of the hypervisor heap in bytes.
1265 */
1266VMMDECL(size_t) MMHyperHeapGetSize(PVM pVM)
1267{
1268 return pVM->mm.s.CTX_SUFF(pHyperHeap)->cbHeap;
1269}
1270
1271
1272/**
1273 * Converts a context neutral heap offset into a pointer.
1274 *
1275 * @returns Pointer to hyper heap data.
1276 * @param pVM The cross context VM structure.
1277 * @param offHeap The hyper heap offset.
1278 */
1279VMMDECL(void *) MMHyperHeapOffsetToPtr(PVM pVM, uint32_t offHeap)
1280{
1281 Assert(offHeap - MMYPERHEAP_HDR_SIZE <= pVM->mm.s.CTX_SUFF(pHyperHeap)->cbHeap);
1282 return (uint8_t *)pVM->mm.s.CTX_SUFF(pHyperHeap) + offHeap;
1283}
1284
1285
1286/**
1287 * Converts a context specific heap pointer into a neutral heap offset.
1288 *
1289 * @returns Heap offset.
1290 * @param pVM The cross context VM structure.
1291 * @param pv Pointer to the heap data.
1292 */
1293VMMDECL(uint32_t) MMHyperHeapPtrToOffset(PVM pVM, void *pv)
1294{
1295 size_t offHeap = (uint8_t *)pv - (uint8_t *)pVM->mm.s.CTX_SUFF(pHyperHeap);
1296 Assert(offHeap - MMYPERHEAP_HDR_SIZE <= pVM->mm.s.CTX_SUFF(pHyperHeap)->cbHeap);
1297 return (uint32_t)offHeap;
1298}
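
/*
 * Illustrative sketch added for clarity, not part of the original file: the
 * offsets produced by MMHyperHeapPtrToOffset() are relative to the heap
 * structure itself and round-trip through MMHyperHeapOffsetToPtr() in any
 * context.  pVM and pv (a pointer into the hyper heap) are assumed in scope.
 */
#if 0
    uint32_t offHeap = MMHyperHeapPtrToOffset(pVM, pv);
    void    *pvAgain = MMHyperHeapOffsetToPtr(pVM, offHeap);
    Assert(pvAgain == pv);
#endif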
1299
1300
1301/**
1302 * Query the address and size of the hypervisor memory area.
1303 *
1304 * @returns Base address of the hypervisor area.
1305 * @param pVM The cross context VM structure.
1306 * @param pcb Where to store the size of the hypervisor area. (out)
1307 */
1308VMMDECL(RTGCPTR) MMHyperGetArea(PVM pVM, size_t *pcb)
1309{
1310 if (pcb)
1311 *pcb = pVM->mm.s.cbHyperArea;
1312 return pVM->mm.s.pvHyperAreaGC;
1313}
1314
1315
1316/**
1317 * Checks if an address is within the hypervisor memory area.
1318 *
1319 * @returns true if inside.
1320 * @returns false if outside.
1321 * @param pVM The cross context VM structure.
1322 * @param GCPtr The pointer to check.
1323 *
1324 * @note Caller must check that we're in raw-mode before calling!
1325 */
1326VMMDECL(bool) MMHyperIsInsideArea(PVM pVM, RTGCPTR GCPtr)
1327{
1328 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1329 return (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea;
1330}
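
/*
 * Note added for clarity (not in the original file): the single unsigned
 * comparison above is the usual range-check idiom.  Because the subtraction
 * wraps around for addresses below the area, "GCPtr - base < size" is
 * equivalent to "base <= GCPtr && GCPtr < base + size" with one compare.
 */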
1331