/* $Id: VBoxGuestR0LibPhysHeap.cpp 106061 2024-09-16 14:03:52Z vboxsync $ */
/** @file
 * VBoxGuestLibR0 - Physical memory heap.
 */

/*
 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/** @page pg_vbglr0_phys_heap VBoxGuestLibR0 - Physical memory heap.
 *
 * Traditional heap implementation keeping all blocks in an ordered list and
 * keeping free blocks on an additional list via pointers in the user area.
 * This is similar to @ref grp_rt_heap_simple "RTHeapSimple" and
 * @ref grp_rt_heap_offset "RTHeapOffset" in IPRT, except that this code
 * handles multiple chunks and has a physical address associated with each
 * chunk and block.  The alignment is fixed (VBGL_PH_ALLOC_ALIGN).
 *
 * When allocating memory, a free block is found that satisfies the request,
 * extending the heap with another chunk if needed.  The block is split if it's
 * too large, and the tail end is put on the free list.
 *
 * When freeing memory, the block being freed is put back on the free list and
 * we use the block list to check whether it can be merged with adjacent blocks.
 *
 * @note The original code managed the blocks in two separate lists for free
 *       and allocated blocks, which had the disadvantage of only allowing
 *       merging with the block following the block being freed.  On the plus
 *       side, it had the potential for slightly better locality when
 *       examining the free list, since the next pointer and block size
 *       members were closer to one another.
 */
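
/*
 * Usage sketch (illustrative only, not part of the heap code): a typical
 * allocation round trip through the public API defined below.  The 64 byte
 * request size is a made-up example and error handling is trimmed:
 *
 *     void *pv = VbglR0PhysHeapAlloc(64);
 *     if (pv)
 *     {
 *         RTCCPHYS PhysAddr = VbglR0PhysHeapGetPhysAddr(pv);
 *         // ... hand PhysAddr to the host device and wait for completion ...
 *         VbglR0PhysHeapFree(pv);
 *     }
 */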


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "VBoxGuestR0LibInternal.h"

#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/mem.h>
#include <iprt/memobj.h>
#include <iprt/semaphore.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Enables heap dumping. */
#if defined(DOXYGEN_RUNNING) || 0
# define VBGL_PH_DUMPHEAP
#endif

#ifdef VBGL_PH_DUMPHEAP
# define VBGL_PH_DPRINTF(a)     RTAssertMsg2Weak a
#else
# define VBGL_PH_DPRINTF(a)     do { } while (0)
#endif

/** Heap chunk signature */
#define VBGL_PH_CHUNKSIGNATURE  UINT32_C(0xADDCCCCC)
/** Heap chunk allocation unit */
#define VBGL_PH_CHUNKSIZE       (0x10000)

/** Heap block signature */
#define VBGL_PH_BLOCKSIGNATURE  UINT32_C(0xADDBBBBB)

/** The allocation block alignment.
 *
 * This cannot be larger than the size of VBGLPHYSHEAPBLOCK.
 */
#define VBGL_PH_ALLOC_ALIGN     (sizeof(void *))

/** Max number of free nodes to search before just using the best fit.
 *
 * This is used to limit the free list walking during allocation and just get
 * on with the job.  A low number should reduce the cache thrashing at the
 * possible cost of heap fragmentation.
 *
 * Picked 16 after comparing the tstVbglR0PhysHeap-1 results w/ uRandSeed=42 for
 * different max values.
 */
#define VBGL_PH_MAX_FREE_SEARCH 16

/** Threshold to stop the block search if a free block is at least this much too big.
 *
 * May cause more fragmentation (depending on usage pattern), but should speed
 * up allocation and hopefully reduce cache thrashing.
 *
 * Since we merge adjacent free blocks when we can, free blocks should typically
 * be a lot larger than what's requested.  So, it is probably a good idea to
 * just chop up a large block rather than keep searching for a perfect-ish
 * match.
 *
 * Undefine this to disable this trick.
 */
#if defined(DOXYGEN_RUNNING) || 1
# define VBGL_PH_STOP_SEARCH_AT_EXCESS  _4K
#endif

/** Threshold at which to split out a tail free block when allocating.
 *
 * The value gives the amount of user space, i.e. excluding the header.
 *
 * Using 32 bytes based on VMMDev.h request sizes.  The smallest requests are
 * 24 bytes, i.e. only the header, and there are at least 4 of these.  There
 * are at least 10 with size 28 bytes and at least 11 with size 32 bytes.  So,
 * 32 bytes would fit some 25 requests out of about 60, which is reasonable.
 */
#define VBGL_PH_MIN_SPLIT_FREE_BLOCK    32


/** The smallest amount of user data that can be allocated.
 *
 * This is to ensure that the block can be converted into a
 * VBGLPHYSHEAPFREEBLOCK structure when freed.  This must be smaller than or
 * equal to VBGL_PH_MIN_SPLIT_FREE_BLOCK.
 */
#define VBGL_PH_SMALLEST_ALLOC_SIZE     16

/** The maximum allocation request size. */
#define VBGL_PH_LARGEST_ALLOC_SIZE      RT_ALIGN_32(  _128M \
                                                    - sizeof(VBGLPHYSHEAPBLOCK) \
                                                    - sizeof(VBGLPHYSHEAPCHUNK) \
                                                    - VBGL_PH_ALLOC_ALIGN, \
                                                    VBGL_PH_ALLOC_ALIGN)

/**
 * Whether to use the RTR0MemObjAllocCont API or RTMemContAlloc for
 * allocating chunks.
 *
 * This can be enabled on hosts where RTMemContAlloc is more complicated than
 * RTR0MemObjAllocCont.  This can also be done if we wish to save code space,
 * as the latter is typically always dragged into the link on guests where the
 * linker cannot eliminate functions within objects.  The only drawback is
 * that RTR0MemObjAllocCont requires another heap allocation for the handle.
 *
 * Update: Just enable it everywhere so we can more easily use memory above 4G.
 */
#define VBGL_PH_USE_MEMOBJ


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * A heap block (within a chunk).
 *
 * This is used to track a part of a heap chunk that's either free or
 * allocated.  The VBGLPHYSHEAPBLOCK::fAllocated member indicates which it is.
 */
struct VBGLPHYSHEAPBLOCK
{
    /** Magic value (VBGL_PH_BLOCKSIGNATURE). */
    uint32_t u32Signature;

    /** Size of user data in the block.  Does not include this block header. */
    uint32_t cbUser : 31;
    /** The top bit indicates whether it's allocated or free. */
    uint32_t fAllocated : 1;

    /** Pointer to the next block on the list. */
    VBGLPHYSHEAPBLOCK  *pNext;
    /** Pointer to the previous block on the list. */
    VBGLPHYSHEAPBLOCK  *pPrev;
    /** Pointer back to the chunk. */
    VBGLPHYSHEAPCHUNK  *pChunk;
};

/**
 * A free block.
 */
struct VBGLPHYSHEAPFREEBLOCK
{
    /** Core block data. */
    VBGLPHYSHEAPBLOCK       Core;
    /** Pointer to the next free list entry. */
    VBGLPHYSHEAPFREEBLOCK  *pNextFree;
    /** Pointer to the previous free list entry. */
    VBGLPHYSHEAPFREEBLOCK  *pPrevFree;
};
AssertCompile(VBGL_PH_SMALLEST_ALLOC_SIZE >= sizeof(VBGLPHYSHEAPFREEBLOCK) - sizeof(VBGLPHYSHEAPBLOCK));
AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= sizeof(VBGLPHYSHEAPFREEBLOCK) - sizeof(VBGLPHYSHEAPBLOCK));
AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= VBGL_PH_SMALLEST_ALLOC_SIZE);
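
/*
 * Memory layout sketch (illustrative; the stated sizes assume a 64-bit guest,
 * where the block header is 32 bytes and the chunk header is 64 bytes):
 *
 *   [VBGLPHYSHEAPCHUNK][VBGLPHYSHEAPBLOCK][user data][VBGLPHYSHEAPBLOCK][user data]...
 *
 * Blocks follow each other back to back within a chunk, so the user pointer
 * handed out by VbglR0PhysHeapAlloc is (pBlock + 1), and the block header for
 * a user pointer pv is recovered as ((VBGLPHYSHEAPBLOCK *)pv - 1).
 */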

/**
 * A chunk of memory used by the heap for sub-allocations.
 *
 * There is a list of these.
 */
struct VBGLPHYSHEAPCHUNK
{
    /** Magic value (VBGL_PH_CHUNKSIGNATURE). */
    uint32_t u32Signature;
    /** Size of the chunk.  Includes the chunk header. */
    uint32_t cbChunk;
    /** Number of blocks of any kind. */
    int32_t  cBlocks;
    /** Number of free blocks. */
    int32_t  cFreeBlocks;

    /** Physical address of the chunk (contiguous). */
    RTCCPHYS physAddr;

    /** Pointer to the next chunk. */
    VBGLPHYSHEAPCHUNK *pNext;
    /** Pointer to the previous chunk. */
    VBGLPHYSHEAPCHUNK *pPrev;

#if defined(VBGL_PH_USE_MEMOBJ)
    /** The allocation handle. */
    RTR0MEMOBJ hMemObj;
#endif
#if ARCH_BITS == 64
    /** Pad the size up to 64 bytes. */
# ifdef VBGL_PH_USE_MEMOBJ
    uintptr_t auPadding2[2];
# else
    uintptr_t auPadding2[3];
# endif
#endif
};
#if ARCH_BITS == 64
AssertCompileSize(VBGLPHYSHEAPCHUNK, 64);
#endif


/**
 * Debug function that dumps the heap.
 */
#ifndef VBGL_PH_DUMPHEAP
# define dumpheap(pszWhere) do { } while (0)
#else
static void dumpheap(const char *pszWhere)
{
    VBGL_PH_DPRINTF(("VBGL_PH dump at '%s'\n", pszWhere));

    VBGL_PH_DPRINTF(("Chunks:\n"));
    for (VBGLPHYSHEAPCHUNK *pChunk = g_vbgldata.pChunkHead; pChunk; pChunk = pChunk->pNext)
        VBGL_PH_DPRINTF(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, cBlocks = %8d, cFreeBlocks=%8d, phys = %08X\n",
                         pChunk, pChunk->pNext, pChunk->pPrev, pChunk->u32Signature, pChunk->cbChunk,
                         pChunk->cBlocks, pChunk->cFreeBlocks, pChunk->physAddr));

    VBGL_PH_DPRINTF(("Allocated blocks:\n"));
    for (VBGLPHYSHEAPBLOCK *pBlock = g_vbgldata.pBlockHead; pBlock; pBlock = pBlock->pNext)
        VBGL_PH_DPRINTF(("%p: pNext = %p, pPrev = %p, size = %05x, sign = %08X, %s, pChunk = %p\n",
                         pBlock, pBlock->pNext, pBlock->pPrev, pBlock->cbUser,
                         pBlock->u32Signature, pBlock->fAllocated ? "allocated" : "     free", pBlock->pChunk));

    VBGL_PH_DPRINTF(("Free blocks:\n"));
    for (VBGLPHYSHEAPFREEBLOCK *pBlock = g_vbgldata.pFreeHead; pBlock; pBlock = pBlock->pNextFree)
        VBGL_PH_DPRINTF(("%p: pNextFree = %p, pPrevFree = %p, size = %05x, sign = %08X, pChunk = %p%s\n",
                         pBlock, pBlock->pNextFree, pBlock->pPrevFree, pBlock->Core.cbUser,
                         pBlock->Core.u32Signature, pBlock->Core.pChunk,
                         !pBlock->Core.fAllocated ? "" : " !!allocated-block-on-freelist!!"));

    VBGL_PH_DPRINTF(("VBGL_PH dump at '%s' done\n", pszWhere));
}
#endif


/**
 * Initializes a free block.
 */
static void vbglPhysHeapInitFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock, VBGLPHYSHEAPCHUNK *pChunk, uint32_t cbUser)
{
    Assert(pBlock != NULL);
    Assert(pChunk != NULL);

    pBlock->Core.u32Signature = VBGL_PH_BLOCKSIGNATURE;
    pBlock->Core.cbUser       = cbUser;
    pBlock->Core.fAllocated   = false;
    pBlock->Core.pNext        = NULL;
    pBlock->Core.pPrev        = NULL;
    pBlock->Core.pChunk       = pChunk;
    pBlock->pNextFree         = NULL;
    pBlock->pPrevFree         = NULL;
}


/**
 * Updates block statistics when a block is added.
 */
DECLINLINE(void) vbglPhysHeapStatsBlockAdded(VBGLPHYSHEAPBLOCK *pBlock)
{
    g_vbgldata.cBlocks      += 1;
    pBlock->pChunk->cBlocks += 1;
    AssertMsg((uint32_t)pBlock->pChunk->cBlocks <= pBlock->pChunk->cbChunk / sizeof(VBGLPHYSHEAPFREEBLOCK),
              ("pChunk=%p: cbChunk=%#x cBlocks=%d\n", pBlock->pChunk, pBlock->pChunk->cbChunk, pBlock->pChunk->cBlocks));
}


/**
 * Links @a pBlock onto the head of the block list.
 *
 * This also updates the per-chunk block counts.
 */
static void vbglPhysHeapInsertBlock(VBGLPHYSHEAPBLOCK *pBlock)
{
    AssertMsg(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
    AssertMsg(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));

    /* inserting to head of list */
    VBGLPHYSHEAPBLOCK *pOldHead = g_vbgldata.pBlockHead;

    pBlock->pNext = pOldHead;
    pBlock->pPrev = NULL;

    if (pOldHead)
        pOldHead->pPrev = pBlock;
    g_vbgldata.pBlockHead = pBlock;

    /* Update the stats: */
    vbglPhysHeapStatsBlockAdded(pBlock);
}


/**
 * Links @a pBlock onto the block list after @a pInsertAfter.
 *
 * This also updates the per-chunk block counts.
 */
static void vbglPhysHeapInsertBlockAfter(VBGLPHYSHEAPBLOCK *pBlock, VBGLPHYSHEAPBLOCK *pInsertAfter)
{
    AssertMsg(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
    AssertMsg(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));

    pBlock->pNext = pInsertAfter->pNext;
    pBlock->pPrev = pInsertAfter;

    if (pInsertAfter->pNext)
        pInsertAfter->pNext->pPrev = pBlock;

    pInsertAfter->pNext = pBlock;

    /* Update the stats: */
    vbglPhysHeapStatsBlockAdded(pBlock);
}


/**
 * Unlinks @a pBlock from the block list.
 *
 * This also updates the per-chunk block counts.
 */
static void vbglPhysHeapUnlinkBlock(VBGLPHYSHEAPBLOCK *pBlock)
{
    VBGLPHYSHEAPBLOCK *pOtherBlock = pBlock->pNext;
    if (pOtherBlock)
        pOtherBlock->pPrev = pBlock->pPrev;
    /* else: this is the tail of the list, but we do not maintain tail pointers for block lists, so nothing to do. */

    pOtherBlock = pBlock->pPrev;
    if (pOtherBlock)
        pOtherBlock->pNext = pBlock->pNext;
    else
    {
        Assert(g_vbgldata.pBlockHead == pBlock);
        g_vbgldata.pBlockHead = pBlock->pNext;
    }

    pBlock->pNext = NULL;
    pBlock->pPrev = NULL;

    /* Update the stats: */
    g_vbgldata.cBlocks      -= 1;
    pBlock->pChunk->cBlocks -= 1;
    AssertMsg(pBlock->pChunk->cBlocks >= 0,
              ("pChunk=%p: cbChunk=%#x cBlocks=%d\n", pBlock->pChunk, pBlock->pChunk->cbChunk, pBlock->pChunk->cBlocks));
    Assert(g_vbgldata.cBlocks >= 0);
}


/**
 * Updates statistics after adding a free block.
 */
DECLINLINE(void) vbglPhysHeapStatsFreeBlockAdded(VBGLPHYSHEAPFREEBLOCK *pBlock)
{
    g_vbgldata.cFreeBlocks           += 1;
    pBlock->Core.pChunk->cFreeBlocks += 1;
}


/**
 * Links @a pBlock onto the head of the free chain.
 *
 * This is used during block freeing and when adding a new chunk.
 *
 * This also updates the per-chunk block counts.
 */
static void vbglPhysHeapInsertFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock)
{
    Assert(!pBlock->Core.fAllocated);
    AssertMsg(pBlock->pNextFree == NULL, ("pBlock->pNextFree = %p\n", pBlock->pNextFree));
    AssertMsg(pBlock->pPrevFree == NULL, ("pBlock->pPrevFree = %p\n", pBlock->pPrevFree));

    /* inserting to head of list */
    VBGLPHYSHEAPFREEBLOCK *pOldHead = g_vbgldata.pFreeHead;

    pBlock->pNextFree = pOldHead;
    pBlock->pPrevFree = NULL;

    if (pOldHead)
        pOldHead->pPrevFree = pBlock;
    g_vbgldata.pFreeHead = pBlock;

    /* Update the stats: */
    vbglPhysHeapStatsFreeBlockAdded(pBlock);
}


/**
 * Links @a pBlock after @a pInsertAfter.
 *
 * This is used when splitting a free block during allocation to preserve the
 * place in the free list.
 *
 * This also updates the per-chunk block counts.
 */
static void vbglPhysHeapInsertFreeBlockAfter(VBGLPHYSHEAPFREEBLOCK *pBlock, VBGLPHYSHEAPFREEBLOCK *pInsertAfter)
{
    Assert(!pBlock->Core.fAllocated);
    AssertMsg(pBlock->pNextFree == NULL, ("pBlock->pNextFree = %p\n", pBlock->pNextFree));
    AssertMsg(pBlock->pPrevFree == NULL, ("pBlock->pPrevFree = %p\n", pBlock->pPrevFree));

    /* inserting after the given node */
    pBlock->pNextFree = pInsertAfter->pNextFree;
    pBlock->pPrevFree = pInsertAfter;

    if (pInsertAfter->pNextFree)
        pInsertAfter->pNextFree->pPrevFree = pBlock;

    pInsertAfter->pNextFree = pBlock;

    /* Update the stats: */
    vbglPhysHeapStatsFreeBlockAdded(pBlock);
}


/**
 * Unlinks @a pBlock from the free list.
 *
 * This also updates the per-chunk block counts.
 */
static void vbglPhysHeapUnlinkFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock)
{
    Assert(!pBlock->Core.fAllocated);

    VBGLPHYSHEAPFREEBLOCK *pOtherBlock = pBlock->pNextFree;
    if (pOtherBlock)
        pOtherBlock->pPrevFree = pBlock->pPrevFree;
    /* else: this is the tail of the list, but we do not maintain tail pointers for block lists, so nothing to do. */

    pOtherBlock = pBlock->pPrevFree;
    if (pOtherBlock)
        pOtherBlock->pNextFree = pBlock->pNextFree;
    else
    {
        Assert(g_vbgldata.pFreeHead == pBlock);
        g_vbgldata.pFreeHead = pBlock->pNextFree;
    }

    pBlock->pNextFree = NULL;
    pBlock->pPrevFree = NULL;

    /* Update the stats: */
    g_vbgldata.cFreeBlocks           -= 1;
    pBlock->Core.pChunk->cFreeBlocks -= 1;
    AssertMsg(pBlock->Core.pChunk->cFreeBlocks >= 0,
              ("pChunk=%p: cbChunk=%#x cFreeBlocks=%d\n",
               pBlock->Core.pChunk, pBlock->Core.pChunk->cbChunk, pBlock->Core.pChunk->cFreeBlocks));
    Assert(g_vbgldata.cFreeBlocks >= 0);
}


/**
 * Allocates another chunk and adds it to the heap.
 *
 * @returns Pointer to the free block in the new chunk on success, NULL on
 *          allocation failure.
 * @param   cbMinBlock  The size of the user block we need this chunk for.
 */
static VBGLPHYSHEAPFREEBLOCK *vbglPhysHeapChunkAlloc(uint32_t cbMinBlock)
{
    RTCCPHYS           PhysAddr = NIL_RTHCPHYS;
    VBGLPHYSHEAPCHUNK *pChunk;
    uint32_t           cbChunk;
#ifdef VBGL_PH_USE_MEMOBJ
    RTR0MEMOBJ         hMemObj = NIL_RTR0MEMOBJ;
    int                rc;
#endif

    VBGL_PH_DPRINTF(("Allocating new chunk for %#x byte allocation\n", cbMinBlock));
    AssertReturn(cbMinBlock <= VBGL_PH_LARGEST_ALLOC_SIZE, NULL); /* paranoia */

    /*
     * Compute the size of the new chunk, rounding up to the next chunk size,
     * which must be a power of 2.
     *
     * Note! Using VBGLPHYSHEAPFREEBLOCK here means the minimum block size is
     *       8 or 16 bytes too high, but safer this way since cbMinBlock is
     *       zero during the init code call.
     */
    Assert(RT_IS_POWER_OF_TWO(VBGL_PH_CHUNKSIZE));
    cbChunk = cbMinBlock + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPFREEBLOCK);
    cbChunk = RT_ALIGN_32(cbChunk, VBGL_PH_CHUNKSIZE);
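    /* Worked example (illustrative, assuming a 64-bit guest where the chunk
       header is 64 bytes and a free block header is 48 bytes): the init-time
       call passes cbMinBlock = 0, giving 0 + 64 + 48 = 112 bytes, which the
       alignment above rounds up to a single 64 KiB chunk (VBGL_PH_CHUNKSIZE). */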

#ifdef VBGL_PH_USE_MEMOBJ
    rc       = RTR0MemObjAllocCont(&hMemObj, cbChunk, g_vbgldata.HCPhysMax, false /*fExecutable*/);
    pChunk   = (VBGLPHYSHEAPCHUNK *)(RT_SUCCESS(rc) ? RTR0MemObjAddress(hMemObj) : NULL);
    PhysAddr = RT_SUCCESS(rc) ? (RTCCPHYS)RTR0MemObjGetPagePhysAddr(hMemObj, 0 /*iPage*/) : NIL_RTCCPHYS;
#else
    pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
#endif
    if (!pChunk)
    {
        /* If the allocation fails, shrink the chunk size and try again. */
        uint32_t cbMinChunk = RT_MAX(cbMinBlock, PAGE_SIZE / 2) + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPFREEBLOCK);
        cbMinChunk = RT_ALIGN_32(cbMinChunk, PAGE_SIZE);
        if (cbChunk > cbMinChunk)
            do
            {
                cbChunk >>= 2;
                cbChunk = RT_ALIGN_32(cbChunk, PAGE_SIZE);
#ifdef VBGL_PH_USE_MEMOBJ
                rc       = RTR0MemObjAllocCont(&hMemObj, cbChunk, g_vbgldata.HCPhysMax, false /*fExecutable*/);
                pChunk   = (VBGLPHYSHEAPCHUNK *)(RT_SUCCESS(rc) ? RTR0MemObjAddress(hMemObj) : NULL);
                PhysAddr = RT_SUCCESS(rc) ? (RTCCPHYS)RTR0MemObjGetPagePhysAddr(hMemObj, 0 /*iPage*/) : NIL_RTCCPHYS;
#else
                pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
#endif
            } while (!pChunk && cbChunk > cbMinChunk);
    }
    if (pChunk)
    {
        VBGLPHYSHEAPCHUNK     *pOldHeadChunk;
        VBGLPHYSHEAPFREEBLOCK *pBlock;
        AssertRelease(   g_vbgldata.HCPhysMax == NIL_RTHCPHYS
                      || (PhysAddr < _4G && PhysAddr + cbChunk <= _4G));

        /*
         * Init the new chunk.
         */
        pChunk->u32Signature = VBGL_PH_CHUNKSIGNATURE;
        pChunk->cbChunk      = cbChunk;
        pChunk->physAddr     = PhysAddr;
        pChunk->cBlocks      = 0;
        pChunk->cFreeBlocks  = 0;
        pChunk->pNext        = NULL;
        pChunk->pPrev        = NULL;
#ifdef VBGL_PH_USE_MEMOBJ
        pChunk->hMemObj      = hMemObj;
#endif

        /* Initialize the padding too: */
#if ARCH_BITS == 64
        pChunk->auPadding2[0] = UINT64_C(0xADDCAAA3ADDCAAA2);
        pChunk->auPadding2[1] = UINT64_C(0xADDCAAA5ADDCAAA4);
# ifndef VBGL_PH_USE_MEMOBJ
        pChunk->auPadding2[2] = UINT64_C(0xADDCAAA7ADDCAAA6);
# endif
#endif

        /*
         * Initialize the free block, which now occupies the entire chunk.
         */
        pBlock = (VBGLPHYSHEAPFREEBLOCK *)(pChunk + 1);
        vbglPhysHeapInitFreeBlock(pBlock, pChunk, cbChunk - sizeof(VBGLPHYSHEAPCHUNK) - sizeof(VBGLPHYSHEAPBLOCK));
        vbglPhysHeapInsertBlock(&pBlock->Core);
        vbglPhysHeapInsertFreeBlock(pBlock);

        /*
         * Add the chunk to the list.
         */
        pOldHeadChunk = g_vbgldata.pChunkHead;
        pChunk->pNext = pOldHeadChunk;
        if (pOldHeadChunk)
            pOldHeadChunk->pPrev = pChunk;
        g_vbgldata.pChunkHead = pChunk;

        VBGL_PH_DPRINTF(("Allocated chunk %p LB %#x, block %p LB %#x\n", pChunk, cbChunk, pBlock, pBlock->Core.cbUser));
        return pBlock;
    }
    LogRel(("vbglPhysHeapChunkAlloc: failed to alloc %u (%#x) contiguous bytes.\n", cbChunk, cbChunk));
    return NULL;
}


/**
 * Deletes a chunk: unlinks all its blocks and frees its memory.
 */
static void vbglPhysHeapChunkDelete(VBGLPHYSHEAPCHUNK *pChunk)
{
    uintptr_t uEnd, uCur;
    Assert(pChunk != NULL);
    AssertMsg(pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE, ("pChunk->u32Signature = %08X\n", pChunk->u32Signature));

    VBGL_PH_DPRINTF(("Deleting chunk %p size %x\n", pChunk, pChunk->cbChunk));

    /*
     * First scan the chunk and unlink all blocks from the lists.
     *
     * Note! We could do this by finding the first and last block list entries
     *       and just drop the whole chain relating to this chunk, rather than
     *       doing it one by one.  But doing it one by one is simpler and will
     *       continue to work if the block list ends up in an unsorted state.
     */
    uEnd = (uintptr_t)pChunk + pChunk->cbChunk;
    uCur = (uintptr_t)(pChunk + 1);

    while (uCur < uEnd)
    {
        VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)uCur;
        Assert(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE);
        Assert(pBlock->pChunk == pChunk);

        uCur += pBlock->cbUser + sizeof(VBGLPHYSHEAPBLOCK);
        Assert(uCur == (uintptr_t)pBlock->pNext || uCur >= uEnd);

        if (!pBlock->fAllocated)
            vbglPhysHeapUnlinkFreeBlock((VBGLPHYSHEAPFREEBLOCK *)pBlock);
        vbglPhysHeapUnlinkBlock(pBlock);
    }

    AssertMsg(uCur == uEnd, ("uCur = %p, uEnd = %p, pChunk->cbChunk = %08X\n", uCur, uEnd, pChunk->cbChunk));

    /*
     * Unlink the chunk from the chunk list.
     */
    if (pChunk->pNext)
        pChunk->pNext->pPrev = pChunk->pPrev;
    /* else: we do not maintain a tail pointer. */

    if (pChunk->pPrev)
        pChunk->pPrev->pNext = pChunk->pNext;
    else
    {
        Assert(g_vbgldata.pChunkHead == pChunk);
        g_vbgldata.pChunkHead = pChunk->pNext;
    }

    /*
     * Finally, free the chunk memory.
     */
#ifdef VBGL_PH_USE_MEMOBJ
    RTR0MemObjFree(pChunk->hMemObj, true /*fFreeMappings*/);
#else
    RTMemContFree(pChunk, pChunk->cbChunk);
#endif
}


DECLR0VBGL(void *) VbglR0PhysHeapAlloc(uint32_t cb)
{
    VBGLPHYSHEAPFREEBLOCK *pBlock;
    VBGLPHYSHEAPFREEBLOCK *pIter;
    int32_t                cLeft;
#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
    uint32_t               cbAlwaysSplit;
#endif
    int                    rc;

    /*
     * Make sure we don't allocate anything too small to turn into a free node
     * and align the size to prevent pointer misalignment and whatnot.
     */
    cb = RT_MAX(cb, VBGL_PH_SMALLEST_ALLOC_SIZE);
    cb = RT_ALIGN_32(cb, VBGL_PH_ALLOC_ALIGN);
    AssertCompile(VBGL_PH_ALLOC_ALIGN <= sizeof(pBlock->Core));

    rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
    AssertRCReturn(rc, NULL);

    dumpheap("pre alloc");

    /*
     * Search the free list.  We do this in linear fashion as we don't expect
     * there to be many blocks in the heap.
     */
#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
    cbAlwaysSplit = cb + VBGL_PH_STOP_SEARCH_AT_EXCESS;
#endif
    cLeft  = VBGL_PH_MAX_FREE_SEARCH;
    pBlock = NULL;
    if (cb <= PAGE_SIZE / 4 * 3)
    {
        /* Smaller than 3/4 page: Prefer a free block that can keep the request within a single page,
           so HGCM processing in VMMDev can use page locks instead of several reads and writes. */
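        /* Page-containment test used below (illustrative, assuming 4 KiB
           pages): if the user data at (pIter + 1) starts at page offset
           0xE00, there are 0x200 bytes left in that page, so a 0x180 byte
           request stays within the page while a 0x300 byte one would
           straddle a page boundary. */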
        VBGLPHYSHEAPFREEBLOCK *pFallback = NULL;
        for (pIter = g_vbgldata.pFreeHead; pIter != NULL; pIter = pIter->pNextFree, cLeft--)
        {
            AssertBreak(pIter->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
            if (pIter->Core.cbUser >= cb)
            {
                if (pIter->Core.cbUser == cb)
                {
                    if (PAGE_SIZE - ((uintptr_t)(pIter + 1) & PAGE_OFFSET_MASK) >= cb)
                    {
                        pBlock = pIter;
                        break;
                    }
                    pFallback = pIter;
                }
                else
                {
                    if (!pFallback || pIter->Core.cbUser < pFallback->Core.cbUser)
                        pFallback = pIter;
                    if (PAGE_SIZE - ((uintptr_t)(pIter + 1) & PAGE_OFFSET_MASK) >= cb)
                    {
                        if (!pBlock || pIter->Core.cbUser < pBlock->Core.cbUser)
                            pBlock = pIter;
#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
                        else if (pIter->Core.cbUser >= cbAlwaysSplit)
                        {
                            pBlock = pIter;
                            break;
                        }
#endif
                    }
                }

                if (cLeft > 0)
                { /* likely */ }
                else
                    break;
            }
        }

        if (!pBlock)
            pBlock = pFallback;
    }
    else
    {
        /* Larger than 3/4 page: Find the closest free list match. */
        for (pIter = g_vbgldata.pFreeHead; pIter != NULL; pIter = pIter->pNextFree, cLeft--)
        {
            AssertBreak(pIter->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
            if (pIter->Core.cbUser >= cb)
            {
                if (pIter->Core.cbUser == cb)
                {
                    /* Exact match - we're done! */
                    pBlock = pIter;
                    break;
                }

#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
                if (pIter->Core.cbUser >= cbAlwaysSplit)
                {
                    /* Really big block - no point in continuing the search! */
                    pBlock = pIter;
                    break;
                }
#endif
                /* Looking for a free block with the nearest size. */
                if (!pBlock || pIter->Core.cbUser < pBlock->Core.cbUser)
                    pBlock = pIter;

                if (cLeft > 0)
                { /* likely */ }
                else
                    break;
            }
        }
    }

    if (!pBlock)
    {
        /* No free block found; allocate a new chunk and return its only
           free block. */
        pBlock = vbglPhysHeapChunkAlloc(cb);
    }

    if (pBlock)
    {
        /* We have a free block, either found or allocated. */
        AssertMsg(pBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE,
                  ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->Core.u32Signature));
        AssertMsg(!pBlock->Core.fAllocated, ("pBlock = %p\n", pBlock));

        /*
         * If the block is too large, split off a free block with the unused space.
         *
         * We do this before unlinking the block so we can preserve the location
         * in the free list.
         *
         * Note! We cannot split off and return the tail end here, because that
         *       may violate the same-page requirements for requests smaller
         *       than 3/4 page.
         */
        AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= sizeof(*pBlock) - sizeof(pBlock->Core));
        if (pBlock->Core.cbUser >= sizeof(VBGLPHYSHEAPBLOCK) * 2 + VBGL_PH_MIN_SPLIT_FREE_BLOCK + cb)
        {
            pIter = (VBGLPHYSHEAPFREEBLOCK *)((uintptr_t)(&pBlock->Core + 1) + cb);
            vbglPhysHeapInitFreeBlock(pIter, pBlock->Core.pChunk, pBlock->Core.cbUser - cb - sizeof(VBGLPHYSHEAPBLOCK));

            pBlock->Core.cbUser = cb;

            /* Insert the new 'pIter' block after the 'pBlock' in the block list
               and in the free list. */
            vbglPhysHeapInsertBlockAfter(&pIter->Core, &pBlock->Core);
            vbglPhysHeapInsertFreeBlockAfter(pIter, pBlock);
        }
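
        /* Split arithmetic (illustrative, assuming 32-byte block headers on a
           64-bit guest): serving cb = 256 from a 512 byte free block passes
           the check above (512 >= 2*32 + 32 + 256 = 352), so the tail becomes
           a new free block with 512 - 256 - 32 = 224 bytes of user space. */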

        /*
         * Unlink the block from the free list and mark it as allocated.
         */
        vbglPhysHeapUnlinkFreeBlock(pBlock);
        pBlock->Core.fAllocated = true;

        dumpheap("post alloc");

        /*
         * Return success.
         */
        rc = RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
        AssertRC(rc);

        VBGL_PH_DPRINTF(("VbglR0PhysHeapAlloc: returns %p size %x\n", pBlock + 1, pBlock->Core.cbUser));
        return &pBlock->Core + 1;
    }

    /*
     * Return failure.
     */
    rc = RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
    AssertRC(rc);

    VBGL_PH_DPRINTF(("VbglR0PhysHeapAlloc: returns NULL (requested %#x bytes)\n", cb));
    return NULL;
}


DECLR0VBGL(RTCCPHYS) VbglR0PhysHeapGetPhysAddr(void *pv)
{
    /*
     * Validate the incoming pointer.
     */
    if (pv != NULL)
    {
        VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
        if (   pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE
            && pBlock->fAllocated)
        {
            /*
             * Calculate and return its physical address.
             */
            VBGLPHYSHEAPCHUNK *pChunk = pBlock->pChunk;
            return pChunk->physAddr + (uint32_t)((uintptr_t)pv - (uintptr_t)pChunk);
        }

        AssertMsgFailed(("Use after free or corrupt pointer variable: pv=%p pBlock=%p: u32Signature=%#x cb=%#x fAllocated=%d\n",
                         pv, pBlock, pBlock->u32Signature, pBlock->cbUser, pBlock->fAllocated));
    }
    else
        AssertMsgFailed(("Unexpected NULL pointer\n"));
    return 0;
}


DECLR0VBGL(void) VbglR0PhysHeapFree(void *pv)
{
    if (pv != NULL)
    {
        VBGLPHYSHEAPFREEBLOCK *pBlock;

        int rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
        AssertRCReturnVoid(rc);

        dumpheap("pre free");

        /*
         * Validate the block header.
         */
        pBlock = (VBGLPHYSHEAPFREEBLOCK *)((VBGLPHYSHEAPBLOCK *)pv - 1);
        if (   pBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE
            && pBlock->Core.fAllocated
            && pBlock->Core.cbUser >= VBGL_PH_SMALLEST_ALLOC_SIZE)
        {
            VBGLPHYSHEAPCHUNK *pChunk;
            VBGLPHYSHEAPBLOCK *pNeighbour;

            /*
             * Change the block status to freed.
             */
            VBGL_PH_DPRINTF(("VbglR0PhysHeapFree: %p size %#x\n", pv, pBlock->Core.cbUser));

            pBlock->Core.fAllocated = false;
            pBlock->pNextFree = pBlock->pPrevFree = NULL;
            vbglPhysHeapInsertFreeBlock(pBlock);

            dumpheap("post insert");

            /*
             * Check if the block after this one is also free and we can merge it into this one.
             */
            pChunk = pBlock->Core.pChunk;

            pNeighbour = pBlock->Core.pNext;
            if (   pNeighbour
                && !pNeighbour->fAllocated
                && pNeighbour->pChunk == pChunk)
            {
                Assert((uintptr_t)pBlock + sizeof(pBlock->Core) + pBlock->Core.cbUser == (uintptr_t)pNeighbour);

                /* Adjust the size of the current memory block. */
                pBlock->Core.cbUser += pNeighbour->cbUser + sizeof(VBGLPHYSHEAPBLOCK);

                /* Unlink the following node and invalidate it. */
                vbglPhysHeapUnlinkFreeBlock((VBGLPHYSHEAPFREEBLOCK *)pNeighbour);
                vbglPhysHeapUnlinkBlock(pNeighbour);

                pNeighbour->u32Signature = ~VBGL_PH_BLOCKSIGNATURE;
                pNeighbour->cbUser       = UINT32_MAX / 4;

                dumpheap("post merge after");
            }
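
            /* Merge arithmetic (illustrative, assuming 32-byte block headers
               on a 64-bit guest): if the freed block has 64 user bytes and is
               followed by a free block with 224 user bytes, the merge above
               yields one free block with 64 + 32 + 224 = 320 user bytes, as
               the absorbed header is reclaimed as user space. */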

            /*
             * Same check for the block before us.  This invalidates pBlock.
             */
            pNeighbour = pBlock->Core.pPrev;
            if (   pNeighbour
                && !pNeighbour->fAllocated
                && pNeighbour->pChunk == pChunk)
            {
                Assert((uintptr_t)pNeighbour + sizeof(*pNeighbour) + pNeighbour->cbUser == (uintptr_t)pBlock);

                /* Adjust the size of the block before us. */
                pNeighbour->cbUser += pBlock->Core.cbUser + sizeof(VBGLPHYSHEAPBLOCK);

                /* Unlink this node and invalidate it. */
                vbglPhysHeapUnlinkFreeBlock(pBlock);
                vbglPhysHeapUnlinkBlock(&pBlock->Core);

                pBlock->Core.u32Signature = ~VBGL_PH_BLOCKSIGNATURE;
                pBlock->Core.cbUser       = UINT32_MAX / 8;

                pBlock = NULL; /* invalid */

                dumpheap("post merge before");
            }

            /*
             * If this chunk is now completely unused, delete it if there are
             * more completely free ones.
             */
            if (   pChunk->cFreeBlocks == pChunk->cBlocks
                && (pChunk->pPrev || pChunk->pNext))
            {
                VBGLPHYSHEAPCHUNK *pCurChunk;
                uint32_t           cUnusedChunks = 0;
                for (pCurChunk = g_vbgldata.pChunkHead; pCurChunk; pCurChunk = pCurChunk->pNext)
                {
                    AssertBreak(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE);
                    if (pCurChunk->cFreeBlocks == pCurChunk->cBlocks)
                    {
                        cUnusedChunks++;
                        if (cUnusedChunks > 1)
                        {
                            /* Delete the current chunk.  This also unlinks all
                               free blocks remaining in the chunk from the free
                               list, so pBlock will be invalid after this. */
                            vbglPhysHeapChunkDelete(pChunk);
                            pBlock     = NULL; /* invalid */
                            pChunk     = NULL;
                            pNeighbour = NULL;
                            break;
                        }
                    }
                }
            }

            dumpheap("post free");
        }
        else
            AssertMsgFailed(("pBlock: %p: u32Signature=%#x cb=%#x fAllocated=%d - double free?\n",
                             pBlock, pBlock->Core.u32Signature, pBlock->Core.cbUser, pBlock->Core.fAllocated));

        rc = RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
        AssertRC(rc);
    }
}

#ifdef IN_TESTCASE /* For the testcase only */

/**
 * Returns the sum of all free heap blocks.
 *
 * This is the amount of memory you can theoretically allocate if you do
 * allocations exactly matching the free blocks.
 *
 * @returns The size of the free blocks.
 * @returns 0 if the heap was safely detected as being bad.
 */
DECLVBGL(size_t) VbglR0PhysHeapGetFreeSize(void)
{
    int rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
    AssertRCReturn(rc, 0);

    size_t cbTotal = 0;
    for (VBGLPHYSHEAPFREEBLOCK *pCurBlock = g_vbgldata.pFreeHead; pCurBlock; pCurBlock = pCurBlock->pNextFree)
    {
        Assert(pCurBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
        Assert(!pCurBlock->Core.fAllocated);
        cbTotal += pCurBlock->Core.cbUser;
    }

    RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
    return cbTotal;
}


/**
 * Checks the heap, the caller being responsible for locking.
 *
 * @returns VINF_SUCCESS if okay, error status if not.
 * @param   pErrInfo    Where to return more error details, optional.
 */
static int vbglR0PhysHeapCheckLocked(PRTERRINFO pErrInfo)
{
    /*
     * Scan the blocks in each chunk, walking the block list in parallel.
     */
    const VBGLPHYSHEAPBLOCK *pPrevBlockListEntry = NULL;
    const VBGLPHYSHEAPBLOCK *pCurBlockListEntry  = g_vbgldata.pBlockHead;
    unsigned                 acTotalBlocks[2]    = { 0, 0 };
    for (VBGLPHYSHEAPCHUNK *pCurChunk = g_vbgldata.pChunkHead, *pPrevChunk = NULL; pCurChunk; pCurChunk = pCurChunk->pNext)
    {
        AssertReturn(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
                     RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurChunk=%p: magic=%#x", pCurChunk, pCurChunk->u32Signature));
        AssertReturn(pCurChunk->pPrev == pPrevChunk,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
                                   "pCurChunk=%p: pPrev=%p, expected %p", pCurChunk, pCurChunk->pPrev, pPrevChunk));

        const VBGLPHYSHEAPBLOCK *pCurBlock   = (const VBGLPHYSHEAPBLOCK *)(pCurChunk + 1);
        uintptr_t const          uEnd        = (uintptr_t)pCurChunk + pCurChunk->cbChunk;
        unsigned                 acBlocks[2] = { 0, 0 };
        while ((uintptr_t)pCurBlock < uEnd)
        {
            AssertReturn(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
                                       "pCurBlock=%p: magic=%#x", pCurBlock, pCurBlock->u32Signature));
            AssertReturn(pCurBlock->pChunk == pCurChunk,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
                                       "pCurBlock=%p: pChunk=%p, expected %p", pCurBlock, pCurBlock->pChunk, pCurChunk));
            AssertReturn(   pCurBlock->cbUser >= VBGL_PH_SMALLEST_ALLOC_SIZE
                         && pCurBlock->cbUser <= VBGL_PH_LARGEST_ALLOC_SIZE
                         && RT_ALIGN_32(pCurBlock->cbUser, VBGL_PH_ALLOC_ALIGN) == pCurBlock->cbUser,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
                                       "pCurBlock=%p: cbUser=%#x", pCurBlock, pCurBlock->cbUser));
            AssertReturn(pCurBlock == pCurBlockListEntry,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                       "pCurChunk=%p: pCurBlock=%p, pCurBlockListEntry=%p\n",
                                       pCurChunk, pCurBlock, pCurBlockListEntry));
            AssertReturn(pCurBlock->pPrev == pPrevBlockListEntry,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_5,
                                       "pCurChunk=%p: pCurBlock->pPrev=%p, pPrevBlockListEntry=%p\n",
                                       pCurChunk, pCurBlock->pPrev, pPrevBlockListEntry));

            acBlocks[pCurBlock->fAllocated] += 1;

            /* advance */
            pPrevBlockListEntry = pCurBlock;
            pCurBlockListEntry  = pCurBlock->pNext;
            pCurBlock = (const VBGLPHYSHEAPBLOCK *)((uintptr_t)(pCurBlock + 1) + pCurBlock->cbUser);
        }
        AssertReturn((uintptr_t)pCurBlock == uEnd,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                   "pCurBlock=%p uEnd=%p", pCurBlock, uEnd));

        acTotalBlocks[1] += acBlocks[1];
        AssertReturn(acBlocks[0] + acBlocks[1] == (uint32_t)pCurChunk->cBlocks,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                   "pCurChunk=%p: cBlocks=%u, expected %u",
                                   pCurChunk, pCurChunk->cBlocks, acBlocks[0] + acBlocks[1]));

        acTotalBlocks[0] += acBlocks[0];
        AssertReturn(acBlocks[0] == (uint32_t)pCurChunk->cFreeBlocks,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_5,
                                   "pCurChunk=%p: cFreeBlocks=%u, expected %u",
                                   pCurChunk, pCurChunk->cFreeBlocks, acBlocks[0]));

        pPrevChunk = pCurChunk;
    }

    AssertReturn(acTotalBlocks[0] == (uint32_t)g_vbgldata.cFreeBlocks,
                 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
                               "g_vbgldata.cFreeBlocks=%u, expected %u", g_vbgldata.cFreeBlocks, acTotalBlocks[0]));
    AssertReturn(acTotalBlocks[0] + acTotalBlocks[1] == (uint32_t)g_vbgldata.cBlocks,
                 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
                               "g_vbgldata.cBlocks=%u, expected %u", g_vbgldata.cBlocks, acTotalBlocks[0] + acTotalBlocks[1]));

    /*
     * Check that the free list contains the same number of blocks as we
     * encountered during the above scan.
     */
    {
        unsigned cFreeListBlocks = 0;
        for (const VBGLPHYSHEAPFREEBLOCK *pCurBlock = g_vbgldata.pFreeHead, *pPrevBlock = NULL;
             pCurBlock;
             pCurBlock = pCurBlock->pNextFree)
        {
            AssertReturn(pCurBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE,
                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
                                       "pCurBlock=%p/free: magic=%#x", pCurBlock, pCurBlock->Core.u32Signature));
            AssertReturn(pCurBlock->pPrevFree == pPrevBlock,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
                                       "pCurBlock=%p/free: pPrev=%p, expected %p", pCurBlock, pCurBlock->pPrevFree, pPrevBlock));
            AssertReturn(pCurBlock->Core.pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurBlock=%p/free: chunk (%p) magic=%#x",
                                       pCurBlock, pCurBlock->Core.pChunk, pCurBlock->Core.pChunk->u32Signature));
            cFreeListBlocks++;
            pPrevBlock = pCurBlock;
        }

        AssertReturn(cFreeListBlocks == acTotalBlocks[0],
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
                                   "Found %u in free list, expected %u", cFreeListBlocks, acTotalBlocks[0]));
    }
    return VINF_SUCCESS;
}


/**
 * Performs a heap check.
 *
 * @returns VINF_SUCCESS if okay, error status if not.
 * @param   pErrInfo    Where to return more error details, optional.
 */
DECLVBGL(int) VbglR0PhysHeapCheck(PRTERRINFO pErrInfo)
{
    int rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
    AssertRCReturn(rc, rc);

    rc = vbglR0PhysHeapCheckLocked(pErrInfo);

    RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
    return rc;
}

#endif /* IN_TESTCASE */

DECLR0VBGL(int) VbglR0PhysHeapInit(RTHCPHYS HCPhysMax)
{
    g_vbgldata.HCPhysMax = HCPhysMax;
    g_vbgldata.hMtxHeap  = NIL_RTSEMFASTMUTEX;

    /* Allocate the first chunk of the heap. */
    VBGLPHYSHEAPFREEBLOCK *pBlock = vbglPhysHeapChunkAlloc(0);
    if (pBlock)
        return RTSemFastMutexCreate(&g_vbgldata.hMtxHeap);
    return VERR_NO_CONT_MEMORY;
}
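
/*
 * Illustrative initialization pairing (hypothetical driver load/unload hooks;
 * passing NIL_RTHCPHYS means chunk allocations are not restricted to any
 * maximum physical address):
 *
 *     int rc = VbglR0PhysHeapInit(NIL_RTHCPHYS);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... the driver allocates and frees heap blocks ...
 *         VbglR0PhysHeapTerminate();
 *     }
 */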

DECLR0VBGL(void) VbglR0PhysHeapTerminate(void)
{
    while (g_vbgldata.pChunkHead)
        vbglPhysHeapChunkDelete(g_vbgldata.pChunkHead);

    RTSemFastMutexDestroy(g_vbgldata.hMtxHeap);
    g_vbgldata.hMtxHeap = NIL_RTSEMFASTMUTEX;
}
