VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/MMHyper.cpp@91999

Last change on this file since 91999 was 91856, checked in by vboxsync, 3 years ago

VMM/MM: Build fix. bugref:10122

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 30.7 KB
/* $Id: MMHyper.cpp 91856 2021-10-20 01:02:23Z vboxsync $ */
/** @file
 * MM - Memory Manager - Hypervisor Memory Area.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_MM_HYPER
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/dbgf.h>
#include "MMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/gvm.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/log.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/string.h>


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup);
static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap);
static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC);
static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static int MMR3HyperReserveFence(PVM pVM);
static int MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages,
                             const char *pszDesc, PRTGCPTR pGCPtr);


/**
 * Determine the default heap size.
 *
 * @returns The heap size in bytes.
 * @param   pVM     The cross context VM structure.
 */
static uint32_t mmR3HyperComputeHeapSize(PVM pVM)
{
    /** @todo Redo after moving allocations off the hyper heap. */

    /*
     * Gather parameters.
     */
    bool fCanUseLargerHeap = true;
    //bool fCanUseLargerHeap;
    //int rc = CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM"), "CanUseLargerHeap", &fCanUseLargerHeap, false);
    //AssertStmt(RT_SUCCESS(rc), fCanUseLargerHeap = false);

    uint64_t cbRam;
    int rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
    AssertStmt(RT_SUCCESS(rc), cbRam = _1G);

    /*
     * We need to keep saved state compatibility if raw-mode is an option,
     * so let's filter out that case first.
     */
    if (   !fCanUseLargerHeap
        && VM_IS_RAW_MODE_ENABLED(pVM)
        && cbRam < 16*_1G64)
        return 1280 * _1K;

    /*
     * Calculate the heap size.
     */
    uint32_t cbHeap = _1M;

    /* The newer chipset may have more devices attached, putting additional
       pressure on the heap. */
    if (fCanUseLargerHeap)
        cbHeap += _1M;

    /* More CPUs means some extra memory usage. */
    if (pVM->cCpus > 1)
        cbHeap += pVM->cCpus * _64K;

    /* Lots of memory means extra memory consumption as well (pool). */
    if (cbRam > 16*_1G64)
        cbHeap += _2M; /** @todo figure out exactly how much */

    return RT_ALIGN(cbHeap, _256K);
}
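
/* Worked example (illustrative only, not part of the original source): for a
 * VM with fCanUseLargerHeap = true, cCpus = 4 and cbRam = 32 GiB, the function
 * above computes:
 *
 *     cbHeap = _1M            base
 *            + _1M            larger heap allowed
 *            + 4 * _64K       per-CPU overhead
 *            + _2M            more than 16 GiB of RAM (pool pressure)
 *            = 0x440000       (4352 KiB, already a multiple of _256K)
 */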


/**
 * Initializes the hypervisor related MM stuff without
 * calling down to PGM.
 *
 * PGM is not initialized at this point; it relies on
 * the heap for its own initialization.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int mmR3HyperInit(PVM pVM)
{
    LogFlow(("mmR3HyperInit:\n"));

    /*
     * Decide the hypervisor mapping in the guest context
     * and set up the various hypervisor area and heap parameters.
     */
    pVM->mm.s.pvHyperAreaGC = (RTGCPTR)MM_HYPER_AREA_ADDRESS;
    pVM->mm.s.cbHyperArea   = MM_HYPER_AREA_MAX_SIZE;
    AssertRelease(RT_ALIGN_T(pVM->mm.s.pvHyperAreaGC, 1 << X86_PD_SHIFT, RTGCPTR) == pVM->mm.s.pvHyperAreaGC);
    Assert(pVM->mm.s.pvHyperAreaGC < 0xff000000);

    /** @todo @bugref{1865}, @bugref{3202}: Change the cbHyperHeap default
     *        depending on whether VT-x/AMD-V is enabled or not!  Don't waste
     *        precious kernel space on heap for the PATM.
     */
    PCFGMNODE pMM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM");
    uint32_t  cbHyperHeap;
    int rc = CFGMR3QueryU32Def(pMM, "cbHyperHeap", &cbHyperHeap, mmR3HyperComputeHeapSize(pVM));
    AssertLogRelRCReturn(rc, rc);

    cbHyperHeap = RT_ALIGN_32(cbHyperHeap, PAGE_SIZE);
    LogRel(("MM: cbHyperHeap=%#x (%u)\n", cbHyperHeap, cbHyperHeap));

    /*
     * Allocate the hypervisor heap.
     *
     * (This must be done before we start adding memory to the
     * hypervisor static area because lookup records are allocated from it.)
     */
    rc = mmR3HyperHeapCreate(pVM, cbHyperHeap, &pVM->mm.s.pHyperHeapR3, &pVM->mm.s.pHyperHeapR0);
    if (RT_SUCCESS(rc))
    {
        /*
         * Make a small head fence to fend off accidental sequential access.
         */
        MMR3HyperReserveFence(pVM);

        /*
         * Map the VM structure into the hypervisor space.
         * Note! Keeping the mappings here for now in case someone is using
         *       MMHyperR3ToR0 or similar.
         */
        AssertCompileSizeAlignment(VM, PAGE_SIZE);
        AssertCompileSizeAlignment(VMCPU, PAGE_SIZE);
        AssertCompileSizeAlignment(GVM, PAGE_SIZE);
        AssertCompileSizeAlignment(GVMCPU, PAGE_SIZE);
        AssertRelease(pVM->cbSelf == sizeof(VM));
        AssertRelease(pVM->cbVCpu == sizeof(VMCPU));
/** @todo get rid of this */
        RTGCPTR GCPtr;
        rc = MMR3HyperMapPages(pVM, pVM, pVM->pVMR0ForCall, sizeof(VM) >> PAGE_SHIFT, pVM->paVMPagesR3, "VM", &GCPtr);
        uint32_t offPages = RT_UOFFSETOF_DYN(GVM, aCpus) >> PAGE_SHIFT; /* (Using the _DYN variant avoids -Winvalid-offset.) */
        for (uint32_t idCpu = 0; idCpu < pVM->cCpus && RT_SUCCESS(rc); idCpu++, offPages += sizeof(GVMCPU) >> PAGE_SHIFT)
        {
            PVMCPU pVCpu = pVM->apCpusR3[idCpu];
            RTGCPTR GCPtrIgn;
            rc = MMR3HyperMapPages(pVM, pVCpu, pVM->pVMR0ForCall + offPages * PAGE_SIZE,
                                   sizeof(VMCPU) >> PAGE_SHIFT, &pVM->paVMPagesR3[offPages], "VMCPU", &GCPtrIgn);
        }
        if (RT_SUCCESS(rc))
        {
            pVM->pVMRC = (RTRCPTR)GCPtr;
            for (VMCPUID i = 0; i < pVM->cCpus; i++)
                pVM->apCpusR3[i]->pVMRC = pVM->pVMRC;

            /* Reserve a page for fencing. */
            MMR3HyperReserveFence(pVM);

            /*
             * Map the heap into the hypervisor space.
             */
            rc = mmR3HyperHeapMap(pVM, pVM->mm.s.pHyperHeapR3, &GCPtr);
            if (RT_SUCCESS(rc))
            {
                pVM->mm.s.pHyperHeapRC = (RTRCPTR)GCPtr;
                Assert(pVM->mm.s.pHyperHeapRC == GCPtr);

                /*
                 * Register info handlers.
                 */
                DBGFR3InfoRegisterInternal(pVM, "hma", "Show the layout of the Hypervisor Memory Area.", mmR3HyperInfoHma);

                LogFlow(("mmR3HyperInit: returns VINF_SUCCESS\n"));
                return VINF_SUCCESS;
            }
            /* Caller will do proper cleanup. */
        }
    }

    LogFlow(("mmR3HyperInit: returns %Rrc\n", rc));
    return rc;
}
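
/* Note (added for clarity, restating the mapping loop above): the GVM
 * structure keeps the shared VM data first and the per-CPU GVMCPU blocks in
 * its aCpus array, all page aligned, so the ring-0 address of CPU i's pages
 * works out to:
 *
 *     pVM->pVMR0ForCall + (RT_UOFFSETOF_DYN(GVM, aCpus) + i * sizeof(GVMCPU))
 *
 * which is exactly what the offPages bookkeeping computes in page units.
 */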


/**
 * Cleans up the hypervisor heap.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int mmR3HyperTerm(PVM pVM)
{
    if (pVM->mm.s.pHyperHeapR3)
        PDMR3CritSectDelete(pVM, &pVM->mm.s.pHyperHeapR3->Lock);

    return VINF_SUCCESS;
}


/**
 * Finalizes the HMA mapping (obsolete).
 *
 * This is called later during init, most (all) HMA allocations should be done
 * by the time this function is called.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) MMR3HyperInitFinalize(PVM pVM)
{
    LogFlow(("MMR3HyperInitFinalize:\n"));

    /*
     * Initialize the hyper heap critical section.
     */
    int rc = PDMR3CritSectInit(pVM, &pVM->mm.s.pHyperHeapR3->Lock, RT_SRC_POS, "MM-HYPER");
    AssertRC(rc);

    pVM->mm.s.fPGMInitialized = true;

    LogFlow(("MMR3HyperInitFinalize: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}


/**
 * Maps locked R3 virtual memory into the hypervisor region in the GC.
 *
 * @return VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pvR3        The ring-3 address of the memory, must be page aligned.
 * @param   pvR0        The ring-0 address of the memory, must be page aligned. (optional)
 * @param   cPages      The number of pages.
 * @param   paPages     The page descriptors.
 * @param   pszDesc     Mapping description.
 * @param   pGCPtr      Where to store the GC address corresponding to pvR3.
 */
static int MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages,
                             const char *pszDesc, PRTGCPTR pGCPtr)
{
    LogFlow(("MMR3HyperMapPages: pvR3=%p pvR0=%p cPages=%zu paPages=%p pszDesc=%p:{%s} pGCPtr=%p\n",
             pvR3, pvR0, cPages, paPages, pszDesc, pszDesc, pGCPtr));

    /*
     * Validate input.
     */
    AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
    AssertPtrReturn(paPages, VERR_INVALID_POINTER);
    AssertReturn(cPages > 0, VERR_PAGE_COUNT_OUT_OF_RANGE);
    AssertReturn(cPages <= VBOX_MAX_ALLOC_PAGE_COUNT, VERR_PAGE_COUNT_OUT_OF_RANGE);
    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pGCPtr, VERR_INVALID_PARAMETER);

    /*
     * Add the memory to the hypervisor area.
     */
    RTGCPTR         GCPtr;
    PMMLOOKUPHYPER  pLookup;
    int rc = mmR3HyperMap(pVM, cPages << PAGE_SHIFT, pszDesc, &GCPtr, &pLookup);
    if (RT_SUCCESS(rc))
    {
        /*
         * Copy the physical page addresses and tell PGM about them.
         */
        PRTHCPHYS paHCPhysPages = (PRTHCPHYS)MMR3HeapAlloc(pVM, MM_TAG_MM, sizeof(RTHCPHYS) * cPages);
        if (paHCPhysPages)
        {
            for (size_t i = 0; i < cPages; i++)
            {
                AssertReleaseMsgReturn(   paPages[i].Phys != 0
                                       && paPages[i].Phys != NIL_RTHCPHYS
                                       && !(paPages[i].Phys & PAGE_OFFSET_MASK),
                                       ("i=%#zx Phys=%RHp %s\n", i, paPages[i].Phys, pszDesc),
                                       VERR_INTERNAL_ERROR);
                paHCPhysPages[i] = paPages[i].Phys;
            }

            pLookup->enmType                = MMLOOKUPHYPERTYPE_LOCKED;
            pLookup->u.Locked.pvR3          = pvR3;
            pLookup->u.Locked.pvR0          = pvR0;
            pLookup->u.Locked.paHCPhysPages = paHCPhysPages;

            /* done. */
            *pGCPtr = GCPtr;
            return rc;
        }
        /* Don't care about failure cleanup here; we're screwed if this fails anyway. */
    }

    return rc;
}


/**
 * Reserves an electric fence page.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int MMR3HyperReserveFence(PVM pVM)
{
    RT_NOREF(pVM);
    return VINF_SUCCESS;
}


/**
 * Adds memory to the hypervisor memory arena.
 *
 * @return VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   cb          Size of the memory. Will be rounded up to nearest page.
 * @param   pszDesc     The description of the memory.
 * @param   pGCPtr      Where to store the GC address.
 * @param   ppLookup    Where to store the pointer to the lookup record.
 * @remark  We assume the threading structure of VBox imposes natural
 *          serialization of most functions, this one included.
 */
static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup)
{
    /*
     * Validate input.
     */
    const uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
    if (pVM->mm.s.offHyperNextStatic + cbAligned >= pVM->mm.s.cbHyperArea) /* don't use the last page, it's a fence. */
    {
        AssertMsgFailed(("Out of static mapping space in the HMA! offHyperNextStatic=%x cbAligned=%x cbHyperArea=%x\n",
                         pVM->mm.s.offHyperNextStatic, cbAligned, pVM->mm.s.cbHyperArea));
        return VERR_NO_MEMORY;
    }

    /*
     * Allocate lookup record.
     */
    PMMLOOKUPHYPER pLookup;
    int rc = MMHyperAlloc(pVM, sizeof(*pLookup), 1, MM_TAG_MM, (void **)&pLookup);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize it and insert it.
         */
        pLookup->offNext         = pVM->mm.s.offLookupHyper;
        pLookup->cb              = cbAligned;
        pLookup->off             = pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offLookupHyper = (uint8_t *)pLookup - (uint8_t *)pVM->mm.s.pHyperHeapR3;
        if (pLookup->offNext != (int32_t)NIL_OFFSET)
            pLookup->offNext    -= pVM->mm.s.offLookupHyper;
        pLookup->enmType         = MMLOOKUPHYPERTYPE_INVALID;
        memset(&pLookup->u, 0xff, sizeof(pLookup->u));
        pLookup->pszDesc         = pszDesc;

        /* Mapping. */
        *pGCPtr = pVM->mm.s.pvHyperAreaGC + pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offHyperNextStatic += cbAligned;

        /* Return pointer. */
        *ppLookup = pLookup;
    }

    AssertRC(rc);
    LogFlow(("mmR3HyperMap: returns %Rrc *pGCPtr=%RGv\n", rc, *pGCPtr));
    return rc;
}
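
/* Walking the lookup chain (illustrative sketch; mmR3HyperInfoHma at the
 * bottom of this file does the same thing for real). offNext links are
 * self-relative byte offsets, terminated by NIL_OFFSET:
 *
 *     PMMLOOKUPHYPER pCur = (PMMLOOKUPHYPER)(  (uint8_t *)pVM->mm.s.pHyperHeapR3
 *                                            + pVM->mm.s.offLookupHyper);
 *     for (;;)
 *     {
 *         RTGCPTR GCPtr = pVM->mm.s.pvHyperAreaGC + pCur->off;      // mapping address
 *         if ((unsigned)pCur->offNext == NIL_OFFSET)                // end of chain
 *             break;
 *         pCur = (PMMLOOKUPHYPER)((uint8_t *)pCur + pCur->offNext); // self-relative link
 *     }
 */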


/**
 * Allocates a new heap.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   cb          The size of the new heap.
 * @param   ppHeap      Where to store the heap pointer on successful return.
 * @param   pR0PtrHeap  Where to store the ring-0 address of the heap on
 *                      success.
 */
static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap)
{
    /*
     * Allocate the hypervisor heap.
     */
    const uint32_t  cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
    uint32_t const  cPages    = cbAligned >> PAGE_SHIFT;
    PSUPPAGE        paPages   = (PSUPPAGE)MMR3HeapAlloc(pVM, MM_TAG_MM, cPages * sizeof(paPages[0]));
    if (!paPages)
        return VERR_NO_MEMORY;
    void           *pv;
    RTR0PTR         pvR0 = NIL_RTR0PTR;
    int rc = SUPR3PageAllocEx(cPages,
                              0 /*fFlags*/,
                              &pv,
                              &pvR0,
                              paPages);
    if (RT_SUCCESS(rc))
    {
        Assert(pvR0 != NIL_RTR0PTR && !(PAGE_OFFSET_MASK & pvR0));
        memset(pv, 0, cbAligned);

        /*
         * Initialize the heap and first free chunk.
         */
        PMMHYPERHEAP pHeap = (PMMHYPERHEAP)pv;
        pHeap->u32Magic             = MMHYPERHEAP_MAGIC;
        pHeap->pbHeapR3             = (uint8_t *)pHeap + MMYPERHEAP_HDR_SIZE;
        pHeap->pbHeapR0             = pvR0 + MMYPERHEAP_HDR_SIZE;
        //pHeap->pbHeapRC           = 0; // set by mmR3HyperHeapMap()
        pHeap->pVMR3                = pVM;
        pHeap->pVMR0                = pVM->pVMR0ForCall;
        pHeap->pVMRC                = pVM->pVMRC;
        pHeap->cbHeap               = cbAligned - MMYPERHEAP_HDR_SIZE;
        pHeap->cbFree               = pHeap->cbHeap - sizeof(MMHYPERCHUNK);
        //pHeap->offFreeHead        = 0;
        //pHeap->offFreeTail        = 0;
        pHeap->offPageAligned       = pHeap->cbHeap;
        //pHeap->HyperHeapStatTree  = 0;
        pHeap->paPages              = paPages;

        PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pHeap->pbHeapR3;
        pFree->cb                   = pHeap->cbFree;
        //pFree->core.offNext       = 0;
        MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
        pFree->core.offHeap         = -(int32_t)MMYPERHEAP_HDR_SIZE;
        //pFree->offNext            = 0;
        //pFree->offPrev            = 0;

        STAMR3Register(pVM, &pHeap->cbHeap, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbHeap", STAMUNIT_BYTES, "The heap size.");
        STAMR3Register(pVM, &pHeap->cbFree, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbFree", STAMUNIT_BYTES, "The free space.");

        *ppHeap = pHeap;
        *pR0PtrHeap = pvR0;
        return VINF_SUCCESS;
    }
    AssertMsgFailed(("SUPR3PageAllocEx(%d,,,,) -> %Rrc\n", cbAligned >> PAGE_SHIFT, rc));

    *ppHeap = NULL;
    return rc;
}
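
/* Resulting layout (illustrative diagram derived from the code above): the
 * heap header and the initial free chunk share one page-aligned allocation:
 *
 *     pv / pvR0 (page aligned)
 *     +---------------------------+  offset 0
 *     | MMHYPERHEAP header        |  MMYPERHEAP_HDR_SIZE bytes
 *     +---------------------------+  <- pbHeapR3 / pbHeapR0
 *     | first free chunk covering |  cbHeap = cbAligned - MMYPERHEAP_HDR_SIZE
 *     | the rest of the block     |
 *     +---------------------------+  offset cbAligned
 *
 * pFree->core.offHeap is negative because it points backwards at the header.
 */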


/**
 * Maps the hyper heap into the hypervisor memory area.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pHeap       The heap to map.
 * @param   ppHeapGC    Where to store the GC address of the heap.
 */
static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC)
{
    Assert(RT_ALIGN_Z(pHeap->cbHeap + MMYPERHEAP_HDR_SIZE, PAGE_SIZE) == pHeap->cbHeap + MMYPERHEAP_HDR_SIZE);
    Assert(pHeap->pbHeapR0);
    Assert(pHeap->paPages);
    int rc = MMR3HyperMapPages(pVM,
                               pHeap,
                               pHeap->pbHeapR0 - MMYPERHEAP_HDR_SIZE,
                               (pHeap->cbHeap + MMYPERHEAP_HDR_SIZE) >> PAGE_SHIFT,
                               pHeap->paPages,
                               "Heap", ppHeapGC);
    if (RT_SUCCESS(rc))
    {
        pHeap->pVMRC    = pVM->pVMRC;
        pHeap->pbHeapRC = *ppHeapGC + MMYPERHEAP_HDR_SIZE;
        /* Reserve a page for fencing. */
        MMR3HyperReserveFence(pVM);

        /* We won't need these any more. */
        MMR3HeapFree(pHeap->paPages);
        pHeap->paPages = NULL;
    }
    return rc;
}


/**
 * Allocates memory in the Hypervisor (GC VMM) area which never will
 * be freed and doesn't have any offset based relation to other heap blocks.
 *
 * The latter means that two blocks allocated by this API will not have the
 * same relative position to each other in GC and HC. In short, never use
 * this API for allocating nodes for an offset based AVL tree!
 *
 * The returned memory is of course zeroed.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   cb          Number of bytes to allocate.
 * @param   uAlignment  Required memory alignment in bytes.
 *                      Values are 0,8,16,32 and PAGE_SIZE.
 *                      0 -> default alignment, i.e. 8 bytes.
 * @param   enmTag      The statistics tag.
 * @param   ppv         Where to store the address to the allocated
 *                      memory.
 * @remark  This is assumed not to be used at times when serialization is required.
 */
VMMR3DECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
{
    return MMR3HyperAllocOnceNoRelEx(pVM, cb, uAlignment, enmTag, 0/*fFlags*/, ppv);
}
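
/* Usage sketch (hypothetical caller; MYDEVSTATE is a made-up type for the
 * example, everything else is the API above):
 *
 *     PMYDEVSTATE pState;
 *     int rc = MMR3HyperAllocOnceNoRel(pVM, sizeof(MYDEVSTATE), 0, MM_TAG_MM,
 *                                      (void **)&pState);
 *     if (RT_FAILURE(rc))
 *         return rc;
 *     // pState is zeroed and lives until VM destruction; there is no free API.
 */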


/**
 * Allocates memory in the Hypervisor (GC VMM) area which never will
 * be freed and doesn't have any offset based relation to other heap blocks.
 *
 * The latter means that two blocks allocated by this API will not have the
 * same relative position to each other in GC and HC. In short, never use
 * this API for allocating nodes for an offset based AVL tree!
 *
 * The returned memory is of course zeroed.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   cb          Number of bytes to allocate.
 * @param   uAlignment  Required memory alignment in bytes.
 *                      Values are 0,8,16,32 and PAGE_SIZE.
 *                      0 -> default alignment, i.e. 8 bytes.
 * @param   enmTag      The statistics tag.
 * @param   fFlags      Flags, see MMHYPER_AONR_FLAGS_KERNEL_MAPPING.
 * @param   ppv         Where to store the address to the allocated memory.
 * @remark  This is assumed not to be used at times when serialization is required.
 */
VMMR3DECL(int) MMR3HyperAllocOnceNoRelEx(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, uint32_t fFlags, void **ppv)
{
    AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
    Assert(!(fFlags & ~(MMHYPER_AONR_FLAGS_KERNEL_MAPPING)));

    /*
     * Choose between allocating a new chunk of HMA memory
     * and the heap. We will only do BIG allocations from HMA and
     * only at creation time.
     */
    if (   (   cb < _64K
            && (   uAlignment != PAGE_SIZE
                || cb < 48*_1K)
            && !(fFlags & MMHYPER_AONR_FLAGS_KERNEL_MAPPING)
           )
        || VMR3GetState(pVM) != VMSTATE_CREATING
       )
    {
        Assert(!(fFlags & MMHYPER_AONR_FLAGS_KERNEL_MAPPING));
        int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
        if (   rc != VERR_MM_HYPER_NO_MEMORY
            || cb <= 8*_1K)
        {
            Log2(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc and *ppv=%p\n",
                  cb, uAlignment, rc, *ppv));
            return rc;
        }
    }

    /*
     * Validate alignment.
     */
    switch (uAlignment)
    {
        case 0:
        case 8:
        case 16:
        case 32:
        case PAGE_SIZE:
            break;
        default:
            AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
            return VERR_INVALID_PARAMETER;
    }

    /*
     * Allocate the pages and map them into HMA space.
     */
    uint32_t const  cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
    uint32_t const  cPages    = cbAligned >> PAGE_SHIFT;
    PSUPPAGE        paPages   = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(paPages[0]));
    if (!paPages)
        return VERR_NO_TMP_MEMORY;
    void           *pvPages;
    RTR0PTR         pvR0 = NIL_RTR0PTR;
    int rc = SUPR3PageAllocEx(cPages,
                              0 /*fFlags*/,
                              &pvPages,
                              &pvR0,
                              paPages);
    if (RT_SUCCESS(rc))
    {
        Assert(pvR0 != NIL_RTR0PTR);
        memset(pvPages, 0, cbAligned);

        RTGCPTR GCPtr;
        rc = MMR3HyperMapPages(pVM,
                               pvPages,
                               pvR0,
                               cPages,
                               paPages,
                               MMR3HeapAPrintf(pVM, MM_TAG_MM, "alloc once (%s)", mmGetTagName(enmTag)),
                               &GCPtr);
        /* not needed anymore */
        RTMemTmpFree(paPages);
        if (RT_SUCCESS(rc))
        {
            *ppv = pvPages;
            Log2(("MMR3HyperAllocOnceNoRel: cbAligned=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n",
                  cbAligned, uAlignment, *ppv));
            MMR3HyperReserveFence(pVM);
            return rc;
        }
        AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cbAligned, rc));
        SUPR3PageFreeEx(pvPages, cPages);


        /*
         * HACK ALERT! Try allocating it off the heap so that we don't freak
         * out during vga/vmmdev mmio2 allocation with certain ram sizes.
         */
        /** @todo make a proper fix for this so we will never end up in this kind of situation! */
        Log(("MMR3HyperAllocOnceNoRel: MMR3HyperMapPages failed with rc=%Rrc, trying MMHyperAlloc(,%#x,,) instead\n", rc, cb));
        int rc2 = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
        if (RT_SUCCESS(rc2))
        {
            /* (Log and return the status of the successful fallback, not the stale failure.) */
            Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns %Rrc and *ppv=%p\n",
                  cb, uAlignment, rc2, *ppv));
            return rc2;
        }
    }
    else
        AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cbAligned, rc));

    if (rc == VERR_NO_MEMORY)
        rc = VERR_MM_HYPER_NO_MEMORY;
    LogRel(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc\n", cb, uAlignment, rc));
    return rc;
}
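
/* Decision summary (added for clarity, restating the branches above): small
 * requests (below 64K, or below 48K when PAGE_SIZE alignment is wanted)
 * without MMHYPER_AONR_FLAGS_KERNEL_MAPPING, and any request made once the VM
 * has left the VMSTATE_CREATING state, are served from the hyper heap.
 * Everything else gets fresh pages from SUPR3PageAllocEx plus a dedicated HMA
 * mapping, with the hyper heap as a last-resort fallback. */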


/**
 * Convert hypervisor HC virtual address to HC physical address.
 *
 * @returns HC physical address.
 * @param   pVM     The cross context VM structure.
 * @param   pvR3    Host context virtual address.
 */
VMMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvR3)
{
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
            {
                unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.Locked.pvR3;
                if (off < pLookup->cb)
                    return pLookup->u.Locked.paHCPhysPages[off >> PAGE_SHIFT] | (off & PAGE_OFFSET_MASK);
                break;
            }

            case MMLOOKUPHYPERTYPE_HCPHYS:
            {
                unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.HCPhys.pvR3;
                if (off < pLookup->cb)
                    return pLookup->u.HCPhys.HCPhys + off;
                break;
            }

            case MMLOOKUPHYPERTYPE_GCPHYS:
            case MMLOOKUPHYPERTYPE_MMIO2:
            case MMLOOKUPHYPERTYPE_DYNAMIC:
                /* can't (or don't want to) convert these kinds of records. */
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        /* next */
        if ((unsigned)pLookup->offNext == NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
    }

    AssertMsgFailed(("pvR3=%p is not inside the hypervisor memory area!\n", pvR3));
    return NIL_RTHCPHYS;
}
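
/* Usage sketch (illustrative; pvObj stands for any pointer into an HMA
 * mapping, e.g. a hyper heap allocation):
 *
 *     RTHCPHYS HCPhys = MMR3HyperHCVirt2HCPhys(pVM, pvObj);
 *     AssertReturn(HCPhys != NIL_RTHCPHYS, VERR_INTERNAL_ERROR);
 */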


/**
 * Info handler for 'hma', it dumps the list of lookup records for the hypervisor memory area.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pHlp        Callback functions for doing output.
 * @param   pszArgs     Argument string. Optional and specific to the handler.
 */
static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
{
    NOREF(pszArgs);

    pHlp->pfnPrintf(pHlp, "Hypervisor Memory Area (HMA) Layout: Base %RGv, 0x%08x bytes\n",
                    pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea);

    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv LOCKED %-*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                pLookup->u.Locked.pvR3,
                                pLookup->u.Locked.pvR0,
                                sizeof(RTHCPTR) * 2, "",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_HCPHYS:
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv HCPHYS %RHp %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                pLookup->u.HCPhys.pvR3,
                                pLookup->u.HCPhys.pvR0,
                                pLookup->u.HCPhys.HCPhys,
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_GCPHYS:
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s GCPHYS %RGp%*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2 * 2 + 1, "",
                                pLookup->u.GCPhys.GCPhys, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_MMIO2:
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s MMIO2 %RGp%*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2 * 2 + 1, "",
                                pLookup->u.MMIO2.off, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_DYNAMIC:
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s DYNAMIC %*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2 * 2 + 1, "",
                                sizeof(RTHCPTR) * 2, "",
                                pLookup->pszDesc);
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        /* next */
        if ((unsigned)pLookup->offNext == NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
    }
}
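
/* The handler registered in mmR3HyperInit above can be triggered from ring-3
 * code as well as interactively from the debugger ("info hma"). A minimal
 * sketch, assuming the standard DBGFR3Info entry point for invoking a
 * registered info handler (NULL args and NULL output helper mean "no
 * arguments, default log output"):
 *
 *     DBGFR3Info(pVM->pUVM, "hma", NULL, NULL);
 */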


#if 0
/**
 * Re-allocates memory from the hyper heap.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pvOld           The existing block of memory in the hyper heap to
 *                          re-allocate (can be NULL).
 * @param   cbOld           Size of the existing block.
 * @param   uAlignmentNew   Required memory alignment in bytes. Values are
 *                          0,8,16,32 and PAGE_SIZE. 0 -> default alignment,
 *                          i.e. 8 bytes.
 * @param   enmTagNew       The statistics tag.
 * @param   cbNew           The required size of the new block.
 * @param   ppv             Where to store the address to the re-allocated
 *                          block.
 *
 * @remarks This does not work like normal realloc() on failure, the memory
 *          pointed to by @a pvOld is lost if there isn't sufficient space on
 *          the hyper heap for the re-allocation to succeed.
 */
VMMR3DECL(int) MMR3HyperRealloc(PVM pVM, void *pvOld, size_t cbOld, unsigned uAlignmentNew, MMTAG enmTagNew, size_t cbNew,
                                void **ppv)
{
    if (!pvOld)
        return MMHyperAlloc(pVM, cbNew, uAlignmentNew, enmTagNew, ppv);

    if (!cbNew && pvOld)
        return MMHyperFree(pVM, pvOld);

    if (cbOld == cbNew)
    {
        *ppv = pvOld; /* (make sure the caller gets a valid pointer back) */
        return VINF_SUCCESS;
    }

    size_t cbData = RT_MIN(cbNew, cbOld);
    void *pvTmp = RTMemTmpAlloc(cbData);
    if (RT_UNLIKELY(!pvTmp))
    {
        MMHyperFree(pVM, pvOld);
        return VERR_NO_TMP_MEMORY;
    }
    memcpy(pvTmp, pvOld, cbData);

    int rc = MMHyperFree(pVM, pvOld);
    if (RT_SUCCESS(rc))
    {
        rc = MMHyperAlloc(pVM, cbNew, uAlignmentNew, enmTagNew, ppv);
        if (RT_SUCCESS(rc))
        {
            Assert(cbData <= cbNew);
            memcpy(*ppv, pvTmp, cbData);
        }
    }
    else
        AssertMsgFailed(("Failed to free hyper heap block pvOld=%p cbOld=%u\n", pvOld, cbOld));

    RTMemTmpFree(pvTmp);
    return rc;
}
#endif
