VirtualBox

source: vbox/trunk/src/VBox/VMM/MM.cpp@ 6079

Last change on this file since 6079 was 5999, checked in by vboxsync, 17 years ago

The Giant CDDL Dual-License Header Change.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 18.8 KB
Line 
1/* $Id: MM.cpp 5999 2007-12-07 15:05:06Z vboxsync $ */
2/** @file
3 * MM - Memory Monitor(/Manager).
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_mm MM - The Memory Monitor/Manager
20 *
21 * It seems like this is going to be the entity taking care of memory allocations
22 * and the locking of physical memory for a VM. MM will track these allocations and
23 * pinnings so pointer conversions, memory read and write, and correct clean up can
24 * be done.
25 *
26 * Memory types:
27 * - Hypervisor Memory Area (HMA).
28 * - Page tables.
29 * - Physical pages.
30 *
31 * The first two types are not accessible using the generic conversion functions
32 * for GC memory, there are special functions for these.
33 *
34 *
 35 * A decent structure for this component needs to be developed as we see usage. One
 36 * or two rewrites is probably needed to get it right...
37 *
38 *
39 *
40 * @section Hypervisor Memory Area
41 *
 42 * The hypervisor is given 4MB of space inside the guest, we assume that we can
 43 * steal a page directory entry from the guest OS without causing trouble. In
 44 * addition to these 4MB we'll be mapping memory for the graphics emulation,
 45 * but that will be an independent mapping.
46 *
47 * The 4MBs are divided into two main parts:
48 * -# The static code and data
49 * -# The shortlived page mappings.
50 *
51 * The first part is used for the VM structure, the core code (VMMSwitch),
52 * GC modules, and the alloc-only-heap. The size will be determined at a
53 * later point but initially we'll say 2MB of locked memory, most of which
54 * is non contiguous physically.
55 *
56 * The second part is used for mapping pages to the hypervisor. We'll be using
57 * a simple round robin when doing these mappings. This means that no-one can
58 * assume that a mapping hangs around for very long, while the managing of the
59 * pages are very simple.
60 *
61 *
62 *
63 * @section Page Pool
64 *
65 * The MM manages a per VM page pool from which other components can allocate
66 * locked, page aligned and page granular memory objects. The pool provides
67 * facilities to convert back and forth between physical and virtual addresses
68 * (within the pool of course). Several specialized interfaces are provided
 69 * for the most common allocations and conversions to save the caller from
70 * bothersome casting and extra parameter passing.
71 *
72 *
73 */
74
75
76
77/*******************************************************************************
78* Header Files *
79*******************************************************************************/
80#define LOG_GROUP LOG_GROUP_MM
81#include <VBox/mm.h>
82#include <VBox/pgm.h>
83#include <VBox/cfgm.h>
84#include <VBox/ssm.h>
85#include "MMInternal.h"
86#include <VBox/vm.h>
87#include <VBox/err.h>
88#include <VBox/param.h>
89
90#include <VBox/log.h>
91#include <iprt/alloc.h>
92#include <iprt/assert.h>
93#include <iprt/string.h>
94
95
96/*******************************************************************************
97* Internal Functions *
98*******************************************************************************/
99static int mmR3Term(PVM pVM, bool fKeepTheHeap);
100static DECLCALLBACK(int) mmR3Save(PVM pVM, PSSMHANDLE pSSM);
101static DECLCALLBACK(int) mmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
102
103
104
/**
 * Initializes the MM.
 *
 * MM is managing the virtual address space (among other things) and
 * sets up the hypervisor memory area mapping in the VM structure and
 * the hypervisor alloc-only-heap. Assuming the current init order
 * and components the hypervisor memory area looks like this:
 *      -# VM Structure.
 *      -# Hypervisor alloc only heap (also called Hypervisor memory region).
 *      -# Core code.
 *
 * MM determines the virtual address of the hypervisor memory area by
 * checking for the location used in a previous run. If that property isn't
 * available it will choose a default starting location, currently 0xe0000000.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
MMR3DECL(int) MMR3Init(PVM pVM)
{
    LogFlow(("MMR3Init\n"));

    /*
     * Assert alignment, sizes and order.
     */
    AssertRelease(!(RT_OFFSETOF(VM, mm.s) & 31));
    AssertRelease(sizeof(pVM->mm.s) <= sizeof(pVM->mm.padding));
    /* offVM == 0 is how mmR3Term marks the structure as torn down / fresh. */
    AssertMsg(pVM->mm.s.offVM == 0, ("Already initialized!\n"));

    /*
     * Init the structure.
     */
    pVM->mm.s.offVM = RT_OFFSETOF(VM, mm);
    pVM->mm.s.offLookupHyper = NIL_OFFSET;

    /*
     * Init the heap (may already be initialized if someone used it before us).
     */
    if (!pVM->mm.s.pHeap)
    {
        int rc = mmr3HeapCreate(pVM, &pVM->mm.s.pHeap);
        if (!VBOX_SUCCESS(rc))
            return rc;
    }

    /*
     * Init the page pool.
     */
    int rc = mmr3PagePoolInit(pVM);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Init the hypervisor related stuff.
         */
        rc = mmr3HyperInit(pVM);
        if (VBOX_SUCCESS(rc))
        {
            /*
             * Register the saved state data unit.
             * (Unit "mm", version 1, two uint32_t's — see mmR3Save/mmR3Load.)
             */
            rc = SSMR3RegisterInternal(pVM, "mm", 1, 1, sizeof(uint32_t) * 2,
                                       NULL, mmR3Save, NULL,
                                       NULL, mmR3Load, NULL);
            if (VBOX_SUCCESS(rc))
                return rc;

            /* .... failure .... */
        }
    }
    /* Bail out, keeping the heap alive so CFGMR3Term can still use it. */
    mmR3Term(pVM, true /* keep the heap */);
    return rc;
}
177
178
179/**
180 * Initializes the MM parts which depends on PGM being initialized.
181 *
182 * @returns VBox status code.
183 * @param pVM The VM to operate on.
184 * @remark No cleanup necessary since MMR3Term() will be called on failure.
185 */
186MMR3DECL(int) MMR3InitPaging(PVM pVM)
187{
188 LogFlow(("MMR3InitPaging:\n"));
189 bool fPreAlloc;
190 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RamPreAlloc", &fPreAlloc);
191 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
192#ifdef VBOX_WITH_PREALLOC_RAM_BY_DEFAULT
193 fPreAlloc = true;
194#else
195 fPreAlloc = false;
196#endif
197 else
198 AssertMsgRCReturn(rc, ("Configuration error: Failed to query integer \"RamPreAlloc\", rc=%Vrc.\n", rc), rc);
199
200 uint64_t cbRam;
201 rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
202 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
203 cbRam = 0;
204 if (VBOX_SUCCESS(rc) || rc == VERR_CFGM_VALUE_NOT_FOUND)
205 {
206 if (cbRam < PAGE_SIZE)
207 {
208 Log(("MM: No RAM configured\n"));
209 return VINF_SUCCESS;
210 }
211#ifdef PGM_DYNAMIC_RAM_ALLOC
212 Log(("MM: %llu bytes of RAM%s\n", cbRam, fPreAlloc ? " (PreAlloc)" : ""));
213 pVM->mm.s.pvRamBaseHC = 0; /** @todo obsolete */
214 pVM->mm.s.cbRamBase = cbRam & PAGE_BASE_GC_MASK;
215 rc = MMR3PhysRegister(pVM, pVM->mm.s.pvRamBaseHC, 0, pVM->mm.s.cbRamBase, MM_RAM_FLAGS_DYNAMIC_ALLOC, "Main Memory");
216 if (VBOX_SUCCESS(rc))
217 {
218 /* Allocate the first chunk, as we'll map ROM ranges there. */
219 rc = PGM3PhysGrowRange(pVM, (RTGCPHYS)0);
220 if (VBOX_SUCCESS(rc))
221 {
222 /* Should we preallocate the entire guest RAM? */
223 if (fPreAlloc)
224 {
225 for (RTGCPHYS GCPhys = PGM_DYNAMIC_CHUNK_SIZE; GCPhys < cbRam; GCPhys += PGM_DYNAMIC_CHUNK_SIZE)
226 {
227 rc = PGM3PhysGrowRange(pVM, GCPhys);
228 if (VBOX_FAILURE(rc))
229 return rc;
230 }
231 }
232 return rc;
233 }
234 }
235#else
236 unsigned cPages = cbRam >> PAGE_SHIFT;
237 Log(("MM: %llu bytes of RAM (%d pages)\n", cbRam, cPages));
238 rc = SUPPageAlloc(cPages, &pVM->mm.s.pvRamBaseHC);
239 if (VBOX_SUCCESS(rc))
240 {
241 pVM->mm.s.cbRamBase = cPages << PAGE_SHIFT;
242 rc = MMR3PhysRegister(pVM, pVM->mm.s.pvRamBaseHC, 0, pVM->mm.s.cbRamBase, 0, "Main Memory");
243 if (VBOX_SUCCESS(rc))
244 return rc;
245 SUPPageFree(pVM->mm.s.pvRamBaseHC);
246 }
247 else
248 LogRel(("MMR3InitPage: Failed to allocate %u bytes of RAM! rc=%Vrc\n", cPages << PAGE_SHIFT));
249#endif
250 }
251 else
252 AssertMsgFailed(("Configuration error: Failed to query integer \"RamSize\", rc=%Vrc.\n", rc));
253
254 LogFlow(("MMR3InitPaging: returns %Vrc\n", rc));
255 return rc;
256}
257
258
259/**
260 * Terminates the MM.
261 *
262 * Termination means cleaning up and freeing all resources,
263 * the VM it self is at this point powered off or suspended.
264 *
265 * @returns VBox status code.
266 * @param pVM The VM to operate on.
267 */
268MMR3DECL(int) MMR3Term(PVM pVM)
269{
270 return mmR3Term(pVM, false /* free the heap */);
271}
272
273
/**
 * Worker for MMR3Term and MMR3Init.
 *
 * The tricky bit here is that we must not destroy the heap if we're
 * called from MMR3Init, otherwise we'll get into trouble when
 * CFGMR3Term is called later in the bailout process.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   fKeepTheHeap    Whether or not to keep the heap.
 */
static int mmR3Term(PVM pVM, bool fKeepTheHeap)
{
    /*
     * Destroy the page pool. (first as it used the hyper heap)
     */
    mmr3PagePoolTerm(pVM);

    /*
     * Release locked memory.
     * (Associated record are released by the heap.)
     */
    PMMLOCKEDMEM pLockedMem = pVM->mm.s.pLockedMem;
    while (pLockedMem)
    {
        /* Unlock each range first; whether its pages are freed here depends
           on the record type below. */
        int rc = SUPPageUnlock(pLockedMem->pv);
        AssertMsgRC(rc, ("SUPPageUnlock(%p) -> rc=%d\n", pLockedMem->pv, rc));
        switch (pLockedMem->eType)
        {
            case MM_LOCKED_TYPE_HYPER:
                rc = SUPPageFree(pLockedMem->pv, pLockedMem->cb >> PAGE_SHIFT);
                AssertMsgRC(rc, ("SUPPageFree(%p) -> rc=%d\n", pLockedMem->pv, rc));
                break;
            case MM_LOCKED_TYPE_HYPER_NOFREE:
            case MM_LOCKED_TYPE_HYPER_PAGES:
            case MM_LOCKED_TYPE_PHYS:
                /* nothing to do. */
                break;
        }
        /* next */
        pLockedMem = pLockedMem->pNext;
    }

    /*
     * Destroy the heap if requested.
     * (Kept alive on the MMR3Init failure path so CFGMR3Term still works.)
     */
    if (!fKeepTheHeap)
    {
        mmr3HeapDestroy(pVM->mm.s.pHeap);
        pVM->mm.s.pHeap = NULL;
    }

    /*
     * Zero stuff to detect after termination use of the MM interface
     */
    pVM->mm.s.offLookupHyper = NIL_OFFSET;
    pVM->mm.s.pLockedMem = NULL;
    pVM->mm.s.pHyperHeapHC = NULL; /* freed above. */
    pVM->mm.s.pHyperHeapGC = 0; /* freed above. */
    pVM->mm.s.offVM = 0; /* init assertion on this */

    return 0;
}
337
338
/**
 * Reset notification.
 *
 * MM will reload shadow ROMs into RAM at this point and make
 * the ROM writable.
 *
 * @param   pVM     The VM handle.
 */
MMR3DECL(void) MMR3Reset(PVM pVM)
{
    /* All reset work is delegated to the MM-phys ROM handling. */
    mmR3PhysRomReset(pVM);
}
351
352
/**
 * Execute state save operation.
 *
 * Saves the two RAM size fields which mmR3Load validates on restore;
 * the RAM contents themselves are saved by PGM.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) mmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    LogFlow(("mmR3Save:\n"));

    /* (PGM saves the physical memory.) */
    SSMR3PutUInt(pSSM, pVM->mm.s.cbRAMSize);
    return SSMR3PutUInt(pSSM, pVM->mm.s.cbRamBase);
}
368
369
/**
 * Execute state load operation.
 *
 * Checks that the saved cbRAMSize and cbRamBase values match the
 * current VM configuration; a mismatch fails the load. The RAM
 * contents are restored by PGM, not here.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) mmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    LogFlow(("mmR3Load:\n"));

    /*
     * Validate version.
     * (Only version 1 exists; see the SSMR3RegisterInternal call in MMR3Init.)
     */
    if (u32Version != 1)
    {
        Log(("mmR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Check the cbRAMSize and cbRamBase values.
     */
    RTUINT cb;
    int rc = SSMR3GetUInt(pSSM, &cb);
    if (VBOX_FAILURE(rc))
        return rc;
    if (cb != pVM->mm.s.cbRAMSize)
    {
        Log(("mmR3Load: Memory configuration has changed. cbRAMSize=%#x save %#x\n", pVM->mm.s.cbRAMSize, cb));
        return VERR_SSM_LOAD_MEMORY_SIZE_MISMATCH;
    }

    rc = SSMR3GetUInt(pSSM, &cb);
    if (VBOX_FAILURE(rc))
        return rc;
    if (cb != pVM->mm.s.cbRamBase)
    {
        Log(("mmR3Load: Memory configuration has changed. cbRamBase=%#x save %#x\n", pVM->mm.s.cbRamBase, cb));
        return VERR_SSM_LOAD_MEMORY_SIZE_MISMATCH;
    }

    /* PGM restores the physical memory. */
    return rc;
}
416
417
/**
 * Locks physical memory which backs a virtual memory range (HC) adding
 * the required records to the pLockedMem list.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pv              Pointer to memory range which shall be locked down.
 *                          This pointer is page aligned.
 * @param   cb              Size of memory range (in bytes). This size is page aligned.
 * @param   eType           Memory type.
 * @param   ppLockedMem     Where to store the pointer to the created locked memory record.
 *                          This is optional, pass NULL if not used.
 * @param   fSilentFailure  Don't raise an error when unsuccessful. Upper layer will deal with it.
 */
int mmr3LockMem(PVM pVM, void *pv, size_t cb, MMLOCKEDTYPE eType, PMMLOCKEDMEM *ppLockedMem, bool fSilentFailure)
{
    Assert(RT_ALIGN_P(pv, PAGE_SIZE) == pv);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    if (ppLockedMem)
        *ppLockedMem = NULL;

    /*
     * Allocate locked mem structure.
     * (The record has a trailing per-page array, hence the aPhysPages[cPages]
     * offset for the allocation size.)
     */
    unsigned cPages = cb >> PAGE_SHIFT;
    /* Guard against truncation when assigning the page count to 'unsigned'. */
    AssertReturn(cPages == (cb >> PAGE_SHIFT), VERR_OUT_OF_RANGE);
    PMMLOCKEDMEM pLockedMem = (PMMLOCKEDMEM)MMR3HeapAlloc(pVM, MM_TAG_MM, RT_OFFSETOF(MMLOCKEDMEM, aPhysPages[cPages]));
    if (!pLockedMem)
        return VERR_NO_MEMORY;
    pLockedMem->pv = pv;
    pLockedMem->cb = cb;
    pLockedMem->eType = eType;
    memset(&pLockedMem->u, 0, sizeof(pLockedMem->u));

    /*
     * Lock the memory.
     */
    int rc = SUPPageLock(pv, cPages, &pLockedMem->aPhysPages[0]);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Setup the reserved field.
         * (Each page entry is pointed back at its owning record.)
         */
        PSUPPAGE pPhysPage = &pLockedMem->aPhysPages[0];
        for (unsigned c = cPages; c > 0; c--, pPhysPage++)
            pPhysPage->uReserved = (RTHCUINTPTR)pLockedMem;

        /*
         * Insert into the list.
         *
         * ASSUME no protection needed here as only one thread in the system can possibly
         * be doing this. No other threads will walk this list either we assume.
         */
        pLockedMem->pNext = pVM->mm.s.pLockedMem;
        pVM->mm.s.pLockedMem = pLockedMem;
        /* Set return value. */
        if (ppLockedMem)
            *ppLockedMem = pLockedMem;
    }
    else
    {
        /* Locking failed: free the record so nothing dangles on the list. */
        AssertMsgFailed(("SUPPageLock failed with rc=%d\n", rc));
        MMR3HeapFree(pLockedMem);
        if (!fSilentFailure)
            rc = VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to lock %d bytes of host memory (out of memory)"), cb);
    }

    return rc;
}
488
489
/**
 * Maps a part of or an entire locked memory region into the guest context.
 *
 * @returns VBox status.
 *          God knows what happens if we fail...
 * @param   pVM         VM handle.
 * @param   pLockedMem  Locked memory structure.
 * @param   Addr        GC Address where to start the mapping.
 * @param   iPage       Page number in the locked memory region.
 * @param   cPages      Number of pages to map. Pass ~0 to map from iPage
 *                      to the end of the region.
 * @param   fFlags      See the fFlags argument of PGR3Map().
 */
int mmr3MapLocked(PVM pVM, PMMLOCKEDMEM pLockedMem, RTGCPTR Addr, unsigned iPage, size_t cPages, unsigned fFlags)
{
    /*
     * Adjust ~0 argument
     */
    if (cPages == ~(size_t)0)
        cPages = (pLockedMem->cb >> PAGE_SHIFT) - iPage;
    Assert(cPages != ~0U);
    /* no incorrect arguments are accepted */
    Assert(RT_ALIGN_GCPT(Addr, PAGE_SIZE, RTGCPTR) == Addr);
    AssertMsg(iPage < (pLockedMem->cb >> PAGE_SHIFT), ("never even think about giving me a bad iPage(=%d)\n", iPage));
    AssertMsg(iPage + cPages <= (pLockedMem->cb >> PAGE_SHIFT), ("never even think about giving me a bad cPages(=%d)\n", cPages));

    /*
     * Map the pages one by one, each at its locked physical address.
     */
    PSUPPAGE pPhysPage = &pLockedMem->aPhysPages[iPage];
    while (cPages)
    {
        RTHCPHYS HCPhys = pPhysPage->Phys;
        int rc = PGMMap(pVM, Addr, HCPhys, PAGE_SIZE, fFlags);
        if (VBOX_FAILURE(rc))
        {
            /** @todo how the hell can we do a proper bailout here. */
            /* NOTE(review): pages already mapped in this loop are NOT unmapped
               on failure — callers must be able to tolerate a partial mapping. */
            return rc;
        }

        /* next */
        cPages--;
        iPage++;
        pPhysPage++;
        Addr += PAGE_SIZE;
    }

    return VINF_SUCCESS;
}
538
539
/**
 * Convert HC Physical address to HC Virtual address.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   HCPhys  The host context physical address.
 * @param   ppv     Where to store the resulting address.
 * @thread  The Emulation Thread.
 */
MMR3DECL(int) MMR3HCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys, void **ppv)
{
    /*
     * Try page tables.
     */
    int rc = MMPagePhys2PageTry(pVM, HCPhys, ppv);
    if (VBOX_SUCCESS(rc))
        return rc;

    /*
     * Iterate the locked memory - very slow.
     * (Linear scan over every page of every locked range.)
     */
    /* Split off the byte offset so whole pages can be compared below. */
    uint32_t off = HCPhys & PAGE_OFFSET_MASK;
    HCPhys &= X86_PTE_PAE_PG_MASK;
    for (PMMLOCKEDMEM pCur = pVM->mm.s.pLockedMem; pCur; pCur = pCur->pNext)
    {
        size_t iPage = pCur->cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if ((pCur->aPhysPages[iPage].Phys & X86_PTE_PAE_PG_MASK) == HCPhys)
            {
                /* Found the page; add back the in-page offset. */
                *ppv = (char *)pCur->pv + (iPage << PAGE_SHIFT) + off;
                return VINF_SUCCESS;
            }
    }
    /* give up */
    return VERR_INVALID_POINTER;
}
576
577
/**
 * Read memory from GC virtual address using the current guest CR3.
 *
 * Reads falling inside the hypervisor area are serviced from the
 * hypervisor mapping; everything else goes through the guest page
 * tables via PGM.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   pvDst   Destination address (HC of course).
 * @param   GCPtr   GC virtual address.
 * @param   cb      Number of bytes to read.
 */
MMR3DECL(int) MMR3ReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
{
    /* Single-compare range check; relies on the subtraction wrapping for
       addresses below the area base (assumes RTGCPTR is unsigned — TODO confirm). */
    if (GCPtr - pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea)
        return MMR3HyperReadGCVirt(pVM, pvDst, GCPtr, cb);
    return PGMPhysReadGCPtr(pVM, pvDst, GCPtr, cb);
}
593
594
595/**
596 * Write to memory at GC virtual address translated using the current guest CR3.
597 *
598 * @returns VBox status.
599 * @param pVM VM handle.
600 * @param GCPtrDst GC virtual address.
601 * @param pvSrc The source address (HC of course).
602 * @param cb Number of bytes to read.
603 */
604MMR3DECL(int) MMR3WriteGCVirt(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
605{
606 if (GCPtrDst - pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea)
607 return VERR_ACCESS_DENIED;
608 return PGMPhysWriteGCPtr(pVM, GCPtrDst, pvSrc, cb);
609}
610
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette