source: vbox/trunk/src/VBox/VMM/MM.cpp@28
/* $Id: MM.cpp 28 2007-01-15 16:48:27Z vboxsync $ */
/** @file
 * MM - Memory Monitor(/Manager).
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */


/** @page pg_mm   MM - The Memory Monitor/Manager
 *
 * This is the entity taking care of memory allocations and the locking of
 * physical memory for a VM. MM tracks these allocations and pinnings so that
 * pointer conversions, memory reads and writes, and correct cleanup can be
 * done.
 *
 * Memory types:
 *      - Hypervisor Memory Area (HMA).
 *      - Page tables.
 *      - Physical pages.
 *
 * The first two types are not accessible using the generic conversion functions
 * for GC memory; there are special functions for these.
 *
 *
 * A decent structure for this component needs to be developed as we see usage.
 * One or two rewrites are probably needed to get it right...
 *
 *
 *
 * @section Hypervisor Memory Area
 *
 * The hypervisor is given 4MB of space inside the guest; we assume that we can
 * steal a page directory entry from the guest OS without causing trouble. In
 * addition to these 4MB we'll be mapping memory for the graphics emulation,
 * but that will be an independent mapping.
 *
 * The 4MBs are divided into two main parts:
 *      -# The static code and data.
 *      -# The short-lived page mappings.
 *
 * The first part is used for the VM structure, the core code (VMMSwitch),
 * GC modules, and the alloc-only heap. The size will be determined at a
 * later point, but initially we'll say 2MB of locked memory, most of which
 * is non-contiguous physically.
 *
 * The second part is used for mapping pages to the hypervisor. We'll be using
 * a simple round robin when doing these mappings. This means that no-one can
 * assume that a mapping hangs around for very long, while the management of
 * the pages is very simple.
 *
 *
 *
 * @section Page Pool
 *
 * The MM manages a per VM page pool from which other components can allocate
 * locked, page aligned and page granular memory objects. The pool provides
 * facilities to convert back and forth between physical and virtual addresses
 * (within the pool, of course). Several specialized interfaces are provided
 * for the most common allocations and conversions to save the caller from
 * bothersome casting and extra parameter passing.
 *
 *
 */
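
/* A rough sketch of the 4MB hypervisor area described above. The 4MB figure
 * corresponds to one 32-bit non-PAE page directory entry, i.e. 1024 pages of
 * 4KB each. Only the ~2MB static estimate is stated above; the exact split
 * shown here is illustrative, not authoritative:
 *
 *      +-----------------------------+  <- HMA base (e.g. 0xe0000000)
 *      | VM structure                |
 *      | core code (VMMSwitch)       |   ~2MB of locked memory, mostly
 *      | GC modules                  |   non-contiguous physically
 *      | alloc-only heap             |
 *      +-----------------------------+
 *      | short-lived page mappings   |   remaining space, recycled
 *      | (round robin)               |   round robin
 *      +-----------------------------+  <- HMA base + 4MB
 */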


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_MM
#include <VBox/mm.h>
#include <VBox/pgm.h>
#include <VBox/cfgm.h>
#include <VBox/ssm.h>
#include "MMInternal.h"
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/param.h>

#include <VBox/log.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/string.h>


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static int mmR3Term(PVM pVM, bool fKeepTheHeap);
static DECLCALLBACK(int) mmR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) mmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);


/**
 * Initializes the MM.
 *
 * MM manages the virtual address space (among other things), sets up the
 * hypervisor memory area mapping in the VM structure and the hypervisor
 * alloc-only heap. Assuming the current init order and components, the
 * hypervisor memory area looks like this:
 *      -# VM Structure.
 *      -# Hypervisor alloc-only heap (also called the hypervisor memory region).
 *      -# Core code.
 *
 * MM determines the virtual address of the hypervisor memory area by checking
 * for the location used in a previous run. If that property isn't available
 * it will choose a default starting location, currently 0xe0000000.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
MMR3DECL(int) MMR3Init(PVM pVM)
{
    LogFlow(("MMR3Init\n"));

    /*
     * Assert alignment, sizes and order.
     */
    AssertRelease(!(RT_OFFSETOF(VM, mm.s) & 31));
    AssertRelease(sizeof(pVM->mm.s) <= sizeof(pVM->mm.padding));
    AssertMsg(pVM->mm.s.offVM == 0, ("Already initialized!\n"));

    /*
     * Init the structure.
     */
    pVM->mm.s.offVM = RT_OFFSETOF(VM, mm);
    pVM->mm.s.offLookupHyper = NIL_OFFSET;

    /*
     * Init the heap (it may already be initialized if someone has used it).
     */
    if (!pVM->mm.s.pHeap)
    {
        int rc = mmr3HeapCreate(pVM, &pVM->mm.s.pHeap);
        if (!VBOX_SUCCESS(rc))
            return rc;
    }

    /*
     * Init the page pool.
     */
    int rc = mmr3PagePoolInit(pVM);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Init the hypervisor related stuff.
         */
        rc = mmr3HyperInit(pVM);
        if (VBOX_SUCCESS(rc))
        {
            /*
             * Register the saved state data unit.
             */
            rc = SSMR3RegisterInternal(pVM, "mm", 1, 1, sizeof(uint32_t) * 2,
                                       NULL, mmR3Save, NULL,
                                       NULL, mmR3Load, NULL);
            if (VBOX_SUCCESS(rc))
                return rc;
        }

        /* .... failure .... */
        mmR3Term(pVM, true /* keep the heap */);
    }
    else
        mmr3HeapDestroy(pVM->mm.s.pHeap);
    return rc;
}
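
/* A minimal sketch, derived from the doc comments in this file, of the order
 * in which these entry points are expected to be used during VM creation and
 * teardown. The surrounding calls (PGMR3Init and friends) and the caller
 * (presumably VM.cpp) are assumptions, not code from this file:
 *
 * @code
 *    int rc = MMR3Init(pVM);            // hypervisor area, heap, page pool, SSM unit
 *    if (VBOX_SUCCESS(rc))
 *        rc = PGMR3Init(pVM);           // ... among other components
 *    if (VBOX_SUCCESS(rc))
 *        rc = MMR3InitPaging(pVM);      // register guest RAM once PGM is up
 *    // ...
 *    MMR3Term(pVM);                     // also reached on init failure paths
 * @endcode
 */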


/**
 * Initializes the MM parts which depend on PGM being initialized.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @remark  No cleanup necessary since MMR3Term() will be called on failure.
 */
MMR3DECL(int) MMR3InitPaging(PVM pVM)
{
    LogFlow(("MMR3InitPaging:\n"));
    uint64_t cbRam;
    int rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        cbRam = 0;
    if (VBOX_SUCCESS(rc) || rc == VERR_CFGM_VALUE_NOT_FOUND)
    {
        if (cbRam < PAGE_SIZE)
        {
            Log(("MM: No RAM configured\n"));
            return VINF_SUCCESS;
        }
#ifdef PGM_DYNAMIC_RAM_ALLOC
        Log(("MM: %llu bytes of RAM\n", cbRam));
        pVM->mm.s.pvRamBaseHC = 0; /** @todo obsolete */
        pVM->mm.s.cbRamBase = cbRam & PAGE_BASE_GC_MASK;
        rc = MMR3PhysRegister(pVM, pVM->mm.s.pvRamBaseHC, 0, pVM->mm.s.cbRamBase, MM_RAM_FLAGS_DYNAMIC_ALLOC, "Main Memory");
        if (VBOX_SUCCESS(rc))
        {
            /* Allocate the first chunk, as we'll map ROM ranges there. */
            rc = PGM3PhysGrowRange(pVM, (RTGCPHYS)0);
            if (VBOX_SUCCESS(rc))
                return rc;
        }
#else
        unsigned cPages = cbRam >> PAGE_SHIFT;
        Log(("MM: %llu bytes of RAM (%d pages)\n", cbRam, cPages));
        rc = SUPPageAlloc(cPages, &pVM->mm.s.pvRamBaseHC);
        if (VBOX_SUCCESS(rc))
        {
            pVM->mm.s.cbRamBase = cPages << PAGE_SHIFT;
            rc = MMR3PhysRegister(pVM, pVM->mm.s.pvRamBaseHC, 0, pVM->mm.s.cbRamBase, 0, "Main Memory");
            if (VBOX_SUCCESS(rc))
                return rc;
            SUPPageFree(pVM->mm.s.pvRamBaseHC);
        }
        else
            LogRel(("MMR3InitPaging: Failed to allocate %u bytes of RAM! rc=%Vrc\n", cPages << PAGE_SHIFT, rc));
#endif
    }
    else
        AssertMsgFailed(("Configuration error: Failed to query integer \"RamSize\", rc=%Vrc.\n", rc));

    LogFlow(("MMR3InitPaging: returns %Vrc\n", rc));
    return rc;
}
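
/* A small sketch of the producer side of the "RamSize" value queried above.
 * CFGMR3InsertInteger() is assumed to be how the VM configuration constructor
 * puts the value under the CFGM root; the 128MB figure is just an example:
 *
 * @code
 *    PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
 *    rc = CFGMR3InsertInteger(pRoot, "RamSize", 128 * _1M);
 * @endcode
 */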


/**
 * Terminates the MM.
 *
 * Termination means cleaning up and freeing all resources; the VM itself is
 * at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
MMR3DECL(int) MMR3Term(PVM pVM)
{
    return mmR3Term(pVM, false /* free the heap */);
}


/**
 * Worker for MMR3Term and MMR3Init.
 *
 * The tricky bit here is that we must not destroy the heap if we're
 * called from MMR3Init, otherwise we'll get into trouble when
 * CFGMR3Term is called later in the bailout process.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   fKeepTheHeap    Whether or not to keep the heap.
 */
static int mmR3Term(PVM pVM, bool fKeepTheHeap)
{
    /*
     * Release locked memory.
     * (Associated records are released by the heap.)
     */
    PMMLOCKEDMEM pLockedMem = pVM->mm.s.pLockedMem;
    while (pLockedMem)
    {
        int rc = SUPPageUnlock(pLockedMem->pv);
        AssertMsgRC(rc, ("SUPPageUnlock(%p) -> rc=%d\n", pLockedMem->pv, rc));
        switch (pLockedMem->eType)
        {
            case MM_LOCKED_TYPE_PHYS:
            case MM_LOCKED_TYPE_HYPER_NOFREE:
                break;
            case MM_LOCKED_TYPE_HYPER:
                rc = SUPPageFree(pLockedMem->pv);
                AssertMsgRC(rc, ("SUPPageFree(%p) -> rc=%d\n", pLockedMem->pv, rc));
                break;
        }
        /* next */
        pLockedMem = pLockedMem->pNext;
    }

    /*
     * Destroy the page pool.
     */
    mmr3PagePoolTerm(pVM);

    /*
     * Destroy the heap if requested.
     */
    if (!fKeepTheHeap)
    {
        mmr3HeapDestroy(pVM->mm.s.pHeap);
        pVM->mm.s.pHeap = NULL;
    }

    /*
     * Zero stuff to detect use of the MM interface after termination.
     */
    pVM->mm.s.offLookupHyper = NIL_OFFSET;
    pVM->mm.s.pLockedMem     = NULL;
    pVM->mm.s.pHyperHeapHC   = NULL;    /* freed above. */
    pVM->mm.s.pHyperHeapGC   = 0;       /* freed above. */
    pVM->mm.s.offVM          = 0;       /* init assertion on this */

    return 0;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 */
static DECLCALLBACK(int) mmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    LogFlow(("mmR3Save:\n"));

    /* (PGM saves the physical memory.) */
    SSMR3PutUInt(pSSM, pVM->mm.s.cbRAMSize);
    return SSMR3PutUInt(pSSM, pVM->mm.s.cbRamBase);
}
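
/* The "mm" saved state unit (version 1, registered in MMR3Init) consists of
 * just two RTUINT fields, written above in this order and checked against the
 * current configuration in mmR3Load below:
 *
 *      1. cbRAMSize
 *      2. cbRamBase
 */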


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   u32Version      Data layout version.
 */
static DECLCALLBACK(int) mmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    LogFlow(("mmR3Load:\n"));

    /*
     * Validate version.
     */
    if (u32Version != 1)
    {
        Log(("mmR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Check the cbRAMSize and cbRamBase values.
     */
    RTUINT cb;
    int rc = SSMR3GetUInt(pSSM, &cb);
    if (VBOX_FAILURE(rc))
        return rc;
    if (cb != pVM->mm.s.cbRAMSize)
    {
        Log(("mmR3Load: Memory configuration has changed. cbRAMSize=%#x saved=%#x\n", pVM->mm.s.cbRAMSize, cb));
        return VERR_SSM_LOAD_MEMORY_SIZE_MISMATCH;
    }

    rc = SSMR3GetUInt(pSSM, &cb);
    if (VBOX_FAILURE(rc))
        return rc;
    if (cb != pVM->mm.s.cbRamBase)
    {
        Log(("mmR3Load: Memory configuration has changed. cbRamBase=%#x saved=%#x\n", pVM->mm.s.cbRamBase, cb));
        return VERR_SSM_LOAD_MEMORY_SIZE_MISMATCH;
    }

    /* PGM restores the physical memory. */
    return rc;
}


/**
 * Locks physical memory which backs a virtual memory range (HC) adding
 * the required records to the pLockedMem list.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pv              Pointer to memory range which shall be locked down.
 *                          This pointer is page aligned.
 * @param   cb              Size of memory range (in bytes). This size is page aligned.
 * @param   eType           Memory type.
 * @param   ppLockedMem     Where to store the pointer to the created locked memory record.
 *                          This is optional, pass NULL if not used.
 */
int mmr3LockMem(PVM pVM, void *pv, size_t cb, MMLOCKEDTYPE eType, PMMLOCKEDMEM *ppLockedMem)
{
    Assert(RT_ALIGN_P(pv, PAGE_SIZE) == pv);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    if (ppLockedMem)
        *ppLockedMem = NULL;

    /*
     * Allocate locked mem structure.
     */
    unsigned cPages = cb >> PAGE_SHIFT;
    AssertReturn(cPages == (cb >> PAGE_SHIFT), VERR_OUT_OF_RANGE);
    PMMLOCKEDMEM pLockedMem = (PMMLOCKEDMEM)MMR3HeapAlloc(pVM, MM_TAG_MM, RT_OFFSETOF(MMLOCKEDMEM, aPhysPages[cPages]));
    if (!pLockedMem)
        return VERR_NO_MEMORY;
    pLockedMem->pv    = pv;
    pLockedMem->cb    = cb;
    pLockedMem->eType = eType;
    memset(&pLockedMem->u, 0, sizeof(pLockedMem->u));

    /*
     * Lock the memory.
     */
    int rc = SUPPageLock(pv, cb, &pLockedMem->aPhysPages[0]);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Setup the reserved field.
         */
        PSUPPAGE pPhysPage = &pLockedMem->aPhysPages[0];
        for (unsigned c = cPages; c > 0; c--, pPhysPage++)
            pPhysPage->uReserved = (unsigned)pLockedMem;

        /*
         * Insert into the list.
         *
         * ASSUME no protection is needed here as only one thread in the system
         * can possibly be doing this. We also assume that no other threads
         * will walk this list.
         */
        pLockedMem->pNext = pVM->mm.s.pLockedMem;
        pVM->mm.s.pLockedMem = pLockedMem;
        /* Set return value. */
        if (ppLockedMem)
            *ppLockedMem = pLockedMem;
    }
    else
    {
        AssertMsgFailed(("SUPPageLock failed with rc=%d\n", rc));
        MMR3HeapFree(pLockedMem);
        rc = VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to lock %d bytes of host memory (out of memory)"), cb);
    }

    return rc;
}
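
/* A hedged usage sketch for mmr3LockMem(). The local names (cPages, pvPages)
 * are made up; the allocation call mirrors the SUPPageAlloc() usage in
 * MMR3InitPaging above, and MM_LOCKED_TYPE_HYPER matches the cleanup path in
 * mmR3Term() (SUPPageUnlock followed by SUPPageFree):
 *
 * @code
 *    void *pvPages;
 *    int rc = SUPPageAlloc(cPages, &pvPages);
 *    if (VBOX_SUCCESS(rc))
 *    {
 *        PMMLOCKEDMEM pLockedMem;
 *        rc = mmr3LockMem(pVM, pvPages, cPages << PAGE_SHIFT,
 *                         MM_LOCKED_TYPE_HYPER, &pLockedMem);
 *        // ...
 *    }
 * @endcode
 */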


/**
 * Maps a part of or an entire locked memory region into the guest context.
 *
 * @returns VBox status.
 *          God knows what happens if we fail...
 * @param   pVM         VM handle.
 * @param   pLockedMem  Locked memory structure.
 * @param   Addr        GC Address where to start the mapping.
 * @param   iPage       Page number in the locked memory region.
 * @param   cPages      Number of pages to map.
 * @param   fFlags      See the fFlags argument of PGMMap().
 */
int mmr3MapLocked(PVM pVM, PMMLOCKEDMEM pLockedMem, RTGCPTR Addr, unsigned iPage, size_t cPages, unsigned fFlags)
{
    /*
     * Adjust the ~0 argument.
     */
    if (cPages == ~(size_t)0)
        cPages = (pLockedMem->cb >> PAGE_SHIFT) - iPage;
    Assert(cPages != ~0U);
    /* no incorrect arguments are accepted */
    Assert(RT_ALIGN_GCPT(Addr, PAGE_SIZE, RTGCPTR) == Addr);
    AssertMsg(iPage < (pLockedMem->cb >> PAGE_SHIFT), ("never even think about giving me a bad iPage(=%d)\n", iPage));
    AssertMsg(iPage + cPages <= (pLockedMem->cb >> PAGE_SHIFT), ("never even think about giving me a bad cPages(=%d)\n", cPages));

    /*
     * Map the pages.
     */
    PSUPPAGE pPhysPage = &pLockedMem->aPhysPages[iPage];
    while (cPages)
    {
        RTHCPHYS HCPhys = pPhysPage->Phys;
        int rc = PGMMap(pVM, Addr, HCPhys, PAGE_SIZE, fFlags);
        if (VBOX_FAILURE(rc))
        {
            /** @todo how the hell can we do a proper bailout here. */
            return rc;
        }

        /* next */
        cPages--;
        iPage++;
        pPhysPage++;
        Addr += PAGE_SIZE;
    }

    return VINF_SUCCESS;
}
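
/* A hedged usage sketch for mmr3MapLocked(), mapping an entire locked region
 * at a caller-chosen hypervisor area address. GCPtrDst, pLockedMem and fFlags
 * stand for caller-supplied values; the ~0 value relies on the "map the rest"
 * convention handled above:
 *
 * @code
 *    rc = mmr3MapLocked(pVM, pLockedMem, GCPtrDst,
 *                       0, ~(size_t)0, fFlags);   // iPage = 0, whole region
 * @endcode
 */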


/**
 * Convert HC Physical address to HC Virtual address.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   HCPhys      The host context physical address.
 * @param   ppv         Where to store the resulting address.
 * @thread  The Emulation Thread.
 */
MMR3DECL(int) MMR3HCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys, void **ppv)
{
    /*
     * Try page tables.
     */
    int rc = MMPagePhys2PageTry(pVM, HCPhys, ppv);
    if (VBOX_SUCCESS(rc))
        return rc;

    /*
     * The VM structure?
     */
    uint32_t off = (uint32_t)(HCPhys - pVM->HCPhysVM);
    if (off < RT_ALIGN_32(sizeof(*pVM), PAGE_SIZE))
    {
        *ppv = (char *)pVM + off;
        return VINF_SUCCESS;
    }

    /*
     * Iterate the locked memory - very slow.
     */
    off = HCPhys & PAGE_OFFSET_MASK;
    HCPhys &= X86_PTE_PAE_PG_MASK;
    for (PMMLOCKEDMEM pCur = pVM->mm.s.pLockedMem; pCur; pCur = pCur->pNext)
    {
        size_t iPage = pCur->cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if ((pCur->aPhysPages[iPage].Phys & X86_PTE_PAE_PG_MASK) == HCPhys)
            {
                *ppv = (char *)pCur->pv + (iPage << PAGE_SHIFT) + off;
                return VINF_SUCCESS;
            }
    }
    /* give up */
    return VERR_INVALID_POINTER;
}
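
/* A short usage sketch for MMR3HCPhys2HCVirt(); HCPhys stands for any host
 * physical address backing MM managed memory (page pool, the VM structure,
 * or a locked memory range registered via mmr3LockMem):
 *
 * @code
 *    void *pv;
 *    int rc = MMR3HCPhys2HCVirt(pVM, HCPhys, &pv);
 *    if (VBOX_FAILURE(rc))
 *        return rc;              // not MM managed memory
 *    // pv now points at the ring-3 mapping of that byte.
 * @endcode
 */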


/**
 * Read memory from GC virtual address using the current guest CR3.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   pvDst       Destination address (HC of course).
 * @param   GCPtr       GC virtual address.
 * @param   cb          Number of bytes to read.
 */
MMR3DECL(int) MMR3ReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
{
    if (GCPtr - pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea)
        return MMR3HyperReadGCVirt(pVM, pvDst, GCPtr, cb);
    return PGMPhysReadGCPtr(pVM, pvDst, GCPtr, cb);
}


/**
 * Write to memory at GC virtual address translated using the current guest CR3.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtrDst    GC virtual address.
 * @param   pvSrc       The source address (HC of course).
 * @param   cb          Number of bytes to write.
 */
MMR3DECL(int) MMR3WriteGCVirt(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
{
    if (GCPtrDst - pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea)
        return VERR_ACCESS_DENIED;
    return PGMPhysWriteGCPtr(pVM, GCPtrDst, pvSrc, cb);
}
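
/* A small usage sketch for the two accessors above; GCPtr stands for any guest
 * virtual address valid under the current guest CR3 and abBuf is a made-up
 * local buffer. Reads from the hypervisor area are serviced by
 * MMR3HyperReadGCVirt(), while writes to that area are refused:
 *
 * @code
 *    uint8_t abBuf[64];
 *    int rc = MMR3ReadGCVirt(pVM, abBuf, GCPtr, sizeof(abBuf));
 *    if (VBOX_SUCCESS(rc))
 *        rc = MMR3WriteGCVirt(pVM, GCPtr, abBuf, sizeof(abBuf));
 * @endcode
 */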