VirtualBox

source: vbox/trunk/src/VBox/VMM/MM.cpp@552

Last change on this file since 552 was 247, checked in by vboxsync, 18 years ago

Out of memory reporting for dynamic memory allocation case.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 18.8 KB
/* $Id: MM.cpp 247 2007-01-23 17:10:04Z vboxsync $ */
/** @file
 * MM - Memory Monitor(/Manager).
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */


/** @page pg_mm     MM - The Memory Monitor/Manager
 *
 * It seems like this is going to be the entity taking care of memory allocations
 * and the locking of physical memory for a VM. MM will track these allocations and
 * pinnings so that pointer conversions, memory reads and writes, and correct cleanup
 * can be done.
 *
 * Memory types:
 *      - Hypervisor Memory Area (HMA).
 *      - Page tables.
 *      - Physical pages.
 *
 * The first two types are not accessible using the generic conversion functions
 * for GC memory; there are special functions for these.
 *
 *
 * A decent structure for this component needs to be developed as we see usage. One
 * or two rewrites are probably needed to get it right...
 *
 *
 *
 * @section Hypervisor Memory Area
 *
 * The hypervisor is given 4MB of space inside the guest; we assume that we can
 * steal a page directory entry from the guest OS without causing trouble. In
 * addition to these 4MB we'll be mapping memory for the graphics emulation,
 * but that will be an independent mapping.
 *
 * The 4MB are divided into two main parts:
 *      -# The static code and data.
 *      -# The short-lived page mappings.
 *
 * The first part is used for the VM structure, the core code (VMMSwitch),
 * GC modules, and the alloc-only-heap. The size will be determined at a
 * later point, but initially we'll say 2MB of locked memory, most of which
 * is non-contiguous physically.
 *
 * The second part is used for mapping pages into the hypervisor. We'll be using
 * a simple round robin when doing these mappings. This means that no one can
 * assume that a mapping hangs around for very long, while the management of the
 * pages is very simple.
 *
 *
 *
 * @section Page Pool
 *
 * The MM manages a per-VM page pool from which other components can allocate
 * locked, page-aligned and page-granular memory objects. The pool provides
 * facilities to convert back and forth between physical and virtual addresses
 * (within the pool, of course). Several specialized interfaces are provided
 * for the most common allocations and conversions to save the caller from
 * bothersome casting and extra parameter passing.
 *
 *
 */


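/*
 * A rough, non-authoritative usage sketch (the real call sites live in the VM
 * startup/teardown code elsewhere, and the exact order there may differ): the
 * entry points defined in this file are expected to be driven roughly like this:
 *
 *     int rc = MMR3Init(pVM);        // heap, page pool, hypervisor area, SSM unit
 *     ...
 *     rc = MMR3InitPaging(pVM);      // once PGM is up: register / allocate guest RAM
 *     ...
 *     // while the VM runs: MMR3HCPhys2HCVirt, MMR3ReadGCVirt, MMR3WriteGCVirt, ...
 *     ...
 *     rc = MMR3Term(pVM);            // at power off / destruction: unlock and free
 */
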
/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_MM
#include <VBox/mm.h>
#include <VBox/pgm.h>
#include <VBox/cfgm.h>
#include <VBox/ssm.h>
#include "MMInternal.h"
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/param.h>

#include <VBox/log.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/string.h>


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static int mmR3Term(PVM pVM, bool fKeepTheHeap);
static DECLCALLBACK(int) mmR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) mmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);



/**
 * Initializes the MM.
 *
 * MM is managing the virtual address space (among other things) and
 * sets up the hypervisor memory area mapping in the VM structure and
 * the hypervisor alloc-only-heap. Assuming the current init order
 * and components, the hypervisor memory area looks like this:
 *      -# VM Structure.
 *      -# Hypervisor alloc-only-heap (also called the hypervisor memory region).
 *      -# Core code.
 *
 * MM determines the virtual address of the hypervisor memory area by
 * checking for the location used during a previous run. If that property
 * isn't available it will choose a default starting location, currently
 * 0xe0000000.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
MMR3DECL(int) MMR3Init(PVM pVM)
{
    LogFlow(("MMR3Init\n"));

    /*
     * Assert alignment, sizes and order.
     */
    AssertRelease(!(RT_OFFSETOF(VM, mm.s) & 31));
    AssertRelease(sizeof(pVM->mm.s) <= sizeof(pVM->mm.padding));
    AssertMsg(pVM->mm.s.offVM == 0, ("Already initialized!\n"));

    /*
     * Init the structure.
     */
    pVM->mm.s.offVM = RT_OFFSETOF(VM, mm);
    pVM->mm.s.offLookupHyper = NIL_OFFSET;

    /*
     * Init the heap (may already be initialized if someone used it).
     */
    if (!pVM->mm.s.pHeap)
    {
        int rc = mmr3HeapCreate(pVM, &pVM->mm.s.pHeap);
        if (!VBOX_SUCCESS(rc))
            return rc;
    }

    /*
     * Init the page pool.
     */
    int rc = mmr3PagePoolInit(pVM);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Init the hypervisor related stuff.
         */
        rc = mmr3HyperInit(pVM);
        if (VBOX_SUCCESS(rc))
        {
            /*
             * Register the saved state data unit.
             */
            rc = SSMR3RegisterInternal(pVM, "mm", 1, 1, sizeof(uint32_t) * 2,
                                       NULL, mmR3Save, NULL,
                                       NULL, mmR3Load, NULL);
            if (VBOX_SUCCESS(rc))
                return rc;
        }

        /* .... failure .... */
        mmR3Term(pVM, true /* keep the heap */);
    }
    else
        mmr3HeapDestroy(pVM->mm.s.pHeap);
    return rc;
}


/**
 * Initializes the MM parts which depend on PGM being initialized.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @remark  No cleanup necessary since MMR3Term() will be called on failure.
 */
MMR3DECL(int) MMR3InitPaging(PVM pVM)
{
    LogFlow(("MMR3InitPaging:\n"));
    bool fPreAlloc;
    int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RamPreAlloc", &fPreAlloc);
    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        fPreAlloc = false;
    else
        AssertMsgRCReturn(rc, ("Configuration error: Failed to query integer \"RamPreAlloc\", rc=%Vrc.\n", rc), rc);

    uint64_t cbRam;
    rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        cbRam = 0;
    if (VBOX_SUCCESS(rc) || rc == VERR_CFGM_VALUE_NOT_FOUND)
    {
        if (cbRam < PAGE_SIZE)
        {
            Log(("MM: No RAM configured\n"));
            return VINF_SUCCESS;
        }
#ifdef PGM_DYNAMIC_RAM_ALLOC
        Log(("MM: %llu bytes of RAM%s\n", cbRam, fPreAlloc ? " (PreAlloc)" : ""));
        pVM->mm.s.pvRamBaseHC = 0; /** @todo obsolete */
        pVM->mm.s.cbRamBase = cbRam & PAGE_BASE_GC_MASK;
        rc = MMR3PhysRegister(pVM, pVM->mm.s.pvRamBaseHC, 0, pVM->mm.s.cbRamBase, MM_RAM_FLAGS_DYNAMIC_ALLOC, "Main Memory");
        if (VBOX_SUCCESS(rc))
        {
            /* Allocate the first chunk, as we'll map ROM ranges there. */
            rc = PGM3PhysGrowRange(pVM, (RTGCPHYS)0);
            if (VBOX_SUCCESS(rc))
            {
                /* Should we preallocate the entire guest RAM? */
                if (fPreAlloc)
                {
                    for (RTGCPHYS GCPhys = PGM_DYNAMIC_CHUNK_SIZE; GCPhys < cbRam; GCPhys += PGM_DYNAMIC_CHUNK_SIZE)
                    {
                        rc = PGM3PhysGrowRange(pVM, GCPhys);
                        if (VBOX_FAILURE(rc))
                            return rc;
                    }
                }
                return rc;
            }
        }
#else
        unsigned cPages = cbRam >> PAGE_SHIFT;
        Log(("MM: %llu bytes of RAM (%d pages)\n", cbRam, cPages));
        rc = SUPPageAlloc(cPages, &pVM->mm.s.pvRamBaseHC);
        if (VBOX_SUCCESS(rc))
        {
            pVM->mm.s.cbRamBase = cPages << PAGE_SHIFT;
            rc = MMR3PhysRegister(pVM, pVM->mm.s.pvRamBaseHC, 0, pVM->mm.s.cbRamBase, 0, "Main Memory");
            if (VBOX_SUCCESS(rc))
                return rc;
            SUPPageFree(pVM->mm.s.pvRamBaseHC);
        }
        else
            LogRel(("MMR3InitPaging: Failed to allocate %u bytes of RAM! rc=%Vrc\n", cPages << PAGE_SHIFT, rc));
#endif
    }
    else
        AssertMsgFailed(("Configuration error: Failed to query integer \"RamSize\", rc=%Vrc.\n", rc));

    LogFlow(("MMR3InitPaging: returns %Vrc\n", rc));
    return rc;
}


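/*
 * For reference, a minimal sketch of how the configuration keys consumed above
 * might be set up by whoever constructs the CFGM tree. This is an assumption
 * about the caller, not something this file defines, and CFGMR3InsertInteger is
 * assumed to be the usual CFGM helper for integer values; the sizes are
 * illustrative only.
 *
 *     PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
 *     rc = CFGMR3InsertInteger(pRoot, "RamSize",     128 * _1M);   // bytes of guest RAM
 *     rc = CFGMR3InsertInteger(pRoot, "RamPreAlloc", 0);           // optional, defaults to false
 */
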
/**
 * Terminates the MM.
 *
 * Termination means cleaning up and freeing all resources;
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
MMR3DECL(int) MMR3Term(PVM pVM)
{
    return mmR3Term(pVM, false /* free the heap */);
}


/**
 * Worker for MMR3Term and MMR3Init.
 *
 * The tricky bit here is that we must not destroy the heap if we're
 * called from MMR3Init, otherwise we'll get into trouble when
 * CFGMR3Term is called later in the bailout process.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   fKeepTheHeap    Whether or not to keep the heap.
 */
static int mmR3Term(PVM pVM, bool fKeepTheHeap)
{
    /*
     * Release locked memory.
     * (The associated records are released by the heap.)
     */
    PMMLOCKEDMEM pLockedMem = pVM->mm.s.pLockedMem;
    while (pLockedMem)
    {
        int rc = SUPPageUnlock(pLockedMem->pv);
        AssertMsgRC(rc, ("SUPPageUnlock(%p) -> rc=%d\n", pLockedMem->pv, rc));
        switch (pLockedMem->eType)
        {
            case MM_LOCKED_TYPE_PHYS:
            case MM_LOCKED_TYPE_HYPER_NOFREE:
                break;
            case MM_LOCKED_TYPE_HYPER:
                rc = SUPPageFree(pLockedMem->pv);
                AssertMsgRC(rc, ("SUPPageFree(%p) -> rc=%d\n", pLockedMem->pv, rc));
                break;
        }
        /* next */
        pLockedMem = pLockedMem->pNext;
    }

    /*
     * Destroy the page pool.
     */
    mmr3PagePoolTerm(pVM);

    /*
     * Destroy the heap if requested.
     */
    if (!fKeepTheHeap)
    {
        mmr3HeapDestroy(pVM->mm.s.pHeap);
        pVM->mm.s.pHeap = NULL;
    }

    /*
     * Zero stuff to detect use of the MM interface after termination.
     */
    pVM->mm.s.offLookupHyper = NIL_OFFSET;
    pVM->mm.s.pLockedMem     = NULL;
    pVM->mm.s.pHyperHeapHC   = NULL;    /* freed above. */
    pVM->mm.s.pHyperHeapGC   = 0;       /* freed above. */
    pVM->mm.s.offVM          = 0;       /* init assertion on this */

    return 0;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) mmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    LogFlow(("mmR3Save:\n"));

    /* (PGM saves the physical memory.) */
    SSMR3PutUInt(pSSM, pVM->mm.s.cbRAMSize);
    return SSMR3PutUInt(pSSM, pVM->mm.s.cbRamBase);
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) mmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    LogFlow(("mmR3Load:\n"));

    /*
     * Validate version.
     */
    if (u32Version != 1)
    {
        Log(("mmR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Check the cbRAMSize and cbRamBase values.
     */
    RTUINT cb;
    int rc = SSMR3GetUInt(pSSM, &cb);
    if (VBOX_FAILURE(rc))
        return rc;
    if (cb != pVM->mm.s.cbRAMSize)
    {
        Log(("mmR3Load: Memory configuration has changed. cbRAMSize=%#x save %#x\n", pVM->mm.s.cbRAMSize, cb));
        return VERR_SSM_LOAD_MEMORY_SIZE_MISMATCH;
    }

    rc = SSMR3GetUInt(pSSM, &cb);
    if (VBOX_FAILURE(rc))
        return rc;
    if (cb != pVM->mm.s.cbRamBase)
    {
        Log(("mmR3Load: Memory configuration has changed. cbRamBase=%#x save %#x\n", pVM->mm.s.cbRamBase, cb));
        return VERR_SSM_LOAD_MEMORY_SIZE_MISMATCH;
    }

    /* PGM restores the physical memory. */
    return rc;
}


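/*
 * For reference: the "mm" saved state unit registered in MMR3Init (version 1,
 * sizeof(uint32_t) * 2 bytes) consists of exactly the two fields written by
 * mmR3Save and checked by mmR3Load above, in this order:
 *
 *     uint32_t    cbRAMSize;      // configured RAM size
 *     uint32_t    cbRamBase;      // size of the registered base RAM range
 *
 * Both are only used as a consistency check on load; PGM saves and restores
 * the actual memory contents.
 */
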
/**
 * Locks physical memory which backs a virtual memory range (HC), adding
 * the required records to the pLockedMem list.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pv              Pointer to the memory range which shall be locked down.
 *                          This pointer is page aligned.
 * @param   cb              Size of the memory range (in bytes). This size is page aligned.
 * @param   eType           Memory type.
 * @param   ppLockedMem     Where to store the pointer to the created locked memory record.
 *                          This is optional, pass NULL if not used.
 * @param   fSilentFailure  Don't raise an error when unsuccessful. The upper layer will deal with it.
 */
int mmr3LockMem(PVM pVM, void *pv, size_t cb, MMLOCKEDTYPE eType, PMMLOCKEDMEM *ppLockedMem, bool fSilentFailure)
{
    Assert(RT_ALIGN_P(pv, PAGE_SIZE) == pv);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    if (ppLockedMem)
        *ppLockedMem = NULL;

    /*
     * Allocate locked mem structure.
     */
    unsigned cPages = cb >> PAGE_SHIFT;
    AssertReturn(cPages == (cb >> PAGE_SHIFT), VERR_OUT_OF_RANGE);
    PMMLOCKEDMEM pLockedMem = (PMMLOCKEDMEM)MMR3HeapAlloc(pVM, MM_TAG_MM, RT_OFFSETOF(MMLOCKEDMEM, aPhysPages[cPages]));
    if (!pLockedMem)
        return VERR_NO_MEMORY;
    pLockedMem->pv    = pv;
    pLockedMem->cb    = cb;
    pLockedMem->eType = eType;
    memset(&pLockedMem->u, 0, sizeof(pLockedMem->u));

    /*
     * Lock the memory.
     */
    int rc = SUPPageLock(pv, cb, &pLockedMem->aPhysPages[0]);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Setup the reserved field.
         */
        PSUPPAGE pPhysPage = &pLockedMem->aPhysPages[0];
        for (unsigned c = cPages; c > 0; c--, pPhysPage++)
            pPhysPage->uReserved = (RTHCUINTPTR)pLockedMem;

        /*
         * Insert into the list.
         *
         * ASSUME no protection is needed here as only one thread in the system can possibly
         * be doing this. No other threads will walk this list either, we assume.
         */
        pLockedMem->pNext = pVM->mm.s.pLockedMem;
        pVM->mm.s.pLockedMem = pLockedMem;
        /* Set return value. */
        if (ppLockedMem)
            *ppLockedMem = pLockedMem;
    }
    else
    {
        AssertMsgFailed(("SUPPageLock failed with rc=%d\n", rc));
        MMR3HeapFree(pLockedMem);
        if (!fSilentFailure)
            rc = VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to lock %d bytes of host memory (out of memory)"), cb);
    }

    return rc;
}


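/*
 * A minimal, speculative sketch of a caller of mmr3LockMem. Judging from the
 * MM_LOCKED_TYPE_HYPER branch in mmR3Term (SUPPageUnlock followed by SUPPageFree),
 * such memory is typically allocated with SUPPageAlloc first; the page count and
 * the omitted error handling are illustrative only.
 *
 *     void *pv;
 *     int rc = SUPPageAlloc(16, &pv);                 // 16 pages, page aligned
 *     if (VBOX_SUCCESS(rc))
 *     {
 *         PMMLOCKEDMEM pLockedMem;
 *         rc = mmr3LockMem(pVM, pv, 16 * PAGE_SIZE, MM_LOCKED_TYPE_HYPER, &pLockedMem, false);
 *         ...
 *     }
 */
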
/**
 * Maps a part of or an entire locked memory region into the guest context.
 *
 * @returns VBox status.
 *          God knows what happens if we fail...
 * @param   pVM         VM handle.
 * @param   pLockedMem  Locked memory structure.
 * @param   Addr        GC address where to start the mapping.
 * @param   iPage       Page number in the locked memory region.
 * @param   cPages      Number of pages to map.
 * @param   fFlags      See the fFlags argument of PGMMap().
 */
int mmr3MapLocked(PVM pVM, PMMLOCKEDMEM pLockedMem, RTGCPTR Addr, unsigned iPage, size_t cPages, unsigned fFlags)
{
    /*
     * Adjust ~0 argument.
     */
    if (cPages == ~(size_t)0)
        cPages = (pLockedMem->cb >> PAGE_SHIFT) - iPage;
    Assert(cPages != ~0U);
    /* no incorrect arguments are accepted */
    Assert(RT_ALIGN_GCPT(Addr, PAGE_SIZE, RTGCPTR) == Addr);
    AssertMsg(iPage < (pLockedMem->cb >> PAGE_SHIFT), ("never even think about giving me a bad iPage(=%d)\n", iPage));
    AssertMsg(iPage + cPages <= (pLockedMem->cb >> PAGE_SHIFT), ("never even think about giving me a bad cPages(=%d)\n", cPages));

    /*
     * Map the pages.
     */
    PSUPPAGE pPhysPage = &pLockedMem->aPhysPages[iPage];
    while (cPages)
    {
        RTHCPHYS HCPhys = pPhysPage->Phys;
        int rc = PGMMap(pVM, Addr, HCPhys, PAGE_SIZE, fFlags);
        if (VBOX_FAILURE(rc))
        {
            /** @todo how the hell can we do a proper bailout here. */
            return rc;
        }

        /* next */
        cPages--;
        iPage++;
        pPhysPage++;
        Addr += PAGE_SIZE;
    }

    return VINF_SUCCESS;
}


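/*
 * A minimal, speculative usage sketch: mapping an entire locked region into the
 * hypervisor area, given a record previously obtained from mmr3LockMem. Passing
 * ~0 for cPages means "everything from iPage to the end"; the GC address and the
 * zero fFlags value are illustrative only.
 *
 *     RTGCPTR GCPtr = pVM->mm.s.pvHyperAreaGC;        // some free spot in the HMA
 *     rc = mmr3MapLocked(pVM, pLockedMem, GCPtr, 0, ~(size_t)0, 0);
 */
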
/**
 * Convert an HC physical address to an HC virtual address.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   HCPhys  The host context physical address.
 * @param   ppv     Where to store the resulting address.
 * @thread  The Emulation Thread.
 */
MMR3DECL(int) MMR3HCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys, void **ppv)
{
    /*
     * Try page tables.
     */
    int rc = MMPagePhys2PageTry(pVM, HCPhys, ppv);
    if (VBOX_SUCCESS(rc))
        return rc;

    /*
     * The VM structure?
     */
    uint32_t off = (uint32_t)(HCPhys - pVM->HCPhysVM);
    if (off < RT_ALIGN_32(sizeof(*pVM), PAGE_SIZE))
    {
        *ppv = (char *)pVM + off;
        return VINF_SUCCESS;
    }

    /*
     * Iterate the locked memory - very slow.
     */
    off = HCPhys & PAGE_OFFSET_MASK;
    HCPhys &= X86_PTE_PAE_PG_MASK;
    for (PMMLOCKEDMEM pCur = pVM->mm.s.pLockedMem; pCur; pCur = pCur->pNext)
    {
        size_t iPage = pCur->cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if ((pCur->aPhysPages[iPage].Phys & X86_PTE_PAE_PG_MASK) == HCPhys)
            {
                *ppv = (char *)pCur->pv + (iPage << PAGE_SHIFT) + off;
                return VINF_SUCCESS;
            }
    }
    /* give up */
    return VERR_INVALID_POINTER;
}


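/*
 * A minimal usage sketch. The HCPhys value is illustrative; any physical address
 * backing a page-pool page, the VM structure, or a locked memory range can be
 * translated back to a host pointer this way (the last case via a slow list walk).
 *
 *     RTHCPHYS HCPhys = ...;
 *     void    *pv;
 *     int rc = MMR3HCPhys2HCVirt(pVM, HCPhys, &pv);
 *     if (VBOX_SUCCESS(rc))
 *         // pv now points at the same byte in host context
 */
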
/**
 * Read memory from a GC virtual address using the current guest CR3.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   pvDst   Destination address (HC of course).
 * @param   GCPtr   GC virtual address.
 * @param   cb      Number of bytes to read.
 */
MMR3DECL(int) MMR3ReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
{
    if (GCPtr - pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea)
        return MMR3HyperReadGCVirt(pVM, pvDst, GCPtr, cb);
    return PGMPhysReadGCPtr(pVM, pvDst, GCPtr, cb);
}


/**
 * Write to memory at a GC virtual address translated using the current guest CR3.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtrDst    GC virtual address.
 * @param   pvSrc       The source address (HC of course).
 * @param   cb          Number of bytes to write.
 */
MMR3DECL(int) MMR3WriteGCVirt(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
{
    if (GCPtrDst - pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea)
        return VERR_ACCESS_DENIED;
    return PGMPhysWriteGCPtr(pVM, GCPtrDst, pvSrc, cb);
}

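/*
 * A minimal usage sketch for the two accessors above. Reads falling inside the
 * hypervisor area are serviced by MMR3HyperReadGCVirt, while writes into that
 * area are refused with VERR_ACCESS_DENIED; the address and buffer below are
 * illustrative only.
 *
 *     RTGCPTR GCPtr = ...;
 *     uint8_t abBuf[64];
 *     int rc = MMR3ReadGCVirt(pVM, abBuf, GCPtr, sizeof(abBuf));
 *     ...
 *     rc = MMR3WriteGCVirt(pVM, GCPtr, abBuf, sizeof(abBuf));
 */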