VirtualBox

source: vbox/trunk/src/VBox/VMM/MMPhys.cpp@ 2270

Last change on this file since 2270 was 1890, checked in by vboxsync, 18 years ago

Attempt to fix ALSA on Linux kernels <= 2.6.17: use mmap not memalign for allocating pages. Use madvise or mprotect to separate VM area structs inside the kernel. Most SUP* functions work on cPages now (not cBytes anymore). The free functions take a cPages parameter which is used for munmap on Linux.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 17.1 KB
Line 
1/* $Id: MMPhys.cpp 1890 2007-04-03 16:04:19Z vboxsync $ */
2/** @file
3 * MM - Memory Monitor(/Manager) - Physical Memory.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_MM_PHYS
27#include <VBox/mm.h>
28#include <VBox/pgm.h>
29#include <VBox/rem.h>
30#include "MMInternal.h"
31#include <VBox/vm.h>
32
33#include <VBox/log.h>
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/alloc.h>
37#include <iprt/assert.h>
38#include <iprt/string.h>
39
40
41/**
42 * Register externally allocated RAM for the virtual machine.
43 *
44 * The memory registered with the VM thru this interface must not be freed
45 * before the virtual machine has been destroyed. Bad things may happen... :-)
46 *
47 * @return VBox status code.
48 * @param pVM VM handle.
49 * @param pvRam Virtual address of the guest's physical memory range Must be page aligned.
50 * @param GCPhys The physical address the ram shall be registered at.
51 * @param cb Size of the memory. Must be page aligend.
52 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
53 * @param pszDesc Description of the memory.
54 */
55MMR3DECL(int) MMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, unsigned cb, unsigned fFlags, const char *pszDesc)
56{
57 return MMR3PhysRegisterEx(pVM, pvRam, GCPhys, cb, fFlags, MM_PHYS_TYPE_NORMAL, pszDesc);
58}
59
60
/**
 * Register externally allocated RAM for the virtual machine.
 *
 * The memory registered with the VM thru this interface must not be freed
 * before the virtual machine has been destroyed. Bad things may happen... :-)
 *
 * @return  VBox status code.
 * @param   pVM         VM handle.
 * @param   pvRam       Virtual address of the guest's physical memory range.
 *                      Must be page aligned. May be NULL only when
 *                      MM_RAM_FLAGS_DYNAMIC_ALLOC is set in fFlags.
 * @param   GCPhys      The physical address the ram shall be registered at.
 * @param   cb          Size of the memory. Must be page aligned.
 * @param   fFlags      Flags of the MM_RAM_FLAGS_* defines.
 * @param   enmType     Physical range type (MM_PHYS_TYPE_*).
 * @param   pszDesc     Description of the memory.
 * @thread  The Emulation Thread.
 */
/** @todo this function description is not longer up-to-date */
MMR3DECL(int) MMR3PhysRegisterEx(PVM pVM, void *pvRam, RTGCPHYS GCPhys, unsigned cb, unsigned fFlags, MMPHYSREG enmType, const char *pszDesc)
{
    int rc = VINF_SUCCESS;

    Log(("MMR3PhysRegister: pvRam=%p GCPhys=%VGp cb=%#x fFlags=%#x\n", pvRam, GCPhys, cb, fFlags));

    /*
     * Validate input.
     */
    AssertMsg(pVM, ("Invalid VM pointer\n"));
    if (pvRam)
        AssertReturn(ALIGNP(pvRam, PAGE_SIZE) == pvRam, VERR_INVALID_PARAMETER);
    else
        /* No backing memory is only legal for on-demand (dynamically allocated) ranges. */
        AssertReturn(fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC, VERR_INVALID_PARAMETER);
    AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(RT_ALIGN_Z(cb, PAGE_SIZE) == cb, VERR_INVALID_PARAMETER);
    AssertReturn(enmType == MM_PHYS_TYPE_NORMAL || enmType == MM_PHYS_TYPE_DYNALLOC_CHUNK, VERR_INVALID_PARAMETER);
    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    /* Rejects address wrap-around (and cb == 0, which would make GCPhysLast < GCPhys). */
    AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);


    /*
     * Check for conflicts.
     *
     * We do not support overlapping physical memory regions yet,
     * even if that's what the MM_RAM_FLAGS_MMIO2 flags is trying to
     * tell us to do. Provided that all MMIO2 addresses are very high
     * there is no real danger we'll be able to assign so much memory
     * for a guest that it'll ever be a problem.
     */
    AssertMsg(!(fFlags & MM_RAM_FLAGS_MMIO2) || GCPhys > 0xc0000000,
              ("MMIO2 addresses should be above 3GB for avoiding conflicts with real RAM.\n"));
    PMMLOCKEDMEM pCur = pVM->mm.s.pLockedMem;
    while (pCur)
    {
        /* Overlap test: either range's start falls inside the other one.
           Relies on unsigned subtraction wrapping for the "start is below" case. */
        if (    pCur->eType == MM_LOCKED_TYPE_PHYS
            &&  (   GCPhys - pCur->u.phys.GCPhys < pCur->cb
                 || pCur->u.phys.GCPhys - GCPhys < cb)
           )
        {
            AssertMsgFailed(("Conflicting RAM range. Existing %#x LB%#x, Req %#x LB%#x\n",
                             pCur->u.phys.GCPhys, pCur->cb, GCPhys, cb));
            return VERR_MM_RAM_CONFLICT;
        }

        /* next */
        pCur = pCur->pNext;
    }


    /* Dynamic/on-demand allocation of backing memory? */
    if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
    {
        /*
         * Register the ram with PGM.
         */
        rc = PGMR3PhysRegister(pVM, pvRam, GCPhys, cb, fFlags, NULL, pszDesc);
        if (VBOX_SUCCESS(rc))
        {
            /* Note: exact compare - only counts when DYNAMIC_ALLOC is the sole flag set,
               i.e. plain guest RAM with no MMIO/ROM/reserved qualifiers. */
            if (fFlags == MM_RAM_FLAGS_DYNAMIC_ALLOC)
                pVM->mm.s.cbRAMSize += cb;

            REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, pvRam, fFlags);
            return rc;
        }
    }
    else
    {
        /*
         * Lock the memory. (fully allocated by caller)
         */
        PMMLOCKEDMEM pLockedMem;
        rc = mmr3LockMem(pVM, pvRam, cb, MM_LOCKED_TYPE_PHYS, &pLockedMem, enmType == MM_PHYS_TYPE_DYNALLOC_CHUNK /* fSilentFailure */);
        if (VBOX_SUCCESS(rc))
        {
            pLockedMem->u.phys.GCPhys = GCPhys;

            /*
             * We set any page flags specified.
             */
            if (fFlags)
                for (unsigned i = 0; i < cb >> PAGE_SHIFT; i++)
                    pLockedMem->aPhysPages[i].Phys |= fFlags;

            /*
             * Register the ram with PGM.
             */
            if (enmType == MM_PHYS_TYPE_NORMAL)
            {
                rc = PGMR3PhysRegister(pVM, pvRam, pLockedMem->u.phys.GCPhys, cb, fFlags, &pLockedMem->aPhysPages[0], pszDesc);
                if (VBOX_SUCCESS(rc))
                {
                    /* Only flag-free ranges count as base RAM. */
                    if (!fFlags)
                        pVM->mm.s.cbRAMSize += cb;

                    REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, pvRam, fFlags);
                    return rc;
                }
            }
            else
            {
                Assert(enmType == MM_PHYS_TYPE_DYNALLOC_CHUNK);
                return PGMR3PhysRegisterChunk(pVM, pvRam, pLockedMem->u.phys.GCPhys, cb, fFlags, &pLockedMem->aPhysPages[0], pszDesc);
            }
        }
        /* Cleanup is done in VM destruction to which failure of this function will lead. */
        /* Not true in case of MM_PHYS_TYPE_DYNALLOC_CHUNK */
    }

    return rc;
}
189
190
191/**
192 * Relocate previously registered externally allocated RAM for the virtual machine.
193 *
194 * Use this only for MMIO ranges or the guest will become very confused.
195 * The memory registered with the VM thru this interface must not be freed
196 * before the virtual machine has been destroyed. Bad things may happen... :-)
197 *
198 * @return VBox status code.
199 * @param pVM VM handle.
200 * @param GCPhysOld The physical address the ram was registered at.
201 * @param GCPhysNew The physical address the ram shall be registered at.
202 * @param cb Size of the memory. Must be page aligend.
203 */
204MMR3DECL(int) MMR3PhysRelocate(PVM pVM, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, unsigned cb)
205{
206 Log(("MMR3PhysRelocate: GCPhysOld=%VGp GCPhysNew=%VGp cb=%#x\n", GCPhysOld, GCPhysNew, cb));
207
208 /*
209 * Validate input.
210 */
211 AssertMsg(pVM, ("Invalid VM pointer\n"));
212 AssertReturn(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld, VERR_INVALID_PARAMETER);
213 AssertReturn(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew, VERR_INVALID_PARAMETER);
214 AssertReturn(RT_ALIGN(cb, PAGE_SIZE) == cb, VERR_INVALID_PARAMETER);
215 RTGCPHYS GCPhysLast;
216 GCPhysLast = GCPhysOld + (cb - 1);
217 AssertReturn(GCPhysLast > GCPhysOld, VERR_INVALID_PARAMETER);
218 GCPhysLast = GCPhysNew + (cb - 1);
219 AssertReturn(GCPhysLast > GCPhysNew, VERR_INVALID_PARAMETER);
220
221 /*
222 * Find the old memory region.
223 */
224 PMMLOCKEDMEM pCur = pVM->mm.s.pLockedMem;
225 while (pCur)
226 {
227 if ( pCur->eType == MM_LOCKED_TYPE_PHYS
228 && GCPhysOld == pCur->u.phys.GCPhys
229 && cb == pCur->cb)
230 break;
231
232 /* next */
233 pCur = pCur->pNext;
234 }
235 if (!pCur)
236 {
237 AssertMsgFailed(("Unknown old region! %VGp LB%#x\n", GCPhysOld, cb));
238 return VERR_INVALID_PARAMETER;
239 }
240
241 /*
242 * Region is already locked, just need to change GC address.
243 */
244/** @todo r=bird: check for conflicts? */
245 pCur->u.phys.GCPhys = GCPhysNew;
246
247 /*
248 * Relocate the registered RAM range with PGM.
249 */
250 int rc = PGMR3PhysRelocate(pVM, GCPhysOld, GCPhysNew, cb);
251 if (VBOX_SUCCESS(rc))
252 {
253 /* Somewhat hackish way to relocate the region with REM. There
254 * is unfortunately no official way to unregister anything with
255 * REM, as there is no way to unregister memory with QEMU.
256 * This implementation seems to work, but is not very pretty. */
257 /// @todo one day provide a proper MMIO relocation operation
258 REMR3NotifyPhysReserve(pVM, GCPhysOld, cb);
259 REMR3NotifyPhysRamRegister(pVM, GCPhysNew, cb, pCur->pv,
260 pCur->aPhysPages[0].Phys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2));
261 }
262
263 return rc;
264}
265
266
267/**
268 * Register a ROM (BIOS) region.
269 *
270 * It goes without saying that this is read-only memory. The memory region must be
271 * in unassigned memory. I.e. from the top of the address space or on the PC in
272 * the 0xa0000-0xfffff range.
273 *
274 * @returns VBox status.
275 * @param pVM VM Handle.
276 * @param pDevIns The device instance owning the ROM region.
277 * @param GCPhys First physical address in the range.
278 * Must be page aligned!
279 * @param cbRange The size of the range (in bytes).
280 * Must be page aligned!
281 * @param pvBinary Pointer to the binary data backing the ROM image.
282 * This must be cbRange bytes big.
283 * It will be copied and doesn't have to stick around.
284 * @param pszDesc Pointer to description string. This must not be freed.
285 * @remark There is no way to remove the rom, automatically on device cleanup or
286 * manually from the device yet. At present I doubt we need such features...
287 */
288MMR3DECL(int) MMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTUINT cbRange, const void *pvBinary, const char *pszDesc)
289{
290 /*
291 * Validate input.
292 */
293 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
294 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
295 AssertReturn(RT_ALIGN(cbRange, PAGE_SIZE) == cbRange, VERR_INVALID_PARAMETER);
296 RTGCPHYS GCPhysLast = GCPhys + (cbRange - 1);
297 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
298 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
299
300
301 /*
302 * Check if this can fit in an existing range.
303 *
304 * We do not handle the case where a new chunk of locked memory is
305 * required to accommodate the ROM since we assume MMR3PhysReserve()
306 * have been called to reserve the memory first.
307 *
308 * To make things even simpler, the pages in question must be
309 * marked as reserved.
310 */
311 PMMLOCKEDMEM pCur = pVM->mm.s.pLockedMem;
312 for ( ; pCur; pCur = pCur->pNext)
313 if ( pCur->eType == MM_LOCKED_TYPE_PHYS
314 && GCPhys - pCur->u.phys.GCPhys < pCur->cb)
315 break;
316 if (!pCur)
317 {
318 AssertMsgFailed(("No physical range was found matching the ROM location (%#VGp LB%#x)\n", GCPhys, cbRange));
319 return VERR_INVALID_PARAMETER;
320 }
321 if (GCPhysLast - pCur->u.phys.GCPhys >= pCur->cb)
322 {
323 AssertMsgFailed(("The ROM range (%#VGp LB%#x) was crossing the end of the physical range (%#VGp LB%#x)\n",
324 GCPhys, cbRange, pCur->u.phys.GCPhys, pCur->cb));
325 return VERR_INVALID_PARAMETER;
326 }
327
328 /* flags must be all reserved. */
329 unsigned iPage = (GCPhys - pCur->u.phys.GCPhys) >> PAGE_SHIFT;
330 unsigned iPageEnd = cbRange >> PAGE_SHIFT;
331 for (; iPage < iPageEnd; iPage++)
332 if ( (pCur->aPhysPages[iPage].Phys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2))
333 != MM_RAM_FLAGS_RESERVED)
334 {
335 AssertMsgFailed(("Flags conflict at %VGp, HCPhys=%VHp.\n", pCur->u.phys.GCPhys + (iPage << PAGE_SHIFT), pCur->aPhysPages[iPage].Phys));
336 return VERR_INVALID_PARAMETER;
337 }
338
339 /*
340 * Copy the ram and update the flags.
341 */
342 iPage = (GCPhys - pCur->u.phys.GCPhys) >> PAGE_SHIFT;
343 void *pvCopy = (char *)pCur->pv + (iPage << PAGE_SHIFT);
344 memcpy(pvCopy, pvBinary, cbRange);
345
346 /** @note we rely on the MM_RAM_FLAGS_ROM flag in PGMPhysRead now. Don't change to reserved! */
347 /** @todo r=bird: Noone ever talked about changing *to* _RESERVED. The question is whether
348 * we should *clear* _RESERVED. I've no idea what the state of that flag is for ROM areas right
349 * now, but I will find out later. */
350 for (; iPage < iPageEnd; iPage++)
351 pCur->aPhysPages[iPage].Phys |= MM_RAM_FLAGS_ROM; /** @todo should be clearing _RESERVED? */
352 int rc = PGMR3PhysSetFlags(pVM, GCPhys, cbRange, MM_RAM_FLAGS_ROM, ~0); /** @todo should be clearing _RESERVED? */
353 AssertRC(rc);
354 if (VBOX_SUCCESS(rc))
355 {
356 /*
357 * Prevent changes to the ROM memory when executing in raw mode by
358 * registering a GC only write access handler.
359 *
360 * ASSUMES that REMR3NotifyPhysRomRegister doesn't call cpu_register_physical_memory
361 * when there is no HC handler. The result would probably be immediate boot failure.
362 */
363 rc = PGMR3HandlerPhysicalRegister(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhys, GCPhys + cbRange - 1,
364 NULL, NULL,
365 NULL, "pgmGuestROMWriteHandler", 0,
366 NULL, "pgmGuestROMWriteHandler", 0, "ROM Write Access Handler");
367 AssertRC(rc);
368 }
369
370 REMR3NotifyPhysRomRegister(pVM, GCPhys, cbRange, pvCopy);
371 return rc; /* we're sloppy with error cleanup here, but we're toast anyway if this fails. */
372}
373
374
375/**
376 * Reserve physical address space for ROM and MMIO ranges.
377 *
378 * @returns VBox status code.
379 * @param pVM VM Handle.
380 * @param GCPhys Start physical address.
381 * @param cbRange The size of the range.
382 * @param pszDesc Description string.
383 */
384MMR3DECL(int) MMR3PhysReserve(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, const char *pszDesc)
385{
386 /*
387 * Validate input.
388 */
389 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
390 AssertReturn(RT_ALIGN(cbRange, PAGE_SIZE) == cbRange, VERR_INVALID_PARAMETER);
391 RTGCPHYS GCPhysLast = GCPhys + (cbRange - 1);
392 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
393
394 /*
395 * Do we have an existing physical address range for the request?
396 */
397 PMMLOCKEDMEM pCur = pVM->mm.s.pLockedMem;
398 for ( ; pCur; pCur = pCur->pNext)
399 if ( pCur->eType == MM_LOCKED_TYPE_PHYS
400 && GCPhys - pCur->u.phys.GCPhys < pCur->cb)
401 break;
402 if (!pCur)
403 {
404 /*
405 * No range, we'll just allocate backing pages and register
406 * them as reserved using the Ram interface.
407 */
408 void *pvPages;
409 int rc = SUPPageAlloc(cbRange >> PAGE_SHIFT, &pvPages);
410 if (VBOX_SUCCESS(rc))
411 {
412 rc = MMR3PhysRegister(pVM, pvPages, GCPhys, cbRange, MM_RAM_FLAGS_RESERVED, pszDesc);
413 if (VBOX_FAILURE(rc))
414 SUPPageFree(pvPages, cbRange >> PAGE_SHIFT);
415 }
416 return rc;
417 }
418 if (GCPhysLast - pCur->u.phys.GCPhys >= pCur->cb)
419 {
420 AssertMsgFailed(("The reserved range (%#VGp LB%#x) was crossing the end of the physical range (%#VGp LB%#x)\n",
421 GCPhys, cbRange, pCur->u.phys.GCPhys, pCur->cb));
422 return VERR_INVALID_PARAMETER;
423 }
424
425 /*
426 * Update the flags.
427 */
428 unsigned iPage = (GCPhys - pCur->u.phys.GCPhys) >> PAGE_SHIFT;
429 unsigned iPageEnd = cbRange >> PAGE_SHIFT;
430 for (; iPage < iPageEnd; iPage++)
431 pCur->aPhysPages[iPage].Phys |= MM_RAM_FLAGS_RESERVED;
432 int rc = PGMR3PhysSetFlags(pVM, GCPhys, cbRange, MM_RAM_FLAGS_RESERVED, ~0);
433 AssertRC(rc);
434
435 REMR3NotifyPhysReserve(pVM, GCPhys, cbRange);
436 return rc;
437}
438
439
/**
 * Get the size of the base RAM.
 * This usually means the size of the first contiguous block of physical memory.
 *
 * @returns The base RAM size in bytes (the cbRamBase value recorded in the MM data).
 * @param   pVM     VM handle.
 * @thread  Any.
 */
MMR3DECL(uint64_t) MMR3PhysGetRamSize(PVM pVM)
{
    return pVM->mm.s.cbRamBase;
}
452
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette