VirtualBox

source: vbox/trunk/src/VBox/VMM/MMPhys.cpp@7692

Last change on this file since 7692 was 7635, checked in by vboxsync, 17 years ago

The new MMIO2 code.
WARNING! This changes the PCI mapping protocol for MMIO2 so it works the same way as I/O ports and normal MMIO memory. External users of the interface will have to update their mapping routines.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 18.5 KB
1/* $Id: MMPhys.cpp 7635 2008-03-28 17:15:38Z vboxsync $ */
2/** @file
3 * MM - Memory Monitor(/Manager) - Physical Memory.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_MM_PHYS
23#include <VBox/mm.h>
24#include <VBox/pgm.h>
25#include <VBox/rem.h>
26#include "MMInternal.h"
27#include <VBox/vm.h>
28
29#include <VBox/log.h>
30#include <VBox/param.h>
31#include <VBox/err.h>
32#include <iprt/alloc.h>
33#include <iprt/assert.h>
34#include <iprt/string.h>
35
36
37/**
38 * Register externally allocated RAM for the virtual machine.
39 *
40 * The memory registered with the VM through this interface must not be freed
41 * before the virtual machine has been destroyed. Bad things may happen... :-)
42 *
43 * @return VBox status code.
44 * @param pVM VM handle.
45 * @param pvRam Virtual address of the guest's physical memory range. Must be page aligned.
46 * @param GCPhys The physical address the ram shall be registered at.
47 * @param cb Size of the memory. Must be page aligned.
48 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
49 * @param pszDesc Description of the memory.
50 */
51MMR3DECL(int) MMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, unsigned cb, unsigned fFlags, const char *pszDesc)
52{
53 return MMR3PhysRegisterEx(pVM, pvRam, GCPhys, cb, fFlags, MM_PHYS_TYPE_NORMAL, pszDesc);
54}
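/*
 * Usage sketch (illustrative): a hypothetical caller registering a pre-allocated,
 * page-aligned 64MB block as base RAM at guest physical address 0. The names
 * pvRam and cbRam are placeholders; the buffer must remain valid until the VM
 * has been destroyed, as noted above.
 *
 *     void    *pvRam = NULL;
 *     unsigned cbRam = 64 * _1M;
 *     int rc = SUPPageAlloc(cbRam >> PAGE_SHIFT, &pvRam);
 *     if (VBOX_SUCCESS(rc))
 *         rc = MMR3PhysRegister(pVM, pvRam, 0, cbRam, 0, "Base RAM");
 *     AssertRC(rc);
 */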
55
56
57/**
58 * Register externally allocated RAM for the virtual machine.
59 *
60 * The memory registered with the VM through this interface must not be freed
61 * before the virtual machine has been destroyed. Bad things may happen... :-)
62 *
63 * @return VBox status code.
64 * @param pVM VM handle.
65 * @param pvRam Virtual address of the guest's physical memory range. Must be page aligned.
66 * @param GCPhys The physical address the ram shall be registered at.
67 * @param cb Size of the memory. Must be page aligned.
68 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
69 * @param enmType Physical range type (MM_PHYS_TYPE_*)
70 * @param pszDesc Description of the memory.
71 * @thread The Emulation Thread.
72 *
73 * @deprecated For the old dynamic allocation code only. Will be removed with VBOX_WITH_NEW_PHYS_CODE.
74 */
75/** @todo this function description is no longer up-to-date */
76MMR3DECL(int) MMR3PhysRegisterEx(PVM pVM, void *pvRam, RTGCPHYS GCPhys, unsigned cb, unsigned fFlags, MMPHYSREG enmType, const char *pszDesc)
77{
78 int rc = VINF_SUCCESS;
79
80 Log(("MMR3PhysRegister: pvRam=%p GCPhys=%VGp cb=%#x fFlags=%#x\n", pvRam, GCPhys, cb, fFlags));
81
82 /*
83 * Validate input.
84 */
85 AssertMsg(pVM, ("Invalid VM pointer\n"));
86 if (pvRam)
87 AssertReturn(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam, VERR_INVALID_PARAMETER);
88 else
89 AssertReturn(fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC, VERR_INVALID_PARAMETER);
90 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
91 AssertReturn(RT_ALIGN_Z(cb, PAGE_SIZE) == cb, VERR_INVALID_PARAMETER);
92 AssertReturn(enmType == MM_PHYS_TYPE_NORMAL || enmType == MM_PHYS_TYPE_DYNALLOC_CHUNK, VERR_INVALID_PARAMETER);
93 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
94 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
95
96
97 /*
98 * Check for conflicts.
99 *
100 * We do not support overlapping physical memory regions yet,
101 * even if that's what the MM_RAM_FLAGS_MMIO2 flag is trying to
102 * tell us to do. Provided that all MMIO2 addresses are kept very
103 * high, there is no real danger of assigning so much memory to a
104 * guest that this ever becomes a problem.
105 */
106 AssertMsg(!(fFlags & MM_RAM_FLAGS_MMIO2) || GCPhys > 0xc0000000,
107 ("MMIO2 addresses should be above 3GB for avoiding conflicts with real RAM.\n"));
108 PMMLOCKEDMEM pCur = pVM->mm.s.pLockedMem;
109 while (pCur)
110 {
111 if ( pCur->eType == MM_LOCKED_TYPE_PHYS
112 && ( GCPhys - pCur->u.phys.GCPhys < pCur->cb
113 || pCur->u.phys.GCPhys - GCPhys < cb)
114 )
115 {
116 AssertMsgFailed(("Conflicting RAM range. Existing %#x LB%#x, Req %#x LB%#x\n",
117 pCur->u.phys.GCPhys, pCur->cb, GCPhys, cb));
118 return VERR_MM_RAM_CONFLICT;
119 }
120
121 /* next */
122 pCur = pCur->pNext;
123 }
124
125
126 /* Dynamic/on-demand allocation of backing memory? */
127 if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
128 {
129 /*
130 * Register the ram with PGM.
131 */
132 rc = PGMR3PhysRegister(pVM, pvRam, GCPhys, cb, fFlags, NULL, pszDesc);
133 if (VBOX_SUCCESS(rc))
134 {
135 if (fFlags == MM_RAM_FLAGS_DYNAMIC_ALLOC)
136 pVM->mm.s.cBasePages += cb >> PAGE_SHIFT;
137
138 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, fFlags);
139 return rc;
140 }
141 }
142 else
143 {
144 /*
145 * Lock the memory. (fully allocated by caller)
146 */
147 PMMLOCKEDMEM pLockedMem;
148 rc = mmR3LockMem(pVM, pvRam, cb, MM_LOCKED_TYPE_PHYS, &pLockedMem, enmType == MM_PHYS_TYPE_DYNALLOC_CHUNK /* fSilentFailure */);
149 if (VBOX_SUCCESS(rc))
150 {
151 pLockedMem->u.phys.GCPhys = GCPhys;
152
153 /*
154 * We set any page flags specified.
155 */
156 if (fFlags)
157 for (unsigned i = 0; i < cb >> PAGE_SHIFT; i++)
158 pLockedMem->aPhysPages[i].Phys |= fFlags;
159
160 /*
161 * Register the ram with PGM.
162 */
163 if (enmType == MM_PHYS_TYPE_NORMAL)
164 {
165 rc = PGMR3PhysRegister(pVM, pvRam, pLockedMem->u.phys.GCPhys, cb, fFlags, &pLockedMem->aPhysPages[0], pszDesc);
166 if (VBOX_SUCCESS(rc))
167 {
168 if (!fFlags)
169 pVM->mm.s.cBasePages += cb >> PAGE_SHIFT;
170
171 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, fFlags);
172 return rc;
173 }
174 }
175 else
176 {
177 Assert(enmType == MM_PHYS_TYPE_DYNALLOC_CHUNK);
178 return PGMR3PhysRegisterChunk(pVM, pvRam, pLockedMem->u.phys.GCPhys, cb, fFlags, &pLockedMem->aPhysPages[0], pszDesc);
179 }
180 }
181 /* Cleanup is done during VM destruction, which is where failure of this function leads. */
182 /* (Not true in the MM_PHYS_TYPE_DYNALLOC_CHUNK case.) */
183 }
184
185 return rc;
186}
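/*
 * Usage sketch (illustrative): a hypothetical caller registering a range with
 * on-demand backing. With MM_RAM_FLAGS_DYNAMIC_ALLOC the pvRam pointer is NULL
 * and the backing memory is allocated by PGM as needed rather than up front.
 *
 *     int rc = MMR3PhysRegisterEx(pVM, NULL, 0, 128 * _1M,
 *                                 MM_RAM_FLAGS_DYNAMIC_ALLOC,
 *                                 MM_PHYS_TYPE_NORMAL, "Base RAM");
 *     AssertRC(rc);
 */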
187
188
189/**
190 * Register a ROM (BIOS) region.
191 *
192 * It goes without saying that this is read-only memory. The memory region must be
193 * in unassigned memory, i.e. at the top of the address space or, on the PC, in
194 * the 0xa0000-0xfffff range.
195 *
196 * @returns VBox status.
197 * @param pVM VM Handle.
198 * @param pDevIns The device instance owning the ROM region.
199 * @param GCPhys First physical address in the range.
200 * Must be page aligned!
201 * @param cbRange The size of the range (in bytes).
202 * Must be page aligned!
203 * @param pvBinary Pointer to the binary data backing the ROM image.
204 * This must be cbRange bytes big.
205 * It will be copied, so it doesn't have to stick around
206 * after the call if fShadow is clear.
207 * @param fShadow Whether to emulate ROM shadowing. This involves leaving
208 * the ROM writable for a while during the POST and refreshing
209 * it at reset. When this flag is set, the memory pointed to by
210 * pvBinary has to stick around for the lifespan of the VM.
211 * @param pszDesc Pointer to description string. This must not be freed.
212 * @remark There is no way to remove the ROM yet, neither automatically on device
213 * cleanup nor manually from the device. At present I doubt we need such features...
214 */
215MMR3DECL(int) MMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTUINT cbRange, const void *pvBinary,
216 bool fShadow, const char *pszDesc)
217{
218 /*
219 * Validate input.
220 */
221 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
222 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
223 AssertReturn(RT_ALIGN(cbRange, PAGE_SIZE) == cbRange, VERR_INVALID_PARAMETER);
224 RTGCPHYS GCPhysLast = GCPhys + (cbRange - 1);
225 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
226 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
227
228
229 /*
230 * Check if this can fit in an existing range.
231 *
232 * We do not handle the case where a new chunk of locked memory is
233 * required to accommodate the ROM since we assume MMR3PhysReserve()
234 * has been called to reserve the memory first.
235 *
236 * To make things even simpler, the pages in question must be
237 * marked as reserved.
238 */
239 PMMLOCKEDMEM pCur = pVM->mm.s.pLockedMem;
240 for ( ; pCur; pCur = pCur->pNext)
241 if ( pCur->eType == MM_LOCKED_TYPE_PHYS
242 && GCPhys - pCur->u.phys.GCPhys < pCur->cb)
243 break;
244 if (!pCur)
245 {
246 AssertMsgFailed(("No physical range was found matching the ROM location (%#VGp LB%#x)\n", GCPhys, cbRange));
247 return VERR_INVALID_PARAMETER;
248 }
249 if (GCPhysLast - pCur->u.phys.GCPhys >= pCur->cb)
250 {
251 AssertMsgFailed(("The ROM range (%#VGp LB%#x) was crossing the end of the physical range (%#VGp LB%#x)\n",
252 GCPhys, cbRange, pCur->u.phys.GCPhys, pCur->cb));
253 return VERR_INVALID_PARAMETER;
254 }
255
256 /* flags must be all reserved. */
257 unsigned iPage = (GCPhys - pCur->u.phys.GCPhys) >> PAGE_SHIFT;
258 unsigned iPageEnd = cbRange >> PAGE_SHIFT;
259 for (; iPage < iPageEnd; iPage++)
260 if ( (pCur->aPhysPages[iPage].Phys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2))
261 != MM_RAM_FLAGS_RESERVED)
262 {
263 AssertMsgFailed(("Flags conflict at %VGp, HCPhys=%VHp.\n", pCur->u.phys.GCPhys + (iPage << PAGE_SHIFT), pCur->aPhysPages[iPage].Phys));
264 return VERR_INVALID_PARAMETER;
265 }
266
267 /*
268 * Copy the ram and update the flags.
269 */
270 iPage = (GCPhys - pCur->u.phys.GCPhys) >> PAGE_SHIFT;
271 void *pvCopy = (char *)pCur->pv + (iPage << PAGE_SHIFT);
272 memcpy(pvCopy, pvBinary, cbRange);
273
274 const unsigned fSet = fShadow ? MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2 : MM_RAM_FLAGS_ROM;
275 for (; iPage < iPageEnd; iPage++)
276 {
277 pCur->aPhysPages[iPage].Phys &= ~MM_RAM_FLAGS_RESERVED;
278 pCur->aPhysPages[iPage].Phys |= fSet;
279 }
280 int rc = PGMR3PhysSetFlags(pVM, GCPhys, cbRange, fSet, ~MM_RAM_FLAGS_RESERVED);
281 AssertRC(rc);
282 if (VBOX_SUCCESS(rc))
283 {
284 /*
285 * To prevent the shadow page table mappings from being RW in raw-mode, we
286 * must currently employ a little hack: we register a write access handler
287 * and thereby ensure a RO mapping of the pages. This is NOT very nice,
288 * and wasn't really my intention when writing the code; consider it a PGM bug.
289 *
290 * ASSUMES that REMR3NotifyPhysRomRegister doesn't call cpu_register_physical_memory
291 * when there is no HC handler. The result would probably be immediate boot failure.
292 */
293 rc = PGMR3HandlerPhysicalRegister(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhys, GCPhys + cbRange - 1,
294 NULL, NULL,
295 NULL, "pgmPhysRomWriteHandler", 0,
296 NULL, "pgmPhysRomWriteHandler", 0, pszDesc);
297 AssertRC(rc);
298 }
299
300 /*
301 * Create a ROM range for it so we can provide an 'info rom' thingy and, more
302 * importantly, reload and protect/unprotect the shadow ROM correctly.
303 */
304 if (VBOX_SUCCESS(rc))
305 {
306 PMMROMRANGE pRomRange = (PMMROMRANGE)MMR3HeapAlloc(pVM, MM_TAG_MM, sizeof(*pRomRange));
307 AssertReturn(pRomRange, VERR_NO_MEMORY);
308 pRomRange->GCPhys = GCPhys;
309 pRomRange->cbRange = cbRange;
310 pRomRange->pszDesc = pszDesc;
311 pRomRange->fShadow = fShadow;
312 pRomRange->fWritable = fShadow;
313 pRomRange->pvBinary = fShadow ? pvBinary : NULL;
314 pRomRange->pvCopy = pvCopy;
315
316 /* sort it for 'info rom' readability. */
317 PMMROMRANGE pPrev = NULL;
318 PMMROMRANGE pCur = pVM->mm.s.pRomHead;
319 while (pCur && pCur->GCPhys < GCPhys)
320 {
321 pPrev = pCur;
322 pCur = pCur->pNext;
323 }
324 pRomRange->pNext = pCur;
325 if (pPrev)
326 pPrev->pNext = pRomRange;
327 else
328 pVM->mm.s.pRomHead = pRomRange;
329 }
330
331 REMR3NotifyPhysRomRegister(pVM, GCPhys, cbRange, pvCopy, fShadow);
332 return rc; /* we're sloppy with error cleanup here, but we're toast anyway if this fails. */
333}
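/*
 * Usage sketch (illustrative): a hypothetical device constructor installing a
 * 64KB BIOS image at the classic 0xf0000 address. The range must have been
 * reserved with MMR3PhysReserve() beforehand (see the conflict check above);
 * pvBiosImage is a placeholder for the caller's image buffer, and with
 * fShadow=false it need not outlive this call.
 *
 *     int rc = MMR3PhysRomRegister(pVM, pDevIns, 0xf0000, 0x10000,
 *                                  pvBiosImage, false, "Main BIOS");
 *     AssertRCReturn(rc, rc);
 */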
334
335
336/**
337 * Reserve physical address space for ROM and MMIO ranges.
338 *
339 * @returns VBox status code.
340 * @param pVM VM Handle.
341 * @param GCPhys Start physical address.
342 * @param cbRange The size of the range.
343 * @param pszDesc Description string.
344 */
345MMR3DECL(int) MMR3PhysReserve(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, const char *pszDesc)
346{
347 /*
348 * Validate input.
349 */
350 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
351 AssertReturn(RT_ALIGN(cbRange, PAGE_SIZE) == cbRange, VERR_INVALID_PARAMETER);
352 RTGCPHYS GCPhysLast = GCPhys + (cbRange - 1);
353 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
354
355 /*
356 * Do we have an existing physical address range for the request?
357 */
358 PMMLOCKEDMEM pCur = pVM->mm.s.pLockedMem;
359 for ( ; pCur; pCur = pCur->pNext)
360 if ( pCur->eType == MM_LOCKED_TYPE_PHYS
361 && GCPhys - pCur->u.phys.GCPhys < pCur->cb)
362 break;
363 if (!pCur)
364 {
365 /*
366 * No range, we'll just allocate backing pages and register
367 * them as reserved using the Ram interface.
368 */
369 void *pvPages;
370 int rc = SUPPageAlloc(cbRange >> PAGE_SHIFT, &pvPages);
371 if (VBOX_SUCCESS(rc))
372 {
373 rc = MMR3PhysRegister(pVM, pvPages, GCPhys, cbRange, MM_RAM_FLAGS_RESERVED, pszDesc);
374 if (VBOX_FAILURE(rc))
375 SUPPageFree(pvPages, cbRange >> PAGE_SHIFT);
376 }
377 return rc;
378 }
379 if (GCPhysLast - pCur->u.phys.GCPhys >= pCur->cb)
380 {
381 AssertMsgFailed(("The reserved range (%#VGp LB%#x) was crossing the end of the physical range (%#VGp LB%#x)\n",
382 GCPhys, cbRange, pCur->u.phys.GCPhys, pCur->cb));
383 return VERR_INVALID_PARAMETER;
384 }
385
386 /*
387 * Update the flags.
388 */
389 unsigned iPage = (GCPhys - pCur->u.phys.GCPhys) >> PAGE_SHIFT;
390 unsigned iPageEnd = cbRange >> PAGE_SHIFT;
391 for (; iPage < iPageEnd; iPage++)
392 pCur->aPhysPages[iPage].Phys |= MM_RAM_FLAGS_RESERVED;
393 int rc = PGMR3PhysSetFlags(pVM, GCPhys, cbRange, MM_RAM_FLAGS_RESERVED, ~0);
394 AssertRC(rc);
395
396 REMR3NotifyPhysReserve(pVM, GCPhys, cbRange);
397 return rc;
398}
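/*
 * Usage sketch (illustrative): reserving the PC ROM/MMIO hole at 0xa0000-0xfffff
 * before ROM images are registered into it. If no existing locked range covers
 * the request, backing pages are allocated and registered as reserved RAM.
 *
 *     int rc = MMR3PhysReserve(pVM, 0xa0000, 0x60000, "ROM/MMIO area");
 *     AssertRCReturn(rc, rc);
 */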
399
400
401/**
402 * Get the size of the base RAM.
403 * This usually means the size of the first contiguous block of physical memory.
404 *
405 * @returns The guest base RAM size.
406 * @param pVM The VM handle.
407 * @thread Any.
408 */
409MMR3DECL(uint64_t) MMR3PhysGetRamSize(PVM pVM)
410{
411 return pVM->mm.s.cbRamBase;
412}
413
414
415/**
416 * Called by MMR3Reset to reset the shadow ROM.
417 *
418 * Resetting involves reloading the ROM into RAM and making it
419 * writable again (it was made read-only at the end of the POST).
420 *
421 * @param pVM The VM handle.
422 */
423void mmR3PhysRomReset(PVM pVM)
424{
425 for (PMMROMRANGE pCur = pVM->mm.s.pRomHead; pCur; pCur = pCur->pNext)
426 if (pCur->fShadow)
427 {
428 memcpy(pCur->pvCopy, pCur->pvBinary, pCur->cbRange);
429 if (!pCur->fWritable)
430 {
431 int rc = PGMHandlerPhysicalDeregister(pVM, pCur->GCPhys);
432 AssertRC(rc);
433 pCur->fWritable = true;
434
435 rc = PGMR3PhysSetFlags(pVM, pCur->GCPhys, pCur->cbRange, MM_RAM_FLAGS_MMIO2, ~0); /* ROM -> ROM + MMIO2 */
436 AssertRC(rc);
437
438 REMR3NotifyPhysRomRegister(pVM, pCur->GCPhys, pCur->cbRange, pCur->pvCopy, true /* read-write now */);
439 }
440 }
441}
442
443
444/**
445 * Write-protects a shadow ROM range.
446 *
447 * This is called late in the POST for shadow ROM ranges.
448 *
449 * @returns VBox status code.
450 * @param pVM The VM handle.
451 * @param GCPhys Start of the registered shadow ROM range.
452 * @param cbRange The length of the registered shadow ROM range.
453 * This can be 0 (not sure about the BIOS interface yet).
454 */
455MMR3DECL(int) MMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
456{
457 for (PMMROMRANGE pCur = pVM->mm.s.pRomHead; pCur; pCur = pCur->pNext)
458 if ( pCur->GCPhys == GCPhys
459 && ( pCur->cbRange == cbRange
460 || !cbRange))
461 {
462 if (pCur->fWritable)
463 {
464 cbRange = pCur->cbRange;
465 int rc = PGMR3HandlerPhysicalRegister(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhys, GCPhys + cbRange - 1,
466 NULL, NULL,
467 NULL, "pgmPhysRomWriteHandler", 0,
468 NULL, "pgmPhysRomWriteHandler", 0, pCur->pszDesc);
469 AssertRCReturn(rc, rc);
470 pCur->fWritable = false;
471
472 rc = PGMR3PhysSetFlags(pVM, GCPhys, cbRange, 0, ~MM_RAM_FLAGS_MMIO2); /* ROM + MMIO2 -> ROM */
473 AssertRCReturn(rc, rc);
474 /* Don't bother with the MM page flags here because I don't think they are
475 really used beyond conflict checking for ROM, RAM, reservations, etc. */
476
477 REMR3NotifyPhysRomRegister(pVM, GCPhys, cbRange, pCur->pvCopy, false /* read-only now */);
478 }
479 return VINF_SUCCESS;
480 }
481 AssertMsgFailed(("GCPhys=%VGp cbRange=%#x\n", GCPhys, cbRange));
482 return VERR_INVALID_PARAMETER;
483}
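/*
 * Usage sketch (illustrative): at the end of the POST the firmware device
 * write-protects its shadowed ROM; mmR3PhysRomReset() above makes it writable
 * again at the next VM reset. Passing 0 for cbRange matches on the start
 * address alone.
 *
 *     int rc = MMR3PhysRomProtect(pVM, 0xf0000, 0x10000);
 *     AssertRCReturn(rc, rc);
 */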
484