VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp@ 6862

Last change on this file since 6862 was 6861, checked in by vboxsync, 17 years ago

Sketched out new MMIO and MMIO2 interfaces. Not complete or implemented yet as the MM_RAM_FLAGS_* needs eliminating first.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 62.4 KB
1/* $Id: PGMPhys.cpp 6861 2008-02-08 10:04:44Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/pgm.h>
24#include <VBox/cpum.h>
25#include <VBox/iom.h>
26#include <VBox/sup.h>
27#include <VBox/mm.h>
28#include <VBox/stam.h>
29#include <VBox/rem.h>
30#include <VBox/csam.h>
31#include "PGMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/dbg.h>
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/assert.h>
37#include <iprt/alloc.h>
38#include <iprt/asm.h>
39#include <VBox/log.h>
40#include <iprt/thread.h>
41#include <iprt/string.h>
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47/*static - shut up warning */
48DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
49
50
51
52/*
53 * PGMR3PhysReadByte/Word/Dword
54 * PGMR3PhysWriteByte/Word/Dword
55 */
56/** @todo rename and add U64. */
57
58#define PGMPHYSFN_READNAME PGMR3PhysReadByte
59#define PGMPHYSFN_WRITENAME PGMR3PhysWriteByte
60#define PGMPHYS_DATASIZE 1
61#define PGMPHYS_DATATYPE uint8_t
62#include "PGMPhys.h"
63
64#define PGMPHYSFN_READNAME PGMR3PhysReadWord
65#define PGMPHYSFN_WRITENAME PGMR3PhysWriteWord
66#define PGMPHYS_DATASIZE 2
67#define PGMPHYS_DATATYPE uint16_t
68#include "PGMPhys.h"
69
70#define PGMPHYSFN_READNAME PGMR3PhysReadDword
71#define PGMPHYSFN_WRITENAME PGMR3PhysWriteDword
72#define PGMPHYS_DATASIZE 4
73#define PGMPHYS_DATATYPE uint32_t
74#include "PGMPhys.h"
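/*
 * A usage sketch (from some ring-3 caller holding the VM handle), assuming the
 * template header above expands to accessors of the form read(pVM, GCPhys)
 * returning the value and write(pVM, GCPhys, value); the addresses and value
 * below are purely illustrative:
 */
#if 0
    uint16_t u16 = PGMR3PhysReadWord(pVM, 0xf0000);
    PGMR3PhysWriteDword(pVM, 0x1000, UINT32_C(0xdeadbeef));
    NOREF(u16);
#endif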
75
76
77
78/**
79 * Links a new RAM range into the list.
80 *
81 * @param pVM Pointer to the shared VM structure.
82 * @param pNew Pointer to the new list entry.
83 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
84 */
85static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
86{
87 pgmLock(pVM);
88
89 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
90 pNew->pNextR3 = pRam;
91 pNew->pNextR0 = pRam ? MMHyperCCToR0(pVM, pRam) : NIL_RTR0PTR;
92 pNew->pNextGC = pRam ? MMHyperCCToGC(pVM, pRam) : NIL_RTGCPTR;
93
94 if (pPrev)
95 {
96 pPrev->pNextR3 = pNew;
97 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
98 pPrev->pNextGC = MMHyperCCToGC(pVM, pNew);
99 }
100 else
101 {
102 pVM->pgm.s.pRamRangesR3 = pNew;
103 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
104 pVM->pgm.s.pRamRangesGC = MMHyperCCToGC(pVM, pNew);
105 }
106
107 pgmUnlock(pVM);
108}
109
110
111/**
112 * Links a new RAM range into the list.
113 *
114 * @param pVM Pointer to the shared VM structure.
115 * @param pRam Pointer to the new list entry.
116 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
117 */
118static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
119{
120 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
121
122 pgmLock(pVM);
123
124 PPGMRAMRANGE pNext = pRam->pNextR3;
125 if (pPrev)
126 {
127 pPrev->pNextR3 = pNext;
128 pPrev->pNextR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
129 pPrev->pNextGC = pNext ? MMHyperCCToGC(pVM, pNext) : NIL_RTGCPTR;
130 }
131 else
132 {
133 pVM->pgm.s.pRamRangesR3 = pNext;
134 pVM->pgm.s.pRamRangesR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
135 pVM->pgm.s.pRamRangesGC = pNext ? MMHyperCCToGC(pVM, pNext) : NIL_RTGCPTR;
136 }
137
138 pgmUnlock(pVM);
139}
140
141
142
143/**
144 * Sets up a RAM range.
145 *
146 * This will check for conflicting registrations, make a resource
147 * reservation for the memory (with GMM), and setup the per-page
148 * tracking structures (PGMPAGE).
149 *
150 * @returns VBox status code.
151 * @param pVM Pointer to the shared VM structure.
152 * @param GCPhys The physical address of the RAM.
153 * @param cb The size of the RAM.
154 * @param pszDesc The description - not copied, so, don't free or change it.
155 */
156PGMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
157{
158 /*
159 * Validate input.
160 */
161 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
162 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
163 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
164 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
165 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
166 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
167 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
168 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
169
170 /*
171 * Find range location and check for conflicts.
172 * (We don't lock here because the locking by EMT is only required on update.)
173 */
174 PPGMRAMRANGE pPrev = NULL;
175 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
176 while (pRam && GCPhysLast >= pRam->GCPhys)
177 {
178 if ( GCPhys <= pRam->GCPhysLast
179 && GCPhysLast >= pRam->GCPhys)
180 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
181 GCPhys, GCPhysLast, pszDesc,
182 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
183 VERR_PGM_RAM_CONFLICT);
184
185 /* next */
186 pPrev = pRam;
187 pRam = pRam->pNextR3;
188 }
189
190 /*
191 * Register it with GMM (the API bitches).
192 */
193 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
194 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
195 if (RT_FAILURE(rc))
196 return rc;
197
198 /*
199 * Allocate RAM range.
200 */
201 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
202 PPGMRAMRANGE pNew;
203 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
204 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zd\n", cbRamRange), rc);
205
206 /*
207 * Initialize the range.
208 */
209 pNew->GCPhys = GCPhys;
210 pNew->GCPhysLast = GCPhysLast;
211 pNew->pszDesc = pszDesc;
212 pNew->cb = cb;
213 pNew->fFlags = 0;
214 pNew->pvHC = NULL;
215
216 pNew->pavHCChunkHC = NULL;
217 pNew->pavHCChunkGC = 0;
218
219#ifndef VBOX_WITH_NEW_PHYS_CODE
220 /* Allocate memory for chunk to HC ptr lookup array. */
221 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
222 AssertRCReturn(rc, rc);
223 pNew->pavHCChunkGC = MMHyperCCToGC(pVM, pNew->pavHCChunkHC);
224 pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
225
226#endif
227 RTGCPHYS iPage = cPages;
228 while (iPage-- > 0)
229 {
230#ifdef VBOX_WITH_NEW_PHYS_CODE
231 pNew->aPages[iPage].HCPhys = pVM->pgm.s.HCPhysZeroPg;
232#else
233 pNew->aPages[iPage].HCPhys = 0;
234#endif
235 pNew->aPages[iPage].fWrittenTo = 0;
236 pNew->aPages[iPage].fSomethingElse = 0;
237 pNew->aPages[iPage].u29B = 0;
238 PGM_PAGE_SET_TYPE(&pNew->aPages[iPage], PGMPAGETYPE_RAM);
239 PGM_PAGE_SET_STATE(&pNew->aPages[iPage], PGM_PAGE_STATE_ZERO);
240 PGM_PAGE_SET_PAGEID(&pNew->aPages[iPage], NIL_GMM_PAGEID);
241 }
242
243 /*
244 * Insert the new RAM range.
245 */
246 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
247
248 /*
249 * Notify REM.
250 */
251#ifdef VBOX_WITH_NEW_PHYS_CODE
252 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, 0);
253#else
254 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, MM_RAM_FLAGS_DYNAMIC_ALLOC);
255#endif
256
257 return VINF_SUCCESS;
258}
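/*
 * A minimal usage sketch: VM construction code could register one flat block
 * of base memory with a single call like the one below (the size and
 * description are illustrative, not taken from any real configuration):
 */
#if 0
    int rc = PGMR3PhysRegisterRam(pVM, 0 /* GCPhys */, 128 * _1M, "Base RAM");
    AssertRCReturn(rc, rc);
#endif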
259
260
261/**
262 * This is the interface IOM is using to register an MMIO region.
263 *
264 * It will check for conflicts and ensure that a RAM range structure
265 * is present before calling the PGMR3HandlerPhysicalRegister API to
266 * register the callbacks.
267 *
268 */
269PDMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
270{
271 return -1;
272}
273
274
275/**
276 * This is the interface IOM is using to register an MMIO region.
277 *
278 * It will validate the MMIO region, call PGMHandlerPhysicalDeregister,
279 * and free the RAM range if one was allocated specially for this MMIO
280 * region.
281 */
282PDMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
283{
284 return -1;
285}
286
287
288/**
289 * Allocate and register a MMIO2 region.
290 *
291 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's
292 * RAM associated with a device. It is also non-shared memory with a
293 * permanent ring-3 mapping and page backing (presently).
294 *
295 * A MMIO2 range may overlap with base memory if a lot of RAM
296 * is configured for the VM, in which case we'll drop the base
297 * memory pages. Presently we will make no attempt to preserve
298 * anything that happens to be present in the base memory that
299 * is replaced; this is of course incorrect, but it's too much
300 * effort.
301 */
302PDMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb, void **ppv, const char *pszDesc)
303{
304 return -1;
305}
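/*
 * Sketch of the intended call pattern once this is implemented: a display
 * device would allocate its frame buffer as MMIO2 and keep the returned
 * ring-3 mapping (the device pointer, address and size are hypothetical):
 */
#if 0
    void *pvVRam;
    int rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0xe0000000, 32 * _1M, &pvVRam, "VRam");
    AssertRCReturn(rc, rc);
#endif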
306
307
308/**
309 * Reallocates a MMIO2 region.
310 *
311 * This is done when a guest / the bios / state loading changes the
312 * PCI config. The replacing of base memory has the same restrictions
313 * as during registration, of course.
314 */
315PDMR3DECL(int) PGMR3PhysMMIO2Relocate(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew)
316{
317 return -1;
318}
319
320
321/**
322 * Deregisters and frees a MMIO2 region.
323 *
324 * Any physical (and virtual) access handlers registered for the region must
325 * be deregistered before calling this function.
326 */
327PDMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, RTGCPHYS GCPhys, void *pv)
328{
329 return -1;
330}
331
332
333/**
334 * Registers a ROM image.
335 *
336 * Shadowed ROM images require double the amount of backing memory, so
337 * don't use that unless you have to. Shadowing of ROM images is a process
338 * where we can select where the reads go and where the writes go. On real
339 * hardware the chipset provides means to configure this. We provide
340 * PGMR3PhysProtectROM() for this purpose.
341 *
342 * A read-only copy of the ROM image will always be kept around while we
343 * will allocate RAM pages for the changes on demand (unless all memory
344 * is configured to be preallocated).
345 *
346 * @returns VBox status.
347 * @param pVM VM Handle.
348 * @param pDevIns The device instance owning the ROM.
349 * @param GCPhys First physical address in the range.
350 * Must be page aligned!
351 * @param cb The size of the range (in bytes).
352 * Must be page aligned!
353 * @param pvBinary Pointer to the binary data backing the ROM image.
354 * This must be exactly \a cb in size.
355 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAG_SHADOWED
356 * and/or PGMPHYS_ROM_FLAG_PERMANENT_BINARY.
357 * @param pszDesc Pointer to description string. This must not be freed.
358 *
359 * @remark There is no way to remove the ROM yet, either automatically on device
360 * cleanup or manually from the device. This isn't difficult in any way; it's
361 * just not something we expect to be necessary for a while.
362 */
363PGMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
364 const void *pvBinary, uint32_t fFlags, const char *pszDesc)
365{
366 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
367 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, fFlags, pszDesc));
368
369 /*
370 * Validate input.
371 */
372 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
373 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
374 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
375 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
376 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
377 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
378 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
379 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAG_SHADOWED | PGMPHYS_ROM_FLAG_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
380 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
381
382 const uint32_t cPages = cb >> PAGE_SHIFT;
383
384 /*
385 * Find the ROM location in the ROM list first.
386 */
387 PPGMROMRANGE pRomPrev = NULL;
388 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
389 while (pRom && GCPhysLast >= pRom->GCPhys)
390 {
391 if ( GCPhys <= pRom->GCPhysLast
392 && GCPhysLast >= pRom->GCPhys)
393 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
394 GCPhys, GCPhysLast, pszDesc,
395 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
396 VERR_PGM_RAM_CONFLICT);
397 /* next */
398 pRomPrev = pRom;
399 pRom = pRom->pNextR3;
400 }
401
402 /*
403 * Find the RAM location and check for conflicts.
404 *
405 * Conflict detection is a bit different from RAM
406 * registration since a ROM can be located within a RAM
407 * range. So, what we have to check for is other memory
408 * types (other than RAM, that is) and that we don't span
409 * more than one RAM range (lazy).
410 */
411 bool fRamExists = false;
412 PPGMRAMRANGE pRamPrev = NULL;
413 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
414 while (pRam && GCPhysLast >= pRam->GCPhys)
415 {
416 if ( GCPhys <= pRam->GCPhysLast
417 && GCPhysLast >= pRam->GCPhys)
418 {
419 /* completely within? */
420 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
421 && GCPhysLast <= pRam->GCPhysLast,
422 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
423 GCPhys, GCPhysLast, pszDesc,
424 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
425 VERR_PGM_RAM_CONFLICT);
426 fRamExists = true;
427 break;
428 }
429
430 /* next */
431 pRamPrev = pRam;
432 pRam = pRam->pNextR3;
433 }
434 if (fRamExists)
435 {
436 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
437 uint32_t cPagesLeft = cPages;
438 while (cPagesLeft-- > 0)
439 {
440 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
441 ("%RGp isn't a RAM page (%d) - registering %RGp-%RGp (%s).\n",
442 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pszDesc),
443 VERR_PGM_RAM_CONFLICT);
444 Assert(PGM_PAGE_IS_ZERO(pPage));
445 }
446 }
447
448 /*
449 * Update the base memory reservation if necessary.
450 */
451 uint32_t cExtraBaseCost = fRamExists ? cPages : 0;
452 if (fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
453 cExtraBaseCost += cPages;
454 if (cExtraBaseCost)
455 {
456 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
457 if (RT_FAILURE(rc))
458 return rc;
459 }
460
461 /*
462 * Allocate memory for the virgin copy of the RAM.
463 */
464 PGMMALLOCATEPAGESREQ pReq;
465 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
466 AssertRCReturn(rc, rc);
467
468 for (uint32_t iPage = 0; iPage < cPages; iPage++)
469 {
470 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
471 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
472 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
473 }
474
475 pgmLock(pVM);
476 rc = GMMR3AllocatePagesPerform(pVM, pReq);
477 pgmUnlock(pVM);
478 if (RT_FAILURE(rc))
479 {
480 GMMR3AllocatePagesCleanup(pReq);
481 return rc;
482 }
483
484 /*
485 * Allocate the new ROM range and RAM range (if necessary).
486 */
487 PPGMROMRANGE pRomNew;
488 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
489 if (RT_SUCCESS(rc))
490 {
491 PPGMRAMRANGE pRamNew = NULL;
492 if (!fRamExists)
493 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
494 if (RT_SUCCESS(rc))
495 {
496 pgmLock(pVM);
497
498 /*
499 * Initialize and insert the RAM range (if required).
500 */
501 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
502 if (!fRamExists)
503 {
504 pRamNew->GCPhys = GCPhys;
505 pRamNew->GCPhysLast = GCPhysLast;
506 pRamNew->pszDesc = pszDesc;
507 pRamNew->cb = cb;
508 pRamNew->fFlags = 0;
509 pRamNew->pvHC = NULL;
510
511 PPGMPAGE pPage = &pRamNew->aPages[0];
512 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
513 {
514 pPage->fWrittenTo = 0;
515 pPage->fSomethingElse = 0;
516 pPage->u29B = 0;
517 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
518 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
519 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
520 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
521
522 pRomPage->Virgin = *pPage;
523 }
524
525 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
526 }
527 else
528 {
529 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
530 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
531 {
532 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
533 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
534 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
535 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
536
537 pRomPage->Virgin = *pPage;
538 }
539
540 pRamNew = pRam;
541 }
542 pgmUnlock(pVM);
543
544
545 /*
546 * Register the write access handler for the range (PGMROMPROT_READ_ROM_WRITE_IGNORE).
547 */
548 rc = PGMR3HandlerPhysicalRegister(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhys, GCPhysLast,
549#if 0 /** @todo we actually need a ring-3 write handler here for shadowed ROMs, so hack REM! */
550 pgmR3PhysRomWriteHandler, pRomNew,
551#else
552 NULL, NULL,
553#endif
554 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
555 NULL, "pgmPhysRomWriteHandler", MMHyperCCToGC(pVM, pRomNew), pszDesc);
556 if (RT_SUCCESS(rc))
557 {
558 pgmLock(pVM);
559
560 /*
561 * Copy the image over to the virgin pages.
562 * This must be done after linking in the RAM range.
563 */
564 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
565 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
566 {
567 void *pvDstPage;
568 PPGMPAGEMAP pMapIgnored;
569 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pMapIgnored, &pvDstPage);
570 if (RT_FAILURE(rc))
571 {
572 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
573 break;
574 }
575 memcpy(pvDstPage, (const uint8_t *)pvBinary + (iPage << PAGE_SHIFT), PAGE_SIZE);
576 }
577 if (RT_SUCCESS(rc))
578 {
579 /*
580 * Initialize the ROM range.
581 * Note that the Virgin member of the pages has already been initialized above.
582 */
583 pRomNew->GCPhys = GCPhys;
584 pRomNew->GCPhysLast = GCPhysLast;
585 pRomNew->cb = cb;
586 pRomNew->fFlags = fFlags;
587 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAG_PERMANENT_BINARY ? pvBinary : NULL;
588 pRomNew->pszDesc = pszDesc;
589
590 for (unsigned iPage = 0; iPage < cPages; iPage++)
591 {
592 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
593
594 pPage->Shadow.HCPhys = 0;
595 pPage->Shadow.fWrittenTo = 0;
596 pPage->Shadow.fSomethingElse = 0;
597 pPage->Shadow.u29B = 0;
598 PGM_PAGE_SET_TYPE( &pPage->Shadow, PGMPAGETYPE_ROM_SHADOW);
599 PGM_PAGE_SET_STATE( &pPage->Shadow, PGM_PAGE_STATE_ZERO);
600 PGM_PAGE_SET_PAGEID(&pPage->Shadow, pReq->aPages[iPage].idPage);
601
602 pRomNew->aPages[iPage].enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
603 }
604
605 /*
606 * Insert the ROM range, tell REM and return successfully.
607 */
608 pRomNew->pNextR3 = pRom;
609 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
610 pRomNew->pNextGC = pRom ? MMHyperCCToGC(pVM, pRom) : NIL_RTGCPTR;
611
612 if (pRomPrev)
613 {
614 pRomPrev->pNextR3 = pRomNew;
615 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
616 pRomPrev->pNextGC = MMHyperCCToGC(pVM, pRomNew);
617 }
618 else
619 {
620 pVM->pgm.s.pRomRangesR3 = pRomNew;
621 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
622 pVM->pgm.s.pRomRangesGC = MMHyperCCToGC(pVM, pRomNew);
623 }
624
625 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false); /** @todo fix shadowing and REM. */
626
627 GMMR3AllocatePagesCleanup(pReq);
628 pgmUnlock(pVM);
629 return VINF_SUCCESS;
630 }
631
632 /* bail out */
633
634 pgmUnlock(pVM);
635 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
636 AssertRC(rc2);
637 pgmLock(pVM);
638 }
639
640 pgmR3PhysUnlinkRamRange(pVM, pRamNew, pRamPrev);
641 if (pRamNew)
642 MMHyperFree(pVM, pRamNew);
643 }
644 MMHyperFree(pVM, pRomNew);
645 }
646
647 /** @todo Purge the mapping cache or something... */
648 GMMR3FreeAllocatedPages(pVM, pReq);
649 GMMR3AllocatePagesCleanup(pReq);
650 pgmUnlock(pVM);
651 return rc;
652}
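/*
 * Usage sketch: a PC BIOS device could register a 64 KB shadowed image at the
 * top of the low megabyte roughly like this (pvBiosImage is a hypothetical
 * pointer to the image the device has loaded):
 */
#if 0
    int rc = PGMR3PhysRomRegister(pVM, pDevIns, 0xf0000, 0x10000, pvBiosImage,
                                  PGMPHYS_ROM_FLAG_SHADOWED, "PC BIOS");
    AssertRCReturn(rc, rc);
#endif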
653
654
655/**
656 * \#PF Handler callback for ROM write accesses.
657 *
658 * @returns VINF_SUCCESS if the handler has carried out the operation.
659 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
660 * @param pVM VM Handle.
661 * @param GCPhys The physical address the guest is writing to.
662 * @param pvPhys The HC mapping of that address.
663 * @param pvBuf What the guest is reading/writing.
664 * @param cbBuf How much it's reading/writing.
665 * @param enmAccessType The access type.
666 * @param pvUser User argument.
667 */
668/*static - shut up warning */
669 DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
670{
671 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
672 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
673 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
674 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
675 switch (pRomPage->enmProt)
676 {
677 /*
678 * Ignore.
679 */
680 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
681 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
682 return VINF_SUCCESS;
683
684 /*
685 * Write to the ram page.
686 */
687 case PGMROMPROT_READ_ROM_WRITE_RAM:
688 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
689 {
690 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
691 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
692
693 /*
694 * Take the lock, do lazy allocation, map the page and copy the data.
695 *
696 * Note that we have to bypass the mapping TLB since it works on
697 * guest physical addresses and entering the shadow page would
698 * kind of screw things up...
699 */
700 int rc = pgmLock(pVM);
701 AssertRC(rc);
702
703 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pRomPage->Shadow) != PGM_PAGE_STATE_ALLOCATED))
704 {
705 rc = pgmPhysPageMakeWritable(pVM, &pRomPage->Shadow, GCPhys);
706 if (RT_FAILURE(rc))
707 {
708 pgmUnlock(pVM);
709 return rc;
710 }
711 }
712
713 void *pvDstPage;
714 PPGMPAGEMAP pMapIgnored;
715 rc = pgmPhysPageMap(pVM, &pRomPage->Shadow, GCPhys & X86_PTE_PG_MASK, &pMapIgnored, &pvDstPage);
716 if (RT_SUCCESS(rc))
717 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
718
719 pgmUnlock(pVM);
720 return rc;
721 }
722
723 default:
724 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
725 pRom->aPages[iPage].enmProt, iPage, GCPhys),
726 VERR_INTERNAL_ERROR);
727 }
728}
729
730
731
732/**
733 * Called by PGMR3Reset to reset the shadow, switch to the virgin,
734 * and verify that the virgin part is untouched.
735 *
736 * This is done after the normal memory has been cleared.
737 *
738 * @param pVM The VM handle.
739 */
740int pgmR3PhysRomReset(PVM pVM)
741{
742 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
743 {
744 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
745
746 if (pRom->fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
747 {
748 /*
749 * Reset the physical handler.
750 */
751 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
752 AssertRCReturn(rc, rc);
753
754 /*
755 * What we do with the shadow pages depends on the memory
756 * preallocation option. If not enabled, we'll just throw
757 * out all the dirty pages and replace them by the zero page.
758 */
759 if (1)///@todo !pVM->pgm.f.fRamPreAlloc)
760 {
761 /* Count dirty shadow pages. */
762 uint32_t cDirty = 0;
763 uint32_t iPage = cPages;
764 while (iPage-- > 0)
765 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
766 cDirty++;
767 if (cDirty)
768 {
769 /* Free the dirty pages. */
770 PGMMFREEPAGESREQ pReq;
771 rc = GMMR3FreePagesPrepare(pVM, &pReq, cDirty, GMMACCOUNT_BASE);
772 AssertRCReturn(rc, rc);
773
774 uint32_t iReqPage = 0;
775 for (iPage = 0; iPage < cPages; iPage++)
776 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
777 {
778 pReq->aPages[iReqPage].idPage = PGM_PAGE_GET_PAGEID(&pRom->aPages[iPage].Shadow);
779 iReqPage++;
780 }
781
782 rc = GMMR3FreePagesPerform(pVM, pReq);
783 GMMR3FreePagesCleanup(pReq);
784 AssertRCReturn(rc, rc);
785
786 /* setup the zero page. */
787 for (iPage = 0; iPage < cPages; iPage++)
788 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
789 {
790 PGM_PAGE_SET_STATE( &pRom->aPages[iPage].Shadow, PGM_PAGE_STATE_ZERO);
791 PGM_PAGE_SET_HCPHYS(&pRom->aPages[iPage].Shadow, pVM->pgm.s.HCPhysZeroPg);
792 PGM_PAGE_SET_PAGEID(&pRom->aPages[iPage].Shadow, NIL_GMM_PAGEID);
793 pRom->aPages[iPage].Shadow.fWrittenTo = false;
794 iReqPage++;
795 }
796 }
797 }
798 else
799 {
800 /* clear all the pages. */
801 pgmLock(pVM);
802 for (uint32_t iPage = 0; iPage < cPages; iPage++)
803 {
804 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
805 rc = pgmPhysPageMakeWritable(pVM, &pRom->aPages[iPage].Shadow, GCPhys);
806 if (RT_FAILURE(rc))
807 break;
808
809 void *pvDstPage;
810 PPGMPAGEMAP pMapIgnored;
811 rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pMapIgnored, &pvDstPage);
812 if (RT_FAILURE(rc))
813 break;
814 memset(pvDstPage, 0, PAGE_SIZE);
815 }
816 pgmUnlock(pVM);
817 AssertRCReturn(rc, rc);
818 }
819 }
820
821#ifdef VBOX_STRICT
822 /*
823 * Verify that the virgin page is unchanged if possible.
824 */
825 if (pRom->pvOriginal)
826 {
827 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
828 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbSrcPage += PAGE_SIZE)
829 {
830 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
831 PPGMPAGEMAP pMapIgnored;
832 void *pvDstPage;
833 int rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pMapIgnored, &pvDstPage);
834 if (RT_FAILURE(rc))
835 break;
836 if (memcmp(pvDstPage, pbSrcPage, PAGE_SIZE))
837 LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
838 GCPhys, pRom->pszDesc));
839 }
840 }
841#endif
842 }
843
844 return VINF_SUCCESS;
845}
846
847
848/**
849 * Change the shadowing of a range of ROM pages.
850 *
851 * This is intended for implementing chipset specific memory registers
852 * and will not be very strict about the input. It will silently ignore
853 * any pages that are not part of a shadowed ROM.
854 *
855 * @returns VBox status code.
856 * @param pVM Pointer to the shared VM structure.
857 * @param GCPhys Where to start. Page aligned.
858 * @param cb How much to change. Page aligned.
859 * @param enmProt The new ROM protection.
860 */
861PGMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
862{
863 /*
864 * Check input
865 */
866 if (!cb)
867 return VINF_SUCCESS;
868 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
869 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
870 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
871 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
872 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
873
874 /*
875 * Process the request.
876 */
877 bool fFlushedPool = false;
878 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
879 if ( GCPhys <= pRom->GCPhysLast
880 && GCPhysLast >= pRom->GCPhys)
881 {
882 /*
883 * Iterate the relevant pages and make the necessary changes.
884 */
885 bool fChanges = false;
886 uint32_t const cPages = pRom->GCPhysLast > GCPhysLast
887 ? pRom->cb >> PAGE_SHIFT
888 : (GCPhysLast - pRom->GCPhys) >> PAGE_SHIFT;
889 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
890 iPage < cPages;
891 iPage++)
892 {
893 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
894 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
895 {
896 fChanges = true;
897
898 /* flush the page pool first so we don't leave any usage references dangling. */
899 if (!fFlushedPool)
900 {
901 pgmPoolFlushAll(pVM);
902 fFlushedPool = true;
903 }
904
905 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
906 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
907 PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));
908
909 *pOld = *pRamPage;
910 *pRamPage = *pNew;
911 /** @todo sync the volatile flags (handlers) when these have been moved out of HCPhys. */
912 }
913 }
914
915 /*
916 * Reset the access handler if we made changes, no need
917 * to optimize this.
918 */
919 if (fChanges)
920 {
921 int rc = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
922 AssertRCReturn(rc, rc);
923 }
924
925 /* Advance - cb isn't updated. */
926 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
927 }
928
929 return VINF_SUCCESS;
930}
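/*
 * Usage sketch: a chipset device modelling PAM-style shadow registers could
 * switch the BIOS area from the ROM to the writable shadow copy and back,
 * e.g. (the range is illustrative):
 */
#if 0
    int rc = PGMR3PhysRomProtect(pVM, 0xf0000, 0x10000, PGMROMPROT_READ_RAM_WRITE_IGNORE);
    AssertRC(rc);
#endif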
931
932
933/**
934 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
935 * registration APIs call to inform PGM about memory registrations.
936 *
937 * It registers the physical memory range with PGM. MM is responsible
938 * for the top-level things - allocation and locking - while PGM takes
939 * care of all the details and implements the physical address space virtualization.
940 *
941 * @returns VBox status.
942 * @param pVM The VM handle.
943 * @param pvRam HC virtual address of the RAM range. (page aligned)
944 * @param GCPhys GC physical address of the RAM range. (page aligned)
945 * @param cb Size of the RAM range. (page aligned)
946 * @param fFlags Flags, MM_RAM_*.
947 * @param paPages Pointer to an array of physical page descriptors.
948 * @param pszDesc Description string.
949 */
950PGMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
951{
952 /*
953 * Validate input.
954 * (Not so important because callers are only MMR3PhysRegister()
955 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
956 */
957 Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
958
959 Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
960 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
961 Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
962 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
963 Assert(!(fFlags & ~0xfff));
964 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
965 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
966 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
967 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
968 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
969 if (GCPhysLast < GCPhys)
970 {
971 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
972 return VERR_INVALID_PARAMETER;
973 }
974
975 /*
976 * Find range location and check for conflicts.
977 */
978 PPGMRAMRANGE pPrev = NULL;
979 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
980 while (pCur)
981 {
982 if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
983 {
984 AssertMsgFailed(("Conflict! This cannot happen!\n"));
985 return VERR_PGM_RAM_CONFLICT;
986 }
987 if (GCPhysLast < pCur->GCPhys)
988 break;
989
990 /* next */
991 pPrev = pCur;
992 pCur = pCur->pNextR3;
993 }
994
995 /*
996 * Allocate RAM range.
997 * Small ranges are allocated from the heap, big ones have separate mappings.
998 */
999 size_t cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
1000 PPGMRAMRANGE pNew;
1001 RTGCPTR GCPtrNew;
1002 int rc = VERR_NO_MEMORY;
1003 if (cbRam > PAGE_SIZE / 2)
1004 { /* large */
1005 cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
1006 rc = SUPPageAlloc(cbRam >> PAGE_SHIFT, (void **)&pNew);
1007 if (VBOX_SUCCESS(rc))
1008 {
1009 rc = MMR3HyperMapHCRam(pVM, pNew, cbRam, true, pszDesc, &GCPtrNew);
1010 if (VBOX_SUCCESS(rc))
1011 {
1012 Assert(MMHyperHC2GC(pVM, pNew) == GCPtrNew);
1013 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
1014 }
1015 else
1016 {
1017 AssertMsgFailed(("MMR3HyperMapHCRam(,,%#x,,,) -> %Vrc\n", cbRam, rc));
1018 SUPPageFree(pNew, cbRam >> PAGE_SHIFT);
1019 }
1020 }
1021 else
1022 AssertMsgFailed(("SUPPageAlloc(%#x,,) -> %Vrc\n", cbRam >> PAGE_SHIFT, rc));
1023
1024 }
1025/** @todo Make VGA and VMMDev register their memory at init time before the hma size is fixated. */
1026 if (RT_FAILURE(rc))
1027 { /* small + fallback (vga) */
1028 rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
1029 if (VBOX_SUCCESS(rc))
1030 GCPtrNew = MMHyperHC2GC(pVM, pNew);
1031 else
1032 AssertMsgFailed(("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc));
1033 }
1034 if (VBOX_SUCCESS(rc))
1035 {
1036 /*
1037 * Initialize the range.
1038 */
1039 pNew->pvHC = pvRam;
1040 pNew->GCPhys = GCPhys;
1041 pNew->GCPhysLast = GCPhysLast;
1042 pNew->cb = cb;
1043 pNew->fFlags = fFlags;
1044 pNew->pavHCChunkHC = NULL;
1045 pNew->pavHCChunkGC = 0;
1046
1047 unsigned iPage = cb >> PAGE_SHIFT;
1048 if (paPages)
1049 {
1050 while (iPage-- > 0)
1051 {
1052 pNew->aPages[iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
1053 pNew->aPages[iPage].fWrittenTo = 0;
1054 pNew->aPages[iPage].fSomethingElse = 0;
1055 pNew->aPages[iPage].u29B = 0;
1056 PGM_PAGE_SET_PAGEID(&pNew->aPages[iPage], NIL_GMM_PAGEID);
1057 PGM_PAGE_SET_TYPE(&pNew->aPages[iPage], fFlags & MM_RAM_FLAGS_MMIO2 ? PGMPAGETYPE_MMIO2 : PGMPAGETYPE_RAM);
1058 PGM_PAGE_SET_STATE(&pNew->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
1059 }
1060 }
1061 else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1062 {
1063 /* Allocate memory for chunk to HC ptr lookup array. */
1064 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
1065 AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc), rc);
1066
1067 pNew->pavHCChunkGC = MMHyperHC2GC(pVM, pNew->pavHCChunkHC);
1068 Assert(pNew->pavHCChunkGC);
1069
1070 /* Physical memory will be allocated on demand. */
1071 while (iPage-- > 0)
1072 {
1073 pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
1074 pNew->aPages[iPage].fWrittenTo = 0;
1075 pNew->aPages[iPage].fSomethingElse = 0;
1076 pNew->aPages[iPage].u29B = 0;
1077 PGM_PAGE_SET_PAGEID(&pNew->aPages[iPage], NIL_GMM_PAGEID);
1078 PGM_PAGE_SET_TYPE(&pNew->aPages[iPage], PGMPAGETYPE_RAM);
1079 PGM_PAGE_SET_STATE(&pNew->aPages[iPage], PGM_PAGE_STATE_ZERO);
1080 }
1081 }
1082 else
1083 {
1084 Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
1085 RTHCPHYS HCPhysDummyPage = (MMR3PageDummyHCPhys(pVM) & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
1086 while (iPage-- > 0)
1087 {
1088 pNew->aPages[iPage].HCPhys = HCPhysDummyPage; /** @todo PAGE FLAGS */
1089 pNew->aPages[iPage].fWrittenTo = 0;
1090 pNew->aPages[iPage].fSomethingElse = 0;
1091 pNew->aPages[iPage].u29B = 0;
1092 PGM_PAGE_SET_PAGEID(&pNew->aPages[iPage], NIL_GMM_PAGEID);
1093 PGM_PAGE_SET_TYPE(&pNew->aPages[iPage], PGMPAGETYPE_MMIO);
1094 PGM_PAGE_SET_STATE(&pNew->aPages[iPage], PGM_PAGE_STATE_ZERO);
1095 }
1096 }
1097
1098 /*
1099 * Insert the new RAM range.
1100 */
1101 pgmLock(pVM);
1102 pNew->pNextR3 = pCur;
1103 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : NIL_RTR0PTR;
1104 pNew->pNextGC = pCur ? MMHyperCCToGC(pVM, pCur) : NIL_RTGCPTR;
1105 if (pPrev)
1106 {
1107 pPrev->pNextR3 = pNew;
1108 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
1109 pPrev->pNextGC = GCPtrNew;
1110 }
1111 else
1112 {
1113 pVM->pgm.s.pRamRangesR3 = pNew;
1114 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
1115 pVM->pgm.s.pRamRangesGC = GCPtrNew;
1116 }
1117 pgmUnlock(pVM);
1118 }
1119 return rc;
1120}
1121
1122#ifndef VBOX_WITH_NEW_PHYS_CODE
1123
1124/**
1125 * Register a chunk of the physical memory range with PGM. MM is responsible
1126 * for the top-level things - allocation and locking - while PGM takes
1127 * care of all the details and implements the physical address space virtualization.
1128 *
1129 *
1130 * @returns VBox status.
1131 * @param pVM The VM handle.
1132 * @param pvRam HC virtual address of the RAM range. (page aligned)
1133 * @param GCPhys GC physical address of the RAM range. (page aligned)
1134 * @param cb Size of the RAM range. (page aligned)
1135 * @param fFlags Flags, MM_RAM_*.
1136 * @param paPages Pointer to an array of physical page descriptors.
1137 * @param pszDesc Description string.
1138 */
1139PGMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
1140{
1141 NOREF(pszDesc);
1142
1143 /*
1144 * Validate input.
1145 * (Not so important because callers are only MMR3PhysRegister()
1146 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
1147 */
1148 Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
1149
1150 Assert(paPages);
1151 Assert(pvRam);
1152 Assert(!(fFlags & ~0xfff));
1153 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1154 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
1155 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
1156 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1157 Assert(VM_IS_EMT(pVM));
1158 Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1159 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
1160
1161 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1162 if (GCPhysLast < GCPhys)
1163 {
1164 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
1165 return VERR_INVALID_PARAMETER;
1166 }
1167
1168 /*
1169 * Find existing range location.
1170 */
1171 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1172 while (pRam)
1173 {
1174 RTGCPHYS off = GCPhys - pRam->GCPhys;
1175 if ( off < pRam->cb
1176 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1177 break;
1178
1179 pRam = CTXALLSUFF(pRam->pNext);
1180 }
1181 AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
1182
1183 unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1184 unsigned iPage = cb >> PAGE_SHIFT;
1185 if (paPages)
1186 {
1187 while (iPage-- > 0)
1188 pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
1189 }
1190 off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
1191 pRam->pavHCChunkHC[off] = pvRam;
1192
1193 /* Notify the recompiler. */
1194 REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);
1195
1196 return VINF_SUCCESS;
1197}
1198
1199
1200/**
1201 * Allocate missing physical pages for an existing guest RAM range.
1202 *
1203 * @returns VBox status.
1204 * @param pVM The VM handle.
1205 * @param GCPhys GC physical address of the RAM range. (page aligned)
1206 */
1207PGMR3DECL(int) PGM3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
1208{
1209 /*
1210 * Walk range list.
1211 */
1212 pgmLock(pVM);
1213
1214 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1215 while (pRam)
1216 {
1217 RTGCPHYS off = GCPhys - pRam->GCPhys;
1218 if ( off < pRam->cb
1219 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1220 {
1221 bool fRangeExists = false;
1222 unsigned off = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
1223
1224 /** @note A request made from another thread may end up in EMT after somebody else has already allocated the range. */
1225 if (pRam->pavHCChunkHC[off])
1226 fRangeExists = true;
1227
1228 pgmUnlock(pVM);
1229 if (fRangeExists)
1230 return VINF_SUCCESS;
1231 return pgmr3PhysGrowRange(pVM, GCPhys);
1232 }
1233
1234 pRam = CTXALLSUFF(pRam->pNext);
1235 }
1236 pgmUnlock(pVM);
1237 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1238}
1239
1240
1241/**
1242 * Allocate missing physical pages for an existing guest RAM range.
1243 *
1244 * @returns VBox status.
1245 * @param pVM The VM handle.
1246 * @param pRamRange RAM range
1247 * @param GCPhys GC physical address of the RAM range. (page aligned)
1248 */
1249int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
1250{
1251 void *pvRam;
1252 int rc;
1253
1254 /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
1255 if (!VM_IS_EMT(pVM))
1256 {
1257 PVMREQ pReq;
1258
1259 AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));
1260
1261 rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, GCPhys);
1262 if (VBOX_SUCCESS(rc))
1263 {
1264 rc = pReq->iStatus;
1265 VMR3ReqFree(pReq);
1266 }
1267 return rc;
1268 }
1269
1270 /* Round down to chunk boundary */
1271 GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;
1272
1273 STAM_COUNTER_INC(&pVM->pgm.s.StatDynRamGrow);
1274 STAM_COUNTER_ADD(&pVM->pgm.s.StatDynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));
1275
1276 Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %VGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));
1277
1278 unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
1279
1280 for (;;)
1281 {
1282 rc = SUPPageAlloc(cPages, &pvRam);
1283 if (VBOX_SUCCESS(rc))
1284 {
1285
1286 rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
1287 if (VBOX_SUCCESS(rc))
1288 return rc;
1289
1290 SUPPageFree(pvRam, cPages);
1291 }
1292
1293 VMSTATE enmVMState = VMR3GetState(pVM);
1294 if (enmVMState != VMSTATE_RUNNING)
1295 {
1296 AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %VGp!\n", GCPhys));
1297 LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %VGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
1298 return rc;
1299 }
1300
1301 LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
1302
1303 /* Pause first, then inform Main. */
1304 rc = VMR3SuspendNoSave(pVM);
1305 AssertRC(rc);
1306
1307 VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM.");
1308
1309 /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
1310 rc = VMR3WaitForResume(pVM);
1311
1312 /* Retry */
1313 LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
1314 }
1315}
1316
1317#endif /* !VBOX_WITH_NEW_PHYS_CODE */
1318
1319/**
1320 * Interface that the MMIO handler relocation code calls.
1321 *
1322 * It relocates an existing physical memory range with PGM.
1323 *
1324 * @returns VBox status.
1325 * @param pVM The VM handle.
1326 * @param GCPhysOld Previous GC physical address of the RAM range. (page aligned)
1327 * @param GCPhysNew New GC physical address of the RAM range. (page aligned)
1328 * @param cb Size of the RAM range. (page aligned)
1329 */
1330PGMR3DECL(int) PGMR3PhysRelocate(PVM pVM, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, size_t cb)
1331{
1332 /*
1333 * Validate input.
1334 * (Not so important because callers are only MMR3PhysRelocate(),
1335 * but anyway...)
1336 */
1337 Log(("PGMR3PhysRelocate Old %VGp New %VGp (%#x bytes)\n", GCPhysOld, GCPhysNew, cb));
1338
1339 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1340 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
1341 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
1342 RTGCPHYS GCPhysLast;
1343 GCPhysLast = GCPhysOld + (cb - 1);
1344 if (GCPhysLast < GCPhysOld)
1345 {
1346 AssertMsgFailed(("The old range wraps! GCPhys=%VGp cb=%#x\n", GCPhysOld, cb));
1347 return VERR_INVALID_PARAMETER;
1348 }
1349 GCPhysLast = GCPhysNew + (cb - 1);
1350 if (GCPhysLast < GCPhysNew)
1351 {
1352 AssertMsgFailed(("The new range wraps! GCPhys=%VGp cb=%#x\n", GCPhysNew, cb));
1353 return VERR_INVALID_PARAMETER;
1354 }
1355
1356 /*
1357 * Find and remove old range location.
1358 */
1359 pgmLock(pVM);
1360 PPGMRAMRANGE pPrev = NULL;
1361 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
1362 while (pCur)
1363 {
1364 if (pCur->GCPhys == GCPhysOld && pCur->cb == cb)
1365 break;
1366
1367 /* next */
1368 pPrev = pCur;
1369 pCur = pCur->pNextR3;
1370 }
1371 if (pPrev)
1372 {
1373 pPrev->pNextR3 = pCur->pNextR3;
1374 pPrev->pNextR0 = pCur->pNextR0;
1375 pPrev->pNextGC = pCur->pNextGC;
1376 }
1377 else
1378 {
1379 pVM->pgm.s.pRamRangesR3 = pCur->pNextR3;
1380 pVM->pgm.s.pRamRangesR0 = pCur->pNextR0;
1381 pVM->pgm.s.pRamRangesGC = pCur->pNextGC;
1382 }
1383
1384 /*
1385 * Update the range.
1386 */
1387 pCur->GCPhys = GCPhysNew;
1388 pCur->GCPhysLast= GCPhysLast;
1389 PPGMRAMRANGE pNew = pCur;
1390
1391 /*
1392 * Find range location and check for conflicts.
1393 */
1394 pPrev = NULL;
1395 pCur = pVM->pgm.s.pRamRangesR3;
1396 while (pCur)
1397 {
1398 if (GCPhysNew <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
1399 {
1400 AssertMsgFailed(("Conflict! This cannot happen!\n"));
1401 pgmUnlock(pVM);
1402 return VERR_PGM_RAM_CONFLICT;
1403 }
1404 if (GCPhysLast < pCur->GCPhys)
1405 break;
1406
1407 /* next */
1408 pPrev = pCur;
1409 pCur = pCur->pNextR3;
1410 }
1411
1412 /*
1413 * Reinsert the RAM range.
1414 */
1415 pNew->pNextR3 = pCur;
1416 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : 0;
1417 pNew->pNextGC = pCur ? MMHyperCCToGC(pVM, pCur) : 0;
1418 if (pPrev)
1419 {
1420 pPrev->pNextR3 = pNew;
1421 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
1422 pPrev->pNextGC = MMHyperCCToGC(pVM, pNew);
1423 }
1424 else
1425 {
1426 pVM->pgm.s.pRamRangesR3 = pNew;
1427 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
1428 pVM->pgm.s.pRamRangesGC = MMHyperCCToGC(pVM, pNew);
1429 }
1430
1431 pgmUnlock(pVM);
1432 return VINF_SUCCESS;
1433}
1434
1435
1436/**
1437 * Interface that MMR3RomRegister() and MMR3PhysReserve() call to update the
1438 * flags of existing RAM ranges.
1439 *
1440 * @returns VBox status.
1441 * @param pVM The VM handle.
1442 * @param GCPhys GC physical address of the RAM range. (page aligned)
1443 * @param cb Size of the RAM range. (page aligned)
1444 * @param fFlags The OR flags, MM_RAM_* \#defines.
1445 * @param fMask The AND mask for the flags.
1446 */
1447PGMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
1448{
1449 Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));
1450
1451 /*
1452 * Validate input.
1453 * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
1454 */
1455 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
1456 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1457 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1458 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1459 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1460
1461 /*
1462 * Lookup the range.
1463 */
1464 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1465 while (pRam && GCPhys > pRam->GCPhysLast)
1466 pRam = CTXALLSUFF(pRam->pNext);
1467 if ( !pRam
1468 || GCPhys > pRam->GCPhysLast
1469 || GCPhysLast < pRam->GCPhys)
1470 {
1471 AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
1472 return VERR_INVALID_PARAMETER;
1473 }
1474
1475 /*
1476 * Update the requested flags.
1477 */
1478 RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
1479 | fMask;
1480 unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
1481 unsigned iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1482 for ( ; iPage < iPageEnd; iPage++)
1483 pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
1484
1485 return VINF_SUCCESS;
1486}
1487
1488
1489/**
1490 * Sets the Address Gate 20 state.
1491 *
1492 * @param pVM VM handle.
1493 * @param fEnable True if the gate should be enabled.
1494 * False if the gate should be disabled.
1495 */
1496PGMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable)
1497{
1498 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVM->pgm.s.fA20Enabled));
1499 if (pVM->pgm.s.fA20Enabled != (RTUINT)fEnable)
1500 {
1501 pVM->pgm.s.fA20Enabled = fEnable;
1502 pVM->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
1503 REMR3A20Set(pVM, fEnable);
1504 }
1505}
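/*
 * Usage sketch: the keyboard controller / port 92h emulation just forwards
 * the guest's gate bit, e.g. (u8PortVal is a hypothetical port value):
 */
#if 0
    PGMR3PhysSetA20(pVM, !!(u8PortVal & 0x02));
#endif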
1506
1507
1508/**
1509 * Tree enumeration callback for dealing with age rollover.
1510 * It will perform a simple compression of the current age.
1511 */
1512static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
1513{
1514 /* Age compression - ASSUMES iNow == 4. */
1515 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
1516 if (pChunk->iAge >= UINT32_C(0xffffff00))
1517 pChunk->iAge = 3;
1518 else if (pChunk->iAge >= UINT32_C(0xfffff000))
1519 pChunk->iAge = 2;
1520 else if (pChunk->iAge)
1521 pChunk->iAge = 1;
1522 else /* iAge = 0 */
1523 pChunk->iAge = 4;
1524
1525 /* reinsert */
1526 PVM pVM = (PVM)pvUser;
1527 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
1528 pChunk->AgeCore.Key = pChunk->iAge;
1529 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
1530 return 0;
1531}
1532
1533
1534/**
1535 * Tree enumeration callback that updates the chunks that have
1536 * been used since the last
1537 */
1538static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
1539{
1540 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
1541 if (!pChunk->iAge)
1542 {
1543 PVM pVM = (PVM)pvUser;
1544 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
1545 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
1546 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
1547 }
1548
1549 return 0;
1550}
1551
1552
1553/**
1554 * Performs ageing of the ring-3 chunk mappings.
1555 *
1556 * @param pVM The VM handle.
1557 */
1558PGMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
1559{
1560 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
1561 pVM->pgm.s.ChunkR3Map.iNow++;
1562 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
1563 {
1564 pVM->pgm.s.ChunkR3Map.iNow = 4;
1565 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
1566 }
1567 else
1568 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
1569}
1570
1571
1572/**
1573 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
1574 */
1575typedef struct PGMR3PHYSCHUNKUNMAPCB
1576{
1577 PVM pVM; /**< The VM handle. */
1578 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
1579} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
1580
1581
1582/**
1583 * Callback used to find the mapping that's been unused for
1584 * the longest time.
1585 */
1586static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
1587{
1588 do
1589 {
1590 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
1591 if ( pChunk->iAge
1592 && !pChunk->cRefs)
1593 {
1594 /*
1595 * Check that it's not in any of the TLBs.
1596 */
1597 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
1598 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
1599 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
1600 {
1601 pChunk = NULL;
1602 break;
1603 }
1604 if (pChunk)
1605 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
1606 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
1607 {
1608 pChunk = NULL;
1609 break;
1610 }
1611 if (pChunk)
1612 {
1613 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
1614 return 1; /* done */
1615 }
1616 }
1617
1618 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
1619 pNode = pNode->pList;
1620 } while (pNode);
1621 return 0;
1622}
1623
1624
1625/**
1626 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
1627 *
1628 * The candidate will not be part of any TLBs, so no need to flush
1629 * anything afterwards.
1630 *
1631 * @returns Chunk id.
1632 * @param pVM The VM handle.
1633 */
1634static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
1635{
1636 /*
1637 * Do tree ageing first?
1638 */
1639 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
1640 PGMR3PhysChunkAgeing(pVM);
1641
1642 /*
1643 * Enumerate the age tree starting with the left most node.
1644 */
1645 PGMR3PHYSCHUNKUNMAPCB Args;
1646 Args.pVM = pVM;
1647 Args.pChunk = NULL;
1648 if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
1649 return Args.pChunk->Core.Key;
1650 return INT32_MAX;
1651}
1652
1653
1654/**
1655 * Maps the given chunk into the ring-3 mapping cache.
1656 *
1657 * This will call ring-0.
1658 *
1659 * @returns VBox status code.
1660 * @param pVM The VM handle.
1661 * @param idChunk The chunk in question.
1662 * @param ppChunk Where to store the chunk tracking structure.
1663 *
1664 * @remarks Called from within the PGM critical section.
1665 */
1666int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
1667{
1668 int rc;
1669 /*
1670 * Allocate a new tracking structure first.
1671 */
1672#if 0 /* for later when we've got a separate mapping method for ring-0. */
1673 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
1674 AssertReturn(pChunk, VERR_NO_MEMORY);
1675#else
1676 PPGMCHUNKR3MAP pChunk;
1677 rc = MMHyperAlloc(pVM, sizeof(*pChunk), 0, MM_TAG_PGM_CHUNK_MAPPING, (void **)&pChunk);
1678 AssertRCReturn(rc, rc);
1679#endif
1680 pChunk->Core.Key = idChunk;
1681 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
1682 pChunk->iAge = 0;
1683 pChunk->cRefs = 0;
1684 pChunk->cPermRefs = 0;
1685 pChunk->pv = NULL;
1686
1687 /*
1688 * Request the ring-0 part to map the chunk in question and if
1689 * necessary unmap another one to make space in the mapping cache.
1690 */
1691 GMMMAPUNMAPCHUNKREQ Req;
1692 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
1693 Req.Hdr.cbReq = sizeof(Req);
1694 Req.pvR3 = NULL;
1695 Req.idChunkMap = idChunk;
1696 Req.idChunkUnmap = INT32_MAX;
1697 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
1698 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
1699 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
1700 if (VBOX_SUCCESS(rc))
1701 {
1702 /*
1703 * Update the tree.
1704 */
1705 /* insert the new one. */
1706 AssertPtr(Req.pvR3);
1707 pChunk->pv = Req.pvR3;
1708 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
1709 AssertRelease(fRc);
1710 pVM->pgm.s.ChunkR3Map.c++;
1711
1712 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
1713 AssertRelease(fRc);
1714
1715 /* remove the unmapped one. */
1716 if (Req.idChunkUnmap != INT32_MAX)
1717 {
1718 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
1719 AssertRelease(pUnmappedChunk);
1720 pUnmappedChunk->pv = NULL;
1721 pUnmappedChunk->Core.Key = UINT32_MAX;
1722#if 0 /* for later when we've got a separate mapping method for ring-0. */
1723 MMR3HeapFree(pUnmappedChunk);
1724#else
1725 MMHyperFree(pVM, pUnmappedChunk);
1726#endif
1727 pVM->pgm.s.ChunkR3Map.c--;
1728 }
1729 }
1730 else
1731 {
1732 AssertRC(rc);
1733#if 0 /* for later when we've got a separate mapping method for ring-0. */
1734 MMR3HeapFree(pChunk);
1735#else
1736 MMHyperFree(pVM, pChunk);
1737#endif
1738 pChunk = NULL;
1739 }
1740
1741 *ppChunk = pChunk;
1742 return rc;
1743}
1744
1745
1746/**
1747 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
1748 *
1749 * @returns see pgmR3PhysChunkMap.
1750 * @param pVM The VM handle.
1751 * @param idChunk The chunk to map.
1752 */
1753PDMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
1754{
1755 PPGMCHUNKR3MAP pChunk;
1756 return pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
1757}
1758
1759
1760/**
1761 * Invalidates the TLB for the ring-3 mapping cache.
1762 *
1763 * @param pVM The VM handle.
1764 */
1765PGMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
1766{
1767 pgmLock(pVM);
1768 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
1769 {
1770 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
1771 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
1772 }
1773 pgmUnlock(pVM);
1774}
1775
1776
1777/**
1778 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
1779 *
1780 * @returns The following VBox status codes.
1781 * @retval VINF_SUCCESS on success. FF cleared.
1782 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
1783 *
1784 * @param pVM The VM handle.
1785 */
1786PDMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
1787{
1788 pgmLock(pVM);
1789 int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
1790 if (rc == VERR_GMM_SEED_ME)
1791 {
1792 void *pvChunk;
1793 rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
1794 if (VBOX_SUCCESS(rc))
1795 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
1796 if (VBOX_FAILURE(rc))
1797 {
1798 LogRel(("PGM: GMM Seeding failed, rc=%Vrc\n", rc));
1799 rc = VINF_EM_NO_MEMORY;
1800 }
1801 }
1802 pgmUnlock(pVM);
1803 Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY);
1804 return rc;
1805}
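/*
 * Usage sketch: the execution loop is expected to react to the force action
 * flag roughly like this (simplified, assuming the old VM_FF_ISSET macro):
 */
#if 0
    if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM); /* clears the FF on success */
        if (rc == VINF_EM_NO_MEMORY)
            return VINF_EM_NO_MEMORY;
    }
#endif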
1806