VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp @ 7692

Last change on this file since 7692 was 7635, checked in by vboxsync, 17 years ago

The new MMIO2 code.
WARNING! This changes the PCI mapping protocol for MMIO2 so that it works the same way as I/O ports and normal MMIO memory. External users of the interface will have to update their mapping routines.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 72.6 KB
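As the warning above implies, an MMIO2 region is now registered once up front and then mapped and unmapped on demand, the same way I/O port and ordinary MMIO ranges are handled. A rough sketch of the updated calling sequence for an external user of the interface (the region number, size variable and description below are illustrative, not taken from the file):

    int rc;
    void *pvMMIO2;
    /* construction: reserve and allocate the backing RAM once */
    rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0 /*iRegion*/, cbRegion, &pvMMIO2, "MyDev MMIO2");
    /* whenever the guest or the BIOS moves the PCI BAR */
    rc = PGMR3PhysMMIO2Unmap(pVM, pDevIns, 0, GCPhysOld);   /* only if currently mapped */
    rc = PGMR3PhysMMIO2Map(pVM, pDevIns, 0, GCPhysNew);
    /* destruction */
    rc = PGMR3PhysMMIO2Deregister(pVM, pDevIns, UINT32_MAX /*all regions*/);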
1/* $Id: PGMPhys.cpp 7635 2008-03-28 17:15:38Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/pgm.h>
24#include <VBox/cpum.h>
25#include <VBox/iom.h>
26#include <VBox/sup.h>
27#include <VBox/mm.h>
28#include <VBox/stam.h>
29#include <VBox/rem.h>
30#include <VBox/csam.h>
31#include "PGMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/dbg.h>
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/assert.h>
37#include <iprt/alloc.h>
38#include <iprt/asm.h>
39#include <VBox/log.h>
40#include <iprt/thread.h>
41#include <iprt/string.h>
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47/*static - shut up warning */
48DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
49
50
51
52/*
53 * PGMR3PhysReadByte/Word/Dword
54 * PGMR3PhysWriteByte/Word/Dword
55 */
56/** @todo rename and add U64. */
57
58#define PGMPHYSFN_READNAME PGMR3PhysReadByte
59#define PGMPHYSFN_WRITENAME PGMR3PhysWriteByte
60#define PGMPHYS_DATASIZE 1
61#define PGMPHYS_DATATYPE uint8_t
62#include "PGMPhys.h"
63
64#define PGMPHYSFN_READNAME PGMR3PhysReadWord
65#define PGMPHYSFN_WRITENAME PGMR3PhysWriteWord
66#define PGMPHYS_DATASIZE 2
67#define PGMPHYS_DATATYPE uint16_t
68#include "PGMPhys.h"
69
70#define PGMPHYSFN_READNAME PGMR3PhysReadDword
71#define PGMPHYSFN_WRITENAME PGMR3PhysWriteDword
72#define PGMPHYS_DATASIZE 4
73#define PGMPHYS_DATATYPE uint32_t
74#include "PGMPhys.h"
75
76
77
78/**
79 * Links a new RAM range into the list.
80 *
81 * @param pVM Pointer to the shared VM structure.
82 * @param pNew Pointer to the new list entry.
83 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
84 */
85static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
86{
87 pgmLock(pVM);
88
89 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
90 pNew->pNextR3 = pRam;
91 pNew->pNextR0 = pRam ? MMHyperCCToR0(pVM, pRam) : NIL_RTR0PTR;
92 pNew->pNextGC = pRam ? MMHyperCCToGC(pVM, pRam) : NIL_RTGCPTR;
93
94 if (pPrev)
95 {
96 pPrev->pNextR3 = pNew;
97 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
98 pPrev->pNextGC = MMHyperCCToGC(pVM, pNew);
99 }
100 else
101 {
102 pVM->pgm.s.pRamRangesR3 = pNew;
103 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
104 pVM->pgm.s.pRamRangesGC = MMHyperCCToGC(pVM, pNew);
105 }
106
107 pgmUnlock(pVM);
108}
109
110
111/**
112 * Unlink an existing RAM range from the list.
113 *
114 * @param pVM Pointer to the shared VM structure.
115 * @param pRam Pointer to the RAM range to unlink.
116 * @param pPrev Pointer to the previous list entry. NULL if pRam is the list head.
117 */
118static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
119{
120 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
121
122 pgmLock(pVM);
123
124 PPGMRAMRANGE pNext = pRam->pNextR3;
125 if (pPrev)
126 {
127 pPrev->pNextR3 = pNext;
128 pPrev->pNextR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
129 pPrev->pNextGC = pNext ? MMHyperCCToGC(pVM, pNext) : NIL_RTGCPTR;
130 }
131 else
132 {
133 Assert(pVM->pgm.s.pRamRangesR3 == pRam);
134 pVM->pgm.s.pRamRangesR3 = pNext;
135 pVM->pgm.s.pRamRangesR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
136 pVM->pgm.s.pRamRangesGC = pNext ? MMHyperCCToGC(pVM, pNext) : NIL_RTGCPTR;
137 }
138
139 pgmUnlock(pVM);
140}
141
142
143/**
144 * Unlink an existing RAM range from the list.
145 *
146 * @param pVM Pointer to the shared VM structure.
147 * @param pRam Pointer to the RAM range to unlink.
148 */
149static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
150{
151 /* find prev. */
152 PPGMRAMRANGE pPrev = NULL;
153 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
154 while (pCur != pRam)
155 {
156 pPrev = pCur;
157 pCur = pCur->pNextR3;
158 }
159 AssertFatal(pCur);
160
161 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
162}
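/*
 * A minimal sketch (for illustration only) of how the RAM range list maintained
 * by the helpers above is walked from ring-3. The list is kept sorted by GCPhys,
 * and each node carries separate R3/R0/GC next pointers so the same structure
 * can be traversed from any context.
 */
static PPGMRAMRANGE pgmR3PhysExampleFindRamRange(PVM pVM, RTGCPHYS GCPhys)
{
    for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
        if (GCPhys >= pRam->GCPhys && GCPhys <= pRam->GCPhysLast)
            return pRam;    /* the range containing GCPhys */
    return NULL;            /* no range covers GCPhys */
}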
163
164
165
166/**
167 * Sets up a RAM range.
168 *
169 * This will check for conflicting registrations, make a resource
170 * reservation for the memory (with GMM), and set up the per-page
171 * tracking structures (PGMPAGE).
172 *
173 * @returns VBox status code.
174 * @param pVM Pointer to the shared VM structure.
175 * @param GCPhys The physical address of the RAM.
176 * @param cb The size of the RAM.
177 * @param pszDesc The description - not copied, so, don't free or change it.
178 */
179PGMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
180{
181 /*
182 * Validate input.
183 */
184 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
185 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
186 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
187 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
188 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
189 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
190 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
191 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
192
193 /*
194 * Find range location and check for conflicts.
195 * (We don't lock here because the locking by EMT is only required on update.)
196 */
197 PPGMRAMRANGE pPrev = NULL;
198 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
199 while (pRam && GCPhysLast >= pRam->GCPhys)
200 {
201 if ( GCPhys <= pRam->GCPhysLast
202 && GCPhysLast >= pRam->GCPhys)
203 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
204 GCPhys, GCPhysLast, pszDesc,
205 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
206 VERR_PGM_RAM_CONFLICT);
207
208 /* next */
209 pPrev = pRam;
210 pRam = pRam->pNextR3;
211 }
212
213 /*
214 * Register it with GMM (the API bitches).
215 */
216 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
217 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
218 if (RT_FAILURE(rc))
219 return rc;
220
221 /*
222 * Allocate RAM range.
223 */
224 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
225 PPGMRAMRANGE pNew;
226 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
227 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
228
229 /*
230 * Initialize the range.
231 */
232 pNew->GCPhys = GCPhys;
233 pNew->GCPhysLast = GCPhysLast;
234 pNew->pszDesc = pszDesc;
235 pNew->cb = cb;
236 pNew->fFlags = 0;
237 pNew->pvHC = NULL;
238
239 pNew->pavHCChunkHC = NULL;
240 pNew->pavHCChunkGC = 0;
241
242#ifndef VBOX_WITH_NEW_PHYS_CODE
243 /* Allocate memory for chunk to HC ptr lookup array. */
244 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
245 AssertRCReturn(rc, rc);
246 pNew->pavHCChunkGC = MMHyperCCToGC(pVM, pNew->pavHCChunkHC);
247 pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;
248
249#endif
250 RTGCPHYS iPage = cPages;
251 while (iPage-- > 0)
252 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
253
254 /*
255 * Insert the new RAM range.
256 */
257 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
258
259 /*
260 * Notify REM.
261 */
262#ifdef VBOX_WITH_NEW_PHYS_CODE
263 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, 0);
264#else
265 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, MM_RAM_FLAGS_DYNAMIC_ALLOC);
266#endif
267
268 return VINF_SUCCESS;
269}
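/*
 * A minimal usage sketch (the description string is illustrative): how VM
 * construction code running on the EMT would register the guest's base memory
 * with the function above.
 */
static int pgmR3PhysExampleRegisterBaseRam(PVM pVM, RTGCPHYS cbRam)
{
    /* Base RAM conventionally starts at guest physical address 0. */
    return PGMR3PhysRegisterRam(pVM, 0 /*GCPhys*/, cbRam, "Base RAM");
}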
270
271
272/**
273 * This is the interface IOM is using to register an MMIO region.
274 *
275 * It will check for conflicts and ensure that a RAM range structure
276 * is present before calling the PGMR3HandlerPhysicalRegister API to
277 * register the callbacks.
278 *
279 */
280PDMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
281{
282 return -1;
283}
284
285
286/**
287 * This is the interface IOM is using to register an MMIO region.
288 *
289 * It will validate the MMIO region, call PGMHandlerPhysicalDeregister,
290 * and free the RAM range if one was allocated specially for this MMIO
291 * region.
292 */
293PDMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
294{
295 return -1;
296}
297
298
299/**
300 * Locate a MMIO2 range.
301 *
302 * @returns Pointer to the MMIO2 range.
303 * @param pVM Pointer to the shared VM structure.
304 * @param pDevIns The device instance owning the region.
305 * @param iRegion The region.
306 */
307DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
308{
309 /*
310 * Search the list.
311 */
312 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
313 if (pCur->pDevInsR3 == pDevIns && pCur->iRegion == iRegion) /* match both the owner and the region */
314 return pCur;
315 return NULL;
316}
317
318
319/**
320 * Allocate and register a MMIO2 region.
321 *
322 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's
323 * RAM associated with a device. It is also non-shared memory with a
324 * permanent ring-3 mapping and page backing (presently).
325 *
326 * A MMIO2 range may overlap with base memory if a lot of RAM
327 * is configured for the VM, in which case we'll drop the base
328 * memory pages. Presently we will make no attempt to preserve
329 * anything that happens to be present in the base memory that
330 * is replaced; this is of course incorrect, but it's too much
331 * effort.
332 *
333 * @returns VBox status code.
334 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the memory.
335 * @retval VERR_ALREADY_EXISTS if the region already exists.
336 *
337 * @param pVM Pointer to the shared VM structure.
338 * @param pDevIns The device instance owning the region.
339 * @param iRegion The region number. If the MMIO2 memory is a PCI I/O region
340 * this number has to be the number of that region. Otherwise
341 * it can be any number save UINT8_MAX.
342 * @param cb The size of the region. Must be page aligned.
343 * @param ppv Where to store the pointer to the ring-3 mapping of the memory.
344 * @param pszDesc The description.
345 */
346PDMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, void **ppv, const char *pszDesc)
347{
348 /*
349 * Validate input.
350 */
351 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
352 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
353 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
354 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
355 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
356 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
357 AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
358 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
359 AssertReturn(cb, VERR_INVALID_PARAMETER);
360
361 const uint32_t cPages = cb >> PAGE_SHIFT;
362 AssertLogRelReturn((RTGCPHYS)cPages << PAGE_SHIFT == cb, VERR_INVALID_PARAMETER);
363 AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);
364
365 /*
366 * Try to reserve and allocate the backing memory first as this is what is
367 * most likely to fail.
368 */
369 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
370 if (RT_FAILURE(rc))
371 return rc;
372
373 void *pvPages;
374 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
375 if (RT_SUCCESS(rc)) /* rc is still success here; also guard against RTMemTmpAlloc failing */
376 rc = paPages ? SUPPageAllocLockedEx(cPages, &pvPages, paPages) : VERR_NO_TMP_MEMORY;
377 if (RT_SUCCESS(rc))
378 {
379 /*
380 * Create the MMIO2 range record for it.
381 */
382 const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]);
383 PPGMMMIO2RANGE pNew;
384 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
385 AssertLogRelMsgRC(rc, ("cbRamRange=%zu\n", cbRange));
386 if (RT_SUCCESS(rc))
387 {
388 pNew->pDevInsR3 = pDevIns;
389 pNew->pvR3 = pvPages;
390 //pNew->pNext = NULL;
391 //pNew->fMapped = false;
392 //pNew->fOverlapping = false;
393 pNew->iRegion = iRegion;
394 pNew->RamRange.GCPhys = NIL_RTGCPHYS;
395 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
396 pNew->RamRange.pszDesc = pszDesc;
397 pNew->RamRange.cb = cb;
398 //pNew->RamRange.fFlags = 0;
399
400 pNew->RamRange.pvHC = pvPages; ///@todo remove this
401 pNew->RamRange.pavHCChunkHC = NULL; ///@todo remove this
402 pNew->RamRange.pavHCChunkGC = 0; ///@todo remove this
403
404 uint32_t iPage = cPages;
405 while (iPage-- > 0)
406 {
407 PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage],
408 paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
409 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
410 }
411
412 /*
413 * Link it into the list.
414 * Since there is no particular order, just push it.
415 */
416 pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
417 pVM->pgm.s.pMmio2RangesR3 = pNew;
418
419 *ppv = pvPages;
420 RTMemTmpFree(paPages);
421 return VINF_SUCCESS;
422 }
423
424 SUPPageFreeLocked(pvPages, cPages);
425 }
426 RTMemTmpFree(paPages);
427 MMR3AdjustFixedReservation(pVM, -cPages, pszDesc);
428 return rc;
429}
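/*
 * A usage sketch (the size, region number and description are illustrative): a
 * device constructor allocating a 4 MB MMIO2 region as region 0. The returned
 * ring-3 mapping is valid immediately, even before the region is mapped into
 * the guest physical address space.
 */
static int pgmR3PhysExampleCreateMMIO2(PVM pVM, PPDMDEVINS pDevIns, void **ppvMMIO2)
{
    int rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0 /*iRegion*/, 4 * _1M, ppvMMIO2, "Example MMIO2");
    if (RT_SUCCESS(rc))
        memset(*ppvMMIO2, 0, 4 * _1M);  /* the backing pages are allocated and locked already */
    return rc;
}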
430
431
432/**
433 * Deregisters and frees a MMIO2 region.
434 *
435 * Any physical (and virtual) access handlers registered for the region must
436 * be deregistered before calling this function.
437 *
438 * @returns VBox status code.
439 * @param pVM Pointer to the shared VM structure.
440 * @param pDevIns The device instance owning the region.
441 * @param iRegion The region. If it's UINT32_MAX it'll be a wildcard match.
442 */
443PDMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
444{
445 /*
446 * Validate input.
447 */
448 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
449 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
450 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
451
452 int rc = VINF_SUCCESS;
453 unsigned cFound = 0;
454 PPGMMMIO2RANGE pPrev = NULL;
455 PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;
456 while (pCur)
457 {
458 if ( pCur->pDevInsR3 == pDevIns
459 && ( iRegion == UINT32_MAX
460 || pCur->iRegion == iRegion))
461 {
462 cFound++;
463
464 /*
465 * Unmap it if it's mapped.
466 */
467 if (pCur->fMapped)
468 {
469 int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
470 AssertRC(rc2);
471 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
472 rc = rc2;
473 }
474
475 /*
476 * Unlink it
477 */
478 PPGMMMIO2RANGE pNext = pCur->pNextR3;
479 if (pPrev)
480 pPrev->pNextR3 = pNext;
481 else
482 pVM->pgm.s.pMmio2RangesR3 = pNext;
483 pCur->pNextR3 = NULL;
484
485 /*
486 * Free the memory.
487 */
488 int rc2 = SUPPageFreeLocked(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);
489 AssertRC(rc2);
490 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
491 rc = rc2;
492
493 rc2 = MMR3AdjustFixedReservation(pVM, -(pCur->RamRange.cb >> PAGE_SHIFT), pCur->RamRange.pszDesc);
494 AssertRC(rc2);
495 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
496 rc = rc2;
497
498 /* we're leaking hyper memory here if done at runtime. */
499 Assert( VMR3GetState(pVM) == VMSTATE_OFF
500 || VMR3GetState(pVM) == VMSTATE_DESTROYING
501 || VMR3GetState(pVM) == VMSTATE_TERMINATED);
502 /*rc = MMHyperFree(pVM, pCur);
503 AssertRCReturn(rc, rc); - not safe, see the alloc call. */
504
505 /* next */
506 pCur = pNext;
507 }
508 else
509 {
510 pPrev = pCur;
511 pCur = pCur->pNextR3;
512 }
513 }
514
515 return !cFound && iRegion != UINT32_MAX ? VERR_NOT_FOUND : rc;
516}
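/*
 * Sketch: tearing down every MMIO2 region a device owns at destruction time,
 * using the UINT32_MAX wildcard described above.
 */
static int pgmR3PhysExampleDestroyMMIO2(PVM pVM, PPDMDEVINS pDevIns)
{
    return PGMR3PhysMMIO2Deregister(pVM, pDevIns, UINT32_MAX /*all regions*/);
}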
517
518
519/**
520 * Maps a MMIO2 region.
521 *
522 * This is done when a guest / the bios / state loading changes the
523 * PCI config. The replacing of base memory has the same restrictions
524 * as during registration, of course.
525 *
526 * @returns VBox status code.
527 *
528 * @param pVM Pointer to the shared VM structure.
529 * @param pDevIns The device instance owning the region.
530 */
531PDMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
532{
533 /*
534 * Validate input
535 */
536 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
537 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
538 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
539 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
540 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
541 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
542
543 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
544 AssertReturn(pCur, VERR_NOT_FOUND);
545 AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER);
546 Assert(pCur->RamRange.GCPhys == NIL_RTGCPHYS);
547 Assert(pCur->RamRange.GCPhysLast == NIL_RTGCPHYS);
548
549 const RTGCPHYS GCPhysLast = GCPhys + pCur->RamRange.cb - 1;
550 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
551
552 /*
553 * Find our location in the ram range list, checking for
554 * restrictions we don't bother implementing yet (partial overlaps).
555 */
556 bool fRamExists = false;
557 PPGMRAMRANGE pRamPrev = NULL;
558 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
559 while (pRam && GCPhysLast >= pRam->GCPhys)
560 {
561 if ( GCPhys <= pRam->GCPhysLast
562 && GCPhysLast >= pRam->GCPhys)
563 {
564 /* completely within? */
565 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
566 && GCPhysLast <= pRam->GCPhysLast,
567 ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
568 GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
569 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
570 VERR_PGM_RAM_CONFLICT);
571 fRamExists = true;
572 break;
573 }
574
575 /* next */
576 pRamPrev = pRam;
577 pRam = pRam->pNextR3;
578 }
579 if (fRamExists)
580 {
581 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
582 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
583 while (cPagesLeft-- > 0)
584 {
585 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
586 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
587 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
588 VERR_PGM_RAM_CONFLICT);
589 pPage++;
590 }
591 }
592 Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n",
593 GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));
594
595 /*
596 * Make the changes.
597 */
598 pgmLock(pVM);
599
600 pCur->RamRange.GCPhys = GCPhys;
601 pCur->RamRange.GCPhysLast = GCPhysLast;
602 pCur->fMapped = true;
603 pCur->fOverlapping = fRamExists;
604
605 if (fRamExists)
606 {
607 /* replace the pages, freeing all present RAM pages. */
608 PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
609 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
610 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
611 while (cPagesLeft-- > 0)
612 {
613 pgmPhysFreePage(pVM, pPageDst, GCPhys);
614
615 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
616 PGM_PAGE_SET_HCPHYS(pPageDst, HCPhys);
617 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_MMIO2);
618 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ALLOCATED);
619
620 GCPhys += PAGE_SIZE;
621 pPageSrc++;
622 pPageDst++;
623 }
624 }
625 else
626 {
627 /* link in the ram range */
628 pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev);
629 REMR3NotifyPhysRamRegister(pVM, GCPhys, pCur->RamRange.cb, 0);
630 }
631
632 pgmUnlock(pVM);
633
634 return VINF_SUCCESS;
635}
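/*
 * Sketch of the map/unmap sequence a device performs when the guest or the
 * BIOS reprograms the corresponding PCI BAR. Tracking the previous base
 * address (GCPhysOld) is the device's own bookkeeping and is assumed here.
 */
static int pgmR3PhysExampleMoveMMIO2(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion,
                                     RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew)
{
    int rc = VINF_SUCCESS;
    if (GCPhysOld != NIL_RTGCPHYS)                      /* currently mapped? */
        rc = PGMR3PhysMMIO2Unmap(pVM, pDevIns, iRegion, GCPhysOld);
    if (RT_SUCCESS(rc) && GCPhysNew != NIL_RTGCPHYS)    /* new location given? */
        rc = PGMR3PhysMMIO2Map(pVM, pDevIns, iRegion, GCPhysNew);
    return rc;
}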
636
637
638/**
639 * Unmaps a MMIO2 region.
640 *
641 * This is done when a guest / the bios / state loading changes the
642 * PCI config. The replacing of base memory has the same restrictions
643 * as during registration, of course.
644 */
645PDMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
646{
647 /*
648 * Validate input
649 */
650 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
651 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
652 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
653 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
654 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
655 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
656
657 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
658 AssertReturn(pCur, VERR_NOT_FOUND);
659 AssertReturn(pCur->fMapped, VERR_WRONG_ORDER);
660 AssertReturn(pCur->RamRange.GCPhys == GCPhys, VERR_INVALID_PARAMETER);
661 Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS);
662
663 Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n",
664 pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc));
665
666 /*
667 * Unmap it.
668 */
669 pgmLock(pVM);
670
671 if (pCur->fOverlapping)
672 {
673 /* Restore the RAM pages we've replaced. */
674 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
675 while (pRam && pRam->GCPhysLast < pCur->RamRange.GCPhys) /* the list is sorted by GCPhys; find the range we overlap */
676 pRam = pRam->pNextR3;
677
678 RTHCPHYS const HCPhysZeroPg = pVM->pgm.s.HCPhysZeroPg;
679 Assert(HCPhysZeroPg != 0 && HCPhysZeroPg != NIL_RTHCPHYS);
680 PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
681 uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
682 while (cPagesLeft-- > 0)
683 {
684 PGM_PAGE_SET_HCPHYS(pPageDst, pVM->pgm.s.HCPhysZeroPg);
685 PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_RAM);
686 PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ZERO);
687
688 pPageDst++;
689 }
690 }
691 else
692 {
693 REMR3NotifyPhysReserve(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb);
694 pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange);
695 }
696
697 pCur->RamRange.GCPhys = NIL_RTGCPHYS;
698 pCur->RamRange.GCPhysLast = NIL_RTGCPHYS;
699 pCur->fOverlapping = false;
700 pCur->fMapped = false;
701
702 pgmUnlock(pVM);
703
704 return VINF_SUCCESS;
705}
706
707
708/**
709 * Checks if the given address is an MMIO2 base address or not.
710 *
711 * @returns true/false accordingly.
712 * @param pVM Pointer to the shared VM structure.
713 * @param pDevIns The owner of the memory, optional.
714 * @param GCPhys The address to check.
715 */
716PDMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
717{
718 /*
719 * Validate input
720 */
721 VM_ASSERT_EMT_RETURN(pVM, false); /* the function returns bool, so don't leak VERR status codes */
722 AssertPtrReturn(pDevIns, false);
723 AssertReturn(GCPhys != NIL_RTGCPHYS, false);
724 AssertReturn(GCPhys != 0, false);
725 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);
726
727 /*
728 * Search the list.
729 */
730 for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
731 if (pCur->RamRange.GCPhys == GCPhys)
732 {
733 Assert(pCur->fMapped);
734 return true;
735 }
736 return false;
737}
738
739
740/**
741 * Gets the HC physical address of a page in the MMIO2 region.
742 *
743 * This API is intended for MMHyper and shouldn't be called
744 * by anyone else...
745 *
746 * @returns VBox status code.
747 * @param pVM Pointer to the shared VM structure.
748 * @param pDevIns The owner of the memory, optional.
749 * @param iRegion The region.
750 * @param off The page expressed as an offset into the MMIO2 region.
751 * @param pHCPhys Where to store the result.
752 */
753PDMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
754{
755 /*
756 * Validate input
757 */
758 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
759 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
760 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
761
762 PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
763 AssertReturn(pCur, VERR_NOT_FOUND);
764 AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);
765
766 PCPGMPAGE pPage = &pCur->RamRange.aPages[off >> PAGE_SHIFT];
767 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
768 return VINF_SUCCESS;
769}
770
771
772/**
773 * Registers a ROM image.
774 *
775 * Shadowed ROM images require double the amount of backing memory, so
776 * don't use that unless you have to. Shadowing of ROM images is a process
777 * where we can select where the reads go and where the writes go. On real
778 * hardware the chipset provides means to configure this. We provide
779 * PGMR3PhysProtectROM() for this purpose.
780 *
781 * A read-only copy of the ROM image will always be kept around while we
782 * will allocate RAM pages for the changes on demand (unless all memory
783 * is configured to be preallocated).
784 *
785 * @returns VBox status.
786 * @param pVM VM Handle.
787 * @param pDevIns The device instance owning the ROM.
788 * @param GCPhys First physical address in the range.
789 * Must be page aligned!
790 * @param cb The size of the range (in bytes).
791 * Must be page aligned!
792 * @param pvBinary Pointer to the binary data backing the ROM image.
793 * This must be exactly \a cb in size.
794 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAG_SHADOWED
795 * and/or PGMPHYS_ROM_FLAG_PERMANENT_BINARY.
796 * @param pszDesc Pointer to description string. This must not be freed.
797 *
798 * @remark There is no way to remove the ROM yet, either automatically on device
799 * cleanup or manually from the device. This isn't difficult in any way, it's
800 * just not something we expect to be necessary for a while.
801 */
802PGMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
803 const void *pvBinary, uint32_t fFlags, const char *pszDesc)
804{
805 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
806 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, fFlags, pszDesc));
807
808 /*
809 * Validate input.
810 */
811 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
812 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
813 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
814 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
815 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
816 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
817 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
818 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAG_SHADOWED | PGMPHYS_ROM_FLAG_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
819 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
820
821 const uint32_t cPages = cb >> PAGE_SHIFT;
822
823 /*
824 * Find the ROM location in the ROM list first.
825 */
826 PPGMROMRANGE pRomPrev = NULL;
827 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
828 while (pRom && GCPhysLast >= pRom->GCPhys)
829 {
830 if ( GCPhys <= pRom->GCPhysLast
831 && GCPhysLast >= pRom->GCPhys)
832 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
833 GCPhys, GCPhysLast, pszDesc,
834 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
835 VERR_PGM_RAM_CONFLICT);
836 /* next */
837 pRomPrev = pRom;
838 pRom = pRom->pNextR3;
839 }
840
841 /*
842 * Find the RAM location and check for conflicts.
843 *
844 * Conflict detection is a bit different than for RAM
845 * registration since a ROM can be located within a RAM
846 * range. So, what we have to check for is other memory
847 * types (other than RAM that is) and that we don't span
848 * more than one RAM range (lazy).
849 */
850 bool fRamExists = false;
851 PPGMRAMRANGE pRamPrev = NULL;
852 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
853 while (pRam && GCPhysLast >= pRam->GCPhys)
854 {
855 if ( GCPhys <= pRam->GCPhysLast
856 && GCPhysLast >= pRam->GCPhys)
857 {
858 /* completely within? */
859 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
860 && GCPhysLast <= pRam->GCPhysLast,
861 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
862 GCPhys, GCPhysLast, pszDesc,
863 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
864 VERR_PGM_RAM_CONFLICT);
865 fRamExists = true;
866 break;
867 }
868
869 /* next */
870 pRamPrev = pRam;
871 pRam = pRam->pNextR3;
872 }
873 if (fRamExists)
874 {
875 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
876 uint32_t cPagesLeft = cPages;
877 while (cPagesLeft-- > 0)
878 {
879 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
880 ("%RGp isn't a RAM page (%d) - registering %RGp-%RGp (%s).\n",
881 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pszDesc),
882 VERR_PGM_RAM_CONFLICT);
883 Assert(PGM_PAGE_IS_ZERO(pPage));
884 pPage++;
885 }
886 }
887
888 /*
889 * Update the base memory reservation if necessary.
890 */
891 uint32_t cExtraBaseCost = fRamExists ? cPages : 0;
892 if (fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
893 cExtraBaseCost += cPages;
894 if (cExtraBaseCost)
895 {
896 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
897 if (RT_FAILURE(rc))
898 return rc;
899 }
900
901 /*
902 * Allocate memory for the virgin copy of the RAM.
903 */
904 PGMMALLOCATEPAGESREQ pReq;
905 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
906 AssertRCReturn(rc, rc);
907
908 for (uint32_t iPage = 0; iPage < cPages; iPage++)
909 {
910 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
911 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
912 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
913 }
914
915 pgmLock(pVM);
916 rc = GMMR3AllocatePagesPerform(pVM, pReq);
917 pgmUnlock(pVM);
918 if (RT_FAILURE(rc))
919 {
920 GMMR3AllocatePagesCleanup(pReq);
921 return rc;
922 }
923
924 /*
925 * Allocate the new ROM range and RAM range (if necessary).
926 */
927 PPGMROMRANGE pRomNew;
928 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
929 if (RT_SUCCESS(rc))
930 {
931 PPGMRAMRANGE pRamNew = NULL;
932 if (!fRamExists)
933 rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
934 if (RT_SUCCESS(rc))
935 {
936 pgmLock(pVM);
937
938 /*
939 * Initialize and insert the RAM range (if required).
940 */
941 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
942 if (!fRamExists)
943 {
944 pRamNew->GCPhys = GCPhys;
945 pRamNew->GCPhysLast = GCPhysLast;
946 pRamNew->pszDesc = pszDesc;
947 pRamNew->cb = cb;
948 pRamNew->fFlags = 0;
949 pRamNew->pvHC = NULL;
950
951 PPGMPAGE pPage = &pRamNew->aPages[0];
952 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
953 {
954 PGM_PAGE_INIT(pPage,
955 pReq->aPages[iPage].HCPhysGCPhys,
956 pReq->aPages[iPage].idPage,
957 PGMPAGETYPE_ROM,
958 PGM_PAGE_STATE_ALLOCATED);
959
960 pRomPage->Virgin = *pPage;
961 }
962
963 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
964 }
965 else
966 {
967 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
968 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
969 {
970 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_ROM);
971 PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
972 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
973 PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);
974
975 pRomPage->Virgin = *pPage;
976 }
977
978 pRamNew = pRam;
979 }
980 pgmUnlock(pVM);
981
982
983 /*
984 * Register the write access handler for the range (PGMROMPROT_READ_ROM_WRITE_IGNORE).
985 */
986 rc = PGMR3HandlerPhysicalRegister(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhys, GCPhysLast,
987#if 0 /** @todo we actually need a ring-3 write handler here for shadowed ROMs, so hack REM! */
988 pgmR3PhysRomWriteHandler, pRomNew,
989#else
990 NULL, NULL,
991#endif
992 NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
993 NULL, "pgmPhysRomWriteHandler", MMHyperCCToGC(pVM, pRomNew), pszDesc);
994 if (RT_SUCCESS(rc))
995 {
996 pgmLock(pVM);
997
998 /*
999 * Copy the image over to the virgin pages.
1000 * This must be done after linking in the RAM range.
1001 */
1002 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
1003 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
1004 {
1005 void *pvDstPage;
1006 PPGMPAGEMAP pMapIgnored;
1007 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pMapIgnored, &pvDstPage);
1008 if (RT_FAILURE(rc))
1009 {
1010 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
1011 break;
1012 }
1013 memcpy(pvDstPage, (const uint8_t *)pvBinary + (iPage << PAGE_SHIFT), PAGE_SIZE);
1014 }
1015 if (RT_SUCCESS(rc))
1016 {
1017 /*
1018 * Initialize the ROM range.
1019 * Note that the Virgin member of the pages has already been initialized above.
1020 */
1021 pRomNew->GCPhys = GCPhys;
1022 pRomNew->GCPhysLast = GCPhysLast;
1023 pRomNew->cb = cb;
1024 pRomNew->fFlags = fFlags;
1025 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAG_PERMANENT_BINARY ? pvBinary : NULL;
1026 pRomNew->pszDesc = pszDesc;
1027
1028 for (unsigned iPage = 0; iPage < cPages; iPage++)
1029 {
1030 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
1031 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
1032 PGM_PAGE_INIT_ZERO_REAL(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
1033 }
1034
1035 /*
1036 * Insert the ROM range, tell REM and return successfully.
1037 */
1038 pRomNew->pNextR3 = pRom;
1039 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
1040 pRomNew->pNextGC = pRom ? MMHyperCCToGC(pVM, pRom) : NIL_RTGCPTR;
1041
1042 if (pRomPrev)
1043 {
1044 pRomPrev->pNextR3 = pRomNew;
1045 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
1046 pRomPrev->pNextGC = MMHyperCCToGC(pVM, pRomNew);
1047 }
1048 else
1049 {
1050 pVM->pgm.s.pRomRangesR3 = pRomNew;
1051 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
1052 pVM->pgm.s.pRomRangesGC = MMHyperCCToGC(pVM, pRomNew);
1053 }
1054
1055 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false); /** @todo fix shadowing and REM. */
1056
1057 GMMR3AllocatePagesCleanup(pReq);
1058 pgmUnlock(pVM);
1059 return VINF_SUCCESS;
1060 }
1061
1062 /* bail out */
1063
1064 pgmUnlock(pVM);
1065 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
1066 AssertRC(rc2);
1067 pgmLock(pVM);
1068 }
1069
1070 if (!fRamExists) pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev); /* mustn't unlink a pre-existing RAM range */
1071 if (!fRamExists && pRamNew)
1072 MMHyperFree(pVM, pRamNew);
1073 }
1074 MMHyperFree(pVM, pRomNew);
1075 }
1076
1077 /** @todo Purge the mapping cache or something... */
1078 GMMR3FreeAllocatedPages(pVM, pReq);
1079 GMMR3AllocatePagesCleanup(pReq);
1080 pgmUnlock(pVM);
1081 return rc;
1082}
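/*
 * A usage sketch with illustrative address, size and flags: registering a
 * 128 KB shadowed system BIOS image just below 4 GB, keeping a permanent
 * reference to the binary so the virgin pages can be verified on reset.
 */
static int pgmR3PhysExampleRegisterBios(PVM pVM, PPDMDEVINS pDevIns, const void *pvBios)
{
    return PGMR3PhysRomRegister(pVM, pDevIns, UINT32_C(0xfffe0000), 128 * _1K, pvBios,
                                PGMPHYS_ROM_FLAG_SHADOWED | PGMPHYS_ROM_FLAG_PERMANENT_BINARY,
                                "Example System BIOS");
}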
1083
1084
1085/**
1086 * \#PF Handler callback for ROM write accesses.
1087 *
1088 * @returns VINF_SUCCESS if the handler has carried out the operation.
1089 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1090 * @param pVM VM Handle.
1091 * @param GCPhys The physical address the guest is writing to.
1092 * @param pvPhys The HC mapping of that address.
1093 * @param pvBuf What the guest is reading/writing.
1094 * @param cbBuf How much it's reading/writing.
1095 * @param enmAccessType The access type.
1096 * @param pvUser User argument.
1097 */
1098/*static - shut up warning */
1099 DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1100{
1101 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
1102 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT; /* page index within the ROM range */
1103 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
1104 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
1105 switch (pRomPage->enmProt)
1106 {
1107 /*
1108 * Ignore.
1109 */
1110 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
1111 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
1112 return VINF_SUCCESS;
1113
1114 /*
1115 * Write to the ram page.
1116 */
1117 case PGMROMPROT_READ_ROM_WRITE_RAM:
1118 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
1119 {
1120 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
1121 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
1122
1123 /*
1124 * Take the lock, do lazy allocation, map the page and copy the data.
1125 *
1126 * Note that we have to bypass the mapping TLB since it works on
1127 * guest physical addresses and entering the shadow page would
1128 * kind of screw things up...
1129 */
1130 int rc = pgmLock(pVM);
1131 AssertRC(rc);
1132
1133 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pRomPage->Shadow) != PGM_PAGE_STATE_ALLOCATED))
1134 {
1135 rc = pgmPhysPageMakeWritable(pVM, &pRomPage->Shadow, GCPhys);
1136 if (RT_FAILURE(rc))
1137 {
1138 pgmUnlock(pVM);
1139 return rc;
1140 }
1141 }
1142
1143 void *pvDstPage;
1144 PPGMPAGEMAP pMapIgnored;
1145 rc = pgmPhysPageMap(pVM, &pRomPage->Shadow, GCPhys & X86_PTE_PG_MASK, &pMapIgnored, &pvDstPage);
1146 if (RT_SUCCESS(rc))
1147 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
1148
1149 pgmUnlock(pVM);
1150 return rc;
1151 }
1152
1153 default:
1154 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
1155 pRom->aPages[iPage].enmProt, iPage, GCPhys),
1156 VERR_INTERNAL_ERROR);
1157 }
1158}
1159
1160
1161
1162/**
1163 * Called by PGMR3Reset to reset the shadow, switch to the virgin,
1164 * and verify that the virgin part is untouched.
1165 *
1166 * This is done after the normal memory has been cleared.
1167 *
1168 * @param pVM The VM handle.
1169 */
1170int pgmR3PhysRomReset(PVM pVM)
1171{
1172 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
1173 {
1174 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
1175
1176 if (pRom->fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
1177 {
1178 /*
1179 * Reset the physical handler.
1180 */
1181 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
1182 AssertRCReturn(rc, rc);
1183
1184 /*
1185 * What we do with the shadow pages depends on the memory
1186 * preallocation option. If not enabled, we'll just throw
1187 * out all the dirty pages and replace them by the zero page.
1188 */
1189 if (1)///@todo !pVM->pgm.f.fRamPreAlloc)
1190 {
1191 /* Count dirty shadow pages. */
1192 uint32_t cDirty = 0;
1193 uint32_t iPage = cPages;
1194 while (iPage-- > 0)
1195 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1196 cDirty++;
1197 if (cDirty)
1198 {
1199 /* Free the dirty pages. */
1200 PGMMFREEPAGESREQ pReq;
1201 rc = GMMR3FreePagesPrepare(pVM, &pReq, cDirty, GMMACCOUNT_BASE);
1202 AssertRCReturn(rc, rc);
1203
1204 uint32_t iReqPage = 0;
1205 for (iPage = 0; iPage < cPages; iPage++)
1206 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1207 {
1208 pReq->aPages[iReqPage].idPage = PGM_PAGE_GET_PAGEID(&pRom->aPages[iPage].Shadow);
1209 iReqPage++;
1210 }
1211
1212 rc = GMMR3FreePagesPerform(pVM, pReq);
1213 GMMR3FreePagesCleanup(pReq);
1214 AssertRCReturn(rc, rc);
1215
1216 /* setup the zero page. */
1217 for (iPage = 0; iPage < cPages; iPage++)
1218 if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
1219 PGM_PAGE_INIT_ZERO_REAL(&pRom->aPages[iPage].Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
1220 }
1221 }
1222 else
1223 {
1224 /* clear all the pages. */
1225 pgmLock(pVM);
1226 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1227 {
1228 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
1229 rc = pgmPhysPageMakeWritable(pVM, &pRom->aPages[iPage].Shadow, GCPhys);
1230 if (RT_FAILURE(rc))
1231 break;
1232
1233 void *pvDstPage;
1234 PPGMPAGEMAP pMapIgnored;
1235 rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pMapIgnored, &pvDstPage);
1236 if (RT_FAILURE(rc))
1237 break;
1238 memset(pvDstPage, 0, PAGE_SIZE);
1239 }
1240 pgmUnlock(pVM);
1241 AssertRCReturn(rc, rc);
1242 }
1243 }
1244
1245#ifdef VBOX_STRICT
1246 /*
1247 * Verify that the virgin page is unchanged if possible.
1248 */
1249 if (pRom->pvOriginal)
1250 {
1251 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
1252 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbSrcPage += PAGE_SIZE)
1253 {
1254 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
1255 PPGMPAGEMAP pMapIgnored;
1256 void *pvDstPage;
1257 int rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pMapIgnored, &pvDstPage);
1258 if (RT_FAILURE(rc))
1259 break;
1260 if (memcmp(pvDstPage, pbSrcPage, PAGE_SIZE))
1261 LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
1262 GCPhys, pRom->pszDesc));
1263 }
1264 }
1265#endif
1266 }
1267
1268 return VINF_SUCCESS;
1269}
1270
1271
1272/**
1273 * Change the shadowing of a range of ROM pages.
1274 *
1275 * This is intended for implementing chipset specific memory registers
1276 * and will not be very strict about the input. It will silently ignore
1277 * any pages that are not part of a shadowed ROM.
1278 *
1279 * @returns VBox status code.
1280 * @param pVM Pointer to the shared VM structure.
1281 * @param GCPhys Where to start. Page aligned.
1282 * @param cb How much to change. Page aligned.
1283 * @param enmProt The new ROM protection.
1284 */
1285PGMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
1286{
1287 /*
1288 * Check input
1289 */
1290 if (!cb)
1291 return VINF_SUCCESS;
1292 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1293 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1294 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1295 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1296 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
1297
1298 /*
1299 * Process the request.
1300 */
1301 bool fFlushedPool = false;
1302 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
1303 if ( GCPhys <= pRom->GCPhysLast
1304 && GCPhysLast >= pRom->GCPhys)
1305 {
1306 /*
1307 * Iterate the relevant pages and make the necessary changes.
1308 */
1309 bool fChanges = false;
1310 uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
1311 ? pRom->cb >> PAGE_SHIFT /* the request covers the rest of this ROM */
1312 : (GCPhysLast - pRom->GCPhys + 1) >> PAGE_SHIFT;
1313 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
1314 iPage < cPages;
1315 iPage++)
1316 {
1317 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
1318 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
1319 {
1320 fChanges = true;
1321
1322 /* flush the page pool first so we don't leave any usage references dangling. */
1323 if (!fFlushedPool)
1324 {
1325 pgmPoolFlushAll(pVM);
1326 fFlushedPool = true;
1327 }
1328
1329 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
1330 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
1331 PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));
1332
1333 *pOld = *pRamPage;
1334 *pRamPage = *pNew;
1335 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
1336 }
1337 }
1338
1339 /*
1340 * Reset the access handler if we made changes, no need
1341 * to optimize this.
1342 */
1343 if (fChanges)
1344 {
1345 int rc = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
1346 AssertRCReturn(rc, rc);
1347 }
1348
1349 /* Advance - cb isn't updated. */
1350 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
1351 }
1352
1353 return VINF_SUCCESS;
1354}
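/*
 * Sketch: how a chipset device could use the function above to express the
 * classic shadow RAM control for the BIOS area in PGM terms. The range is the
 * conventional 0xF0000-0xFFFFF segment and is given for illustration only.
 */
static int pgmR3PhysExampleShadowBiosSegment(PVM pVM)
{
    /* Reads hit the shadow RAM copy and writes stick as well. */
    return PGMR3PhysRomProtect(pVM, UINT32_C(0x000f0000), UINT32_C(0x00010000),
                               PGMROMPROT_READ_RAM_WRITE_RAM);
}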
1355
1356
1357/**
1358 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
1359 * registration APIs call to inform PGM about memory registrations.
1360 *
1361 * It registers the physical memory range with PGM. MM is responsible
1362 * for the toplevel things - allocation and locking - while PGM is taking
1363 * care of all the details and implements the physical address space virtualization.
1364 *
1365 * @returns VBox status.
1366 * @param pVM The VM handle.
1367 * @param pvRam HC virtual address of the RAM range. (page aligned)
1368 * @param GCPhys GC physical address of the RAM range. (page aligned)
1369 * @param cb Size of the RAM range. (page aligned)
1370 * @param fFlags Flags, MM_RAM_*.
1371 * @param paPages Pointer to an array of physical page descriptors.
1372 * @param pszDesc Description string.
1373 */
1374PGMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
1375{
1376 /*
1377 * Validate input.
1378 * (Not so important because callers are only MMR3PhysRegister()
1379 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
1380 */
1381 Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
1382
1383 Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
1384 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
1385 Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
1386 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
1387 Assert(!(fFlags & ~0xfff));
1388 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1389 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
1390 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
1391 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1392 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1393 if (GCPhysLast < GCPhys)
1394 {
1395 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
1396 return VERR_INVALID_PARAMETER;
1397 }
1398
1399 /*
1400 * Find range location and check for conflicts.
1401 */
1402 PPGMRAMRANGE pPrev = NULL;
1403 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
1404 while (pCur)
1405 {
1406 if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
1407 {
1408 AssertMsgFailed(("Conflict! This cannot happen!\n"));
1409 return VERR_PGM_RAM_CONFLICT;
1410 }
1411 if (GCPhysLast < pCur->GCPhys)
1412 break;
1413
1414 /* next */
1415 pPrev = pCur;
1416 pCur = pCur->pNextR3;
1417 }
1418
1419 /*
1420 * Allocate RAM range.
1421 * Small ranges are allocated from the heap, big ones have separate mappings.
1422 */
1423 size_t cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
1424 PPGMRAMRANGE pNew;
1425 RTGCPTR GCPtrNew;
1426 int rc = VERR_NO_MEMORY;
1427 if (cbRam > PAGE_SIZE / 2)
1428 { /* large */
1429 cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
1430 rc = SUPPageAlloc(cbRam >> PAGE_SHIFT, (void **)&pNew);
1431 if (VBOX_SUCCESS(rc))
1432 {
1433 rc = MMR3HyperMapHCRam(pVM, pNew, cbRam, true, pszDesc, &GCPtrNew);
1434 if (VBOX_SUCCESS(rc))
1435 {
1436 Assert(MMHyperHC2GC(pVM, pNew) == GCPtrNew);
1437 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
1438 }
1439 else
1440 {
1441 AssertMsgFailed(("MMR3HyperMapHCRam(,,%#x,,,) -> %Vrc\n", cbRam, rc));
1442 SUPPageFree(pNew, cbRam >> PAGE_SHIFT);
1443 }
1444 }
1445 else
1446 AssertMsgFailed(("SUPPageAlloc(%#x,,) -> %Vrc\n", cbRam >> PAGE_SHIFT, rc));
1447
1448 }
1449/** @todo Make VGA and VMMDev register their memory at init time before the hma size is fixated. */
1450 if (RT_FAILURE(rc))
1451 { /* small + fallback (vga) */
1452 rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
1453 if (VBOX_SUCCESS(rc))
1454 GCPtrNew = MMHyperHC2GC(pVM, pNew);
1455 else
1456 AssertMsgFailed(("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc));
1457 }
1458 if (VBOX_SUCCESS(rc))
1459 {
1460 /*
1461 * Initialize the range.
1462 */
1463 pNew->pvHC = pvRam;
1464 pNew->GCPhys = GCPhys;
1465 pNew->GCPhysLast = GCPhysLast;
1466 pNew->cb = cb;
1467 pNew->fFlags = fFlags;
1468 pNew->pavHCChunkHC = NULL;
1469 pNew->pavHCChunkGC = 0;
1470
1471 unsigned iPage = cb >> PAGE_SHIFT;
1472 if (paPages)
1473 {
1474 while (iPage-- > 0)
1475 {
1476 PGM_PAGE_INIT(&pNew->aPages[iPage], paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
1477 fFlags & MM_RAM_FLAGS_MMIO2 ? PGMPAGETYPE_MMIO2 : PGMPAGETYPE_RAM,
1478 PGM_PAGE_STATE_ALLOCATED);
1479 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
1480 }
1481 }
1482 else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1483 {
1484 /* Allocate memory for chunk to HC ptr lookup array. */
1485 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
1486 AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc), rc);
1487
1488 pNew->pavHCChunkGC = MMHyperHC2GC(pVM, pNew->pavHCChunkHC);
1489 Assert(pNew->pavHCChunkGC);
1490
1491 /* Physical memory will be allocated on demand. */
1492 while (iPage-- > 0)
1493 {
1494 PGM_PAGE_INIT(&pNew->aPages[iPage], 0, NIL_GMM_PAGEID, PGMPAGETYPE_RAM, PGM_PAGE_STATE_ZERO);
1495 pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
1496 }
1497 }
1498 else
1499 {
1500 Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
1501 RTHCPHYS HCPhysDummyPage = MMR3PageDummyHCPhys(pVM);
1502 while (iPage-- > 0)
1503 {
1504 PGM_PAGE_INIT(&pNew->aPages[iPage], HCPhysDummyPage, NIL_GMM_PAGEID, PGMPAGETYPE_MMIO, PGM_PAGE_STATE_ZERO);
1505 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
1506 }
1507 }
1508
1509 /*
1510 * Insert the new RAM range.
1511 */
1512 pgmLock(pVM);
1513 pNew->pNextR3 = pCur;
1514 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : NIL_RTR0PTR;
1515 pNew->pNextGC = pCur ? MMHyperCCToGC(pVM, pCur) : NIL_RTGCPTR;
1516 if (pPrev)
1517 {
1518 pPrev->pNextR3 = pNew;
1519 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
1520 pPrev->pNextGC = GCPtrNew;
1521 }
1522 else
1523 {
1524 pVM->pgm.s.pRamRangesR3 = pNew;
1525 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
1526 pVM->pgm.s.pRamRangesGC = GCPtrNew;
1527 }
1528 pgmUnlock(pVM);
1529 }
1530 return rc;
1531}
1532
1533#ifndef VBOX_WITH_NEW_PHYS_CODE
1534
1535/**
1536 * Register a chunk of the physical memory range with PGM. MM is responsible
1537 * for the toplevel things - allocation and locking - while PGM is taking
1538 * care of all the details and implements the physical address space virtualization.
1539 *
1540 *
1541 * @returns VBox status.
1542 * @param pVM The VM handle.
1543 * @param pvRam HC virtual address of the RAM range. (page aligned)
1544 * @param GCPhys GC physical address of the RAM range. (page aligned)
1545 * @param cb Size of the RAM range. (page aligned)
1546 * @param fFlags Flags, MM_RAM_*.
1547 * @param paPages Pointer to an array of physical page descriptors.
1548 * @param pszDesc Description string.
1549 */
1550PGMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
1551{
1552 NOREF(pszDesc);
1553
1554 /*
1555 * Validate input.
1556 * (Not so important because callers are only MMR3PhysRegister()
1557 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
1558 */
1559 Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
1560
1561 Assert(paPages);
1562 Assert(pvRam);
1563 Assert(!(fFlags & ~0xfff));
1564 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1565 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
1566 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
1567 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1568 Assert(VM_IS_EMT(pVM));
1569 Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1570 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
1571
1572 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1573 if (GCPhysLast < GCPhys)
1574 {
1575 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
1576 return VERR_INVALID_PARAMETER;
1577 }
1578
1579 /*
1580 * Find existing range location.
1581 */
1582 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1583 while (pRam)
1584 {
1585 RTGCPHYS off = GCPhys - pRam->GCPhys;
1586 if ( off < pRam->cb
1587 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1588 break;
1589
1590 pRam = CTXALLSUFF(pRam->pNext);
1591 }
1592 AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
1593
1594 unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1595 unsigned iPage = cb >> PAGE_SHIFT;
1596 if (paPages)
1597 {
1598 while (iPage-- > 0)
1599 pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
1600 }
1601 off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
1602 pRam->pavHCChunkHC[off] = pvRam;
1603
1604 /* Notify the recompiler. */
1605 REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);
1606
1607 return VINF_SUCCESS;
1608}
1609
1610
1611/**
1612 * Allocate missing physical pages for an existing guest RAM range.
1613 *
1614 * @returns VBox status.
1615 * @param pVM The VM handle.
1616 * @param pGCPhys Pointer to the GC physical address within the range to grow. (page aligned)
1617 */
1618PGMR3DECL(int) PGM3PhysGrowRange(PVM pVM, PCRTGCPHYS pGCPhys)
1619{
1620 RTGCPHYS GCPhys = *pGCPhys;
1621
1622 /*
1623 * Walk range list.
1624 */
1625 pgmLock(pVM);
1626
1627 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1628 while (pRam)
1629 {
1630 RTGCPHYS off = GCPhys - pRam->GCPhys;
1631 if ( off < pRam->cb
1632 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1633 {
1634 bool fRangeExists = false;
1635 unsigned off = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
1636
1637 /** @note A request made from another thread may end up in EMT after somebody else has already allocated the range. */
1638 if (pRam->pavHCChunkHC[off])
1639 fRangeExists = true;
1640
1641 pgmUnlock(pVM);
1642 if (fRangeExists)
1643 return VINF_SUCCESS;
1644 return pgmr3PhysGrowRange(pVM, GCPhys);
1645 }
1646
1647 pRam = CTXALLSUFF(pRam->pNext);
1648 }
1649 pgmUnlock(pVM);
1650 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1651}
1652
1653
1654/**
1655 * Allocate missing physical pages for an existing guest RAM range.
1656 *
1657 * @returns VBox status.
1658 * @param pVM The VM handle.
1659 * @param GCPhys GC physical address within the range to grow. (page aligned)
1661 */
1662int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
1663{
1664 void *pvRam;
1665 int rc;
1666
1667 /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
1668 if (!VM_IS_EMT(pVM))
1669 {
1670 PVMREQ pReq;
1671 const RTGCPHYS GCPhysParam = GCPhys;
1672
1673 AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));
1674
1675 rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, &GCPhysParam);
1676 if (VBOX_SUCCESS(rc))
1677 {
1678 rc = pReq->iStatus;
1679 VMR3ReqFree(pReq);
1680 }
1681 return rc;
1682 }
1683
1684 /* Round down to chunk boundary */
1685 GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;
1686
1687 STAM_COUNTER_INC(&pVM->pgm.s.StatDynRamGrow);
1688 STAM_COUNTER_ADD(&pVM->pgm.s.StatDynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));
1689
1690 Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %VGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));
1691
1692 unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
1693
1694 for (;;)
1695 {
1696 rc = SUPPageAlloc(cPages, &pvRam);
1697 if (VBOX_SUCCESS(rc))
1698 {
1699
1700 rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
1701 if (VBOX_SUCCESS(rc))
1702 return rc;
1703
1704 SUPPageFree(pvRam, cPages);
1705 }
1706
1707 VMSTATE enmVMState = VMR3GetState(pVM);
1708 if (enmVMState != VMSTATE_RUNNING)
1709 {
1710 AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %VGp!\n", GCPhys));
1711 LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %VGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
1712 return rc;
1713 }
1714
1715 LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
1716
1717 /* Pause first, then inform Main. */
1718 rc = VMR3SuspendNoSave(pVM);
1719 AssertRC(rc);
1720
1721 VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM.");
1722
1723 /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
1724 rc = VMR3WaitForResume(pVM);
1725
1726 /* Retry */
1727 LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
1728 }
1729}
1730
1731#endif /* !VBOX_WITH_NEW_PHYS_CODE */
1732
1733
1734/**
1735 * Interface that MMR3RomRegister() and MMR3PhysReserve() call to update the
1736 * flags of existing RAM ranges.
1737 *
1738 * @returns VBox status.
1739 * @param pVM The VM handle.
1740 * @param GCPhys GC physical address of the RAM range. (page aligned)
1741 * @param cb Size of the RAM range. (page aligned)
1742 * @param fFlags The flags to OR in, MM_RAM_* \#defines.
1743 * @param fMask The AND mask applied to the existing flags.
1744 */
1745PGMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
1746{
1747 Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));
1748
1749 /*
1750 * Validate input.
1751 * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
1752 */
1753 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
1754 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1755 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1756 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1757 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
1758
1759 /*
1760 * Lookup the range.
1761 */
1762 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
1763 while (pRam && GCPhys > pRam->GCPhysLast)
1764 pRam = CTXALLSUFF(pRam->pNext);
1765 if ( !pRam
1766 || GCPhys > pRam->GCPhysLast
1767 || GCPhysLast < pRam->GCPhys)
1768 {
1769 AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
1770 return VERR_INVALID_PARAMETER;
1771 }
1772
1773 /*
1774 * Update the requested flags.
1775 */
1776 RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
1777 | fMask;
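    /* In other words: each page's new flags become (HCPhys & fFullMask) | fFlags,
       i.e. any of the four flag bits above that are not set in fMask are cleared,
       fFlags is then ORed in, and all other HCPhys bits (the host physical
       address among them) are preserved. */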
1778 unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
1779 unsigned iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1780 for ( ; iPage < iPageEnd; iPage++)
1781 pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
1782
1783 return VINF_SUCCESS;
1784}
1785
1786
1787/**
1788 * Sets the Address Gate 20 state.
1789 *
1790 * @param pVM VM handle.
1791 * @param fEnable True if the gate should be enabled.
1792 * False if the gate should be disabled.
1793 */
1794PGMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable)
1795{
1796 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVM->pgm.s.fA20Enabled));
1797 if (pVM->pgm.s.fA20Enabled != (RTUINT)fEnable)
1798 {
1799 pVM->pgm.s.fA20Enabled = fEnable;
1800 pVM->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
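        /* Note: with fEnable == false the expression above is ~(RTGCPHYS)0x100000,
           so bit 20 of guest-physical addresses is forced to zero (the classic
           A20 wrap-around); with fEnable == true it is ~(RTGCPHYS)0 and addresses
           pass through unchanged. */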
1801 REMR3A20Set(pVM, fEnable);
1802 }
1803}
1804
1805
1806/**
1807 * Tree enumeration callback for dealing with age rollover.
1808 * It will perform a simple compression of the current age.
1809 */
1810static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
1811{
1812 /* Age compression - ASSUMES iNow == 4. */
1813 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
1814 if (pChunk->iAge >= UINT32_C(0xffffff00))
1815 pChunk->iAge = 3;
1816 else if (pChunk->iAge >= UINT32_C(0xfffff000))
1817 pChunk->iAge = 2;
1818 else if (pChunk->iAge)
1819 pChunk->iAge = 1;
1820 else /* iAge = 0 */
1821 pChunk->iAge = 4;
1822
1823 /* reinsert */
1824 PVM pVM = (PVM)pvUser;
1825 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
1826 pChunk->AgeCore.Key = pChunk->iAge;
1827 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
1828 return 0;
1829}
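
/*
 * Note: after this rollover pass every chunk age is back in the 1..4 range,
 * which matches PGMR3PhysChunkAgeing() restarting iNow at 4. Only the age
 * resolution is reduced; the relative least-recently-used ordering of the
 * chunks is preserved.
 */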
1830
1831
1832/**
1833 * Tree enumeration callback that updates the age of the chunks that have
1834 * been used since the last ageing pass.
1835 */
1836static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
1837{
1838 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
1839 if (!pChunk->iAge)
1840 {
1841 PVM pVM = (PVM)pvUser;
1842 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
1843 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
1844 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
1845 }
1846
1847 return 0;
1848}
1849
1850
1851/**
1852 * Performs ageing of the ring-3 chunk mappings.
1853 *
1854 * @param pVM The VM handle.
1855 */
1856PGMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
1857{
1858 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
1859 pVM->pgm.s.ChunkR3Map.iNow++;
1860 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
1861 {
1862 pVM->pgm.s.ChunkR3Map.iNow = 4;
1863 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
1864 }
1865 else
1866 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
1867}
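
/*
 * Note: the countdown reset at the top of this function means
 * pgmR3PhysChunkFindUnmapCandidate() will trigger the next ageing pass after
 * roughly RT_MIN(cMax / 4, 1024) unmap-candidate searches, so the age tree is
 * refreshed regularly without being rescanned on every eviction.
 */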
1868
1869
1870/**
1871 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
1872 */
1873typedef struct PGMR3PHYSCHUNKUNMAPCB
1874{
1875 PVM pVM; /**< The VM handle. */
1876 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
1877} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
1878
1879
1880/**
1881 * Callback used to find the mapping that's been unused for
1882 * the longest time.
1883 */
1884static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
1885{
1886 do
1887 {
1888 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
1889 if ( pChunk->iAge
1890 && !pChunk->cRefs)
1891 {
1892 /*
1893 * Check that it's not in any of the TLBs.
1894 */
1895 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
1896 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
1897 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
1898 {
1899 pChunk = NULL;
1900 break;
1901 }
1902 if (pChunk)
1903 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
1904 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
1905 {
1906 pChunk = NULL;
1907 break;
1908 }
1909 if (pChunk)
1910 {
1911 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
1912 return 1; /* done */
1913 }
1914 }
1915
1916 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
1917 pNode = pNode->pList;
1918 } while (pNode);
1919 return 0;
1920}
1921
1922
1923/**
1924 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
1925 *
1926 * The candidate will not be part of any TLBs, so no need to flush
1927 * anything afterwards.
1928 *
1929 * @returns Chunk id of the unmap candidate, or INT32_MAX if none was found.
1930 * @param pVM The VM handle.
1931 */
1932static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
1933{
1934 /*
1935 * Do tree ageing first?
1936 */
1937 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
1938 PGMR3PhysChunkAgeing(pVM);
1939
1940 /*
1941 * Enumerate the age tree starting with the left most node.
1942 */
1943 PGMR3PHYSCHUNKUNMAPCB Args;
1944 Args.pVM = pVM;
1945 Args.pChunk = NULL;
1946 if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
1947 return Args.pChunk->Core.Key;
1948 return INT32_MAX;
1949}
1950
1951
1952/**
1953 * Maps the given chunk into the ring-3 mapping cache.
1954 *
1955 * This will call ring-0.
1956 *
1957 * @returns VBox status code.
1958 * @param pVM The VM handle.
1959 * @param idChunk The chunk in question.
1960 * @param ppChunk Where to store the chunk tracking structure.
1961 *
1962 * @remarks Called from within the PGM critical section.
1963 */
1964int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
1965{
1966 int rc;
1967 /*
1968 * Allocate a new tracking structure first.
1969 */
1970#if 0 /* for later when we've got a separate mapping method for ring-0. */
1971 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
1972 AssertReturn(pChunk, VERR_NO_MEMORY);
1973#else
1974 PPGMCHUNKR3MAP pChunk;
1975 rc = MMHyperAlloc(pVM, sizeof(*pChunk), 0, MM_TAG_PGM_CHUNK_MAPPING, (void **)&pChunk);
1976 AssertRCReturn(rc, rc);
1977#endif
1978 pChunk->Core.Key = idChunk;
1979 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
1980 pChunk->iAge = 0;
1981 pChunk->cRefs = 0;
1982 pChunk->cPermRefs = 0;
1983 pChunk->pv = NULL;
1984
1985 /*
1986 * Request the ring-0 part to map the chunk in question and if
1987 * necessary unmap another one to make space in the mapping cache.
1988 */
1989 GMMMAPUNMAPCHUNKREQ Req;
1990 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
1991 Req.Hdr.cbReq = sizeof(Req);
1992 Req.pvR3 = NULL;
1993 Req.idChunkMap = idChunk;
1994 Req.idChunkUnmap = INT32_MAX;
1995 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
1996 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
1997 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
1998 if (VBOX_SUCCESS(rc))
1999 {
2000 /*
2001 * Update the tree.
2002 */
2003 /* insert the new one. */
2004 AssertPtr(Req.pvR3);
2005 pChunk->pv = Req.pvR3;
2006 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
2007 AssertRelease(fRc);
2008 pVM->pgm.s.ChunkR3Map.c++;
2009
2010 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2011 AssertRelease(fRc);
2012
2013 /* remove the unmapped one. */
2014 if (Req.idChunkUnmap != INT32_MAX)
2015 {
2016 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
2017 AssertRelease(pUnmappedChunk);
2018 pUnmappedChunk->pv = NULL;
2019 pUnmappedChunk->Core.Key = UINT32_MAX;
2020#if 0 /* for later when we've got a separate mapping method for ring-0. */
2021 MMR3HeapFree(pUnmappedChunk);
2022#else
2023 MMHyperFree(pVM, pUnmappedChunk);
2024#endif
2025 pVM->pgm.s.ChunkR3Map.c--;
2026 }
2027 }
2028 else
2029 {
2030 AssertRC(rc);
2031#if 0 /* for later when we've got a separate mapping method for ring-0. */
2032 MMR3HeapFree(pChunk);
2033#else
2034 MMHyperFree(pVM, pChunk);
2035#endif
2036 pChunk = NULL;
2037 }
2038
2039 *ppChunk = pChunk;
2040 return rc;
2041}
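
/*
 * Illustrative sketch: how the ring-3 mapping cache is typically consumed by a
 * caller that already owns the PGM critical section. The helper name is an
 * assumption for this example; it looks the chunk up in the AVL tree kept by
 * pgmR3PhysChunkMap() above and only goes to ring-0 on a miss.
 */
#if 0
static int pgmSketchChunkToR3Ptr(PVM pVM, uint32_t idChunk, void **ppv)
{
    PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
    if (!pChunk)
    {
        int rc = pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
        if (VBOX_FAILURE(rc))
            return rc;
    }
    pChunk->iAge = 0; /* mark as recently used so the ageing pass keeps it mapped */
    *ppv = pChunk->pv;
    return VINF_SUCCESS;
}
#endif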
2042
2043
2044/**
2045 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
2046 *
2047 * @returns see pgmR3PhysChunkMap.
2048 * @param pVM The VM handle.
2049 * @param idChunk The chunk to map.
2050 */
2051PGMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
2052{
2053 PPGMCHUNKR3MAP pChunk;
2054 return pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
2055}
2056
2057
2058/**
2059 * Invalidates the TLB for the ring-3 mapping cache.
2060 *
2061 * @param pVM The VM handle.
2062 */
2063PGMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
2064{
2065 pgmLock(pVM);
2066 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
2067 {
2068 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
2069 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
2070 }
2071 pgmUnlock(pVM);
2072}
2073
2074
2075/**
2076 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
2077 *
2078 * @returns The following VBox status codes.
2079 * @retval VINF_SUCCESS on success. FF cleared.
2080 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
2081 *
2082 * @param pVM The VM handle.
2083 */
2084PGMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
2085{
2086 pgmLock(pVM);
2087 int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
2088 if (rc == VERR_GMM_SEED_ME)
2089 {
2090 void *pvChunk;
2091 rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
2092 if (VBOX_SUCCESS(rc))
2093 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
2094 if (VBOX_FAILURE(rc))
2095 {
2096 LogRel(("PGM: GMM Seeding failed, rc=%Vrc\n", rc));
2097 rc = VINF_EM_NO_MEMORY;
2098 }
2099 }
2100 pgmUnlock(pVM);
2101 Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY);
2102 return rc;
2103}
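
/*
 * Illustrative sketch: how an execution loop might service the
 * VM_FF_PGM_NEED_HANDY_PAGES force-action flag mentioned above. The helper
 * name is an assumption for this example, and VM_FF_ISSET is used on the
 * assumption that the usual force-flag macros from VBox/vm.h are available.
 */
#if 0
static int pgmSketchServiceHandyPagesFF(PVM pVM)
{
    if (!VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
        return VINF_SUCCESS;
    /* VINF_EM_NO_MEMORY leaves the flag set so the caller can suspend or retry. */
    return PGMR3PhysAllocateHandyPages(pVM);
}
#endif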
2104