VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp@4968

Last change on this file since 4968 was 4811, checked in by vboxsync, 17 years ago

Split VMMR0Entry into VMMR0EntryInt, VMMR0EntryFast and VMMr0EntryEx. This will prevent the SUPCallVMMR0Ex path from causing harm and messing up the paths that have to be optimized.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 30.3 KB
/* $Id: PGMPhys.cpp 4811 2007-09-14 17:53:56Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, Physical Memory Addressing.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include <VBox/cpum.h>
#include <VBox/iom.h>
#include <VBox/sup.h>
#include <VBox/mm.h>
#include <VBox/stam.h>
#include <VBox/rem.h>
#include <VBox/csam.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <VBox/dbg.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <iprt/alloc.h>
#include <iprt/asm.h>
#include <VBox/log.h>
#include <iprt/thread.h>
#include <iprt/string.h>



/*
 * PGMR3PhysReadByte/Word/Dword
 * PGMR3PhysWriteByte/Word/Dword
 */

#define PGMPHYSFN_READNAME  PGMR3PhysReadByte
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteByte
#define PGMPHYS_DATASIZE    1
#define PGMPHYS_DATATYPE    uint8_t
#include "PGMPhys.h"

#define PGMPHYSFN_READNAME  PGMR3PhysReadWord
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteWord
#define PGMPHYS_DATASIZE    2
#define PGMPHYS_DATATYPE    uint16_t
#include "PGMPhys.h"

#define PGMPHYSFN_READNAME  PGMR3PhysReadDword
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteDword
#define PGMPHYS_DATASIZE    4
#define PGMPHYS_DATATYPE    uint32_t
#include "PGMPhys.h"
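
/* Illustrative note, not part of the original file: each #include "PGMPhys.h"
 * above acts as a template instantiation driven by the PGMPHYSFN_* and
 * PGMPHYS_* macros, producing a read/write accessor pair for the given data
 * size. The authoritative declarations live in PGMPhys.h and VBox/pgm.h; the
 * shapes sketched below are an assumption for illustration only and are never
 * compiled.
 */
#if 0
PGMR3DECL(uint8_t) PGMR3PhysReadByte(PVM pVM, RTGCPHYS GCPhys);
PGMR3DECL(void)    PGMR3PhysWriteByte(PVM pVM, RTGCPHYS GCPhys, uint8_t bValue);
#endif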




/**
 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
 * registration APIs call to inform PGM about memory registrations.
 *
 * It registers the physical memory range with PGM. MM is responsible
 * for the top-level things - allocation and locking - while PGM takes
 * care of all the details and implements the physical address space virtualization.
 *
 * @returns VBox status.
 * @param   pVM         The VM handle.
 * @param   pvRam       HC virtual address of the RAM range. (page aligned)
 * @param   GCPhys      GC physical address of the RAM range. (page aligned)
 * @param   cb          Size of the RAM range. (page aligned)
 * @param   fFlags      Flags, MM_RAM_*.
 * @param   paPages     Pointer to an array of physical page descriptors.
 * @param   pszDesc     Description string.
 */
PGMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
{
    /*
     * Validate input.
     * (Not so important because callers are only MMR3PhysRegister()
     *  and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
     */
    Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));

    Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
    /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
    Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
    /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
    Assert(!(fFlags & ~0xfff));
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
    Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
    Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    if (GCPhysLast < GCPhys)
    {
        AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Find range location and check for conflicts.
     */
    PPGMRAMRANGE pPrev = NULL;
    PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesHC;
    while (pCur)
    {
        if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
        {
            AssertMsgFailed(("Conflict! This cannot happen!\n"));
            return VERR_PGM_RAM_CONFLICT;
        }
        if (GCPhysLast < pCur->GCPhys)
            break;

        /* next */
        pPrev = pCur;
        pCur = pCur->pNextHC;
    }

    /*
     * Allocate RAM range.
     * Small ranges are allocated from the heap, big ones have separate mappings.
     */
    size_t cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
    PPGMRAMRANGE pNew;
    RTGCPTR GCPtrNew;
    int rc;
    if (cbRam > PAGE_SIZE / 2)
    {   /* large */
        cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
        rc = SUPPageAlloc(cbRam >> PAGE_SHIFT, (void **)&pNew);
        if (VBOX_SUCCESS(rc))
        {
            rc = MMR3HyperMapHCRam(pVM, pNew, cbRam, true, pszDesc, &GCPtrNew);
            if (VBOX_SUCCESS(rc))
            {
                Assert(MMHyperHC2GC(pVM, pNew) == GCPtrNew);
                rc = MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
            }
            else
            {
                AssertMsgFailed(("MMR3HyperMapHCRam(,,%#x,,,) -> %Vrc\n", cbRam, rc));
                SUPPageFree(pNew, cbRam >> PAGE_SHIFT);
            }
        }
        else
            AssertMsgFailed(("SUPPageAlloc(%#x,,) -> %Vrc\n", cbRam >> PAGE_SHIFT, rc));
    }
    else
    {   /* small */
        rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
        if (VBOX_SUCCESS(rc))
            GCPtrNew = MMHyperHC2GC(pVM, pNew);
        else
            AssertMsgFailed(("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc));
    }
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Initialize the range.
         */
        pNew->pvHC = pvRam;
        pNew->GCPhys = GCPhys;
        pNew->GCPhysLast = GCPhysLast;
        pNew->cb = cb;
        pNew->fFlags = fFlags;
        pNew->pavHCChunkHC = NULL;
        pNew->pavHCChunkGC = 0;

        unsigned iPage = cb >> PAGE_SHIFT;
        if (paPages)
        {
            while (iPage-- > 0)
            {
                pNew->aPages[iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
                pNew->aPages[iPage].u2State = PGM_PAGE_STATE_ALLOCATED;
                pNew->aPages[iPage].fWrittenTo = 0;
                pNew->aPages[iPage].fSomethingElse = 0;
                pNew->aPages[iPage].idPage = 0;
                pNew->aPages[iPage].u32B = 0;
            }
        }
        else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
        {
            /* Allocate memory for chunk to HC ptr lookup array. */
            rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
            AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc), rc);

            pNew->pavHCChunkGC = MMHyperHC2GC(pVM, pNew->pavHCChunkHC);
            Assert(pNew->pavHCChunkGC);

            /* Physical memory will be allocated on demand. */
            while (iPage-- > 0)
            {
                pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
                pNew->aPages[iPage].u2State = PGM_PAGE_STATE_ZERO;
                pNew->aPages[iPage].fWrittenTo = 0;
                pNew->aPages[iPage].fSomethingElse = 0;
                pNew->aPages[iPage].idPage = 0;
                pNew->aPages[iPage].u32B = 0;
            }
        }
        else
        {
            Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
            RTHCPHYS HCPhysDummyPage = (MMR3PageDummyHCPhys(pVM) & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
            while (iPage-- > 0)
            {
                pNew->aPages[iPage].HCPhys = HCPhysDummyPage; /** @todo PAGE FLAGS */
                pNew->aPages[iPage].u2State = PGM_PAGE_STATE_ZERO;
                pNew->aPages[iPage].fWrittenTo = 0;
                pNew->aPages[iPage].fSomethingElse = 0;
                pNew->aPages[iPage].idPage = 0;
                pNew->aPages[iPage].u32B = 0;
            }
        }

        /*
         * Insert the new RAM range.
         */
        pgmLock(pVM);
        pNew->pNextHC = pCur;
        pNew->pNextGC = pCur ? MMHyperHC2GC(pVM, pCur) : 0;
        if (pPrev)
        {
            pPrev->pNextHC = pNew;
            pPrev->pNextGC = GCPtrNew;
        }
        else
        {
            pVM->pgm.s.pRamRangesHC = pNew;
            pVM->pgm.s.pRamRangesGC = GCPtrNew;
        }
        pgmUnlock(pVM);
    }
    return rc;
}
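
/* Illustrative usage sketch, not part of the original file and never compiled:
 * the shape of a call a component such as MMR3PhysRegister() might make into
 * PGMR3PhysRegister() above. The helper name, size, address and flag values
 * are hypothetical and chosen purely for illustration.
 */
#if 0
static int pgmR3PhysExampleRegister(PVM pVM, void *pvRam, const SUPPAGE *paPages)
{
    /* 4 MB of page aligned RAM at guest physical address 0, no special flags. */
    return PGMR3PhysRegister(pVM, pvRam, 0 /*GCPhys*/, 4 * 1024 * 1024 /*cb*/,
                             0 /*fFlags*/, paPages, "Example RAM");
}
#endif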


/**
 * Register a chunk of the physical memory range with PGM. MM is responsible
 * for the top-level things - allocation and locking - while PGM takes
 * care of all the details and implements the physical address space virtualization.
 *
 * @returns VBox status.
 * @param   pVM         The VM handle.
 * @param   pvRam       HC virtual address of the RAM range. (page aligned)
 * @param   GCPhys      GC physical address of the RAM range. (page aligned)
 * @param   cb          Size of the RAM range. (page aligned)
 * @param   fFlags      Flags, MM_RAM_*.
 * @param   paPages     Pointer to an array of physical page descriptors.
 * @param   pszDesc     Description string.
 */
PGMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
{
#ifdef PGM_DYNAMIC_RAM_ALLOC
    NOREF(pszDesc);

    /*
     * Validate input.
     * (Not so important because callers are only MMR3PhysRegister()
     *  and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
     */
    Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));

    Assert(paPages);
    Assert(pvRam);
    Assert(!(fFlags & ~0xfff));
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
    Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
    Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(VM_IS_EMT(pVM));
    Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
    Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);

    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    if (GCPhysLast < GCPhys)
    {
        AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Find existing range location.
     */
    PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
    while (pRam)
    {
        RTGCPHYS off = GCPhys - pRam->GCPhys;
        if (   off < pRam->cb
            && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
            break;

        pRam = CTXSUFF(pRam->pNext);
    }
    AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);

    unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
    unsigned iPage = cb >> PAGE_SHIFT;
    if (paPages)
    {
        while (iPage-- > 0)
            pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
    }
    off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
    pRam->pavHCChunkHC[off] = pvRam;

    /* Notify the recompiler. */
    REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);

    return VINF_SUCCESS;
#else /* !PGM_DYNAMIC_RAM_ALLOC */
    AssertReleaseMsgFailed(("Shouldn't ever get here when PGM_DYNAMIC_RAM_ALLOC isn't defined!\n"));
    return VERR_INTERNAL_ERROR;
#endif /* !PGM_DYNAMIC_RAM_ALLOC */
}
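
/* Worked example, added for illustration and not part of the original file.
 * Assuming 4 KB pages (PAGE_SHIFT == 12) and, purely as an assumption here,
 * 1 MB dynamic chunks (PGM_DYNAMIC_CHUNK_SHIFT == 20), a chunk registered
 * 5 MB into its range is filed like this:
 *
 *   GCPhys - pRam->GCPhys = 0x00500000
 *   off  = 0x00500000 >> PAGE_SHIFT                  = 0x500 (page index)
 *   off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT)   = 0x500 >> 8 = 5
 *
 * so pRam->pavHCChunkHC[5] receives the HC address of the newly registered chunk.
 */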


/**
 * Allocate missing physical pages for an existing guest RAM range.
 *
 * @returns VBox status.
 * @param   pVM         The VM handle.
 * @param   GCPhys      GC physical address of the RAM range. (page aligned)
 */
PGMR3DECL(int) PGM3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
{
    /*
     * Walk range list.
     */
    pgmLock(pVM);

    PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
    while (pRam)
    {
        RTGCPHYS off = GCPhys - pRam->GCPhys;
        if (   off < pRam->cb
            && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
        {
            bool fRangeExists = false;
            unsigned off = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;

            /** @note A request made from another thread may end up in EMT after somebody else has already allocated the range. */
            if (pRam->pavHCChunkHC[off])
                fRangeExists = true;

            pgmUnlock(pVM);
            if (fRangeExists)
                return VINF_SUCCESS;
            return pgmr3PhysGrowRange(pVM, GCPhys);
        }

        pRam = CTXSUFF(pRam->pNext);
    }
    pgmUnlock(pVM);
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}

#ifndef NEW_PHYS_CODE

/**
 * Allocate missing physical pages for an existing guest RAM range.
 *
 * @returns VBox status.
 * @param   pVM         The VM handle.
 * @param   GCPhys      GC physical address of the RAM range. (page aligned)
 */
int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
{
    void *pvRam;
    int rc;

    /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
    if (!VM_IS_EMT(pVM))
    {
        PVMREQ pReq;

        AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));

        rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, GCPhys);
        if (VBOX_SUCCESS(rc))
        {
            rc = pReq->iStatus;
            VMR3ReqFree(pReq);
        }
        return rc;
    }

    /* Round down to chunk boundary */
    GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;

    STAM_COUNTER_INC(&pVM->pgm.s.StatDynRamGrow);
    STAM_COUNTER_ADD(&pVM->pgm.s.StatDynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));

    Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %VGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));

    unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
    rc = SUPPageAlloc(cPages, &pvRam);
    if (VBOX_SUCCESS(rc))
    {
        VMSTATE enmVMState = VMR3GetState(pVM);

        rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
        if (    VBOX_SUCCESS(rc)
            ||  enmVMState != VMSTATE_RUNNING)
        {
            if (VBOX_FAILURE(rc))
            {
                AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %VGp!\n", GCPhys));
                LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %VGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
            }
            return rc;
        }

        SUPPageFree(pvRam, cPages);

        LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));

        /* Pause first, then inform Main. */
        rc = VMR3SuspendNoSave(pVM);
        AssertRC(rc);

        VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM.");

        /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
        rc = VMR3WaitForResume(pVM);

        /* Retry */
        LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
        return pgmr3PhysGrowRange(pVM, GCPhys);
    }
    return rc;
}

#endif /* !NEW_PHYS_CODE */

/**
 * Interface for MMIO handler relocation calls.
 *
 * It relocates an existing physical memory range with PGM.
 *
 * @returns VBox status.
 * @param   pVM         The VM handle.
 * @param   GCPhysOld   Previous GC physical address of the RAM range. (page aligned)
 * @param   GCPhysNew   New GC physical address of the RAM range. (page aligned)
 * @param   cb          Size of the RAM range. (page aligned)
 */
PGMR3DECL(int) PGMR3PhysRelocate(PVM pVM, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, size_t cb)
{
    /*
     * Validate input.
     * (Not so important because callers are only MMR3PhysRelocate(),
     *  but anyway...)
     */
    Log(("PGMR3PhysRelocate Old %VGp New %VGp (%#x bytes)\n", GCPhysOld, GCPhysNew, cb));

    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
    Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
    Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
    RTGCPHYS GCPhysLast;
    GCPhysLast = GCPhysOld + (cb - 1);
    if (GCPhysLast < GCPhysOld)
    {
        AssertMsgFailed(("The old range wraps! GCPhys=%VGp cb=%#x\n", GCPhysOld, cb));
        return VERR_INVALID_PARAMETER;
    }
    GCPhysLast = GCPhysNew + (cb - 1);
    if (GCPhysLast < GCPhysNew)
    {
        AssertMsgFailed(("The new range wraps! GCPhys=%VGp cb=%#x\n", GCPhysNew, cb));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Find and remove old range location.
     */
    pgmLock(pVM);
    PPGMRAMRANGE pPrev = NULL;
    PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesHC;
    while (pCur)
    {
        if (pCur->GCPhys == GCPhysOld && pCur->cb == cb)
            break;

        /* next */
        pPrev = pCur;
        pCur = pCur->pNextHC;
    }
    if (pPrev)
    {
        pPrev->pNextHC = pCur->pNextHC;
        pPrev->pNextGC = pCur->pNextGC;
    }
    else
    {
        pVM->pgm.s.pRamRangesHC = pCur->pNextHC;
        pVM->pgm.s.pRamRangesGC = pCur->pNextGC;
    }

    /*
     * Update the range.
     */
    pCur->GCPhys = GCPhysNew;
    pCur->GCPhysLast = GCPhysLast;
    PPGMRAMRANGE pNew = pCur;

    /*
     * Find range location and check for conflicts.
     */
    pPrev = NULL;
    pCur = pVM->pgm.s.pRamRangesHC;
    while (pCur)
    {
        if (GCPhysNew <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
        {
            AssertMsgFailed(("Conflict! This cannot happen!\n"));
            pgmUnlock(pVM);
            return VERR_PGM_RAM_CONFLICT;
        }
        if (GCPhysLast < pCur->GCPhys)
            break;

        /* next */
        pPrev = pCur;
        pCur = pCur->pNextHC;
    }

    /*
     * Reinsert the RAM range.
     */
    pNew->pNextHC = pCur;
    pNew->pNextGC = pCur ? MMHyperHC2GC(pVM, pCur) : 0;
    if (pPrev)
    {
        pPrev->pNextHC = pNew;
        pPrev->pNextGC = MMHyperHC2GC(pVM, pNew);
    }
    else
    {
        pVM->pgm.s.pRamRangesHC = pNew;
        pVM->pgm.s.pRamRangesGC = MMHyperHC2GC(pVM, pNew);
    }

    pgmUnlock(pVM);
    return VINF_SUCCESS;
}


/**
 * Interface that MMR3RomRegister() and MMR3PhysReserve() call to update the
 * flags of existing RAM ranges.
 *
 * @returns VBox status.
 * @param   pVM         The VM handle.
 * @param   GCPhys      GC physical address of the RAM range. (page aligned)
 * @param   cb          Size of the RAM range. (page aligned)
 * @param   fFlags      The OR flags, MM_RAM_* \#defines.
 * @param   fMask       The AND mask for the flags.
 */
PGMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
{
    Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));

    /*
     * Validate input.
     * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
     */
    Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);

    /*
     * Lookup the range.
     */
    PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
    while (pRam && GCPhys > pRam->GCPhysLast)
        pRam = CTXSUFF(pRam->pNext);
    if (    !pRam
        ||  GCPhys > pRam->GCPhysLast
        ||  GCPhysLast < pRam->GCPhys)
    {
        AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Update the requested flags.
     */
    RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
                       | fMask;
    unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
    unsigned iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
    for ( ; iPage < iPageEnd; iPage++)
        pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */

    return VINF_SUCCESS;
}
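
/* Worked example, added for illustration and not part of the original file.
 * The loop above keeps all HCPhys bits except the four special flag bits,
 * clears those special bits that are not in fMask, and then ORs in fFlags.
 * With the purely hypothetical choice fFlags = MM_RAM_FLAGS_ROM and fMask = 0:
 *
 *   fFullMask = ~(RESERVED | ROM | MMIO | MMIO2) | 0
 *   HCPhys    = (HCPhys & fFullMask) | MM_RAM_FLAGS_ROM
 *
 * which leaves the physical address bits untouched, strips any previous
 * special flags and sets only the ROM flag on every page in the range.
 */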


/**
 * Sets the Address Gate 20 state.
 *
 * @param   pVM         VM handle.
 * @param   fEnable     True if the gate should be enabled.
 *                      False if the gate should be disabled.
 */
PGMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable)
{
    LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVM->pgm.s.fA20Enabled));
    if (pVM->pgm.s.fA20Enabled != (RTUINT)fEnable)
    {
        pVM->pgm.s.fA20Enabled = fEnable;
        pVM->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
        REMR3A20Set(pVM, fEnable);
    }
}
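
/* Worked example, added for illustration and not part of the original file:
 *
 *   fEnable = true:   !fEnable == 0,  GCPhysA20Mask = ~(RTGCPHYS)0
 *                     -> every guest physical address passes through unchanged.
 *   fEnable = false:  !fEnable == 1,  GCPhysA20Mask = ~(RTGCPHYS)0x00100000
 *                     -> bit 20 is masked off, so 0x00100000 aliases 0x00000000,
 *                        mimicking a disabled A20 gate on real hardware.
 */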


/**
 * Tree enumeration callback for dealing with age rollover.
 * It will perform a simple compression of the current age.
 */
static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
{
    /* Age compression - ASSUMES iNow == 4. */
    PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
    if (pChunk->iAge >= UINT32_C(0xffffff00))
        pChunk->iAge = 3;
    else if (pChunk->iAge >= UINT32_C(0xfffff000))
        pChunk->iAge = 2;
    else if (pChunk->iAge)
        pChunk->iAge = 1;
    else /* iAge = 0 */
        pChunk->iAge = 4;

    /* reinsert */
    PVM pVM = (PVM)pvUser;
    RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
    pChunk->AgeCore.Key = pChunk->iAge;
    RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
    return 0;
}


/**
 * Tree enumeration callback that updates the chunks that have
 * been used since the last ageing.
 */
static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
{
    PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
    if (!pChunk->iAge)
    {
        PVM pVM = (PVM)pvUser;
        RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
        pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
        RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
    }

    return 0;
}


/**
 * Performs ageing of the ring-3 chunk mappings.
 *
 * @param   pVM         The VM handle.
 */
PGMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
{
    pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
    pVM->pgm.s.ChunkR3Map.iNow++;
    if (pVM->pgm.s.ChunkR3Map.iNow == 0)
    {
        pVM->pgm.s.ChunkR3Map.iNow = 4;
        RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
    }
    else
        RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
}


/**
 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
 */
typedef struct PGMR3PHYSCHUNKUNMAPCB
{
    /** The VM handle. */
    PVM pVM;
    /** The chunk to unmap. */
    PPGMCHUNKR3MAP pChunk;
} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;


/**
 * Callback used to find the mapping that's been unused for
 * the longest time.
 */
static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
{
    do
    {
        PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
        if (    pChunk->iAge
            &&  !pChunk->cRefs)
        {
            /*
             * Check that it's not in any of the TLBs.
             */
            PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
            for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
                if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
                {
                    pChunk = NULL;
                    break;
                }
            if (pChunk)
                for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
                    if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
                    {
                        pChunk = NULL;
                        break;
                    }
            if (pChunk)
            {
                ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
                return 1; /* done */
            }
        }

        /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
        pNode = pNode->pList;
    } while (pNode);
    return 0;
}


/**
 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
 *
 * The candidate will not be part of any TLBs, so no need to flush
 * anything afterwards.
 *
 * @returns Chunk id.
 * @param   pVM         The VM handle.
 */
static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
{
    /*
     * Do tree ageing first?
     */
    if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
        PGMR3PhysChunkAgeing(pVM);

    /*
     * Enumerate the age tree starting with the left most node.
     */
    PGMR3PHYSCHUNKUNMAPCB Args;
    Args.pVM = pVM;
    Args.pChunk = NULL;
    if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
        return Args.pChunk->Core.Key;
    return INT32_MAX;
}


/**
 * Argument package for the VMMR0_DO_GMM_MAP_UNMAP_CHUNK request.
 */
typedef struct GMMMAPUNMAPCHUNKREQ
{
    /** The header. */
    SUPVMMR0REQHDR Hdr;
    /** The chunk to map, UINT32_MAX if unmap only. (IN) */
    uint32_t idChunkMap;
    /** The chunk to unmap, UINT32_MAX if map only. (IN) */
    uint32_t idChunkUnmap;
    /** Where the mapping address is returned. (OUT) */
    RTR3PTR pvR3;
} GMMMAPUNMAPCHUNKREQ;


/**
 * Maps the given chunk into the ring-3 mapping cache.
 *
 * This will call ring-0.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   idChunk     The chunk in question.
 * @param   ppChunk     Where to store the chunk tracking structure.
 *
 * @remarks Called from within the PGM critical section.
 */
int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
{
    int rc;
    /*
     * Allocate a new tracking structure first.
     */
#if 0 /* for later when we've got a separate mapping method for ring-0. */
    PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
    AssertReturn(pChunk, VERR_NO_MEMORY);
#else
    PPGMCHUNKR3MAP pChunk;
    rc = MMHyperAlloc(pVM, sizeof(*pChunk), 0, MM_TAG_PGM_CHUNK_MAPPING, (void **)&pChunk);
    AssertRCReturn(rc, rc);
#endif
    pChunk->Core.Key = idChunk;
    pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
    pChunk->iAge = 0;
    pChunk->cRefs = 0;
    pChunk->cPermRefs = 0;
    pChunk->pv = NULL;

    /*
     * Request the ring-0 part to map the chunk in question and if
     * necessary unmap another one to make space in the mapping cache.
     */
    GMMMAPUNMAPCHUNKREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq = sizeof(Req);
    Req.pvR3 = NULL;
    Req.idChunkMap = idChunk;
    Req.idChunkUnmap = INT32_MAX;
    if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
        Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
    rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Update the tree.
         */
        /* insert the new one. */
        AssertPtr(Req.pvR3);
        pChunk->pv = Req.pvR3;
        bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
        AssertRelease(fRc);
        pVM->pgm.s.ChunkR3Map.c++;

        fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
        AssertRelease(fRc);

        /* remove the unmapped one. */
        if (Req.idChunkUnmap != INT32_MAX)
        {
            PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
            AssertRelease(pUnmappedChunk);
            pUnmappedChunk->pv = NULL;
            pUnmappedChunk->Core.Key = UINT32_MAX;
#if 0 /* for later when we've got a separate mapping method for ring-0. */
            MMR3HeapFree(pUnmappedChunk);
#else
            MMHyperFree(pVM, pUnmappedChunk);
#endif
            pVM->pgm.s.ChunkR3Map.c--;
        }
    }
    else
    {
        AssertRC(rc);
#if 0 /* for later when we've got a separate mapping method for ring-0. */
        MMR3HeapFree(pChunk);
#else
        MMHyperFree(pVM, pChunk);
#endif
        pChunk = NULL;
    }

    *ppChunk = pChunk;
    return rc;
}


/**
 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
 *
 * @returns see pgmR3PhysChunkMap.
 * @param   pVM         The VM handle.
 * @param   idChunk     The chunk to map.
 */
PDMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
{
    PPGMCHUNKR3MAP pChunk;
    return pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
}


/**
 * Invalidates the TLB for the ring-3 mapping cache.
 *
 * @param   pVM         The VM handle.
 */
PGMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
{
    pgmLock(pVM);
    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
    {
        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
    }
    pgmUnlock(pVM);
}


/**
 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success. FF cleared.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
 *
 * @param   pVM         The VM handle.
 */
PDMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
{
    pgmLock(pVM);
    int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
    if (rc == VERR_GMM_SEED_ME)
    {
        void *pvChunk;
        rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
        if (VBOX_SUCCESS(rc))
            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
        if (VBOX_FAILURE(rc))
        {
            LogRel(("PGM: GMM Seeding failed, rc=%Vrc\n", rc));
            rc = VINF_EM_NO_MEMORY;
        }
    }
    pgmUnlock(pVM);
    Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY);
    return rc;
}

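/* Illustrative usage sketch, not part of the original file and never compiled:
 * a rough outline of how an EMT loop might react to VM_FF_PGM_NEED_HANDY_PAGES
 * by calling PGMR3PhysAllocateHandyPages() above. The force-flag check is the
 * documented trigger; the helper name and surrounding structure are assumptions
 * made purely for illustration.
 */
#if 0
static int pgmR3ExampleServiceHandyPagesFF(PVM pVM)
{
    int rc = VINF_SUCCESS;
    if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
        rc = PGMR3PhysAllocateHandyPages(pVM); /* VINF_SUCCESS or VINF_EM_NO_MEMORY */
    return rc;
}
#endif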