/* $Id: PGMPhys.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, Physical Memory Addressing.
 */

/*
 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM_PHYS
#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/pdmdev.h>
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>

#include "PGMInline.h"

#include <VBox/sup.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/alloc.h>
#include <iprt/asm.h>
#ifdef VBOX_STRICT
# include <iprt/crc.h>
#endif
#include <iprt/thread.h>
#include <iprt/string.h>
#include <iprt/system.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** The number of pages to free in one batch. */
#define PGMPHYS_FREE_PAGE_BATCH_SIZE    128



/*********************************************************************************************************************************
*   Reading and Writing Guest Physical Memory                                                                                    *
*********************************************************************************************************************************/

/*
 * PGMR3PhysReadU8-64
 * PGMR3PhysWriteU8-64
 */
#define PGMPHYSFN_READNAME  PGMR3PhysReadU8
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
#define PGMPHYS_DATASIZE    1
#define PGMPHYS_DATATYPE    uint8_t
#include "PGMPhysRWTmpl.h"

#define PGMPHYSFN_READNAME  PGMR3PhysReadU16
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
#define PGMPHYS_DATASIZE    2
#define PGMPHYS_DATATYPE    uint16_t
#include "PGMPhysRWTmpl.h"

#define PGMPHYSFN_READNAME  PGMR3PhysReadU32
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
#define PGMPHYS_DATASIZE    4
#define PGMPHYS_DATATYPE    uint32_t
#include "PGMPhysRWTmpl.h"

#define PGMPHYSFN_READNAME  PGMR3PhysReadU64
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
#define PGMPHYS_DATASIZE    8
#define PGMPHYS_DATATYPE    uint64_t
#include "PGMPhysRWTmpl.h"

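/*
 * Illustrative sketch (an assumption, simplified): each PGMPhysRWTmpl.h
 * instantiation above is expected to expand into accessor pairs roughly of
 * this shape; the real template also deals with logging and strict statuses:
 *
 *     VMMR3DECL(uint32_t) PGMR3PhysReadU32(PVM pVM, RTGCPHYS GCPhys, PGMACCESSORIGIN enmOrigin)
 *     {
 *         uint32_t u32 = 0;
 *         PGMPhysRead(pVM, GCPhys, &u32, sizeof(u32), enmOrigin);
 *         return u32;
 *     }
 */
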
/**
 * EMT worker for PGMR3PhysReadExternal.
 */
static DECLCALLBACK(int) pgmR3PhysReadExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, void *pvBuf, size_t cbRead,
                                                  PGMACCESSORIGIN enmOrigin)
{
    VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, *pGCPhys, pvBuf, cbRead, enmOrigin);
    AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
    return VINF_SUCCESS;
}


/**
 * Read from physical memory, external users.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 *
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          Physical address to read from.
 * @param   pvBuf           Where to read into.
 * @param   cbRead          How many bytes to read.
 * @param   enmOrigin       Who is calling.
 *
 * @thread  Any but EMTs.
 */
VMMR3DECL(int) PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
{
    VM_ASSERT_OTHER_THREAD(pVM);

    AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
    LogFlow(("PGMR3PhysReadExternal: %RGp %d\n", GCPhys, cbRead));

    PGM_LOCK_VOID(pVM);

    /*
     * Copy loop on ram ranges.
     */
    PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
    for (;;)
    {
        /* Inside range or not? */
        if (pRam && GCPhys >= pRam->GCPhys)
        {
            /*
             * Must work our way thru this page by page.
             */
            RTGCPHYS off = GCPhys - pRam->GCPhys;
            while (off < pRam->cb)
            {
                unsigned iPage = off >> GUEST_PAGE_SHIFT;
                PPGMPAGE pPage = &pRam->aPages[iPage];

                /*
                 * If the page has an ALL access handler, we'll have to
                 * delegate the job to EMT.
                 */
                if (   PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
                    || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
                {
                    PGM_UNLOCK(pVM);

                    return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysReadExternalEMT, 5,
                                                   pVM, &GCPhys, pvBuf, cbRead, enmOrigin);
                }
                Assert(!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage));

                /*
                 * Simple stuff, go ahead.
                 */
                size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
                if (cb > cbRead)
                    cb = cbRead;
                PGMPAGEMAPLOCK PgMpLck;
                const void    *pvSrc;
                int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
                if (RT_SUCCESS(rc))
                {
                    memcpy(pvBuf, pvSrc, cb);
                    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                }
                else
                {
                    AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
                                           pRam->GCPhys + off, pPage, rc));
                    memset(pvBuf, 0xff, cb);
                }

                /* next page */
                if (cb >= cbRead)
                {
                    PGM_UNLOCK(pVM);
                    return VINF_SUCCESS;
                }
                cbRead -= cb;
                off    += cb;
                GCPhys += cb;
                pvBuf   = (char *)pvBuf + cb;
            } /* walk pages in ram range. */
        }
        else
        {
            LogFlow(("PGMR3PhysReadExternal: Unassigned %RGp size=%u\n", GCPhys, cbRead));

            /*
             * Unassigned address space.
             */
            size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
            if (cb >= cbRead)
            {
                memset(pvBuf, 0xff, cbRead);
                break;
            }
            memset(pvBuf, 0xff, cb);

            cbRead -= cb;
            pvBuf   = (char *)pvBuf + cb;
            GCPhys += cb;
        }

        /* Advance range if necessary. */
        while (pRam && GCPhys > pRam->GCPhysLast)
            pRam = pRam->CTX_SUFF(pNext);
    } /* Ram range walk */

    PGM_UNLOCK(pVM);

    return VINF_SUCCESS;
}
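
/*
 * Illustrative usage sketch (hypothetical device worker thread; the buffer
 * and address names are assumptions):
 *
 *     uint8_t abSector[512];
 *     int rc = PGMR3PhysReadExternal(pVM, GCPhysDmaSrc, abSector, sizeof(abSector),
 *                                    PGMACCESSORIGIN_DEVICE);
 *     AssertRC(rc); // always VINF_SUCCESS: handler-covered pages are read on an
 *                   // EMT and unassigned space reads back as 0xff filler.
 */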


/**
 * EMT worker for PGMR3PhysWriteExternal.
 */
static DECLCALLBACK(int) pgmR3PhysWriteExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, const void *pvBuf, size_t cbWrite,
                                                   PGMACCESSORIGIN enmOrigin)
{
    /** @todo VERR_EM_NO_MEMORY */
    VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, *pGCPhys, pvBuf, cbWrite, enmOrigin);
    AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
    return VINF_SUCCESS;
}


/**
 * Write to physical memory, external users.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_EM_NO_MEMORY.
 *
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          Physical address to write to.
 * @param   pvBuf           What to write.
 * @param   cbWrite         How many bytes to write.
 * @param   enmOrigin       Who is calling.
 *
 * @thread  Any but EMTs.
 */
VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
{
    VM_ASSERT_OTHER_THREAD(pVM);

    AssertMsg(!pVM->pgm.s.fNoMorePhysWrites,
              ("Calling PGMR3PhysWriteExternal after pgmR3Save()! GCPhys=%RGp cbWrite=%#x enmOrigin=%d\n",
               GCPhys, cbWrite, enmOrigin));
    AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
    LogFlow(("PGMR3PhysWriteExternal: %RGp %d\n", GCPhys, cbWrite));

    PGM_LOCK_VOID(pVM);

    /*
     * Copy loop on ram ranges, stop when we hit something difficult.
     */
    PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
    for (;;)
    {
        /* Inside range or not? */
        if (pRam && GCPhys >= pRam->GCPhys)
        {
            /*
             * Must work our way thru this page by page.
             */
            RTGCPHYS off = GCPhys - pRam->GCPhys;
            while (off < pRam->cb)
            {
                unsigned iPage = off >> GUEST_PAGE_SHIFT;
                PPGMPAGE pPage = &pRam->aPages[iPage];

                /*
                 * If the page is problematic, we have to do the work on the EMT.
                 *
                 * Allocating writable pages and access handlers are
                 * problematic, write monitored pages are simple and can be
                 * dealt with here.
                 */
                if (   PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
                    || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
                    || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
                {
                    if (    PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
                        && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
                        pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
                    else
                    {
                        PGM_UNLOCK(pVM);

                        return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysWriteExternalEMT, 5,
                                                       pVM, &GCPhys, pvBuf, cbWrite, enmOrigin);
                    }
                }
                Assert(!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage));

                /*
                 * Simple stuff, go ahead.
                 */
                size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
                if (cb > cbWrite)
                    cb = cbWrite;
                PGMPAGEMAPLOCK PgMpLck;
                void          *pvDst;
                int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
                if (RT_SUCCESS(rc))
                {
                    memcpy(pvDst, pvBuf, cb);
                    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                }
                else
                    AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
                                           pRam->GCPhys + off, pPage, rc));

                /* next page */
                if (cb >= cbWrite)
                {
                    PGM_UNLOCK(pVM);
                    return VINF_SUCCESS;
                }

                cbWrite -= cb;
                off     += cb;
                GCPhys  += cb;
                pvBuf    = (const char *)pvBuf + cb;
            } /* walk pages in ram range */
        }
        else
        {
            /*
             * Unassigned address space, skip it.
             */
            if (!pRam)
                break;
            size_t cb = pRam->GCPhys - GCPhys;
            if (cb >= cbWrite)
                break;
            cbWrite -= cb;
            pvBuf    = (const char *)pvBuf + cb;
            GCPhys  += cb;
        }

        /* Advance range if necessary. */
        while (pRam && GCPhys > pRam->GCPhysLast)
            pRam = pRam->CTX_SUFF(pNext);
    } /* Ram range walk */

    PGM_UNLOCK(pVM);
    return VINF_SUCCESS;
}
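
/*
 * Illustrative usage sketch (hypothetical; mirrors the read example above):
 *
 *     int rc = PGMR3PhysWriteExternal(pVM, GCPhysDmaDst, abSector, sizeof(abSector),
 *                                     PGMACCESSORIGIN_DEVICE);
 *     // Handler-covered and non-allocated pages are written on an EMT;
 *     // writes to wholly unassigned space are silently dropped.
 */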


/*********************************************************************************************************************************
*   Mapping Guest Physical Memory                                                                                                *
*********************************************************************************************************************************/

/**
 * VMR3ReqCall worker for PGMR3PhysGCPhys2CCPtrExternal to make pages writable.
 *
 * @returns see PGMR3PhysGCPhys2CCPtrExternal
 * @param   pVM         The cross context VM structure.
 * @param   pGCPhys     Pointer to the guest physical address.
 * @param   ppv         Where to store the mapping address.
 * @param   pLock       Where to store the lock.
 */
static DECLCALLBACK(int) pgmR3PhysGCPhys2CCPtrDelegated(PVM pVM, PRTGCPHYS pGCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
{
    /*
     * Just hand it to PGMPhysGCPhys2CCPtr and check that it's not a page with
     * an access handler after it succeeds.
     */
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    rc = PGMPhysGCPhys2CCPtr(pVM, *pGCPhys, ppv, pLock);
    if (RT_SUCCESS(rc))
    {
        PPGMPAGEMAPTLBE pTlbe;
        int rc2 = pgmPhysPageQueryTlbe(pVM, *pGCPhys, &pTlbe);
        AssertFatalRC(rc2);
        PPGMPAGE pPage = pTlbe->pPage;
        if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
        {
            PGMPhysReleasePageMappingLock(pVM, pLock);
            rc = VERR_PGM_PHYS_PAGE_RESERVED;
        }
        else if (   PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
                 || pgmPoolIsDirtyPage(pVM, *pGCPhys)
#endif
                )
        {
            /* We *must* flush any corresponding pgm pool page here, otherwise we'll
             * not be informed about writes and keep bogus gst->shw mappings around.
             */
            pgmPoolFlushPageByGCPhys(pVM, *pGCPhys);
            Assert(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage));
            /** @todo r=bird: return VERR_PGM_PHYS_PAGE_RESERVED here if it still has
             *        active handlers, see the PGMR3PhysGCPhys2CCPtrExternal docs. */
        }
    }

    PGM_UNLOCK(pVM);
    return rc;
}


/**
 * Requests the mapping of a guest page into ring-3, external threads.
 *
 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
 * release it.
 *
 * This API will assume your intention is to write to the page, and will
 * therefore replace shared and zero pages. If you do not intend to modify the
 * page, use the PGMR3PhysGCPhys2CCPtrReadOnlyExternal() API.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
 *          backing or if the page has any active access handlers. The caller
 *          must fall back on using PGMR3PhysWriteExternal.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address of the page that should be mapped.
 * @param   ppv         Where to store the address corresponding to GCPhys.
 * @param   pLock       Where to store the lock information that PGMPhysReleasePageMappingLock needs.
 *
 * @remark  Avoid calling this API from within critical sections (other than the
 *          PGM one) because of the deadlock risk when we have to delegate the
 *          task to an EMT.
 * @thread  Any.
 */
VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
{
    AssertPtr(ppv);
    AssertPtr(pLock);

    Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));

    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Query the Physical TLB entry for the page (may fail).
     */
    PPGMPAGEMAPTLBE pTlbe;
    rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
    if (RT_SUCCESS(rc))
    {
        PPGMPAGE pPage = pTlbe->pPage;
        if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
            rc = VERR_PGM_PHYS_PAGE_RESERVED;
        else
        {
            /*
             * If the page is shared, the zero page, or being write monitored
             * it must be converted to a page that's writable if possible.
             * We can only deal with write monitored pages here, the rest have
             * to be on an EMT.
             */
            if (   PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
                || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
                || pgmPoolIsDirtyPage(pVM, GCPhys)
#endif
               )
            {
                if (    PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
                    && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
                    && !pgmPoolIsDirtyPage(pVM, GCPhys) /** @todo we're very likely doing this twice. */
#endif
                   )
                    pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
                else
                {
                    PGM_UNLOCK(pVM);

                    return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
                                                   pVM, &GCPhys, ppv, pLock);
                }
            }

            /*
             * Now, just perform the locking and calculate the return address.
             */
            PPGMPAGEMAP pMap = pTlbe->pMap;
            if (pMap)
                pMap->cRefs++;

            unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
            if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
            {
                if (cLocks == 0)
                    pVM->pgm.s.cWriteLockedPages++;
                PGM_PAGE_INC_WRITE_LOCKS(pPage);
            }
            else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
            {
                PGM_PAGE_INC_WRITE_LOCKS(pPage);
                AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
                if (pMap)
                    pMap->cRefs++; /* Extra ref to prevent it from going away. */
            }

            *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
            pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
            pLock->pvMap = pMap;
        }
    }

    PGM_UNLOCK(pVM);
    return rc;
}
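
/*
 * Illustrative usage sketch (hypothetical caller; pvPayload/cbPayload are
 * assumptions and the copy must not cross the page boundary):
 *
 *     void          *pv;
 *     PGMPAGEMAPLOCK Lock;
 *     int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         memcpy(pv, pvPayload, cbPayload);
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);
 *     }
 *     else // e.g. VERR_PGM_PHYS_PAGE_RESERVED
 *         rc = PGMR3PhysWriteExternal(pVM, GCPhys, pvPayload, cbPayload, enmOrigin);
 */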


/**
 * Requests the mapping of a guest page into ring-3, external threads.
 *
 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
 * release it.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
 *          backing or if the page has an active ALL access handler. The caller
 *          must fall back on using PGMPhysRead.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address of the page that should be mapped.
 * @param   ppv         Where to store the address corresponding to GCPhys.
 * @param   pLock       Where to store the lock information that PGMPhysReleasePageMappingLock needs.
 *
 * @remark  Avoid calling this API from within critical sections (other than
 *          the PGM one) because of the deadlock risk.
 * @thread  Any.
 */
VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
{
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Query the Physical TLB entry for the page (may fail).
     */
    PPGMPAGEMAPTLBE pTlbe;
    rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
    if (RT_SUCCESS(rc))
    {
        PPGMPAGE pPage = pTlbe->pPage;
#if 1
        /* MMIO pages don't have any readable backing. */
        if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
            rc = VERR_PGM_PHYS_PAGE_RESERVED;
#else
        if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
            rc = VERR_PGM_PHYS_PAGE_RESERVED;
#endif
        else
        {
            /*
             * Now, just perform the locking and calculate the return address.
             */
            PPGMPAGEMAP pMap = pTlbe->pMap;
            if (pMap)
                pMap->cRefs++;

            unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
            if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
            {
                if (cLocks == 0)
                    pVM->pgm.s.cReadLockedPages++;
                PGM_PAGE_INC_READ_LOCKS(pPage);
            }
            else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
            {
                PGM_PAGE_INC_READ_LOCKS(pPage);
                AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
                if (pMap)
                    pMap->cRefs++; /* Extra ref to prevent it from going away. */
            }

            *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
            pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
            pLock->pvMap = pMap;
        }
    }

    PGM_UNLOCK(pVM);
    return rc;
}
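
/*
 * Read-only counterpart of the sketch above (hypothetical; a read lock is
 * taken, so the page may remain shared or the zero page):
 *
 *     void const    *pv;
 *     PGMPAGEMAPLOCK Lock;
 *     int rc = PGMR3PhysGCPhys2CCPtrReadOnlyExternal(pVM, GCPhys, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         memcpy(abBuf, pv, sizeof(abBuf));
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);
 *     }
 */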


/**
 * Requests the mapping of multiple guest pages into ring-3, external threads.
 *
 * When you're done with the pages, call PGMPhysBulkReleasePageMappingLocks()
 * ASAP to release them.
 *
 * This API will assume your intention is to write to the pages, and will
 * therefore replace shared and zero pages. If you do not intend to modify the
 * pages, use the PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal() API.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
 *          backing or has any active access handlers. The caller must fall
 *          back on using PGMR3PhysWriteExternal.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
 *          an invalid physical address.
 *
 * @param   pVM             The cross context VM structure.
 * @param   cPages          Number of pages to lock.
 * @param   paGCPhysPages   The guest physical address of the pages that
 *                          should be mapped (@a cPages entries).
 * @param   papvPages       Where to store the ring-3 mapping addresses
 *                          corresponding to @a paGCPhysPages.
 * @param   paLocks         Where to store the locking information that
 *                          pfnPhysBulkReleasePageMappingLock needs (@a cPages
 *                          in length).
 *
 * @remark  Avoid calling this API from within critical sections (other than the
 *          PGM one) because of the deadlock risk when we have to delegate the
 *          task to an EMT.
 * @thread  Any.
 */
VMMR3DECL(int) PGMR3PhysBulkGCPhys2CCPtrExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
                                                 void **papvPages, PPGMPAGEMAPLOCK paLocks)
{
    Assert(cPages > 0);
    AssertPtr(papvPages);
    AssertPtr(paLocks);

    Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));

    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Lock the pages one by one.
     * The loop body is similar to PGMR3PhysGCPhys2CCPtrExternal.
     */
    int32_t  cNextYield = 128;
    uint32_t iPage;
    for (iPage = 0; iPage < cPages; iPage++)
    {
        if (--cNextYield > 0)
        { /* likely */ }
        else
        {
            PGM_UNLOCK(pVM);
            ASMNopPause();
            PGM_LOCK_VOID(pVM);
            cNextYield = 128;
        }

        /*
         * Query the Physical TLB entry for the page (may fail).
         */
        PPGMPAGEMAPTLBE pTlbe;
        rc = pgmPhysPageQueryTlbe(pVM, paGCPhysPages[iPage], &pTlbe);
        if (RT_SUCCESS(rc))
        { }
        else
            break;
        PPGMPAGE pPage = pTlbe->pPage;

        /*
         * No MMIO or active access handlers.
         */
        if (   !PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)
            && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
        { }
        else
        {
            rc = VERR_PGM_PHYS_PAGE_RESERVED;
            break;
        }

        /*
         * The page must be in the allocated state and not be a dirty pool page.
         * We can handle converting a write monitored page to an allocated one, but
         * anything more complicated must be delegated to an EMT.
         */
        bool fDelegateToEmt = false;
        if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
            fDelegateToEmt = pgmPoolIsDirtyPage(pVM, paGCPhysPages[iPage]);
#else
            fDelegateToEmt = false;
#endif
        else if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
        {
#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
            if (!pgmPoolIsDirtyPage(pVM, paGCPhysPages[iPage]))
                pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, paGCPhysPages[iPage]);
            else
                fDelegateToEmt = true;
#endif
        }
        else
            fDelegateToEmt = true;
        if (!fDelegateToEmt)
        { }
        else
        {
            /* We could do this delegation in bulk, but considered too much work vs gain. */
            PGM_UNLOCK(pVM);
            rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
                                         pVM, &paGCPhysPages[iPage], &papvPages[iPage], &paLocks[iPage]);
            PGM_LOCK_VOID(pVM);
            if (RT_FAILURE(rc))
                break;
            cNextYield = 128;
        }

        /*
         * Now, just perform the locking and address calculation.
         */
        PPGMPAGEMAP pMap = pTlbe->pMap;
        if (pMap)
            pMap->cRefs++;

        unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
        if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
        {
            if (cLocks == 0)
                pVM->pgm.s.cWriteLockedPages++;
            PGM_PAGE_INC_WRITE_LOCKS(pPage);
        }
        else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
        {
            PGM_PAGE_INC_WRITE_LOCKS(pPage);
            AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", paGCPhysPages[iPage], pPage));
            if (pMap)
                pMap->cRefs++; /* Extra ref to prevent it from going away. */
        }

        papvPages[iPage] = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(paGCPhysPages[iPage] & GUEST_PAGE_OFFSET_MASK));
        paLocks[iPage].uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
        paLocks[iPage].pvMap        = pMap;
    }

    PGM_UNLOCK(pVM);

    /*
     * On failure we must unlock any pages we managed to get already.
     */
    if (RT_FAILURE(rc) && iPage > 0)
        PGMPhysBulkReleasePageMappingLocks(pVM, iPage, paLocks);

    return rc;
}
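
/*
 * Illustrative bulk usage sketch (hypothetical; a fixed batch of 16 pages):
 *
 *     void          *apvPages[16];
 *     PGMPAGEMAPLOCK aLocks[16];
 *     int rc = PGMR3PhysBulkGCPhys2CCPtrExternal(pVM, 16, aGCPhysPages, apvPages, aLocks);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... scatter/gather into apvPages[0..15] ...
 *         PGMPhysBulkReleasePageMappingLocks(pVM, 16, aLocks);
 *     }
 *     // On failure the API has already released any locks it acquired.
 */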


/**
 * Requests the mapping of multiple guest pages into ring-3, for reading only,
 * external threads.
 *
 * When you're done with the pages, call PGMPhysBulkReleasePageMappingLocks()
 * ASAP to release them.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
 *          backing or has an active ALL access handler. The caller must fall
 *          back on using PGMR3PhysReadExternal.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
 *          an invalid physical address.
 *
 * @param   pVM             The cross context VM structure.
 * @param   cPages          Number of pages to lock.
 * @param   paGCPhysPages   The guest physical address of the pages that
 *                          should be mapped (@a cPages entries).
 * @param   papvPages       Where to store the ring-3 mapping addresses
 *                          corresponding to @a paGCPhysPages.
 * @param   paLocks         Where to store the lock information that
 *                          pfnPhysReleasePageMappingLock needs (@a cPages
 *                          in length).
 *
 * @remark  Avoid calling this API from within critical sections (other than
 *          the PGM one) because of the deadlock risk.
 * @thread  Any.
 */
VMMR3DECL(int) PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
                                                         void const **papvPages, PPGMPAGEMAPLOCK paLocks)
{
    Assert(cPages > 0);
    AssertPtr(papvPages);
    AssertPtr(paLocks);

    Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));

    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Lock the pages one by one.
     * The loop body is similar to PGMR3PhysGCPhys2CCPtrReadOnlyExternal.
     */
    int32_t  cNextYield = 256;
    uint32_t iPage;
    for (iPage = 0; iPage < cPages; iPage++)
    {
        if (--cNextYield > 0)
        { /* likely */ }
        else
        {
            PGM_UNLOCK(pVM);
            ASMNopPause();
            PGM_LOCK_VOID(pVM);
            cNextYield = 256;
        }

        /*
         * Query the Physical TLB entry for the page (may fail).
         */
        PPGMPAGEMAPTLBE pTlbe;
        rc = pgmPhysPageQueryTlbe(pVM, paGCPhysPages[iPage], &pTlbe);
        if (RT_SUCCESS(rc))
        { }
        else
            break;
        PPGMPAGE pPage = pTlbe->pPage;

        /*
         * No MMIO or active all access handlers, everything else can be accessed.
         */
        if (   !PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)
            && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
        { }
        else
        {
            rc = VERR_PGM_PHYS_PAGE_RESERVED;
            break;
        }

        /*
         * Now, just perform the locking and address calculation.
         */
        PPGMPAGEMAP pMap = pTlbe->pMap;
        if (pMap)
            pMap->cRefs++;

        unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
        if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
        {
            if (cLocks == 0)
                pVM->pgm.s.cReadLockedPages++;
            PGM_PAGE_INC_READ_LOCKS(pPage);
        }
        else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
        {
            PGM_PAGE_INC_READ_LOCKS(pPage);
            AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", paGCPhysPages[iPage], pPage));
            if (pMap)
                pMap->cRefs++; /* Extra ref to prevent it from going away. */
        }

        papvPages[iPage] = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(paGCPhysPages[iPage] & GUEST_PAGE_OFFSET_MASK));
        paLocks[iPage].uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
        paLocks[iPage].pvMap        = pMap;
    }

    PGM_UNLOCK(pVM);

    /*
     * On failure we must unlock any pages we managed to get already.
     */
    if (RT_FAILURE(rc) && iPage > 0)
        PGMPhysBulkReleasePageMappingLocks(pVM, iPage, paLocks);

    return rc;
}
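
/*
 * The read-only bulk variant is driven the same way as the write sketch
 * above, except that pages stay shared/zero and read locks are taken;
 * releasing is identical (illustrative):
 *
 *     PGMPhysBulkReleasePageMappingLocks(pVM, cPages, aLocks);
 */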


/**
 * Converts a GC physical address to a HC ring-3 pointer, with some
 * additional checks.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
 *          access handler of some kind.
 * @retval  VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
 *          accesses or is odd in any way.
 * @retval  VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The GC physical address to convert. Since this is only
 *                      used for filling the REM TLB, the A20 mask must be
 *                      applied before calling this API.
 * @param   fWritable   Whether write access is required.
 * @param   ppv         Where to store the pointer corresponding to GCPhys on
 *                      success.
 */
VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
{
    PGM_LOCK_VOID(pVM);
    PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);

    PPGMRAMRANGE pRam;
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
    if (RT_SUCCESS(rc))
    {
        if (PGM_PAGE_IS_BALLOONED(pPage))
            rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
        else if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
            rc = VINF_SUCCESS;
        else
        {
            if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
                rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
            else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
            {
                /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
                 *        in -norawr0 mode. */
                if (fWritable)
                    rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
            }
            else
            {
                /* Temporarily disabled physical handler(s), since the recompiler
                   doesn't get notified when it's reset we'll have to pretend it's
                   operating normally. */
                if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
                    rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
                else
                    rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
            }
        }
        if (RT_SUCCESS(rc))
        {
            int rc2;

            /* Make sure what we return is writable. */
            if (fWritable)
                switch (PGM_PAGE_GET_STATE(pPage))
                {
                    case PGM_PAGE_STATE_ALLOCATED:
                        break;
                    case PGM_PAGE_STATE_BALLOONED:
                        AssertFailed();
                        break;
                    case PGM_PAGE_STATE_ZERO:
                    case PGM_PAGE_STATE_SHARED:
                        if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
                            break;
                        RT_FALL_THRU();
                    case PGM_PAGE_STATE_WRITE_MONITORED:
                        rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
                        AssertLogRelRCReturn(rc2, rc2);
                        break;
                }

            /* Get a ring-3 mapping of the address. */
            PPGMPAGER3MAPTLBE pTlbe;
            rc2 = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
            AssertLogRelRCReturn(rc2, rc2);
            *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
            /** @todo mapping/locking hell; this isn't horribly efficient since
             *        pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */

            Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
        }
        else
            Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));

        /* else: handler catching all access, no pointer returned. */
    }
    else
        rc = VERR_PGM_PHYS_TLB_UNASSIGNED;

    PGM_UNLOCK(pVM);
    return rc;
}
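
/*
 * Interpreting the result (illustrative sketch; the caller is assumed to have
 * applied the A20 mask to GCPhys already):
 *
 *     void *pv;
 *     int rc = PGMR3PhysTlbGCPhys2Ptr(pVM, GCPhys, fWrite, &pv);
 *     if (rc == VINF_SUCCESS)                       // direct read/write access OK
 *         ...
 *     else if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE) // read directly, trap writes
 *         ...
 *     else                                          // VERR_*: go through PGMPhysRead/Write
 *         ...
 */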



/*********************************************************************************************************************************
*   RAM Range Management                                                                                                         *
*********************************************************************************************************************************/

#define MAKE_LEAF(a_pNode) \
    do { \
        (a_pNode)->pLeftR3  = NIL_RTR3PTR; \
        (a_pNode)->pRightR3 = NIL_RTR3PTR; \
        (a_pNode)->pLeftR0  = NIL_RTR0PTR; \
        (a_pNode)->pRightR0 = NIL_RTR0PTR; \
    } while (0)

#define INSERT_LEFT(a_pParent, a_pNode) \
    do { \
        (a_pParent)->pLeftR3 = (a_pNode); \
        (a_pParent)->pLeftR0 = (a_pNode)->pSelfR0; \
    } while (0)
#define INSERT_RIGHT(a_pParent, a_pNode) \
    do { \
        (a_pParent)->pRightR3 = (a_pNode); \
        (a_pParent)->pRightR0 = (a_pNode)->pSelfR0; \
    } while (0)


/**
 * Recursive tree builder.
 *
 * @param   ppRam       Pointer to the iterator variable.
 * @param   iDepth      The current depth. Inserts a leaf node if 0.
 */
static PPGMRAMRANGE pgmR3PhysRebuildRamRangeSearchTreesRecursively(PPGMRAMRANGE *ppRam, int iDepth)
{
    PPGMRAMRANGE pRam;
    if (iDepth <= 0)
    {
        /*
         * Leaf node.
         */
        pRam = *ppRam;
        if (pRam)
        {
            *ppRam = pRam->pNextR3;
            MAKE_LEAF(pRam);
        }
    }
    else
    {
        /*
         * Intermediate node.
         */
        PPGMRAMRANGE pLeft = pgmR3PhysRebuildRamRangeSearchTreesRecursively(ppRam, iDepth - 1);

        pRam = *ppRam;
        if (!pRam)
            return pLeft;
        *ppRam = pRam->pNextR3;
        MAKE_LEAF(pRam);
        INSERT_LEFT(pRam, pLeft);

        PPGMRAMRANGE pRight = pgmR3PhysRebuildRamRangeSearchTreesRecursively(ppRam, iDepth - 1);
        if (pRight)
            INSERT_RIGHT(pRam, pRight);
    }
    return pRam;
}


/**
 * Rebuilds the RAM range search trees.
 *
 * @param   pVM         The cross context VM structure.
 */
static void pgmR3PhysRebuildRamRangeSearchTrees(PVM pVM)
{
    /*
     * Create the reasonably balanced tree in a sequential fashion.
     * For simplicity (laziness) we use standard recursion here.
     */
    int          iDepth = 0;
    PPGMRAMRANGE pRam   = pVM->pgm.s.pRamRangesXR3;
    PPGMRAMRANGE pRoot  = pgmR3PhysRebuildRamRangeSearchTreesRecursively(&pRam, 0);
    while (pRam)
    {
        PPGMRAMRANGE pLeft = pRoot;

        pRoot = pRam;
        pRam  = pRam->pNextR3;
        MAKE_LEAF(pRoot);
        INSERT_LEFT(pRoot, pLeft);

        PPGMRAMRANGE pRight = pgmR3PhysRebuildRamRangeSearchTreesRecursively(&pRam, iDepth);
        if (pRight)
            INSERT_RIGHT(pRoot, pRight);
        /** @todo else: rotate the tree. */

        iDepth++;
    }

    pVM->pgm.s.pRamRangeTreeR3 = pRoot;
    pVM->pgm.s.pRamRangeTreeR0 = pRoot ? pRoot->pSelfR0 : NIL_RTR0PTR;

#ifdef VBOX_STRICT
    /*
     * Verify that the above code works.
     */
    unsigned cRanges = 0;
    for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
        cRanges++;
    Assert(cRanges > 0);

    unsigned cMaxDepth = ASMBitLastSetU32(cRanges);
    if ((1U << cMaxDepth) < cRanges)
        cMaxDepth++;

    for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
    {
        unsigned     cDepth = 0;
        PPGMRAMRANGE pRam2  = pVM->pgm.s.pRamRangeTreeR3;
        for (;;)
        {
            if (pRam == pRam2)
                break;
            Assert(pRam2);
            if (pRam->GCPhys < pRam2->GCPhys)
                pRam2 = pRam2->pLeftR3;
            else
                pRam2 = pRam2->pRightR3;
            cDepth++; /* Count the descent so the assertion below isn't vacuous. */
        }
        AssertMsg(cDepth <= cMaxDepth, ("cDepth=%d cMaxDepth=%d\n", cDepth, cMaxDepth));
    }
#endif /* VBOX_STRICT */
}
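
/*
 * Worked example of the builder above (assumed): a list of seven ranges
 * R0..R6 in ascending GCPhys order runs the right-subtree recursion at
 * iDepth=0 and iDepth=1, producing the tree
 *
 *                  R3
 *                /    \
 *              R1      R5
 *             /  \    /  \
 *           R0    R2 R4    R6
 *
 * so an in-order walk recovers the ascending order and a lookup takes at
 * most about log2(cRanges) steps.
 */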

#undef MAKE_LEAF
#undef INSERT_LEFT
#undef INSERT_RIGHT

/**
 * Relinks the RAM ranges using the pSelfR0 pointers.
 *
 * Called when anything was relocated.
 *
 * @param   pVM         The cross context VM structure.
 */
void pgmR3PhysRelinkRamRanges(PVM pVM)
{
    PPGMRAMRANGE pCur;

#ifdef VBOX_STRICT
    for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
    {
        Assert((pCur->GCPhys     & GUEST_PAGE_OFFSET_MASK) == 0);
        Assert((pCur->GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
        Assert((pCur->cb         & GUEST_PAGE_OFFSET_MASK) == 0);
        Assert(pCur->cb == pCur->GCPhysLast - pCur->GCPhys + 1);
        for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRangesXR3; pCur2; pCur2 = pCur2->pNextR3)
            Assert(   pCur2 == pCur
                   || strcmp(pCur2->pszDesc, pCur->pszDesc)); /** @todo fix MMIO ranges!! */
    }
#endif

    pCur = pVM->pgm.s.pRamRangesXR3;
    if (pCur)
    {
        pVM->pgm.s.pRamRangesXR0 = pCur->pSelfR0;

        for (; pCur->pNextR3; pCur = pCur->pNextR3)
            pCur->pNextR0 = pCur->pNextR3->pSelfR0;

        Assert(pCur->pNextR0 == NIL_RTR0PTR);
    }
    else
    {
        Assert(pVM->pgm.s.pRamRangesXR0 == NIL_RTR0PTR);
    }
    ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);

    pgmR3PhysRebuildRamRangeSearchTrees(pVM);
}


/**
 * Links a new RAM range into the list.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pNew        Pointer to the new list entry.
 * @param   pPrev       Pointer to the previous list entry. If NULL, insert as head.
 */
static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
{
    AssertMsg(pNew->pszDesc, ("%RGp-%RGp\n", pNew->GCPhys, pNew->GCPhysLast));

    PGM_LOCK_VOID(pVM);

    PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesXR3;
    pNew->pNextR3 = pRam;
    pNew->pNextR0 = pRam ? pRam->pSelfR0 : NIL_RTR0PTR;

    if (pPrev)
    {
        pPrev->pNextR3 = pNew;
        pPrev->pNextR0 = pNew->pSelfR0;
    }
    else
    {
        pVM->pgm.s.pRamRangesXR3 = pNew;
        pVM->pgm.s.pRamRangesXR0 = pNew->pSelfR0;
    }
    ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);

    pgmR3PhysRebuildRamRangeSearchTrees(pVM);
    PGM_UNLOCK(pVM);
}


/**
 * Unlink an existing RAM range from the list.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pRam        Pointer to the list entry to unlink.
 * @param   pPrev       Pointer to the previous list entry. If NULL, @a pRam is
 *                      the list head.
 */
static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
{
    Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesXR3 == pRam);

    PGM_LOCK_VOID(pVM);

    PPGMRAMRANGE pNext = pRam->pNextR3;
    if (pPrev)
    {
        pPrev->pNextR3 = pNext;
        pPrev->pNextR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
    }
    else
    {
        Assert(pVM->pgm.s.pRamRangesXR3 == pRam);
        pVM->pgm.s.pRamRangesXR3 = pNext;
        pVM->pgm.s.pRamRangesXR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
    }
    ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);

    pgmR3PhysRebuildRamRangeSearchTrees(pVM);
    PGM_UNLOCK(pVM);
}


/**
 * Unlink an existing RAM range from the list.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pRam        Pointer to the list entry to unlink.
 */
static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
{
    PGM_LOCK_VOID(pVM);

    /* find prev. */
    PPGMRAMRANGE pPrev = NULL;
    PPGMRAMRANGE pCur  = pVM->pgm.s.pRamRangesXR3;
    while (pCur != pRam)
    {
        pPrev = pCur;
        pCur  = pCur->pNextR3;
    }
    AssertFatal(pCur);

    pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
    PGM_UNLOCK(pVM);
}


/**
 * Gets the number of ram ranges.
 *
 * @returns Number of ram ranges. Returns UINT32_MAX if @a pVM is invalid.
 * @param   pVM         The cross context VM structure.
 */
VMMR3DECL(uint32_t) PGMR3PhysGetRamRangeCount(PVM pVM)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, UINT32_MAX);

    PGM_LOCK_VOID(pVM);
    uint32_t cRamRanges = 0;
    for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext))
        cRamRanges++;
    PGM_UNLOCK(pVM);
    return cRamRanges;
}


/**
 * Get information about a range.
 *
 * @returns VINF_SUCCESS or VERR_OUT_OF_RANGE.
 * @param   pVM             The cross context VM structure.
 * @param   iRange          The ordinal of the range.
 * @param   pGCPhysStart    Where to return the start of the range. Optional.
 * @param   pGCPhysLast     Where to return the address of the last byte in the
 *                          range. Optional.
 * @param   ppszDesc        Where to return the range description. Optional.
 * @param   pfIsMmio        Where to indicate that this is a pure MMIO range.
 *                          Optional.
 */
VMMR3DECL(int) PGMR3PhysGetRange(PVM pVM, uint32_t iRange, PRTGCPHYS pGCPhysStart, PRTGCPHYS pGCPhysLast,
                                 const char **ppszDesc, bool *pfIsMmio)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    PGM_LOCK_VOID(pVM);
    uint32_t iCurRange = 0;
    for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext), iCurRange++)
        if (iCurRange == iRange)
        {
            if (pGCPhysStart)
                *pGCPhysStart = pCur->GCPhys;
            if (pGCPhysLast)
                *pGCPhysLast = pCur->GCPhysLast;
            if (ppszDesc)
                *ppszDesc = pCur->pszDesc;
            if (pfIsMmio)
                *pfIsMmio = !!(pCur->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO);

            PGM_UNLOCK(pVM);
            return VINF_SUCCESS;
        }
    PGM_UNLOCK(pVM);
    return VERR_OUT_OF_RANGE;
}
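
/*
 * Illustrative enumeration sketch (hypothetical debug/info caller):
 *
 *     uint32_t const cRanges = PGMR3PhysGetRamRangeCount(pVM);
 *     for (uint32_t i = 0; i < cRanges; i++)
 *     {
 *         RTGCPHYS    GCPhysStart, GCPhysLast;
 *         const char *pszDesc;
 *         bool        fIsMmio;
 *         if (RT_SUCCESS(PGMR3PhysGetRange(pVM, i, &GCPhysStart, &GCPhysLast, &pszDesc, &fIsMmio)))
 *             LogRel(("%RGp-%RGp %s%s\n", GCPhysStart, GCPhysLast, pszDesc, fIsMmio ? " (MMIO)" : ""));
 *     }
 */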


/*********************************************************************************************************************************
*   RAM                                                                                                                          *
*********************************************************************************************************************************/

/**
 * Frees the specified RAM page and replaces it with the ZERO page.
 *
 * This is used by ballooning, remapping MMIO2, RAM reset and state loading.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pReq            Pointer to the request. This is NULL when doing a
 *                          bulk free in NEM memory mode.
 * @param   pcPendingPages  Where the number of pages waiting to be freed are
 *                          kept. This will normally be incremented. This is
 *                          NULL when doing a bulk free in NEM memory mode.
 * @param   pPage           Pointer to the page structure.
 * @param   GCPhys          The guest physical address of the page, if applicable.
 * @param   enmNewType      New page type for NEM notification, since several
 *                          callers will change the type upon successful return.
 *
 * @remarks The caller must own the PGM lock.
 */
int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys,
                    PGMPAGETYPE enmNewType)
{
    /*
     * Assert sanity.
     */
    PGM_LOCK_ASSERT_OWNER(pVM);
    if (RT_UNLIKELY(    PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
                    &&  PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW))
    {
        AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
        return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
    }

    /** @todo What about ballooning of large pages??! */
    Assert(   PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
           && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED);

    if (   PGM_PAGE_IS_ZERO(pPage)
        || PGM_PAGE_IS_BALLOONED(pPage))
        return VINF_SUCCESS;

    const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
    Log3(("pgmPhysFreePage: idPage=%#x GCPhys=%RGp pPage=%R[pgmpage]\n", idPage, GCPhys, pPage));
    if (RT_UNLIKELY(!PGM_IS_IN_NEM_MODE(pVM)
                    ?    idPage == NIL_GMM_PAGEID
                      || idPage > GMM_PAGEID_LAST
                      || PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID
                    : idPage != NIL_GMM_PAGEID))
    {
        AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
        return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, idPage);
    }
#ifdef VBOX_WITH_NATIVE_NEM
    const RTHCPHYS HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
#endif

    /* update page count stats. */
    if (PGM_PAGE_IS_SHARED(pPage))
        pVM->pgm.s.cSharedPages--;
    else
        pVM->pgm.s.cPrivatePages--;
    pVM->pgm.s.cZeroPages++;

    /* Deal with write monitored pages. */
    if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
    {
        PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
        pVM->pgm.s.cWrittenToPages++;
    }

    /*
     * pPage = ZERO page.
     */
    PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
    PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
    PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_DONTCARE);
    PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
    PGM_PAGE_SET_TRACKING(pVM, pPage, 0);

    /* Flush physical page map TLB entry. */
    pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
    IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID); /// @todo move to the perform step.

#ifdef VBOX_WITH_PGM_NEM_MODE
    /*
     * Skip the rest if we're doing a bulk free in NEM memory mode.
     */
    if (!pReq)
        return VINF_SUCCESS;
    AssertLogRelReturn(!pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
#endif

#ifdef VBOX_WITH_NATIVE_NEM
    /* Notify NEM. */
    /** @todo Remove this one? */
    if (VM_IS_NEM_ENABLED(pVM))
    {
        uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
        NEMHCNotifyPhysPageChanged(pVM, GCPhys, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg, pVM->pgm.s.abZeroPg,
                                   pgmPhysPageCalcNemProtection(pPage, enmNewType), enmNewType, &u2State);
        PGM_PAGE_SET_NEM_STATE(pPage, u2State);
    }
#else
    RT_NOREF(enmNewType);
#endif

    /*
     * Make sure it's not in the handy page array.
     */
    for (uint32_t i = pVM->pgm.s.cHandyPages; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
    {
        if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
        {
            pVM->pgm.s.aHandyPages[i].HCPhysGCPhys = NIL_GMMPAGEDESC_PHYS;
            pVM->pgm.s.aHandyPages[i].fZeroed      = false;
            pVM->pgm.s.aHandyPages[i].idPage       = NIL_GMM_PAGEID;
            break;
        }
        if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
        {
            pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
            break;
        }
    }

    /*
     * Push it onto the page array.
     */
    uint32_t iPage = *pcPendingPages;
    Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
    *pcPendingPages += 1;

    pReq->aPages[iPage].idPage = idPage;

    if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
        return VINF_SUCCESS;

    /*
     * Flush the pages.
     */
    int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
    if (RT_SUCCESS(rc))
    {
        GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
        *pcPendingPages = 0;
    }
    return rc;
}
1487 |
|
---|
1488 |
|
---|
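/*
 * Usage note (a minimal sketch, not verbatim from any caller): pgmPhysFreePage
 * is driven through a GMM free-pages request, exactly as pgmR3PhysFreePageRange
 * below does it.  All identifiers are taken from this file; the per-page loop
 * is elided.
 *
 *     uint32_t         cPendingPages = 0;
 *     PGMMFREEPAGESREQ pReq;
 *     int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
 *     AssertLogRelRCReturn(rc, rc);
 *     // ... rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, enmNewType); ...
 *     if (cPendingPages)
 *         rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
 *     GMMR3FreePagesCleanup(pReq);
 */
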
/**
 * Frees a range of pages, replacing them with ZERO pages of the specified type.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pRam        The RAM range in which the pages reside.
 * @param   GCPhys      The address of the first page.
 * @param   GCPhysLast  The address of the last page.
 * @param   pvMmio2     Pointer to the ring-3 mapping of any MMIO2 memory that
 *                      will replace the pages we're freeing up.
 */
static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, void *pvMmio2)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

#ifdef VBOX_WITH_PGM_NEM_MODE
    /*
     * In simplified memory mode we don't actually free the memory,
     * we just unmap it and let NEM do any unlocking of it.
     */
    if (pVM->pgm.s.fNemMode)
    {
        Assert(VM_IS_NEM_ENABLED(pVM) || VM_IS_EXEC_ENGINE_IEM(pVM));
        uint8_t u2State = 0; /* (We don't support UINT8_MAX here.) */
        if (VM_IS_NEM_ENABLED(pVM))
        {
            uint32_t const fNemNotify = (pvMmio2 ? NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 : 0) | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE;
            int rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemNotify,
                                                   pRam->pvR3 ? (uint8_t *)pRam->pvR3 + GCPhys - pRam->GCPhys : NULL,
                                                   pvMmio2, &u2State, NULL /*puNemRange*/);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Iterate the pages. */
        PPGMPAGE pPageDst   = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
        uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> GUEST_PAGE_SHIFT) + 1;
        while (cPagesLeft-- > 0)
        {
            int rc = pgmPhysFreePage(pVM, NULL, NULL, pPageDst, GCPhys, PGMPAGETYPE_MMIO);
            AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */

            PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO);
            PGM_PAGE_SET_NEM_STATE(pPageDst, u2State);

            GCPhys += GUEST_PAGE_SIZE;
            pPageDst++;
        }
        return VINF_SUCCESS;
    }
#else  /* !VBOX_WITH_PGM_NEM_MODE */
    RT_NOREF(pvMmio2);
#endif /* !VBOX_WITH_PGM_NEM_MODE */

    /*
     * Regular mode.
     */
    /* Prepare. */
    uint32_t         cPendingPages = 0;
    PGMMFREEPAGESREQ pReq;
    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    AssertLogRelRCReturn(rc, rc);

#ifdef VBOX_WITH_NATIVE_NEM
    /* Tell NEM up-front. */
    uint8_t u2State = UINT8_MAX;
    if (VM_IS_NEM_ENABLED(pVM))
    {
        uint32_t const fNemNotify = (pvMmio2 ? NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 : 0) | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE;
        rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemNotify, NULL, pvMmio2,
                                           &u2State, NULL /*puNemRange*/);
        AssertLogRelRCReturnStmt(rc, GMMR3FreePagesCleanup(pReq), rc);
    }
#endif

    /* Iterate the pages. */
    PPGMPAGE pPageDst   = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
    uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> GUEST_PAGE_SHIFT) + 1;
    while (cPagesLeft-- > 0)
    {
        rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys, PGMPAGETYPE_MMIO);
        AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */

        PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO);
#ifdef VBOX_WITH_NATIVE_NEM
        if (u2State != UINT8_MAX)
            PGM_PAGE_SET_NEM_STATE(pPageDst, u2State);
#endif

        GCPhys += GUEST_PAGE_SIZE;
        pPageDst++;
    }

    /* Finish pending and cleanup. */
    if (cPendingPages)
    {
        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
        AssertLogRelRCReturn(rc, rc);
    }
    GMMR3FreePagesCleanup(pReq);

    return rc;
}


/**
 * PGMR3PhysRegisterRam worker that initializes and links a RAM range.
 *
 * In NEM mode, this will allocate the pages backing the RAM range and this may
 * fail.  NEM registration may also fail.  (In regular HM mode it won't fail.)
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pNew        The new RAM range.
 * @param   GCPhys      The address of the RAM range.
 * @param   GCPhysLast  The last address of the RAM range.
 * @param   R0PtrNew    The ring-0 address of the new RAM range.
 * @param   fFlags      PGM_RAM_RANGE_FLAGS_FLOATING or zero.
 * @param   pszDesc     The description.
 * @param   pPrev       The previous RAM range (for linking).
 */
static int pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
                                        RTR0PTR R0PtrNew, uint32_t fFlags, const char *pszDesc, PPGMRAMRANGE pPrev)
{
    /*
     * Initialize the range.
     */
    pNew->pSelfR0    = R0PtrNew;
    pNew->GCPhys     = GCPhys;
    pNew->GCPhysLast = GCPhysLast;
    pNew->cb         = GCPhysLast - GCPhys + 1;
    pNew->pszDesc    = pszDesc;
    pNew->fFlags     = fFlags;
    pNew->uNemRange  = UINT32_MAX;
    pNew->pvR3       = NULL;
    pNew->paLSPages  = NULL;

    uint32_t const cPages = pNew->cb >> GUEST_PAGE_SHIFT;
#ifdef VBOX_WITH_PGM_NEM_MODE
    if (!pVM->pgm.s.fNemMode)
#endif
    {
        RTGCPHYS iPage = cPages;
        while (iPage-- > 0)
            PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);

        /* Update the page count stats. */
        pVM->pgm.s.cZeroPages += cPages;
        pVM->pgm.s.cAllPages  += cPages;
    }
#ifdef VBOX_WITH_PGM_NEM_MODE
    else
    {
        int rc = SUPR3PageAlloc(RT_ALIGN_Z(pNew->cb, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT,
                                pVM->pgm.s.fUseLargePages ? SUP_PAGE_ALLOC_F_LARGE_PAGES : 0, &pNew->pvR3);
        if (RT_FAILURE(rc))
            return rc;

        RTGCPHYS iPage = cPages;
        while (iPage-- > 0)
            PGM_PAGE_INIT(&pNew->aPages[iPage], UINT64_C(0x0000fffffffff000), NIL_GMM_PAGEID,
                          PGMPAGETYPE_RAM, PGM_PAGE_STATE_ALLOCATED);

        /* Update the page count stats. */
        pVM->pgm.s.cPrivatePages += cPages;
        pVM->pgm.s.cAllPages     += cPages;
    }
#endif

    /*
     * Link it.
     */
    pgmR3PhysLinkRamRange(pVM, pNew, pPrev);

#ifdef VBOX_WITH_NATIVE_NEM
    /*
     * Notify NEM now that it has been linked.
     */
    if (VM_IS_NEM_ENABLED(pVM))
    {
        uint8_t u2State = UINT8_MAX;
        int rc = NEMR3NotifyPhysRamRegister(pVM, GCPhys, pNew->cb, pNew->pvR3, &u2State, &pNew->uNemRange);
        if (RT_SUCCESS(rc))
        {
            if (u2State != UINT8_MAX)
                pgmPhysSetNemStateForPages(&pNew->aPages[0], cPages, u2State);
        }
        else
            pgmR3PhysUnlinkRamRange2(pVM, pNew, pPrev);
        return rc;
    }
#endif
    return VINF_SUCCESS;
}


/**
 * PGMR3PhysRegisterRam worker that registers a high chunk.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The address of the RAM.
 * @param   cRamPages   The number of RAM pages to register.
 * @param   iChunk      The chunk number.
 * @param   pszDesc     The RAM range description.
 * @param   ppPrev      Previous RAM range pointer. In/Out.
 */
static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages, uint32_t iChunk,
                                         const char *pszDesc, PPGMRAMRANGE *ppPrev)
{
    const char *pszDescChunk = iChunk == 0
                             ? pszDesc
                             : MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s (#%u)", pszDesc, iChunk + 1);
    AssertReturn(pszDescChunk, VERR_NO_MEMORY);

    /*
     * Allocate memory for the new chunk.
     */
    size_t const cChunkPages  = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cRamPages]), HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT;
    PSUPPAGE     paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
    AssertReturn(paChunkPages, VERR_NO_TMP_MEMORY);
    RTR0PTR      R0PtrChunk   = NIL_RTR0PTR;
    void        *pvChunk      = NULL;
    int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk, &R0PtrChunk, paChunkPages);
    if (RT_SUCCESS(rc))
    {
        Assert(R0PtrChunk != NIL_RTR0PTR || PGM_IS_IN_NEM_MODE(pVM));
        memset(pvChunk, 0, cChunkPages << HOST_PAGE_SHIFT);

        PPGMRAMRANGE pNew = (PPGMRAMRANGE)pvChunk;

        /*
         * Ok, init and link the range.
         */
        rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << GUEST_PAGE_SHIFT) - 1,
                                          R0PtrChunk, PGM_RAM_RANGE_FLAGS_FLOATING, pszDescChunk, *ppPrev);
        if (RT_SUCCESS(rc))
            *ppPrev = pNew;

        if (RT_FAILURE(rc))
            SUPR3PageFreeEx(pvChunk, cChunkPages);
    }

    RTMemTmpFree(paChunkPages);
    return rc;
}


/**
 * Sets up a RAM range.
 *
 * This will check for conflicting registrations, make a resource
 * reservation for the memory (with GMM), and setup the per-page
 * tracking structures (PGMPAGE).
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The physical address of the RAM.
 * @param   cb      The size of the RAM.
 * @param   pszDesc The description - not copied, so, don't free or change it.
 */
VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
{
    /*
     * Validate input.
     */
    Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
    AssertReturn(RT_ALIGN_T(GCPhys, GUEST_PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(RT_ALIGN_T(cb, GUEST_PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);

    PGM_LOCK_VOID(pVM);

    /*
     * Find range location and check for conflicts.
     */
    PPGMRAMRANGE pPrev = NULL;
    PPGMRAMRANGE pRam  = pVM->pgm.s.pRamRangesXR3;
    while (pRam && GCPhysLast >= pRam->GCPhys)
    {
        AssertLogRelMsgReturnStmt(   GCPhysLast < pRam->GCPhys
                                  || GCPhys > pRam->GCPhysLast,
                                  ("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
                                   GCPhys, GCPhysLast, pszDesc, pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
                                  PGM_UNLOCK(pVM), VERR_PGM_RAM_CONFLICT);

        /* next */
        pPrev = pRam;
        pRam  = pRam->pNextR3;
    }

    /*
     * Register it with GMM (the API bitches).
     */
    const RTGCPHYS cPages = cb >> GUEST_PAGE_SHIFT;
    int rc = MMR3IncreaseBaseReservation(pVM, cPages);
    if (RT_FAILURE(rc))
    {
        PGM_UNLOCK(pVM);
        return rc;
    }

    if (   GCPhys >= _4G
        && cPages > 256)
    {
        /*
         * The PGMRAMRANGE structures for the high memory can get very big.
         * There used to be some limitations on SUPR3PageAllocEx allocation
         * sizes, so traditionally we limited this to 16MB chunks.  These days
         * we do ~64 MB chunks each covering 16GB of guest RAM, making sure
         * each range is a multiple of 1GB to enable eager hosts to use 1GB
         * pages in NEM mode.
         *
         * See also pgmR3PhysMmio2CalcChunkCount.
         */
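        /* A quick check of the arithmetic above (assuming 4 KiB guest pages and
           the 16-byte PGMPAGE used for tracking): _4M pages per chunk x 4 KiB
           equals 16 GiB of guest RAM per range, and _4M x 16 bytes equals the
           ~64 MB of PGMPAGE data per range mentioned above.  16 GiB is also a
           1 GiB multiple, as required for NEM large pages. */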
        uint32_t const cPagesPerChunk = _4M;
        Assert(RT_ALIGN_32(cPagesPerChunk, X86_PD_PAE_SHIFT - X86_PAGE_SHIFT)); /* NEM large page requirement: 1GB pages. */

        RTGCPHYS cPagesLeft  = cPages;
        RTGCPHYS GCPhysChunk = GCPhys;
        uint32_t iChunk      = 0;
        while (cPagesLeft > 0)
        {
            uint32_t cPagesInChunk = cPagesLeft;
            if (cPagesInChunk > cPagesPerChunk)
                cPagesInChunk = cPagesPerChunk;

            rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, iChunk, pszDesc, &pPrev);
            AssertRCReturn(rc, rc);

            /* advance */
            GCPhysChunk += (RTGCPHYS)cPagesInChunk << GUEST_PAGE_SHIFT;
            cPagesLeft  -= cPagesInChunk;
            iChunk++;
        }
    }
    else
    {
        /*
         * Allocate, initialize and link the new RAM range.
         */
        const size_t cbRamRange = RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]);
        PPGMRAMRANGE pNew       = NULL;
        RTR0PTR      pNewR0     = NIL_RTR0PTR;
        rc = SUPR3PageAllocEx(RT_ALIGN_Z(cbRamRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT, 0 /*fFlags*/,
                              (void **)&pNew, &pNewR0, NULL /*paPages*/);
        AssertLogRelMsgRCReturn(rc, ("rc=%Rrc cbRamRange=%zu\n", rc, cbRamRange), rc);

        rc = pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, pNewR0, 0 /*fFlags*/, pszDesc, pPrev);
        AssertLogRelMsgRCReturn(rc, ("rc=%Rrc cbRamRange=%zu\n", rc, cbRamRange), rc);
    }
    pgmPhysInvalidatePageMapTLB(pVM);

    PGM_UNLOCK(pVM);
    return rc;
}

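/*
 * Usage sketch (illustrative only; the address, size and description are
 * placeholders, not taken from any real caller):
 *
 *     int rc = PGMR3PhysRegisterRam(pVM, 0, 640 * _1K, "Base RAM");
 *     AssertRCReturn(rc, rc);
 */
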
/**
 * Worker called by PGMR3InitFinalize if we're configured to pre-allocate RAM.
 *
 * We do this late in the init process so that all the ROM and MMIO ranges have
 * been registered already and we don't go wasting memory on them.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The cross context VM structure.
 */
int pgmR3PhysRamPreAllocate(PVM pVM)
{
    Assert(pVM->pgm.s.fRamPreAlloc);
    Log(("pgmR3PhysRamPreAllocate: enter\n"));
#ifdef VBOX_WITH_PGM_NEM_MODE
    AssertLogRelReturn(!pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
#endif

    /*
     * Walk the RAM ranges and allocate all RAM pages, halt at
     * the first allocation error.
     */
    uint64_t cPages = 0;
    uint64_t NanoTS = RTTimeNanoTS();
    PGM_LOCK_VOID(pVM);
    for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
    {
        PPGMPAGE pPage  = &pRam->aPages[0];
        RTGCPHYS GCPhys = pRam->GCPhys;
        uint32_t cLeft  = pRam->cb >> GUEST_PAGE_SHIFT;
        while (cLeft-- > 0)
        {
            if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
            {
                switch (PGM_PAGE_GET_STATE(pPage))
                {
                    case PGM_PAGE_STATE_ZERO:
                    {
                        int rc = pgmPhysAllocPage(pVM, pPage, GCPhys);
                        if (RT_FAILURE(rc))
                        {
                            LogRel(("PGM: RAM Pre-allocation failed at %RGp (in %s) with rc=%Rrc\n", GCPhys, pRam->pszDesc, rc));
                            PGM_UNLOCK(pVM);
                            return rc;
                        }
                        cPages++;
                        break;
                    }

                    case PGM_PAGE_STATE_BALLOONED:
                    case PGM_PAGE_STATE_ALLOCATED:
                    case PGM_PAGE_STATE_WRITE_MONITORED:
                    case PGM_PAGE_STATE_SHARED:
                        /* nothing to do here. */
                        break;
                }
            }

            /* next */
            pPage++;
            GCPhys += GUEST_PAGE_SIZE;
        }
    }
    PGM_UNLOCK(pVM);
    NanoTS = RTTimeNanoTS() - NanoTS;

    LogRel(("PGM: Pre-allocated %llu pages in %llu ms\n", cPages, NanoTS / 1000000));
    Log(("pgmR3PhysRamPreAllocate: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}


/**
 * Checks shared page checksums.
 *
 * @param   pVM     The cross context VM structure.
 */
void pgmR3PhysAssertSharedPageChecksums(PVM pVM)
{
#ifdef VBOX_STRICT
    PGM_LOCK_VOID(pVM);

    if (pVM->pgm.s.cSharedPages > 0)
    {
        /*
         * Walk the ram ranges.
         */
        for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
        {
            uint32_t iPage = pRam->cb >> GUEST_PAGE_SHIFT;
            AssertMsg(((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) == pRam->cb,
                      ("%RGp %RGp\n", (RTGCPHYS)iPage << GUEST_PAGE_SHIFT, pRam->cb));

            while (iPage-- > 0)
            {
                PPGMPAGE pPage = &pRam->aPages[iPage];
                if (PGM_PAGE_IS_SHARED(pPage))
                {
                    uint32_t u32Checksum = pPage->s.u2Unused0/* | ((uint32_t)pPage->s.u2Unused1 << 8)*/;
                    if (!u32Checksum)
                    {
                        RTGCPHYS    GCPhysPage = pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
                        void const *pvPage;
                        int rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhysPage, &pvPage);
                        if (RT_SUCCESS(rc))
                        {
                            uint32_t u32Checksum2 = RTCrc32(pvPage, GUEST_PAGE_SIZE);
# if 0
                            AssertMsg((u32Checksum2 & /*UINT32_C(0x00000303)*/ 0x3) == u32Checksum, ("GCPhysPage=%RGp\n", GCPhysPage));
# else
                            if ((u32Checksum2 & /*UINT32_C(0x00000303)*/ 0x3) == u32Checksum)
                                LogFlow(("shpg %#x @ %RGp %#x [OK]\n", PGM_PAGE_GET_PAGEID(pPage), GCPhysPage, u32Checksum2));
                            else
                                AssertMsgFailed(("shpg %#x @ %RGp %#x\n", PGM_PAGE_GET_PAGEID(pPage), GCPhysPage, u32Checksum2));
# endif
                        }
                        else
                            AssertRC(rc);
                    }
                }

            } /* for each page */

        } /* for each ram range */
    }

    PGM_UNLOCK(pVM);
#endif /* VBOX_STRICT */
    NOREF(pVM);
}


/**
 * Resets the physical memory state.
 *
 * ASSUMES that the caller owns the PGM lock.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int pgmR3PhysRamReset(PVM pVM)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

    /* Reset the memory balloon. */
    int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
    AssertRC(rc);

#ifdef VBOX_WITH_PAGE_SHARING
    /* Clear all registered shared modules. */
    pgmR3PhysAssertSharedPageChecksums(pVM);
    rc = GMMR3ResetSharedModules(pVM);
    AssertRC(rc);
#endif
    /* Reset counters. */
    pVM->pgm.s.cReusedSharedPages = 0;
    pVM->pgm.s.cBalloonedPages    = 0;

    return VINF_SUCCESS;
}


/**
 * Resets (zeros) the RAM after all devices and components have been reset.
 *
 * ASSUMES that the caller owns the PGM lock.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int pgmR3PhysRamZeroAll(PVM pVM)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * We batch up pages that should be freed instead of calling GMM for
     * each and every one of them.
     */
    uint32_t         cPendingPages = 0;
    PGMMFREEPAGESREQ pReq;
    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    AssertLogRelRCReturn(rc, rc);

    /*
     * Walk the ram ranges.
     */
    for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
    {
        uint32_t iPage = pRam->cb >> GUEST_PAGE_SHIFT;
        AssertMsg(((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << GUEST_PAGE_SHIFT, pRam->cb));

        if (   !pVM->pgm.s.fRamPreAlloc
#ifdef VBOX_WITH_PGM_NEM_MODE
            && !pVM->pgm.s.fNemMode
#endif
            && pVM->pgm.s.fZeroRamPagesOnReset)
        {
            /* Replace all RAM pages by ZERO pages. */
            while (iPage-- > 0)
            {
                PPGMPAGE pPage = &pRam->aPages[iPage];
                switch (PGM_PAGE_GET_TYPE(pPage))
                {
                    case PGMPAGETYPE_RAM:
                        /* Do not replace pages part of a 2 MB contiguous range
                           with zero pages, but zero them instead. */
                        if (   PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE
                            || PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED)
                        {
                            void *pvPage;
                            rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pvPage);
                            AssertLogRelRCReturn(rc, rc);
                            RT_BZERO(pvPage, GUEST_PAGE_SIZE);
                        }
                        else if (PGM_PAGE_IS_BALLOONED(pPage))
                        {
                            /* Turn into a zero page; the balloon status is lost when the VM reboots. */
                            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
                        }
                        else if (!PGM_PAGE_IS_ZERO(pPage))
                        {
                            rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage,
                                                 pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), PGMPAGETYPE_RAM);
                            AssertLogRelRCReturn(rc, rc);
                        }
                        break;

                    case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
                    case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: /** @todo perhaps leave the special page alone?  I don't think VT-x copes with this code. */
                        pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT),
                                                           pRam, true /*fDoAccounting*/);
                        break;

                    case PGMPAGETYPE_MMIO2:
                    case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
                    case PGMPAGETYPE_ROM:
                    case PGMPAGETYPE_MMIO:
                        break;
                    default:
                        AssertFailed();
                }
            } /* for each page */
        }
        else
        {
            /* Zero the memory. */
            while (iPage-- > 0)
            {
                PPGMPAGE pPage = &pRam->aPages[iPage];
                switch (PGM_PAGE_GET_TYPE(pPage))
                {
                    case PGMPAGETYPE_RAM:
                        switch (PGM_PAGE_GET_STATE(pPage))
                        {
                            case PGM_PAGE_STATE_ZERO:
                                break;

                            case PGM_PAGE_STATE_BALLOONED:
                                /* Turn into a zero page; the balloon status is lost when the VM reboots. */
                                PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
                                break;

                            case PGM_PAGE_STATE_SHARED:
                            case PGM_PAGE_STATE_WRITE_MONITORED:
                                rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT));
                                AssertLogRelRCReturn(rc, rc);
                                RT_FALL_THRU();

                            case PGM_PAGE_STATE_ALLOCATED:
                                if (pVM->pgm.s.fZeroRamPagesOnReset)
                                {
                                    void *pvPage;
                                    rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pvPage);
                                    AssertLogRelRCReturn(rc, rc);
                                    RT_BZERO(pvPage, GUEST_PAGE_SIZE);
                                }
                                break;
                        }
                        break;

                    case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
                    case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: /** @todo perhaps leave the special page alone?  I don't think VT-x copes with this code. */
                        pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT),
                                                           pRam, true /*fDoAccounting*/);
                        break;

                    case PGMPAGETYPE_MMIO2:
                    case PGMPAGETYPE_ROM_SHADOW:
                    case PGMPAGETYPE_ROM:
                    case PGMPAGETYPE_MMIO:
                        break;
                    default:
                        AssertFailed();
                }
            } /* for each page */
        }
    }

    /*
     * Finish off any pages pending freeing.
     */
    if (cPendingPages)
    {
        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
        AssertLogRelRCReturn(rc, rc);
    }
    GMMR3FreePagesCleanup(pReq);
    return VINF_SUCCESS;
}


/**
 * Frees all RAM during VM termination.
 *
 * ASSUMES that the caller owns the PGM lock.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int pgmR3PhysRamTerm(PVM pVM)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

    /* Reset the memory balloon. */
    int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
    AssertRC(rc);

#ifdef VBOX_WITH_PAGE_SHARING
    /*
     * Clear all registered shared modules.
     */
    pgmR3PhysAssertSharedPageChecksums(pVM);
    rc = GMMR3ResetSharedModules(pVM);
    AssertRC(rc);

    /*
     * Flush the handy pages updates to make sure no shared pages are hiding
     * in there.  (Not unlikely if the VM shuts down, apparently.)
     */
# ifdef VBOX_WITH_PGM_NEM_MODE
    if (!pVM->pgm.s.fNemMode)
# endif
        rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_FLUSH_HANDY_PAGES, 0, NULL);
#endif

    /*
     * We batch up pages that should be freed instead of calling GMM for
     * each and every one of them.
     */
    uint32_t         cPendingPages = 0;
    PGMMFREEPAGESREQ pReq;
    rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    AssertLogRelRCReturn(rc, rc);

    /*
     * Walk the ram ranges.
     */
    for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
    {
        uint32_t iPage = pRam->cb >> GUEST_PAGE_SHIFT;
        AssertMsg(((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << GUEST_PAGE_SHIFT, pRam->cb));

        while (iPage-- > 0)
        {
            PPGMPAGE pPage = &pRam->aPages[iPage];
            switch (PGM_PAGE_GET_TYPE(pPage))
            {
                case PGMPAGETYPE_RAM:
                    /* Free all shared pages. Private pages are automatically freed during GMM VM cleanup. */
                    /** @todo change this to explicitly free private pages here. */
                    if (PGM_PAGE_IS_SHARED(pPage))
                    {
                        rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage,
                                             pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), PGMPAGETYPE_RAM);
                        AssertLogRelRCReturn(rc, rc);
                    }
                    break;

                case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
                case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
                case PGMPAGETYPE_MMIO2:
                case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
                case PGMPAGETYPE_ROM:
                case PGMPAGETYPE_MMIO:
                    break;
                default:
                    AssertFailed();
            }
        } /* for each page */
    }

    /*
     * Finish off any pages pending freeing.
     */
    if (cPendingPages)
    {
        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
        AssertLogRelRCReturn(rc, rc);
    }
    GMMR3FreePagesCleanup(pReq);
    return VINF_SUCCESS;
}



/*********************************************************************************************************************************
*   MMIO                                                                                                                         *
*********************************************************************************************************************************/

/**
 * This is the interface IOM is using to register an MMIO region.
 *
 * It will check for conflicts and ensure that a RAM range structure
 * is present before calling the PGMHandlerPhysicalRegister API to
 * register the callbacks.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The start of the MMIO region.
 * @param   cb      The size of the MMIO region.
 * @param   hType   The physical access handler type registration.
 * @param   uUser   The user argument.
 * @param   pszDesc The description of the MMIO region.
 */
VMMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMPHYSHANDLERTYPE hType,
                                     uint64_t uUser, const char *pszDesc)
{
    /*
     * Assert on some assumption.
     */
    VM_ASSERT_EMT(pVM);
    AssertReturn(!(cb & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
#ifdef VBOX_STRICT
    PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    Assert(pType);
    Assert(pType->enmKind == PGMPHYSHANDLERKIND_MMIO);
#endif

    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Make sure there's a RAM range structure for the region.
     */
    RTGCPHYS     GCPhysLast = GCPhys + (cb - 1);
    bool         fRamExists = false;
    PPGMRAMRANGE pRamPrev   = NULL;
    PPGMRAMRANGE pRam       = pVM->pgm.s.pRamRangesXR3;
    while (pRam && GCPhysLast >= pRam->GCPhys)
    {
        if (   GCPhysLast >= pRam->GCPhys
            && GCPhys     <= pRam->GCPhysLast)
        {
            /* Simplification: all within the same range. */
            AssertLogRelMsgReturnStmt(   GCPhys     >= pRam->GCPhys
                                      && GCPhysLast <= pRam->GCPhysLast,
                                      ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
                                       GCPhys, GCPhysLast, pszDesc,
                                       pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
                                      PGM_UNLOCK(pVM),
                                      VERR_PGM_RAM_CONFLICT);

            /* Check that it's all RAM or MMIO pages. */
            PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
            uint32_t  cLeft = cb >> GUEST_PAGE_SHIFT;
            while (cLeft-- > 0)
            {
                AssertLogRelMsgReturnStmt(   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
                                          || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
                                          ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
                                           GCPhys, GCPhysLast, pszDesc, pRam->GCPhys, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
                                          PGM_UNLOCK(pVM),
                                          VERR_PGM_RAM_CONFLICT);
                pPage++;
            }

            /* Looks good. */
            fRamExists = true;
            break;
        }

        /* next */
        pRamPrev = pRam;
        pRam     = pRam->pNextR3;
    }
    PPGMRAMRANGE pNew;
    if (fRamExists)
    {
        pNew = NULL;

        /*
         * Make all the pages in the range MMIO/ZERO pages, freeing any
         * RAM pages currently mapped here. This might not be 100% correct
         * for PCI memory, but we're doing the same thing for MMIO2 pages.
         */
        rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, NULL);
        AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);

        /* Force a PGM pool flush as guest ram references have been changed. */
        /** @todo not entirely SMP safe; assuming for now the guest takes
         *   care of this internally (not touch mapped mmio while changing the
         *   mapping). */
        PVMCPU pVCpu = VMMGetCpu(pVM);
        pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    else
    {
        /*
         * No RAM range, insert an ad hoc one.
         *
         * Note that we don't have to tell REM about this range because
         * PGMHandlerPhysicalRegisterEx will do that for us.
         */
        Log(("PGMR3PhysMMIORegister: Adding ad hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));

        /* Alloc. */
        const uint32_t cPages      = cb >> GUEST_PAGE_SHIFT;
        const size_t   cbRamRange  = RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]);
        const size_t   cRangePages = RT_ALIGN_Z(cbRamRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT;
        RTR0PTR        pNewR0      = NIL_RTR0PTR;
        rc = SUPR3PageAllocEx(cRangePages, 0 /*fFlags*/, (void **)&pNew, &pNewR0, NULL /*paPages*/);
        AssertLogRelMsgRCReturnStmt(rc, ("cbRamRange=%zu\n", cbRamRange), PGM_UNLOCK(pVM), rc);

#ifdef VBOX_WITH_NATIVE_NEM
        /* Notify NEM. */
        uint8_t u2State = 0; /* (must have valid state as there can't be anything to preserve) */
        if (VM_IS_NEM_ENABLED(pVM))
        {
            rc = NEMR3NotifyPhysMmioExMapEarly(pVM, GCPhys, cPages << GUEST_PAGE_SHIFT, 0 /*fFlags*/, NULL, NULL,
                                               &u2State, &pNew->uNemRange);
            AssertLogRelRCReturnStmt(rc, SUPR3PageFreeEx(pNew, cRangePages), rc);
        }
#endif

        /* Initialize the range. */
        pNew->pSelfR0    = pNewR0;
        pNew->GCPhys     = GCPhys;
        pNew->GCPhysLast = GCPhysLast;
        pNew->cb         = cb;
        pNew->pszDesc    = pszDesc;
        pNew->fFlags     = PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO;
        pNew->pvR3       = NULL;
        pNew->paLSPages  = NULL;

        uint32_t iPage = cPages;
        while (iPage-- > 0)
        {
            PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
#ifdef VBOX_WITH_NATIVE_NEM
            PGM_PAGE_SET_NEM_STATE(&pNew->aPages[iPage], u2State);
#endif
        }
        Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);

        /* update the page count stats. */
        pVM->pgm.s.cPureMmioPages += cPages;
        pVM->pgm.s.cAllPages      += cPages;

        /* link it */
        pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
    }

    /*
     * Register the access handler.
     */
    rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, hType, uUser, pszDesc);
    if (RT_SUCCESS(rc))
    {
#ifdef VBOX_WITH_NATIVE_NEM
        /* Late NEM notification. */
        if (VM_IS_NEM_ENABLED(pVM))
        {
            uint32_t const fNemNotify = (fRamExists ? NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE : 0);
            rc = NEMR3NotifyPhysMmioExMapLate(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemNotify,
                                              fRamExists ? (uint8_t *)pRam->pvR3 + (uintptr_t)(GCPhys - pRam->GCPhys) : NULL,
                                              NULL, !fRamExists ? &pNew->uNemRange : NULL);
            AssertLogRelRCReturn(rc, rc);
        }
#endif
    }
    /** @todo the phys handler failure handling isn't complete, esp. wrt NEM. */
    else if (!fRamExists)
    {
        pVM->pgm.s.cPureMmioPages -= cb >> GUEST_PAGE_SHIFT;
        pVM->pgm.s.cAllPages      -= cb >> GUEST_PAGE_SHIFT;

        /* remove the ad hoc range. */
        pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
        pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
        SUPR3PageFreeEx(pNew, RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cb >> GUEST_PAGE_SHIFT]),
                                         HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT);
    }
    pgmPhysInvalidatePageMapTLB(pVM);

    PGM_UNLOCK(pVM);
    return rc;
}

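/*
 * Illustrative call (a sketch only; the address, size, handler type and user
 * argument are placeholders, real callers live in IOM):
 *
 *     rc = PGMR3PhysMMIORegister(pVM, UINT64_C(0xe0000000), _4K, hType,
 *                                uUser, "Example device MMIO");
 */
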
/**
 * This is the interface IOM is using to deregister an MMIO region.
 *
 * It will take care of calling PGMHandlerPhysicalDeregister and cleaning up
 * any ad hoc PGMRAMRANGE left behind.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The start of the MMIO region.
 * @param   cb      The size of the MMIO region.
 */
VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
{
    VM_ASSERT_EMT(pVM);

    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * First deregister the handler, then check if we should remove the ram range.
     */
    rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
    if (RT_SUCCESS(rc))
    {
        RTGCPHYS     GCPhysLast = GCPhys + (cb - 1);
        PPGMRAMRANGE pRamPrev   = NULL;
        PPGMRAMRANGE pRam       = pVM->pgm.s.pRamRangesXR3;
        while (pRam && GCPhysLast >= pRam->GCPhys)
        {
            /** @todo We're being a bit too careful here. rewrite. */
            if (   GCPhysLast == pRam->GCPhysLast
                && GCPhys     == pRam->GCPhys)
            {
                Assert(pRam->cb == cb);

                /*
                 * See if all the pages are dead MMIO pages.
                 */
                uint32_t const cGuestPages = cb >> GUEST_PAGE_SHIFT;
                bool           fAllMMIO    = true;
                uint32_t       iPage       = 0;
                uint32_t       cLeft       = cGuestPages;
                while (cLeft-- > 0)
                {
                    PPGMPAGE pPage = &pRam->aPages[iPage];
                    if (    !PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)
                        /*|| not-out-of-action later */)
                    {
                        fAllMMIO = false;
                        AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), pPage));
                        break;
                    }
                    Assert(   PGM_PAGE_IS_ZERO(pPage)
                           || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
                           || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
                    pPage++;
                }
                if (fAllMMIO)
                {
                    /*
                     * Ad-hoc range, unlink and free it.
                     */
                    Log(("PGMR3PhysMMIODeregister: Freeing ad hoc MMIO range for %RGp-%RGp %s\n",
                         GCPhys, GCPhysLast, pRam->pszDesc));
                    /** @todo check the ad-hoc flags? */

#ifdef VBOX_WITH_NATIVE_NEM
                    if (VM_IS_NEM_ENABLED(pVM)) /* Notify NEM before we unlink the range. */
                    {
                        rc = NEMR3NotifyPhysMmioExUnmap(pVM, GCPhys, GCPhysLast - GCPhys + 1, 0 /*fFlags*/,
                                                        NULL, NULL, NULL, &pRam->uNemRange);
                        AssertLogRelRCReturn(rc, rc);
                    }
#endif

                    pVM->pgm.s.cAllPages      -= cGuestPages;
                    pVM->pgm.s.cPureMmioPages -= cGuestPages;

                    pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
                    const uint32_t cPages     = pRam->cb >> GUEST_PAGE_SHIFT;
                    const size_t   cbRamRange = RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]);
                    pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
                    SUPR3PageFreeEx(pRam, RT_ALIGN_Z(cbRamRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT);
                    break;
                }
            }

            /*
             * Range match? It will all be within one range (see PGMAllHandler.cpp).
             */
            if (   GCPhysLast >= pRam->GCPhys
                && GCPhys     <= pRam->GCPhysLast)
            {
                Assert(GCPhys     >= pRam->GCPhys);
                Assert(GCPhysLast <= pRam->GCPhysLast);

                /*
                 * Turn the pages back into RAM pages.
                 */
                uint32_t iPage = (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
                uint32_t cLeft = cb >> GUEST_PAGE_SHIFT;
                while (cLeft--)
                {
                    PPGMPAGE pPage = &pRam->aPages[iPage];
                    AssertMsg(   (PGM_PAGE_IS_MMIO(pPage) && PGM_PAGE_IS_ZERO(pPage))
                              || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
                              || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
                              ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), pPage));
                    if (PGM_PAGE_IS_MMIO_OR_ALIAS(pPage))
                        PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_RAM);
                    iPage++;
                }

#ifdef VBOX_WITH_NATIVE_NEM
                /* Notify NEM (failure will probably leave things in a non-working state). */
                if (VM_IS_NEM_ENABLED(pVM))
                {
                    uint8_t u2State = UINT8_MAX;
                    rc = NEMR3NotifyPhysMmioExUnmap(pVM, GCPhys, GCPhysLast - GCPhys + 1, NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE,
                                                    pRam->pvR3 ? (uint8_t *)pRam->pvR3 + GCPhys - pRam->GCPhys : NULL,
                                                    NULL, &u2State, &pRam->uNemRange);
                    AssertLogRelRCReturn(rc, rc);
                    if (u2State != UINT8_MAX)
                        pgmPhysSetNemStateForPages(&pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT],
                                                   cb >> GUEST_PAGE_SHIFT, u2State);
                }
#endif
                break;
            }

            /* next */
            pRamPrev = pRam;
            pRam     = pRam->pNextR3;
        }
    }

    /* Force a PGM pool flush as guest ram references have been changed. */
    /** @todo Not entirely SMP safe; assuming for now the guest takes care of
     *   this internally (not touch mapped mmio while changing the mapping). */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
    VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);

    pgmPhysInvalidatePageMapTLB(pVM);
    pgmPhysInvalidRamRangeTlbs(pVM);
    PGM_UNLOCK(pVM);
    return rc;
}



/*********************************************************************************************************************************
*   MMIO2                                                                                                                        *
*********************************************************************************************************************************/

/**
 * Locates an MMIO2 range.
 *
 * @returns Pointer to the MMIO2 range.
 * @param   pVM     The cross context VM structure.
 * @param   pDevIns The device instance owning the region.
 * @param   iSubDev The sub-device number.
 * @param   iRegion The region.
 * @param   hMmio2  Handle to look up. If NIL, use the @a iSubDev and
 *                  @a iRegion.
 */
DECLINLINE(PPGMREGMMIO2RANGE) pgmR3PhysMmio2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev,
                                                 uint32_t iRegion, PGMMMIO2HANDLE hMmio2)
{
    if (hMmio2 != NIL_PGMMMIO2HANDLE)
    {
        if (hMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3) && hMmio2 != 0)
        {
            PPGMREGMMIO2RANGE pCur = pVM->pgm.s.apMmio2RangesR3[hMmio2 - 1];
            if (pCur && pCur->pDevInsR3 == pDevIns)
            {
                Assert(pCur->idMmio2 == hMmio2);
                AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
                return pCur;
            }
            Assert(!pCur);
        }
        for (PPGMREGMMIO2RANGE pCur = pVM->pgm.s.pRegMmioRangesR3; pCur; pCur = pCur->pNextR3)
            if (pCur->idMmio2 == hMmio2)
            {
                AssertBreak(pCur->pDevInsR3 == pDevIns);
                AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
                return pCur;
            }
    }
    else
    {
        /*
         * Search the list.  There shouldn't be many entries.
         */
        /** @todo Optimize this lookup! There may now be many entries and it'll
         *        become really slow when doing MMR3HyperMapMMIO2 and similar. */
        for (PPGMREGMMIO2RANGE pCur = pVM->pgm.s.pRegMmioRangesR3; pCur; pCur = pCur->pNextR3)
            if (   pCur->pDevInsR3 == pDevIns
                && pCur->iRegion   == iRegion
                && pCur->iSubDev   == iSubDev)
                return pCur;
    }
    return NULL;
}


/**
 * Worker for PGMR3PhysMmio2ControlDirtyPageTracking and PGMR3PhysMmio2Map.
 */
static int pgmR3PhysMmio2EnableDirtyPageTracing(PVM pVM, PPGMREGMMIO2RANGE pFirstMmio2)
{
    int rc = VINF_SUCCESS;
    for (PPGMREGMMIO2RANGE pCurMmio2 = pFirstMmio2; pCurMmio2; pCurMmio2 = pCurMmio2->pNextR3)
    {
        Assert(!(pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING));
        int rc2 = pgmHandlerPhysicalExRegister(pVM, pCurMmio2->pPhysHandlerR3, pCurMmio2->RamRange.GCPhys,
                                               pCurMmio2->RamRange.GCPhysLast);
        AssertLogRelMsgRC(rc2, ("%#RGp-%#RGp %s failed -> %Rrc\n", pCurMmio2->RamRange.GCPhys, pCurMmio2->RamRange.GCPhysLast,
                                pCurMmio2->RamRange.pszDesc, rc2));
        if (RT_SUCCESS(rc2))
            pCurMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_TRACKING;
        else if (RT_SUCCESS(rc))
            rc = rc2;
        if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
            return rc;
    }
    AssertFailed();
    return rc;
}


/**
 * Worker for PGMR3PhysMmio2ControlDirtyPageTracking and PGMR3PhysMmio2Unmap.
 */
static int pgmR3PhysMmio2DisableDirtyPageTracing(PVM pVM, PPGMREGMMIO2RANGE pFirstMmio2)
{
    for (PPGMREGMMIO2RANGE pCurMmio2 = pFirstMmio2; pCurMmio2; pCurMmio2 = pCurMmio2->pNextR3)
    {
        if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_IS_TRACKING)
        {
            int rc2 = pgmHandlerPhysicalExDeregister(pVM, pCurMmio2->pPhysHandlerR3);
            AssertLogRelMsgRC(rc2, ("%#RGp-%#RGp %s failed -> %Rrc\n", pCurMmio2->RamRange.GCPhys, pCurMmio2->RamRange.GCPhysLast,
                                    pCurMmio2->RamRange.pszDesc, rc2));
            pCurMmio2->fFlags &= ~PGMREGMMIO2RANGE_F_IS_TRACKING;
        }
        if (pCurMmio2->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
            return VINF_SUCCESS;
    }
    AssertFailed();
    return VINF_SUCCESS;
}

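/*
 * Note on the chunk chain (a reading aid, inferred from the flags used above):
 * one MMIO2 registration may be split over several PGMREGMMIO2RANGE chunks
 * linked through pNextR3.  The head carries PGMREGMMIO2RANGE_F_FIRST_CHUNK and
 * the tail PGMREGMMIO2RANGE_F_LAST_CHUNK, which is why both workers above stop
 * once they have processed a chunk with the LAST_CHUNK flag set and assert if
 * they run off the end of the list instead.
 */
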
/**
 * Calculates the number of chunks.
 *
 * @returns Number of registration chunks needed.
 * @param   pVM             The cross context VM structure.
 * @param   cb              The size of the MMIO/MMIO2 range.
 * @param   pcPagesPerChunk Where to return the number of pages tracked by each
 *                          chunk.  Optional.
 * @param   pcbChunk        Where to return the guest mapping size for a chunk.
 */
static uint16_t pgmR3PhysMmio2CalcChunkCount(PVM pVM, RTGCPHYS cb, uint32_t *pcPagesPerChunk, uint32_t *pcbChunk)
{
    RT_NOREF_PV(pVM); /* without raw mode */

    /*
     * This is the same calculation as PGMR3PhysRegisterRam does, except we'll be
     * needing a few bytes extra for the PGMREGMMIO2RANGE structure.
     *
     * Note! In addition, we've got a 24-bit sub-page range for MMIO2 ranges, leaving
2730 | * us with an absolute maximum of 16777215 pages per chunk (close to 64 GB).
|
---|
2731 | */
|
---|
2732 | uint32_t const cPagesPerChunk = _4M;
|
---|
2733 | Assert(RT_ALIGN_32(cPagesPerChunk, X86_PD_PAE_SHIFT - X86_PAGE_SHIFT)); /* NEM large page requirement: 1GB pages. */
|
---|
2734 | uint32_t const cbChunk = RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[cPagesPerChunk]);
|
---|
2735 | AssertRelease(cPagesPerChunk < _16M);
|
---|
2736 |
|
---|
2737 | if (pcbChunk)
|
---|
2738 | *pcbChunk = cbChunk;
|
---|
2739 | if (pcPagesPerChunk)
|
---|
2740 | *pcPagesPerChunk = cPagesPerChunk;
|
---|
2741 |
|
---|
2742 | /* Calc the number of chunks we need. */
|
---|
2743 | RTGCPHYS const cGuestPages = cb >> GUEST_PAGE_SHIFT;
|
---|
2744 | uint16_t cChunks = (uint16_t)((cGuestPages + cPagesPerChunk - 1) / cPagesPerChunk);
|
---|
2745 | AssertRelease((RTGCPHYS)cChunks * cPagesPerChunk >= cGuestPages);
|
---|
2746 | return cChunks;
|
---|
2747 | }
|
---|
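
/*
 * Worked example for the chunk calculation above (illustrative sketch only,
 * not part of the build; assumes the usual 4 KiB guest page size).  One chunk
 * tracks _4M pages, i.e. 16 GiB of guest-physical space:
 *
 * @code
 *    RTGCPHYS const cb          = 48 * _1G;                // hypothetical 48 GiB MMIO2 region
 *    RTGCPHYS const cGuestPages = cb >> GUEST_PAGE_SHIFT;  // 12582912 pages
 *    // Ceiling division by _4M pages per chunk:
 *    uint16_t const cChunks     = (uint16_t)((cGuestPages + _4M - 1) / _4M); // = 3 chunks
 * @endcode
 */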


/**
 * Worker for PGMR3PhysMMIO2Register that allocates the PGMREGMMIO2RANGE
 * structures and does basic initialization.
 *
 * Caller must set type specific members and initialize the PGMPAGE structures.
 *
 * This was previously also used by PGMR3PhysMmio2PreRegister, a function for
 * pre-registering MMIO that was later (6.1) replaced by a new handle based IOM
 * interface.  The reference to caller and type above is purely historical.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device instance owning the region.
 * @param   iSubDev     The sub-device number (internal PCI config number).
 * @param   iRegion     The region number.  If the MMIO2 memory is a PCI
 *                      I/O region this number has to be the number of that
 *                      region.  Otherwise it can be any number save
 *                      UINT8_MAX.
 * @param   cb          The size of the region.  Must be page aligned.
 * @param   fFlags      PGMPHYS_MMIO2_FLAGS_XXX.
 * @param   idMmio2     The MMIO2 ID for the first chunk.
 * @param   pszDesc     The description.
 * @param   ppHeadRet   Where to return the pointer to the first
 *                      registration chunk.
 *
 * @thread  EMT
 */
static int pgmR3PhysMmio2Create(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags,
                                uint8_t idMmio2, const char *pszDesc, PPGMREGMMIO2RANGE *ppHeadRet)
{
    /*
     * Figure out how many chunks we need and of which size.
     */
    uint32_t cPagesPerChunk;
    uint16_t cChunks = pgmR3PhysMmio2CalcChunkCount(pVM, cb, &cPagesPerChunk, NULL);
    AssertReturn(cChunks, VERR_PGM_PHYS_MMIO_EX_IPE);

    /*
     * Allocate the chunks.
     */
    PPGMREGMMIO2RANGE *ppNext = ppHeadRet;
    *ppNext = NULL;

    int rc = VINF_SUCCESS;
    uint32_t cPagesLeft = cb >> GUEST_PAGE_SHIFT;
    for (uint16_t iChunk = 0; iChunk < cChunks && RT_SUCCESS(rc); iChunk++, idMmio2++)
    {
        /*
         * We currently do a single RAM range for the whole thing.  This will
         * probably have to change once someone needs really large MMIO regions,
         * as we will be running into SUPR3PageAllocEx limitations and such.
         */
        const uint32_t cPagesTrackedByChunk = RT_MIN(cPagesLeft, cPagesPerChunk);
        const size_t   cbRange = RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[cPagesTrackedByChunk]);
        PPGMREGMMIO2RANGE pNew = NULL;

        /*
         * Allocate memory for the registration structure.
         */
        size_t const cChunkPages = RT_ALIGN_Z(cbRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT;
        size_t const cbChunk     = (1 + cChunkPages + 1) << HOST_PAGE_SHIFT;
        AssertLogRelBreakStmt(cbChunk == (uint32_t)cbChunk, rc = VERR_OUT_OF_RANGE);
        RTR0PTR R0PtrChunk = NIL_RTR0PTR;
        void   *pvChunk    = NULL;
        rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk, &R0PtrChunk, NULL /*paPages*/);
        AssertLogRelMsgRCBreak(rc, ("rc=%Rrc, cChunkPages=%#zx\n", rc, cChunkPages));

        Assert(R0PtrChunk != NIL_RTR0PTR || PGM_IS_IN_NEM_MODE(pVM));
        RT_BZERO(pvChunk, cChunkPages << HOST_PAGE_SHIFT);

        pNew = (PPGMREGMMIO2RANGE)pvChunk;
        pNew->RamRange.fFlags  = PGM_RAM_RANGE_FLAGS_FLOATING;
        pNew->RamRange.pSelfR0 = R0PtrChunk + RT_UOFFSETOF(PGMREGMMIO2RANGE, RamRange);

        /*
         * Initialize the registration structure (caller does specific bits).
         */
        pNew->pDevInsR3 = pDevIns;
        //pNew->pvR3    = NULL;
        //pNew->pNext   = NULL;
        if (iChunk == 0)
            pNew->fFlags |= PGMREGMMIO2RANGE_F_FIRST_CHUNK;
        if (iChunk + 1 == cChunks)
            pNew->fFlags |= PGMREGMMIO2RANGE_F_LAST_CHUNK;
        if (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES)
            pNew->fFlags |= PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES;
        pNew->iSubDev      = iSubDev;
        pNew->iRegion      = iRegion;
        pNew->idSavedState = UINT8_MAX;
        pNew->idMmio2      = idMmio2;
        //pNew->pPhysHandlerR3 = NULL;
        //pNew->paLSPages      = NULL;
        pNew->RamRange.GCPhys     = NIL_RTGCPHYS;
        pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
        pNew->RamRange.pszDesc    = pszDesc;
        pNew->RamRange.cb         = pNew->cbReal = (RTGCPHYS)cPagesTrackedByChunk << X86_PAGE_SHIFT;
        pNew->RamRange.fFlags    |= PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX;
        pNew->RamRange.uNemRange  = UINT32_MAX;
        //pNew->RamRange.pvR3      = NULL;
        //pNew->RamRange.paLSPages = NULL;

        *ppNext = pNew;
        ASMCompilerBarrier();
        cPagesLeft -= cPagesTrackedByChunk;
        ppNext = &pNew->pNextR3;

        /*
         * Pre-allocate a handler if we're tracking dirty pages, unless NEM takes care of this.
         */
        if (   (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES)
#ifdef VBOX_WITH_PGM_NEM_MODE
            && (!VM_IS_NEM_ENABLED(pVM) || !NEMR3IsMmio2DirtyPageTrackingSupported(pVM))
#endif
           )
        {
            rc = pgmHandlerPhysicalExCreate(pVM, pVM->pgm.s.hMmio2DirtyPhysHandlerType, idMmio2, pszDesc, &pNew->pPhysHandlerR3);
            AssertLogRelMsgRCBreak(rc, ("idMmio2=%u\n", idMmio2));
        }
    }
    Assert(cPagesLeft == 0);

    if (RT_SUCCESS(rc))
    {
        Assert((*ppHeadRet)->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK);
        return VINF_SUCCESS;
    }

    /*
     * Free floating ranges.
     */
    while (*ppHeadRet)
    {
        PPGMREGMMIO2RANGE pFree = *ppHeadRet;
        *ppHeadRet = pFree->pNextR3;

        if (pFree->pPhysHandlerR3)
        {
            pgmHandlerPhysicalExDestroy(pVM, pFree->pPhysHandlerR3);
            pFree->pPhysHandlerR3 = NULL;
        }

        if (pFree->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING)
        {
            const size_t cbRange     = RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE,
                                                        RamRange.aPages[pFree->RamRange.cb >> X86_PAGE_SHIFT]);
            size_t const cChunkPages = RT_ALIGN_Z(cbRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT;
            SUPR3PageFreeEx(pFree, cChunkPages);
        }
    }

    return rc;
}
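
/*
 * Illustrative sketch (not part of the build): a chunk list produced by
 * pgmR3PhysMmio2Create is terminated by the PGMREGMMIO2RANGE_F_LAST_CHUNK
 * flag rather than by a NULL pointer alone, which is the walking pattern
 * used throughout this file:
 *
 * @code
 *    for (PPGMREGMMIO2RANGE pCur = pFirstMmio2; ; pCur = pCur->pNextR3)
 *    {
 *        // ... operate on pCur->RamRange here ...
 *        if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
 *            break;
 *    }
 * @endcode
 */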


/**
 * Common worker for PGMR3PhysMmio2PreRegister & PGMR3PhysMMIO2Register that
 * links a complete registration entry into the lists and lookup tables.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pNew    The new MMIO / MMIO2 registration to link.
 */
static void pgmR3PhysMmio2Link(PVM pVM, PPGMREGMMIO2RANGE pNew)
{
    Assert(pNew->idMmio2 != UINT8_MAX);

    /*
     * Link it into the list (order doesn't matter, so insert it at the head).
     *
     * Note! The range we're linking may consist of multiple chunks, so we
     *       have to find the last one.
     */
    PPGMREGMMIO2RANGE pLast;
    for (pLast = pNew; ; pLast = pLast->pNextR3)
    {
        if (pLast->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
            break;
        Assert(pLast->pNextR3);
        Assert(pLast->pNextR3->pDevInsR3 == pNew->pDevInsR3);
        Assert(pLast->pNextR3->iSubDev   == pNew->iSubDev);
        Assert(pLast->pNextR3->iRegion   == pNew->iRegion);
        Assert(pLast->pNextR3->idMmio2   == pLast->idMmio2 + 1);
    }

    PGM_LOCK_VOID(pVM);

    /* Link in the chain of ranges at the head of the list. */
    pLast->pNextR3 = pVM->pgm.s.pRegMmioRangesR3;
    pVM->pgm.s.pRegMmioRangesR3 = pNew;

    /* Insert the MMIO2 range/page IDs. */
    uint8_t idMmio2 = pNew->idMmio2;
    for (;;)
    {
        Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == NULL);
        Assert(pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] == NIL_RTR0PTR);
        pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = pNew;
        pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = pNew->RamRange.pSelfR0 - RT_UOFFSETOF(PGMREGMMIO2RANGE, RamRange);
        if (pNew->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
            break;
        pNew = pNew->pNextR3;
        idMmio2++;
    }

    pgmPhysInvalidatePageMapTLB(pVM);
    PGM_UNLOCK(pVM);
}
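
/*
 * Illustrative sketch (not part of the build): the lookup tables filled in
 * above are indexed by the 1-based MMIO2 range ID, so resolving an ID back to
 * its registration chunk looks like this:
 *
 * @code
 *    PPGMREGMMIO2RANGE pChunk = pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1]; // IDs run 1 thru PGM_MMIO2_MAX_RANGES
 * @endcode
 */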


/**
 * Allocate and register an MMIO2 region.
 *
 * As mentioned elsewhere, MMIO2 is just RAM spelled differently.  It's RAM
 * associated with a device.  It is also non-shared memory with a permanent
 * ring-3 mapping and page backing (presently).
 *
 * A MMIO2 range may overlap with base memory if a lot of RAM is configured for
 * the VM, in which case we'll drop the base memory pages.  Presently we will
 * make no attempt to preserve anything that happens to be present in the base
 * memory that is replaced, this is of course incorrect but it's too much
 * effort.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the
 *          memory.
 * @retval  VERR_ALREADY_EXISTS if the region already exists.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device instance owning the region.
 * @param   iSubDev     The sub-device number.
 * @param   iRegion     The region number.  If the MMIO2 memory is a PCI
 *                      I/O region this number has to be the number of that
 *                      region.  Otherwise it can be any number save
 *                      UINT8_MAX.
 * @param   cb          The size of the region.  Must be page aligned.
 * @param   fFlags      Reserved for future use, must be zero.
 * @param   pszDesc     The description.
 * @param   ppv         Where to store the pointer to the ring-3 mapping of
 *                      the memory.
 * @param   phRegion    Where to return the MMIO2 region handle.  Optional.
 * @thread  EMT
 */
VMMR3_INT_DECL(int) PGMR3PhysMmio2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb,
                                           uint32_t fFlags, const char *pszDesc, void **ppv, PGMMMIO2HANDLE *phRegion)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(ppv, VERR_INVALID_POINTER);
    *ppv = NULL;
    if (phRegion)
    {
        AssertPtrReturn(phRegion, VERR_INVALID_POINTER);
        *phRegion = NIL_PGMMMIO2HANDLE;
    }
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
    AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
    AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
    AssertReturn(pgmR3PhysMmio2Find(pVM, pDevIns, iSubDev, iRegion, NIL_PGMMMIO2HANDLE) == NULL, VERR_ALREADY_EXISTS);
    AssertReturn(!(cb & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cb, VERR_INVALID_PARAMETER);
    AssertReturn(!(fFlags & ~PGMPHYS_MMIO2_FLAGS_VALID_MASK), VERR_INVALID_FLAGS);

    const uint32_t cGuestPages = cb >> GUEST_PAGE_SHIFT;
    AssertLogRelReturn(((RTGCPHYS)cGuestPages << GUEST_PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
    AssertLogRelReturn(cGuestPages <= (MM_MMIO_64_MAX >> X86_PAGE_SHIFT), VERR_OUT_OF_RANGE);
    AssertLogRelReturn(cGuestPages <= PGM_MMIO2_MAX_PAGE_COUNT, VERR_OUT_OF_RANGE);

    /*
     * For the 2nd+ instance, mangle the description string so it's unique.
     */
    if (pDevIns->iInstance > 0) /** @todo Move to PDMDevHlp.cpp and use a real string cache. */
    {
        pszDesc = MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s [%u]", pszDesc, pDevIns->iInstance);
        if (!pszDesc)
            return VERR_NO_MEMORY;
    }

    /*
     * Allocate an MMIO2 range ID (not freed on failure).
     *
     * The zero ID is not used as it could be confused with NIL_GMM_PAGEID, so
     * the IDs go from 1 thru PGM_MMIO2_MAX_RANGES.
     */
    unsigned cChunks = pgmR3PhysMmio2CalcChunkCount(pVM, cb, NULL, NULL);

    PGM_LOCK_VOID(pVM);
    AssertCompile(PGM_MMIO2_MAX_RANGES < 255);
    uint8_t const  idMmio2          = pVM->pgm.s.cMmio2Regions + 1;
    unsigned const cNewMmio2Regions = pVM->pgm.s.cMmio2Regions + cChunks;
    if (cNewMmio2Regions > PGM_MMIO2_MAX_RANGES)
    {
        PGM_UNLOCK(pVM);
        AssertLogRelFailedReturn(VERR_PGM_TOO_MANY_MMIO2_RANGES);
    }
    pVM->pgm.s.cMmio2Regions = cNewMmio2Regions;
    PGM_UNLOCK(pVM);

    /*
     * Try to reserve and allocate the backing memory first as this is what is
     * most likely to fail.
     */
    int rc = MMR3AdjustFixedReservation(pVM, cGuestPages, pszDesc);
    if (RT_SUCCESS(rc))
    {
        const uint32_t cHostPages = RT_ALIGN_T(cb, HOST_PAGE_SIZE, RTGCPHYS) >> HOST_PAGE_SHIFT;
        PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cHostPages * sizeof(SUPPAGE));
        if (paPages)
        {
            void *pvPages = NULL;
#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
            RTR0PTR pvPagesR0 = NIL_RTR0PTR;
#endif
#ifdef VBOX_WITH_PGM_NEM_MODE
            if (PGM_IS_IN_NEM_MODE(pVM))
                rc = SUPR3PageAlloc(cHostPages, pVM->pgm.s.fUseLargePages ? SUP_PAGE_ALLOC_F_LARGE_PAGES : 0, &pvPages);
            else
#endif
            {
#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
                rc = SUPR3PageAllocEx(cHostPages, 0 /*fFlags*/, &pvPages, &pvPagesR0, paPages);
#else
                rc = SUPR3PageAllocEx(cHostPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages);
#endif
            }
            if (RT_SUCCESS(rc))
            {
                memset(pvPages, 0, cGuestPages * GUEST_PAGE_SIZE);

                /*
                 * Create the registered MMIO range record for it.
                 */
                PPGMREGMMIO2RANGE pNew;
                rc = pgmR3PhysMmio2Create(pVM, pDevIns, iSubDev, iRegion, cb, fFlags, idMmio2, pszDesc, &pNew);
                if (RT_SUCCESS(rc))
                {
                    if (phRegion)
                        *phRegion = idMmio2; /* The ID of the first chunk. */

                    uint32_t iSrcPage   = 0;
                    uint8_t *pbCurPages = (uint8_t *)pvPages;
                    for (PPGMREGMMIO2RANGE pCur = pNew; pCur; pCur = pCur->pNextR3)
                    {
                        pCur->pvR3          = pbCurPages;
#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
                        pCur->pvR0          = pvPagesR0 + (iSrcPage << GUEST_PAGE_SHIFT);
#endif
                        pCur->RamRange.pvR3 = pbCurPages;

                        uint32_t iDstPage = pCur->RamRange.cb >> GUEST_PAGE_SHIFT;
#ifdef VBOX_WITH_PGM_NEM_MODE
                        if (PGM_IS_IN_NEM_MODE(pVM))
                            while (iDstPage-- > 0)
                                PGM_PAGE_INIT(&pCur->RamRange.aPages[iDstPage], UINT64_C(0x0000ffffffff0000),
                                              PGM_MMIO2_PAGEID_MAKE(pCur->idMmio2, iDstPage),
                                              PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
                        else
#endif
                        {
                            AssertRelease(HOST_PAGE_SHIFT == GUEST_PAGE_SHIFT);
                            while (iDstPage-- > 0)
                                PGM_PAGE_INIT(&pCur->RamRange.aPages[iDstPage], paPages[iDstPage + iSrcPage].Phys,
                                              PGM_MMIO2_PAGEID_MAKE(pCur->idMmio2, iDstPage),
                                              PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
                        }

                        /* advance. */
                        iSrcPage   += pCur->RamRange.cb >> GUEST_PAGE_SHIFT;
                        pbCurPages += pCur->RamRange.cb;
                    }

                    RTMemTmpFree(paPages);

                    /*
                     * Update the page count stats, link the registration and we're done.
                     */
                    pVM->pgm.s.cAllPages     += cGuestPages;
                    pVM->pgm.s.cPrivatePages += cGuestPages;

                    pgmR3PhysMmio2Link(pVM, pNew);

                    *ppv = pvPages;
                    return VINF_SUCCESS;
                }

                SUPR3PageFreeEx(pvPages, cHostPages);
            }
        }
        else
            rc = VERR_NO_TMP_MEMORY;
        RTMemTmpFree(paPages);
        MMR3AdjustFixedReservation(pVM, -(int32_t)cGuestPages, pszDesc);
    }
    if (pDevIns->iInstance > 0)
        MMR3HeapFree((void *)pszDesc);
    return rc;
}
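
/*
 * Illustrative usage sketch (hypothetical caller, not part of the build):
 * registering a 2 MB MMIO2 region with dirty page tracking from device
 * construction code.  The size, description and use of the ring-3 mapping
 * are made up for the example.
 *
 * @code
 *    void          *pvMmio2 = NULL;
 *    PGMMMIO2HANDLE hMmio2  = NIL_PGMMMIO2HANDLE;
 *    // iSubDev=0, iRegion=0:
 *    int rc = PGMR3PhysMmio2Register(pVM, pDevIns, 0, 0, 2 * _1M,
 *                                    PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES,
 *                                    "Example-Frame-Buffer", &pvMmio2, &hMmio2);
 *    if (RT_SUCCESS(rc))
 *        memset(pvMmio2, 0, 2 * _1M); // the permanent ring-3 mapping is valid right away
 * @endcode
 */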


/**
 * Deregisters and frees an MMIO2 region.
 *
 * Any physical access handlers registered for the region must be deregistered
 * before calling this function.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device instance owning the region.
 * @param   hMmio2      The MMIO2 handle to deregister, or NIL if all
 *                      regions for the given device are to be deregistered.
 */
VMMR3_INT_DECL(int) PGMR3PhysMmio2Deregister(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2)
{
    /*
     * Validate input.
     */
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);

    /*
     * The loop here scanning all registrations will make sure that multi-chunk ranges
     * get properly deregistered, though its original purpose was the wildcard iRegion.
     */
    PGM_LOCK_VOID(pVM);
    int rc = VINF_SUCCESS;
    unsigned cFound = 0;
    PPGMREGMMIO2RANGE pPrev = NULL;
    PPGMREGMMIO2RANGE pCur  = pVM->pgm.s.pRegMmioRangesR3;
    while (pCur)
    {
        uint32_t const fFlags = pCur->fFlags;
        if (   pCur->pDevInsR3 == pDevIns
            && (   hMmio2 == NIL_PGMMMIO2HANDLE
                || pCur->idMmio2 == hMmio2))
        {
            cFound++;

            /*
             * Unmap it if it's mapped.
             */
            if (fFlags & PGMREGMMIO2RANGE_F_MAPPED)
            {
                int rc2 = PGMR3PhysMmio2Unmap(pVM, pCur->pDevInsR3, pCur->idMmio2, pCur->RamRange.GCPhys);
                AssertRC(rc2);
                if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                    rc = rc2;
            }

            /*
             * Unlink it.
             */
            PPGMREGMMIO2RANGE pNext = pCur->pNextR3;
            if (pPrev)
                pPrev->pNextR3 = pNext;
            else
                pVM->pgm.s.pRegMmioRangesR3 = pNext;
            pCur->pNextR3 = NULL;

            uint8_t idMmio2 = pCur->idMmio2;
            Assert(idMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3));
            if (idMmio2 <= RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3))
            {
                Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == pCur);
                pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = NULL;
                pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = NIL_RTR0PTR;
            }

            /*
             * Free the memory.
             */
            uint32_t const cGuestPages = pCur->cbReal >> GUEST_PAGE_SHIFT;
            uint32_t const cHostPages  = RT_ALIGN_T(pCur->cbReal, HOST_PAGE_SIZE, RTGCPHYS) >> HOST_PAGE_SHIFT;
#ifdef VBOX_WITH_PGM_NEM_MODE
            if (!pVM->pgm.s.fNemMode)
#endif
            {
                int rc2 = SUPR3PageFreeEx(pCur->pvR3, cHostPages);
                AssertRC(rc2);
                if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                    rc = rc2;

                rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cGuestPages, pCur->RamRange.pszDesc);
                AssertRC(rc2);
                if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                    rc = rc2;
            }
#ifdef VBOX_WITH_PGM_NEM_MODE
            else
            {
                int rc2 = SUPR3PageFreeEx(pCur->pvR3, cHostPages);
                AssertRC(rc2);
                if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                    rc = rc2;
            }
#endif

            if (pCur->pPhysHandlerR3)
            {
                pgmHandlerPhysicalExDestroy(pVM, pCur->pPhysHandlerR3);
                pCur->pPhysHandlerR3 = NULL;
            }

            /* We're leaking hyper memory here if done at runtime. */
#ifdef VBOX_STRICT
            VMSTATE const enmState = VMR3GetState(pVM);
            AssertMsg(   enmState == VMSTATE_POWERING_OFF
                      || enmState == VMSTATE_POWERING_OFF_LS
                      || enmState == VMSTATE_OFF
                      || enmState == VMSTATE_OFF_LS
                      || enmState == VMSTATE_DESTROYING
                      || enmState == VMSTATE_TERMINATED
                      || enmState == VMSTATE_CREATING
                      , ("%s\n", VMR3GetStateName(enmState)));
#endif

            if (pCur->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING)
            {
                const size_t cbRange     = RT_UOFFSETOF_DYN(PGMREGMMIO2RANGE, RamRange.aPages[cGuestPages]);
                size_t const cChunkPages = RT_ALIGN_Z(cbRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT;
                SUPR3PageFreeEx(pCur, cChunkPages);
            }
            /*else
            {
                rc = MMHyperFree(pVM, pCur); - does not work, see the alloc call.
                AssertRCReturn(rc, rc);
            } */

            /* update page count stats */
            pVM->pgm.s.cAllPages     -= cGuestPages;
            pVM->pgm.s.cPrivatePages -= cGuestPages;

            /* next */
            pCur = pNext;
            if (hMmio2 != NIL_PGMMMIO2HANDLE)
            {
                if (fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
                    break;
                hMmio2++;
                Assert(pCur->idMmio2 == hMmio2);
                Assert(pCur->pDevInsR3 == pDevIns);
                Assert(!(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK));
            }
        }
        else
        {
            pPrev = pCur;
            pCur  = pCur->pNextR3;
        }
    }
    pgmPhysInvalidatePageMapTLB(pVM);
    PGM_UNLOCK(pVM);
    return !cFound && hMmio2 != NIL_PGMMMIO2HANDLE ? VERR_NOT_FOUND : rc;
}
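
/*
 * Illustrative sketch (not part of the build): passing NIL_PGMMMIO2HANDLE
 * tears down every MMIO2 region the device instance owns, which is the
 * typical device-destruction pattern:
 *
 * @code
 *    int rc = PGMR3PhysMmio2Deregister(pVM, pDevIns, NIL_PGMMMIO2HANDLE);
 *    AssertLogRelRC(rc);
 * @endcode
 */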


/**
 * Maps a MMIO2 region.
 *
 * This is typically done when a guest / the bios / state loading changes the
 * PCI config.  The replacing of base memory has the same restrictions as during
 * registration, of course.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device instance owning the region.
 * @param   hMmio2      The handle of the region to map.
 * @param   GCPhys      The guest-physical address to be remapped.
 */
VMMR3_INT_DECL(int) PGMR3PhysMmio2Map(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS GCPhys)
{
    /*
     * Validate input.
     *
     * Note! It's safe to walk the MMIO/MMIO2 list since registration only
     *       happens during VM construction.
     */
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
    AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
    AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
    AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE, VERR_INVALID_HANDLE);

    PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
    AssertReturn(pFirstMmio, VERR_NOT_FOUND);
    Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK);

    PPGMREGMMIO2RANGE pLastMmio = pFirstMmio;
    RTGCPHYS          cbRange   = 0;
    for (;;)
    {
        AssertReturn(!(pLastMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED), VERR_WRONG_ORDER);
        Assert(pLastMmio->RamRange.GCPhys     == NIL_RTGCPHYS);
        Assert(pLastMmio->RamRange.GCPhysLast == NIL_RTGCPHYS);
        Assert(pLastMmio->pDevInsR3 == pFirstMmio->pDevInsR3);
        Assert(pLastMmio->iSubDev   == pFirstMmio->iSubDev);
        Assert(pLastMmio->iRegion   == pFirstMmio->iRegion);
        cbRange += pLastMmio->RamRange.cb;
        if (pLastMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
            break;
        pLastMmio = pLastMmio->pNextR3;
    }

    RTGCPHYS GCPhysLast = GCPhys + cbRange - 1;
    AssertLogRelReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);

    /*
     * Find our location in the ram range list, checking for restrictions
     * we don't bother implementing yet (partially overlapping, multiple
     * ram ranges).
     */
    PGM_LOCK_VOID(pVM);

    AssertReturnStmt(!(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED), PGM_UNLOCK(pVM), VERR_WRONG_ORDER);

    bool fRamExists = false;
    PPGMRAMRANGE pRamPrev = NULL;
    PPGMRAMRANGE pRam     = pVM->pgm.s.pRamRangesXR3;
    while (pRam && GCPhysLast >= pRam->GCPhys)
    {
        if (   GCPhys     <= pRam->GCPhysLast
            && GCPhysLast >= pRam->GCPhys)
        {
            /* Completely within? */
            AssertLogRelMsgReturnStmt(   GCPhys     >= pRam->GCPhys
                                      && GCPhysLast <= pRam->GCPhysLast,
                                      ("%RGp-%RGp (MMIOEx/%s) falls partly outside %RGp-%RGp (%s)\n",
                                       GCPhys, GCPhysLast, pFirstMmio->RamRange.pszDesc,
                                       pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
                                      PGM_UNLOCK(pVM),
                                      VERR_PGM_RAM_CONFLICT);

            /* Check that all the pages are RAM pages. */
            PPGMPAGE pPage      = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
            uint32_t cPagesLeft = cbRange >> GUEST_PAGE_SHIFT;
            while (cPagesLeft-- > 0)
            {
                AssertLogRelMsgReturnStmt(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
                                          ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
                                           GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pFirstMmio->RamRange.pszDesc),
                                          PGM_UNLOCK(pVM),
                                          VERR_PGM_RAM_CONFLICT);
                pPage++;
            }

            /* There can only be one MMIO/MMIO2 chunk matching here! */
            AssertLogRelMsgReturnStmt(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK,
                                      ("%RGp-%RGp (MMIOEx/%s, flags %#X) consists of multiple chunks whereas the RAM somehow doesn't!\n",
                                       GCPhys, GCPhysLast, pFirstMmio->RamRange.pszDesc, pFirstMmio->fFlags),
                                      PGM_UNLOCK(pVM),
                                      VERR_PGM_PHYS_MMIO_EX_IPE);

            fRamExists = true;
            break;
        }

        /* next */
        pRamPrev = pRam;
        pRam     = pRam->pNextR3;
    }
    Log(("PGMR3PhysMmio2Map: %RGp-%RGp fRamExists=%RTbool %s\n", GCPhys, GCPhysLast, fRamExists, pFirstMmio->RamRange.pszDesc));


    /*
     * Make the changes.
     */
    RTGCPHYS GCPhysCur = GCPhys;
    for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
    {
        pCurMmio->RamRange.GCPhys     = GCPhysCur;
        pCurMmio->RamRange.GCPhysLast = GCPhysCur + pCurMmio->RamRange.cb - 1;
        if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
        {
            Assert(pCurMmio->RamRange.GCPhysLast == GCPhysLast);
            break;
        }
        GCPhysCur += pCurMmio->RamRange.cb;
    }

    if (fRamExists)
    {
        /*
         * Make all the pages in the range MMIO/ZERO pages, freeing any
         * RAM pages currently mapped here.  This might not be 100% correct
         * for PCI memory, but we're doing the same thing for MMIO2 pages.
         *
         * We replace these MMIO/ZERO pages with real pages in the MMIO2 case.
         */
        Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK); /* Only one chunk */
        Assert(pFirstMmio->pvR3 == pFirstMmio->RamRange.pvR3);
        Assert(pFirstMmio->RamRange.pvR3 != NULL);

#ifdef VBOX_WITH_PGM_NEM_MODE
        /* We cannot mix MMIO2 into a RAM range in simplified memory mode because pRam->pvR3 can't point
           both at the RAM and MMIO2, so we won't ever write & read from the actual MMIO2 memory if we try. */
        AssertLogRelMsgReturnStmt(!pVM->pgm.s.fNemMode,
                                  ("%s at %RGp-%RGp\n", pFirstMmio->RamRange.pszDesc, GCPhys, GCPhysLast),
                                  PGM_UNLOCK(pVM), VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
#endif

        int rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, pFirstMmio->RamRange.pvR3);
        AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);

        /* Replace the pages, freeing all present RAM pages. */
        PPGMPAGE pPageSrc   = &pFirstMmio->RamRange.aPages[0];
        PPGMPAGE pPageDst   = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
        uint32_t cPagesLeft = pFirstMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
        while (cPagesLeft-- > 0)
        {
            Assert(PGM_PAGE_IS_MMIO(pPageDst));

            RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
            uint32_t const idPage = PGM_PAGE_GET_PAGEID(pPageSrc);
            PGM_PAGE_SET_PAGEID(pVM, pPageDst, idPage);
            PGM_PAGE_SET_HCPHYS(pVM, pPageDst, HCPhys);
            PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO2);
            PGM_PAGE_SET_STATE(pVM, pPageDst, PGM_PAGE_STATE_ALLOCATED);
            PGM_PAGE_SET_PDE_TYPE(pVM, pPageDst, PGM_PAGE_PDE_TYPE_DONTCARE);
            PGM_PAGE_SET_PTE_INDEX(pVM, pPageDst, 0);
            PGM_PAGE_SET_TRACKING(pVM, pPageDst, 0);
            /* NEM state is set by pgmR3PhysFreePageRange. */

            pVM->pgm.s.cZeroPages--;
            pPageSrc++;
            pPageDst++;
        }

        /* Flush physical page map TLB. */
        pgmPhysInvalidatePageMapTLB(pVM);

        /* Force a PGM pool flush as guest ram references have been changed. */
        /** @todo not entirely SMP safe; assuming for now the guest takes care of
         *        this internally (not touch mapped mmio while changing the mapping). */
        PVMCPU pVCpu = VMMGetCpu(pVM);
        pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    else
    {
        /*
         * No RAM range, insert the ones prepared during registration.
         */
        for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
        {
#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM and get the new NEM state for the pages. */
            uint8_t u2NemState = 0;
            if (VM_IS_NEM_ENABLED(pVM))
            {
                int rc = NEMR3NotifyPhysMmioExMapEarly(pVM, pCurMmio->RamRange.GCPhys,
                                                       pCurMmio->RamRange.GCPhysLast - pCurMmio->RamRange.GCPhys + 1,
                                                       NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2
                                                       | (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES
                                                          ? NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES : 0),
                                                       NULL /*pvRam*/, pCurMmio->RamRange.pvR3,
                                                       &u2NemState, &pCurMmio->RamRange.uNemRange);
                AssertLogRelRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);
            }
#endif

            /* Clear the tracking data of pages we're going to reactivate. */
            PPGMPAGE pPageSrc   = &pCurMmio->RamRange.aPages[0];
            uint32_t cPagesLeft = pCurMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
            while (cPagesLeft-- > 0)
            {
                PGM_PAGE_SET_TRACKING(pVM, pPageSrc, 0);
                PGM_PAGE_SET_PTE_INDEX(pVM, pPageSrc, 0);
#ifdef VBOX_WITH_NATIVE_NEM
                PGM_PAGE_SET_NEM_STATE(pPageSrc, u2NemState);
#endif
                pPageSrc++;
            }

            /* link in the ram range */
            pgmR3PhysLinkRamRange(pVM, &pCurMmio->RamRange, pRamPrev);

            if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
            {
                Assert(pCurMmio->RamRange.GCPhysLast == GCPhysLast);
                break;
            }
            pRamPrev = &pCurMmio->RamRange;
        }
    }

    /*
     * If the range has dirty page monitoring enabled, enable that.
     *
     * We ignore failures here for now because if we fail, the whole mapping
     * will have to be reversed and we'll end up with nothing at all on the
     * screen and a grumpy guest, whereas if we just go on, we'll only have
     * visual distortions to gripe about.  There will be something in the
     * release log.
     */
    if (   pFirstMmio->pPhysHandlerR3
        && (pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
        pgmR3PhysMmio2EnableDirtyPageTracing(pVM, pFirstMmio);

    /*
     * We're good, set the flags and invalidate the mapping TLB.
     */
    for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
    {
        pCurMmio->fFlags |= PGMREGMMIO2RANGE_F_MAPPED;
        if (fRamExists)
            pCurMmio->fFlags |= PGMREGMMIO2RANGE_F_OVERLAPPING;
        else
            pCurMmio->fFlags &= ~PGMREGMMIO2RANGE_F_OVERLAPPING;
        if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
            break;
    }
    pgmPhysInvalidatePageMapTLB(pVM);

#ifdef VBOX_WITH_NATIVE_NEM
    /*
     * Late NEM notification.
     */
    if (VM_IS_NEM_ENABLED(pVM))
    {
        int rc;
        uint32_t fNemFlags = NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2;
        if (pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES)
            fNemFlags |= NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES;
        if (fRamExists)
            rc = NEMR3NotifyPhysMmioExMapLate(pVM, GCPhys, GCPhysLast - GCPhys + 1, fNemFlags | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE,
                                              pRam->pvR3 ? (uint8_t *)pRam->pvR3 + GCPhys - pRam->GCPhys : NULL, pFirstMmio->pvR3,
                                              NULL /*puNemRange*/);
        else
        {
            rc = VINF_SUCCESS;
            for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
            {
                rc = NEMR3NotifyPhysMmioExMapLate(pVM, pCurMmio->RamRange.GCPhys, pCurMmio->RamRange.cb, fNemFlags,
                                                  NULL, pCurMmio->RamRange.pvR3, &pCurMmio->RamRange.uNemRange);
                if ((pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK) || RT_FAILURE(rc))
                    break;
            }
        }
        AssertLogRelRCReturnStmt(rc, PGMR3PhysMmio2Unmap(pVM, pDevIns, hMmio2, GCPhys); PGM_UNLOCK(pVM), rc);
    }
#endif

    PGM_UNLOCK(pVM);

    return VINF_SUCCESS;
}
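
/*
 * Illustrative sketch (hypothetical PCI device callback, not part of the
 * build): mapping the region at whatever base address the guest wrote into
 * the BAR.  "GCPhysBar" and the error handling are made up for the example.
 *
 * @code
 *    int rc = PGMR3PhysMmio2Map(pVM, pDevIns, hMmio2, GCPhysBar);
 *    AssertLogRelMsgRC(rc, ("MMIO2 map at %RGp -> %Rrc\n", GCPhysBar, rc));
 * @endcode
 */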


/**
 * Unmaps an MMIO2 region.
 *
 * This is typically done when a guest / the bios / state loading changes the
 * PCI config.  The replacing of base memory has the same restrictions as during
 * registration, of course.
 */
VMMR3_INT_DECL(int) PGMR3PhysMmio2Unmap(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS GCPhys)
{
    /*
     * Validate input.
     */
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
    AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE, VERR_INVALID_HANDLE);
    if (GCPhys != NIL_RTGCPHYS)
    {
        AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
        AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    }

    PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
    AssertReturn(pFirstMmio, VERR_NOT_FOUND);
    Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK);

    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    PPGMREGMMIO2RANGE pLastMmio = pFirstMmio;
    RTGCPHYS          cbRange   = 0;
    for (;;)
    {
        AssertReturnStmt(pLastMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED, PGM_UNLOCK(pVM), VERR_WRONG_ORDER);
        AssertReturnStmt(pLastMmio->RamRange.GCPhys == GCPhys + cbRange || GCPhys == NIL_RTGCPHYS,
                         PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
        Assert(pLastMmio->pDevInsR3 == pFirstMmio->pDevInsR3);
        Assert(pLastMmio->iSubDev   == pFirstMmio->iSubDev);
        Assert(pLastMmio->iRegion   == pFirstMmio->iRegion);
        cbRange += pLastMmio->RamRange.cb;
        if (pLastMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
            break;
        pLastMmio = pLastMmio->pNextR3;
    }

    Log(("PGMR3PhysMmio2Unmap: %RGp-%RGp %s\n",
         pFirstMmio->RamRange.GCPhys, pLastMmio->RamRange.GCPhysLast, pFirstMmio->RamRange.pszDesc));

    uint16_t const fOldFlags = pFirstMmio->fFlags;
    AssertReturnStmt(fOldFlags & PGMREGMMIO2RANGE_F_MAPPED, PGM_UNLOCK(pVM), VERR_WRONG_ORDER);

    /*
     * If monitoring dirty pages, we must deregister the handlers first.
     */
    if (   pFirstMmio->pPhysHandlerR3
        && (fOldFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
        pgmR3PhysMmio2DisableDirtyPageTracing(pVM, pFirstMmio);

    /*
     * Unmap it.
     */
    int rcRet = VINF_SUCCESS;
#ifdef VBOX_WITH_NATIVE_NEM
    uint32_t const fNemFlags = NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2
                             | (fOldFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES
                                ? NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES : 0);
#endif
    if (fOldFlags & PGMREGMMIO2RANGE_F_OVERLAPPING)
    {
        /*
         * We've replaced RAM, replace with zero pages.
         *
         * Note! This is where we might differ a little from a real system, because
         *       it's likely to just show the RAM pages as they were before the
         *       MMIO/MMIO2 region was mapped here.
         */
        /* Only one chunk allowed when overlapping! */
        Assert(fOldFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK);

        /* Restore the RAM pages we've replaced. */
        PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
        while (pRam->GCPhysLast < pFirstMmio->RamRange.GCPhys)
            pRam = pRam->pNextR3;

        PPGMPAGE pPageDst   = &pRam->aPages[(pFirstMmio->RamRange.GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
        uint32_t cPagesLeft = pFirstMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
        pVM->pgm.s.cZeroPages += cPagesLeft; /** @todo not correct for NEM mode */

#ifdef VBOX_WITH_NATIVE_NEM
        if (VM_IS_NEM_ENABLED(pVM)) /* Notify NEM.  Note! we cannot be here in simple memory mode, see mapping function. */
        {
            uint8_t u2State = UINT8_MAX;
            rc = NEMR3NotifyPhysMmioExUnmap(pVM, pFirstMmio->RamRange.GCPhys, pFirstMmio->RamRange.cb,
                                            fNemFlags | NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE,
                                            pRam->pvR3
                                            ? (uint8_t *)pRam->pvR3 + pFirstMmio->RamRange.GCPhys - pRam->GCPhys : NULL,
                                            pFirstMmio->pvR3, &u2State, &pRam->uNemRange);
            AssertRCStmt(rc, rcRet = rc);
            if (u2State != UINT8_MAX)
                pgmPhysSetNemStateForPages(pPageDst, cPagesLeft, u2State);
        }
#endif

        while (cPagesLeft-- > 0)
        {
            PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM);
            pPageDst++;
        }

        /* Flush physical page map TLB. */
        pgmPhysInvalidatePageMapTLB(pVM);

        /* Update range state. */
        pFirstMmio->RamRange.GCPhys     = NIL_RTGCPHYS;
        pFirstMmio->RamRange.GCPhysLast = NIL_RTGCPHYS;
        pFirstMmio->fFlags &= ~(PGMREGMMIO2RANGE_F_OVERLAPPING | PGMREGMMIO2RANGE_F_MAPPED);
    }
    else
    {
        /*
         * Unlink the chunks related to the MMIO/MMIO2 region.
         */
        for (PPGMREGMMIO2RANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
        {
#ifdef VBOX_WITH_NATIVE_NEM
            if (VM_IS_NEM_ENABLED(pVM)) /* Notify NEM. */
            {
                uint8_t u2State = UINT8_MAX;
                rc = NEMR3NotifyPhysMmioExUnmap(pVM, pCurMmio->RamRange.GCPhys, pCurMmio->RamRange.cb, fNemFlags,
                                                NULL, pCurMmio->pvR3, &u2State, &pCurMmio->RamRange.uNemRange);
                AssertRCStmt(rc, rcRet = rc);
                if (u2State != UINT8_MAX)
                    pgmPhysSetNemStateForPages(pCurMmio->RamRange.aPages, pCurMmio->RamRange.cb >> GUEST_PAGE_SHIFT, u2State);
            }
#endif
            pgmR3PhysUnlinkRamRange(pVM, &pCurMmio->RamRange);
            pCurMmio->RamRange.GCPhys     = NIL_RTGCPHYS;
            pCurMmio->RamRange.GCPhysLast = NIL_RTGCPHYS;
            pCurMmio->fFlags &= ~(PGMREGMMIO2RANGE_F_OVERLAPPING | PGMREGMMIO2RANGE_F_MAPPED);
            if (pCurMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
                break;
        }
    }

    /* Force a PGM pool flush as guest ram references have been changed. */
    /** @todo not entirely SMP safe; assuming for now the guest takes care
     *        of this internally (not touch mapped mmio while changing the
     *        mapping). */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
    VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);

    pgmPhysInvalidatePageMapTLB(pVM);
    pgmPhysInvalidRamRangeTlbs(pVM);

    PGM_UNLOCK(pVM);
    return rcRet;
}
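
/*
 * Illustrative sketch (hypothetical caller, not part of the build): the
 * unmap/map pair used when the guest moves a BAR.  "GCPhysOld"/"GCPhysNew"
 * are made up for the example.
 *
 * @code
 *    int rc = PGMR3PhysMmio2Unmap(pVM, pDevIns, hMmio2, GCPhysOld);
 *    if (RT_SUCCESS(rc))
 *        rc = PGMR3PhysMmio2Map(pVM, pDevIns, hMmio2, GCPhysNew);
 * @endcode
 */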


/**
 * Reduces the mapping size of an MMIO2 region.
 *
 * This is mainly for dealing with old saved states after changing the default
 * size of a mapping region.  See PGMDevHlpMMIOExReduce and
 * PDMPCIDEV::pfnRegionLoadChangeHookR3.
 *
 * The region must not currently be mapped when making this call.  The VM must
 * be in the 'loading' (saved state restore) or 'creating' (VM construction)
 * state.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device instance owning the region.
 * @param   hMmio2      The handle of the region to reduce.
 * @param   cbRegion    The new mapping size.
 */
VMMR3_INT_DECL(int) PGMR3PhysMmio2Reduce(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS cbRegion)
{
    /*
     * Validate input.
     */
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
    AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE, VERR_INVALID_HANDLE);
    AssertReturn(cbRegion >= X86_PAGE_SIZE, VERR_INVALID_PARAMETER);
    AssertReturn(!(cbRegion & X86_PAGE_OFFSET_MASK), VERR_UNSUPPORTED_ALIGNMENT);
    VMSTATE enmVmState = VMR3GetState(pVM);
    AssertLogRelMsgReturn(   enmVmState == VMSTATE_CREATING
                          || enmVmState == VMSTATE_LOADING,
                          ("enmVmState=%d (%s)\n", enmVmState, VMR3GetStateName(enmVmState)),
                          VERR_VM_INVALID_VM_STATE);

    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
    if (pFirstMmio)
    {
        Assert(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK);
        if (!(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED))
        {
            /*
             * NOTE! Current implementation does not support multiple ranges.
             *       Implement when there is a real world need and thus a testcase.
             */
            AssertLogRelMsgStmt(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK,
                                ("%s: %#x\n", pFirstMmio->RamRange.pszDesc, pFirstMmio->fFlags),
                                rc = VERR_NOT_SUPPORTED);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Make the change.
                 */
                Log(("PGMR3PhysMmio2Reduce: %s changes from %RGp bytes (%RGp) to %RGp bytes.\n",
                     pFirstMmio->RamRange.pszDesc, pFirstMmio->RamRange.cb, pFirstMmio->cbReal, cbRegion));

                AssertLogRelMsgStmt(cbRegion <= pFirstMmio->cbReal,
                                    ("%s: cbRegion=%#RGp cbReal=%#RGp\n", pFirstMmio->RamRange.pszDesc, cbRegion, pFirstMmio->cbReal),
                                    rc = VERR_OUT_OF_RANGE);
                if (RT_SUCCESS(rc))
                    pFirstMmio->RamRange.cb = cbRegion;
            }
        }
        else
            rc = VERR_WRONG_ORDER;
    }
    else
        rc = VERR_NOT_FOUND;

    PGM_UNLOCK(pVM);
    return rc;
}
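
/*
 * Illustrative sketch (hypothetical saved-state hook, not part of the build):
 * shrinking a region to the size recorded in an old saved state from a
 * PDMPCIDEV::pfnRegionLoadChangeHookR3 callback.  "cbOldSize" is made up for
 * the example.
 *
 * @code
 *    int rc = PGMR3PhysMmio2Reduce(pVM, pDevIns, hMmio2, cbOldSize);
 *    AssertLogRelRCReturn(rc, rc);
 * @endcode
 */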


/**
 * Validates @a hMmio2, making sure it belongs to @a pDevIns.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device which allegedly owns @a hMmio2.
 * @param   hMmio2      The handle to validate.
 */
VMMR3_INT_DECL(int) PGMR3PhysMmio2ValidateHandle(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2)
{
    /*
     * Validate input.
     */
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);

    /*
     * Just do this the simple way.  No need for locking as this path is only
     * taken during VM construction, when MMIO2 registration happens.
     */
    PGM_LOCK_VOID(pVM);
    PPGMREGMMIO2RANGE pFirstMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
    PGM_UNLOCK(pVM);
    AssertReturn(pFirstMmio, VERR_INVALID_HANDLE);
    AssertReturn(pFirstMmio->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, VERR_INVALID_HANDLE);
    return VINF_SUCCESS;
}


/**
 * Gets the mapping address of an MMIO2 region.
 *
 * @returns Mapping address, NIL_RTGCPHYS if not mapped or invalid handle.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device owning the MMIO2 handle.
 * @param   hMmio2      The region handle.
 */
VMMR3_INT_DECL(RTGCPHYS) PGMR3PhysMmio2GetMappingAddress(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2)
{
    AssertPtrReturn(pDevIns, NIL_RTGCPHYS);

    PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
    AssertReturn(pFirstRegMmio, NIL_RTGCPHYS);

    if (pFirstRegMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED)
        return pFirstRegMmio->RamRange.GCPhys;
    return NIL_RTGCPHYS;
}
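
/*
 * Illustrative sketch (not part of the build): the NIL_RTGCPHYS return value
 * doubles as the "not mapped" indication, so callers test for it directly:
 *
 * @code
 *    RTGCPHYS const GCPhys = PGMR3PhysMmio2GetMappingAddress(pVM, pDevIns, hMmio2);
 *    if (GCPhys != NIL_RTGCPHYS)
 *    {
 *        // the region is currently mapped at GCPhys
 *    }
 * @endcode
 */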


/**
 * Worker for PGMR3PhysMmio2QueryAndResetDirtyBitmap.
 *
 * Called holding the PGM lock.
 */
static int pgmR3PhysMmio2QueryAndResetDirtyBitmapLocked(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
                                                        void *pvBitmap, size_t cbBitmap)
{
    /*
     * Continue validation.
     */
    PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
    AssertReturn(pFirstRegMmio, VERR_INVALID_HANDLE);
    AssertReturn(   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
                 == (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK),
                 VERR_INVALID_FUNCTION);
    AssertReturn(pDevIns == pFirstRegMmio->pDevInsR3, VERR_NOT_OWNER);

    RTGCPHYS cbTotal     = 0;
    uint16_t fTotalDirty = 0;
    for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio;;)
    {
        cbTotal     += pCur->RamRange.cb; /* Not using cbReal here, because NEM is not in on the creation, only the mapping. */
        fTotalDirty |= pCur->fFlags;
        if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
            break;
        pCur = pCur->pNextR3;
        AssertPtrReturn(pCur, VERR_INTERNAL_ERROR_5);
        AssertReturn(   (pCur->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
                     == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
                     VERR_INTERNAL_ERROR_4);
    }
    size_t const cbTotalBitmap = RT_ALIGN_T(cbTotal, GUEST_PAGE_SIZE * 64, RTGCPHYS) / GUEST_PAGE_SIZE / 8;

    if (cbBitmap)
    {
        AssertPtrReturn(pvBitmap, VERR_INVALID_POINTER);
        AssertReturn(RT_ALIGN_P(pvBitmap, sizeof(uint64_t)) == pvBitmap, VERR_INVALID_POINTER);
        AssertReturn(cbBitmap == cbTotalBitmap, VERR_INVALID_PARAMETER);
    }

    /*
     * Do the work.
     */
    int rc = VINF_SUCCESS;
    if (pvBitmap)
    {
#ifdef VBOX_WITH_PGM_NEM_MODE
        if (pFirstRegMmio->pPhysHandlerR3 == NULL)
        {
            /** @todo This does not integrate at all with --execute-all-in-iem, leaving the
             *        screen blank when using it together with --driverless.  Fixing this won't be
             *        entirely easy as we take the PGM_PAGE_HNDL_PHYS_STATE_DISABLED page status to
             *        mean a dirty page. */
            AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_INTERNAL_ERROR_4);
            uint8_t *pbBitmap = (uint8_t *)pvBitmap;
            for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
            {
                size_t const cbBitmapChunk = pCur->RamRange.cb / GUEST_PAGE_SIZE / 8;
                Assert((RTGCPHYS)cbBitmapChunk * GUEST_PAGE_SIZE * 8 == pCur->RamRange.cb);
                int rc2 = NEMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb,
                                                                 pCur->RamRange.uNemRange, pbBitmap, cbBitmapChunk);
                if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                    rc = rc2;
                if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
                    break;
                pbBitmap += pCur->RamRange.cb / GUEST_PAGE_SIZE / 8;
            }
        }
        else
#endif
        if (fTotalDirty & PGMREGMMIO2RANGE_F_IS_DIRTY)
        {
            if (   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
                ==                          (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
            {
                /*
                 * Reset each chunk, gathering dirty bits.
                 */
                RT_BZERO(pvBitmap, cbBitmap); /* simpler for now. */
                uint32_t iPageNo = 0;
                for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
                {
                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY)
                    {
                        int rc2 = pgmHandlerPhysicalResetMmio2WithBitmap(pVM, pCur->RamRange.GCPhys, pvBitmap, iPageNo);
                        if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                            rc = rc2;
                        pCur->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
                    }
                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
                        break;
                    iPageNo += pCur->RamRange.cb >> GUEST_PAGE_SHIFT;
                }
            }
            else
            {
                /*
                 * If not mapped or tracking is disabled, we return the
                 * PGMREGMMIO2RANGE_F_IS_DIRTY status for all pages.  We cannot
                 * get more accurate data than that after unmapping or disabling.
                 */
                RT_BZERO(pvBitmap, cbBitmap);
                uint32_t iPageNo = 0;
                for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
                {
                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_IS_DIRTY)
                    {
                        ASMBitSetRange(pvBitmap, iPageNo, iPageNo + (pCur->RamRange.cb >> GUEST_PAGE_SHIFT));
                        pCur->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
                    }
                    if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
                        break;
                    iPageNo += pCur->RamRange.cb >> GUEST_PAGE_SHIFT;
                }
            }
        }
        /*
         * No dirty chunks.
         */
        else
            RT_BZERO(pvBitmap, cbBitmap);
    }
    /*
     * No bitmap.  Reset the region if tracking is currently enabled.
     */
    else if (   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
             ==                          (PGMREGMMIO2RANGE_F_MAPPED | PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
    {
#ifdef VBOX_WITH_PGM_NEM_MODE
        if (pFirstRegMmio->pPhysHandlerR3 == NULL)
        {
            AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_INTERNAL_ERROR_4);
            for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
            {
                int rc2 = NEMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb,
                                                                 pCur->RamRange.uNemRange, NULL, 0);
                if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                    rc = rc2;
                if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
                    break;
            }
        }
        else
#endif
        {
            for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio; pCur; pCur = pCur->pNextR3)
            {
                pCur->fFlags &= ~PGMREGMMIO2RANGE_F_IS_DIRTY;
                int rc2 = PGMHandlerPhysicalReset(pVM, pCur->RamRange.GCPhys);
                if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                    rc = rc2;
                if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
                    break;
            }
        }
    }

    return rc;
}


/**
 * Queries the dirty page bitmap and resets the monitoring.
 *
 * The PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES flag must be specified when
 * creating the range for this to work.
 *
 * @returns VBox status code.
 * @retval  VERR_INVALID_FUNCTION if not created using
 *          PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES.
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device owning the MMIO2 handle.
 * @param   hMmio2      The region handle.
 * @param   pvBitmap    The output bitmap.  Must be 8-byte aligned.  Ignored
 *                      when @a cbBitmap is zero.
 * @param   cbBitmap    The size of the bitmap.  Must be the size of the whole
 *                      MMIO2 range, rounded up to the nearest 8 bytes.
 *                      When zero only a reset is done.
 */
VMMR3_INT_DECL(int) PGMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
                                                           void *pvBitmap, size_t cbBitmap)
{
    /*
     * Do some basic validation before grabbing the PGM lock and continuing.
     */
    AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
    AssertReturn(RT_ALIGN_Z(cbBitmap, sizeof(uint64_t)) == cbBitmap, VERR_INVALID_PARAMETER);
    int rc = PGM_LOCK(pVM);
    if (RT_SUCCESS(rc))
    {
        STAM_PROFILE_START(&pVM->pgm.s.StatMmio2QueryAndResetDirtyBitmap, a);
        rc = pgmR3PhysMmio2QueryAndResetDirtyBitmapLocked(pVM, pDevIns, hMmio2, pvBitmap, cbBitmap);
        STAM_PROFILE_STOP(&pVM->pgm.s.StatMmio2QueryAndResetDirtyBitmap, a);
        PGM_UNLOCK(pVM);
    }
    return rc;
}
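
/*
 * Illustrative sketch (not part of the original source): sizing the bitmap
 * and polling the API above.  One bit covers one guest page and the buffer
 * must span a multiple of 64 pages, hence the GUEST_PAGE_SIZE * 64 alignment;
 * a 16 MiB region with 4 KiB pages thus needs 4096 / 8 = 512 bytes.  The
 * function name is hypothetical.
 */
#if 0 /* example only */
static int myDevPollDirtyPages(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS cbRegion)
{
    size_t const    cbBitmap = RT_ALIGN_T(cbRegion, GUEST_PAGE_SIZE * 64, RTGCPHYS) / GUEST_PAGE_SIZE / 8;
    uint64_t *const pbmDirty = (uint64_t *)RTMemAllocZ(cbBitmap); /* RTMemAllocZ alignment satisfies the 8-byte requirement */
    AssertReturn(pbmDirty, VERR_NO_MEMORY);

    int rc = PGMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pDevIns, hMmio2, pbmDirty, cbBitmap);
    if (RT_SUCCESS(rc))
    {
        uint32_t const cPages = (uint32_t)(cbRegion >> GUEST_PAGE_SHIFT);
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
            if (ASMBitTest(pbmDirty, iPage))
            {
                /* re-process the dirty page, e.g. push it to the host display */
            }
    }
    RTMemFree(pbmDirty);
    return rc;
}
#endif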


/**
 * Worker for PGMR3PhysMmio2ControlDirtyPageTracking.
 *
 * Called owning the PGM lock.
 */
static int pgmR3PhysMmio2ControlDirtyPageTrackingLocked(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, bool fEnabled)
{
    /*
     * Continue validation.
     */
    PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
    AssertReturn(pFirstRegMmio, VERR_INVALID_HANDLE);
    AssertReturn(   (pFirstRegMmio->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
                 == (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK)
                 , VERR_INVALID_FUNCTION);
    AssertReturn(pDevIns == pFirstRegMmio->pDevInsR3, VERR_NOT_OWNER);

#ifdef VBOX_WITH_PGM_NEM_MODE
    /*
     * This is a nop if NEM is responsible for doing the tracking, we simply
     * leave the tracking on all the time there.
     */
    if (pFirstRegMmio->pPhysHandlerR3 == NULL)
    {
        AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_INTERNAL_ERROR_4);
        return VINF_SUCCESS;
    }
#endif

    /*
     * Anything needing doing?
     */
    if (fEnabled != RT_BOOL(pFirstRegMmio->fFlags & PGMREGMMIO2RANGE_F_TRACKING_ENABLED))
    {
        LogFlowFunc(("fEnabled=%RTbool %s\n", fEnabled, pFirstRegMmio->RamRange.pszDesc));

        /*
         * Update the PGMREGMMIO2RANGE_F_TRACKING_ENABLED flag.
         */
        for (PPGMREGMMIO2RANGE pCur = pFirstRegMmio;;)
        {
            if (fEnabled)
                pCur->fFlags |= PGMREGMMIO2RANGE_F_TRACKING_ENABLED;
            else
                pCur->fFlags &= ~PGMREGMMIO2RANGE_F_TRACKING_ENABLED;
            if (pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK)
                break;
            pCur = pCur->pNextR3;
            AssertPtrReturn(pCur, VERR_INTERNAL_ERROR_5);
            AssertReturn(   (pCur->fFlags & (PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES | PGMREGMMIO2RANGE_F_FIRST_CHUNK))
                         == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES
                         , VERR_INTERNAL_ERROR_4);
        }

        /*
         * Enable/disable handlers if currently mapped.
         *
         * We ignore status codes here as we've already changed the flags and
         * returning a failure status now would be confusing.  Besides, the two
         * functions will continue past failures.  As argued in the mapping code,
         * it's in the release log.
         */
        if (pFirstRegMmio->fFlags & PGMREGMMIO2RANGE_F_MAPPED)
        {
            if (fEnabled)
                pgmR3PhysMmio2EnableDirtyPageTracing(pVM, pFirstRegMmio);
            else
                pgmR3PhysMmio2DisableDirtyPageTracing(pVM, pFirstRegMmio);
        }
    }
    else
        LogFlowFunc(("fEnabled=%RTbool %s - no change\n", fEnabled, pFirstRegMmio->RamRange.pszDesc));

    return VINF_SUCCESS;
}

/**
 * Controls the dirty page tracking for an MMIO2 range.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device owning the MMIO2 memory.
 * @param   hMmio2      The handle of the region.
 * @param   fEnabled    The new tracking state.
 */
VMMR3_INT_DECL(int) PGMR3PhysMmio2ControlDirtyPageTracking(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, bool fEnabled)
{
    /*
     * Do some basic validation before grabbing the PGM lock and continuing.
     */
    AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);
    int rc = PGM_LOCK(pVM);
    if (RT_SUCCESS(rc))
    {
        rc = pgmR3PhysMmio2ControlDirtyPageTrackingLocked(pVM, pDevIns, hMmio2, fEnabled);
        PGM_UNLOCK(pVM);
    }
    return rc;
}
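
/*
 * Illustrative sketch (not part of the original source): a framebuffer-style
 * device would typically enable dirty page tracking only while something is
 * consuming the dirty information and disable it again afterwards, since the
 * write monitoring behind it makes the first write to each page trap.  The
 * function and parameter names are hypothetical.
 */
#if 0 /* example only */
static int myDevSetDisplayConnected(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, bool fConnected)
{
    /* Track dirty pages only while a display consumer is attached. */
    return PGMR3PhysMmio2ControlDirtyPageTracking(pVM, pDevIns, hMmio2, fConnected /*fEnabled*/);
}
#endif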


/**
 * Changes the region number of an MMIO2 region.
 *
 * This is only for dealing with saved state issues, nothing else.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device owning the MMIO2 memory.
 * @param   hMmio2      The handle of the region.
 * @param   iNewRegion  The new region index.
 *
 * @thread  EMT(0)
 * @sa      @bugref{9359}
 */
VMMR3_INT_DECL(int) PGMR3PhysMmio2ChangeRegionNo(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, uint32_t iNewRegion)
{
    /*
     * Validate input.
     */
    VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    VM_ASSERT_STATE_RETURN(pVM, VMSTATE_LOADING, VERR_VM_INVALID_VM_STATE);
    AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
    AssertReturn(hMmio2 != NIL_PGMMMIO2HANDLE, VERR_INVALID_HANDLE);
    AssertReturn(iNewRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);

    AssertReturn(pVM->enmVMState == VMSTATE_LOADING, VERR_INVALID_STATE);

    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    PPGMREGMMIO2RANGE pFirstRegMmio = pgmR3PhysMmio2Find(pVM, pDevIns, UINT32_MAX, UINT32_MAX, hMmio2);
    AssertReturnStmt(pFirstRegMmio, PGM_UNLOCK(pVM), VERR_NOT_FOUND);
    AssertReturnStmt(pgmR3PhysMmio2Find(pVM, pDevIns, pFirstRegMmio->iSubDev, iNewRegion, NIL_PGMMMIO2HANDLE) == NULL,
                     PGM_UNLOCK(pVM), VERR_RESOURCE_IN_USE);

    /*
     * Make the change.
     */
    pFirstRegMmio->iRegion = (uint8_t)iNewRegion;

    PGM_UNLOCK(pVM);
    return VINF_SUCCESS;
}
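
/*
 * Illustrative sketch (not part of the original source): the intended caller
 * is saved-state fixup code running on EMT(0) while the VM state is
 * VMSTATE_LOADING, e.g. when an old saved state recorded a region under a
 * different region number.  The version constant and region number are made up.
 */
#if 0 /* example only */
static int myDevLoadFixup(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, uint32_t uSsmVersion)
{
    if (uSsmVersion < MYDEV_SSM_VERSION_SPLIT_REGIONS) /* hypothetical saved-state version constant */
    {
        int rc = PGMR3PhysMmio2ChangeRegionNo(pVM, pDevIns, hMmio2, 2 /*iNewRegion*/);
        AssertLogRelRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}
#endif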



/*********************************************************************************************************************************
*   ROM                                                                                                                          *
*********************************************************************************************************************************/

/**
 * Worker for PGMR3PhysRomRegister.
 *
 * This is here to simplify lock management, i.e. the caller does all the
 * locking and we can simply return without needing to remember to unlock
 * anything first.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pDevIns         The device instance owning the ROM.
 * @param   GCPhys          First physical address in the range.
 *                          Must be page aligned!
 * @param   cb              The size of the range (in bytes).
 *                          Must be page aligned!
 * @param   pvBinary        Pointer to the binary data backing the ROM image.
 * @param   cbBinary        The size of the binary data pvBinary points to.
 *                          This must be less than or equal to @a cb.
 * @param   fFlags          Mask of flags.  PGMPHYS_ROM_FLAGS_SHADOWED
 *                          and/or PGMPHYS_ROM_FLAGS_PERMANENT_BINARY.
 * @param   pszDesc         Pointer to description string.  This must not be freed.
 */
static int pgmR3PhysRomRegisterLocked(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
                                      const void *pvBinary, uint32_t cbBinary, uint8_t fFlags, const char *pszDesc)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
    AssertReturn(RT_ALIGN_T(GCPhys, GUEST_PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(RT_ALIGN_T(cb, GUEST_PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    AssertReturn(!(fFlags & ~PGMPHYS_ROM_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
    VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);

    const uint32_t cGuestPages = cb >> GUEST_PAGE_SHIFT;
#ifdef VBOX_WITH_PGM_NEM_MODE
    const uint32_t cHostPages  = RT_ALIGN_T(cb, HOST_PAGE_SIZE, RTGCPHYS) >> HOST_PAGE_SHIFT;
#endif

    /*
     * Find the ROM location in the ROM list first.
     */
    PPGMROMRANGE pRomPrev = NULL;
    PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
    while (pRom && GCPhysLast >= pRom->GCPhys)
    {
        if (    GCPhys <= pRom->GCPhysLast
            &&  GCPhysLast >= pRom->GCPhys)
            AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
                                         GCPhys, GCPhysLast, pszDesc,
                                         pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
                                        VERR_PGM_RAM_CONFLICT);
        /* next */
        pRomPrev = pRom;
        pRom = pRom->pNextR3;
    }

    /*
     * Find the RAM location and check for conflicts.
     *
     * Conflict detection is a bit different than for RAM registration since a
     * ROM can be located within a RAM range.  So, what we have to check for is
     * other memory types (other than RAM that is) and that we don't span more
     * than one RAM range (lazy).
     */
    bool         fRamExists = false;
    PPGMRAMRANGE pRamPrev = NULL;
    PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
    while (pRam && GCPhysLast >= pRam->GCPhys)
    {
        if (    GCPhys <= pRam->GCPhysLast
            &&  GCPhysLast >= pRam->GCPhys)
        {
            /* completely within? */
            AssertLogRelMsgReturn(   GCPhys >= pRam->GCPhys
                                  && GCPhysLast <= pRam->GCPhysLast,
                                  ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
                                   GCPhys, GCPhysLast, pszDesc,
                                   pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
                                  VERR_PGM_RAM_CONFLICT);
            fRamExists = true;
            break;
        }

        /* next */
        pRamPrev = pRam;
        pRam = pRam->pNextR3;
    }
    if (fRamExists)
    {
        PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
        uint32_t cPagesLeft = cGuestPages;
        while (cPagesLeft-- > 0)
        {
            AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
                                  ("%RGp (%R[pgmpage]) isn't a RAM page - registering %RGp-%RGp (%s).\n",
                                   pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << GUEST_PAGE_SHIFT),
                                   pPage, GCPhys, GCPhysLast, pszDesc), VERR_PGM_RAM_CONFLICT);
            Assert(PGM_PAGE_IS_ZERO(pPage) || PGM_IS_IN_NEM_MODE(pVM));
            pPage++;
        }
    }

    /*
     * Update the base memory reservation if necessary.
     */
    uint32_t cExtraBaseCost = fRamExists ? 0 : cGuestPages;
    if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        cExtraBaseCost += cGuestPages;
    if (cExtraBaseCost)
    {
        int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
        if (RT_FAILURE(rc))
            return rc;
    }

#ifdef VBOX_WITH_NATIVE_NEM
    /*
     * Early NEM notification before we've made any changes or anything.
     */
    uint32_t const fNemNotify = (fRamExists ? NEM_NOTIFY_PHYS_ROM_F_REPLACE : 0)
                              | (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED ? NEM_NOTIFY_PHYS_ROM_F_SHADOW : 0);
    uint8_t        u2NemState = UINT8_MAX;
    uint32_t       uNemRange  = 0;
    if (VM_IS_NEM_ENABLED(pVM))
    {
        int rc = NEMR3NotifyPhysRomRegisterEarly(pVM, GCPhys, cGuestPages << GUEST_PAGE_SHIFT,
                                                 fRamExists ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL,
                                                 fNemNotify, &u2NemState, fRamExists ? &pRam->uNemRange : &uNemRange);
        AssertLogRelRCReturn(rc, rc);
    }
#endif

    /*
     * Allocate memory for the virgin copy of the RAM.  In simplified memory mode,
     * we allocate memory for any ad-hoc RAM range and for shadow pages.
     */
    PGMMALLOCATEPAGESREQ pReq = NULL;
#ifdef VBOX_WITH_PGM_NEM_MODE
    void *pvRam = NULL;
    void *pvAlt = NULL;
    if (pVM->pgm.s.fNemMode)
    {
        if (!fRamExists)
        {
            int rc = SUPR3PageAlloc(cHostPages, 0, &pvRam);
            if (RT_FAILURE(rc))
                return rc;
        }
        if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        {
            int rc = SUPR3PageAlloc(cHostPages, 0, &pvAlt);
            if (RT_FAILURE(rc))
            {
                if (pvRam)
                    SUPR3PageFree(pvRam, cHostPages);
                return rc;
            }
        }
    }
    else
#endif
    {
        int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cGuestPages, GMMACCOUNT_BASE);
        AssertRCReturn(rc, rc);

        for (uint32_t iPage = 0; iPage < cGuestPages; iPage++)
        {
            pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << GUEST_PAGE_SHIFT);
            pReq->aPages[iPage].fZeroed      = false;
            pReq->aPages[iPage].idPage       = NIL_GMM_PAGEID;
            pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
        }

        rc = GMMR3AllocatePagesPerform(pVM, pReq);
        if (RT_FAILURE(rc))
        {
            GMMR3AllocatePagesCleanup(pReq);
            return rc;
        }
    }

    /*
     * Allocate the new ROM range and RAM range (if necessary).
     */
    PPGMROMRANGE pRomNew   = NULL;
    RTR0PTR      pRomNewR0 = NIL_RTR0PTR;
    size_t const cbRomRange  = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMROMRANGE, aPages[cGuestPages]), 128);
    size_t const cbRamRange  = fRamExists ? 0 : RT_UOFFSETOF_DYN(PGMROMRANGE, aPages[cGuestPages]);
    size_t const cRangePages = RT_ALIGN_Z(cbRomRange + cbRamRange, HOST_PAGE_SIZE) >> HOST_PAGE_SHIFT;
    int rc = SUPR3PageAllocEx(cRangePages, 0 /*fFlags*/, (void **)&pRomNew, &pRomNewR0, NULL /*paPages*/);
    if (RT_SUCCESS(rc))
    {

        /*
         * Initialize and insert the RAM range (if required).
         */
        PPGMRAMRANGE pRamNew;
        uint32_t const idxFirstRamPage = fRamExists ? (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT : 0;
        PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
        if (!fRamExists)
        {
            /* New RAM range. */
            pRamNew = (PPGMRAMRANGE)((uintptr_t)pRomNew + cbRomRange);
            pRamNew->pSelfR0    = !pRomNewR0 ? NIL_RTR0PTR : pRomNewR0 + cbRomRange;
            pRamNew->GCPhys     = GCPhys;
            pRamNew->GCPhysLast = GCPhysLast;
            pRamNew->cb         = cb;
            pRamNew->pszDesc    = pszDesc;
            pRamNew->fFlags     = PGM_RAM_RANGE_FLAGS_AD_HOC_ROM;
            pRamNew->pvR3       = NULL;
            pRamNew->paLSPages  = NULL;
#ifdef VBOX_WITH_NATIVE_NEM
            pRamNew->uNemRange  = uNemRange;
#endif

            PPGMPAGE pRamPage = &pRamNew->aPages[idxFirstRamPage];
#ifdef VBOX_WITH_PGM_NEM_MODE
            if (pVM->pgm.s.fNemMode)
            {
                AssertPtr(pvRam); Assert(pReq == NULL);
                pRamNew->pvR3 = pvRam;
                for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
                {
                    PGM_PAGE_INIT(pRamPage, UINT64_C(0x0000fffffffff000), NIL_GMM_PAGEID,
                                  PGMPAGETYPE_ROM, PGM_PAGE_STATE_ALLOCATED);
                    pRomPage->Virgin = *pRamPage;
                }
            }
            else
#endif
                for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
                {
                    PGM_PAGE_INIT(pRamPage,
                                  pReq->aPages[iPage].HCPhysGCPhys,
                                  pReq->aPages[iPage].idPage,
                                  PGMPAGETYPE_ROM,
                                  PGM_PAGE_STATE_ALLOCATED);

                    pRomPage->Virgin = *pRamPage;
                }

            pVM->pgm.s.cAllPages     += cGuestPages;
            pVM->pgm.s.cPrivatePages += cGuestPages;
            pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
        }
        else
        {
            /* Existing RAM range. */
            PPGMPAGE pRamPage = &pRam->aPages[idxFirstRamPage];
#ifdef VBOX_WITH_PGM_NEM_MODE
            if (pVM->pgm.s.fNemMode)
            {
                Assert(pvRam == NULL); Assert(pReq == NULL);
                for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
                {
                    Assert(PGM_PAGE_GET_HCPHYS(pRamPage) == UINT64_C(0x0000fffffffff000));
                    Assert(PGM_PAGE_GET_PAGEID(pRamPage) == NIL_GMM_PAGEID);
                    Assert(PGM_PAGE_GET_STATE(pRamPage)  == PGM_PAGE_STATE_ALLOCATED);
                    PGM_PAGE_SET_TYPE(pVM, pRamPage, PGMPAGETYPE_ROM);
                    PGM_PAGE_SET_STATE(pVM, pRamPage, PGM_PAGE_STATE_ALLOCATED);
                    PGM_PAGE_SET_PDE_TYPE(pVM, pRamPage, PGM_PAGE_PDE_TYPE_DONTCARE);
                    PGM_PAGE_SET_PTE_INDEX(pVM, pRamPage, 0);
                    PGM_PAGE_SET_TRACKING(pVM, pRamPage, 0);

                    pRomPage->Virgin = *pRamPage;
                }
            }
            else
#endif
            {
                for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
                {
                    PGM_PAGE_SET_TYPE(pVM, pRamPage, PGMPAGETYPE_ROM);
                    PGM_PAGE_SET_HCPHYS(pVM, pRamPage, pReq->aPages[iPage].HCPhysGCPhys);
                    PGM_PAGE_SET_STATE(pVM, pRamPage, PGM_PAGE_STATE_ALLOCATED);
                    PGM_PAGE_SET_PAGEID(pVM, pRamPage, pReq->aPages[iPage].idPage);
                    PGM_PAGE_SET_PDE_TYPE(pVM, pRamPage, PGM_PAGE_PDE_TYPE_DONTCARE);
                    PGM_PAGE_SET_PTE_INDEX(pVM, pRamPage, 0);
                    PGM_PAGE_SET_TRACKING(pVM, pRamPage, 0);

                    pRomPage->Virgin = *pRamPage;
                }
                pVM->pgm.s.cZeroPages    -= cGuestPages;
                pVM->pgm.s.cPrivatePages += cGuestPages;
            }
            pRamNew = pRam;
        }

#ifdef VBOX_WITH_NATIVE_NEM
        /* Set the NEM state of the pages if needed. */
        if (u2NemState != UINT8_MAX)
            pgmPhysSetNemStateForPages(&pRamNew->aPages[idxFirstRamPage], cGuestPages, u2NemState);
#endif

        /* Flush physical page map TLB. */
        pgmPhysInvalidatePageMapTLB(pVM);

        /*
         * Register the ROM access handler.
         */
        rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, pVM->pgm.s.hRomPhysHandlerType, GCPhys, pszDesc);
        if (RT_SUCCESS(rc))
        {
            /*
             * Copy the image over to the virgin pages.
             * This must be done after linking in the RAM range.
             */
            size_t   cbBinaryLeft = cbBinary;
            PPGMPAGE pRamPage     = &pRamNew->aPages[idxFirstRamPage];
            for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++)
            {
                void *pvDstPage;
                rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << GUEST_PAGE_SHIFT), &pvDstPage);
                if (RT_FAILURE(rc))
                {
                    VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
                    break;
                }
                if (cbBinaryLeft >= GUEST_PAGE_SIZE)
                {
                    memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << GUEST_PAGE_SHIFT), GUEST_PAGE_SIZE);
                    cbBinaryLeft -= GUEST_PAGE_SIZE;
                }
                else
                {
                    RT_BZERO(pvDstPage, GUEST_PAGE_SIZE); /* (shouldn't be necessary, but can't hurt either) */
                    if (cbBinaryLeft > 0)
                    {
                        memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << GUEST_PAGE_SHIFT), cbBinaryLeft);
                        cbBinaryLeft = 0;
                    }
                }
            }
            if (RT_SUCCESS(rc))
            {
                /*
                 * Initialize the ROM range.
                 * Note that the Virgin member of the pages has already been initialized above.
                 */
                pRomNew->pSelfR0       = pRomNewR0;
                pRomNew->GCPhys        = GCPhys;
                pRomNew->GCPhysLast    = GCPhysLast;
                pRomNew->cb            = cb;
                pRomNew->fFlags        = fFlags;
                pRomNew->idSavedState  = UINT8_MAX;
                pRomNew->cbOriginal    = cbBinary;
                pRomNew->pszDesc       = pszDesc;
#ifdef VBOX_WITH_PGM_NEM_MODE
                pRomNew->pbR3Alternate = (uint8_t *)pvAlt;
#endif
                pRomNew->pvOriginal    = fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY
                                       ? pvBinary : RTMemDup(pvBinary, cbBinary);
                if (pRomNew->pvOriginal)
                {
                    for (unsigned iPage = 0; iPage < cGuestPages; iPage++)
                    {
                        PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
                        pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
#ifdef VBOX_WITH_PGM_NEM_MODE
                        if (pVM->pgm.s.fNemMode)
                            PGM_PAGE_INIT(&pPage->Shadow, UINT64_C(0x0000fffffffff000), NIL_GMM_PAGEID,
                                          PGMPAGETYPE_ROM_SHADOW, PGM_PAGE_STATE_ALLOCATED);
                        else
#endif
                            PGM_PAGE_INIT_ZERO(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
                    }

                    /* update the page count stats for the shadow pages. */
                    if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
                    {
#ifdef VBOX_WITH_PGM_NEM_MODE
                        if (pVM->pgm.s.fNemMode)
                            pVM->pgm.s.cPrivatePages += cGuestPages;
                        else
#endif
                            pVM->pgm.s.cZeroPages += cGuestPages;
                        pVM->pgm.s.cAllPages  += cGuestPages;
                    }

                    /*
                     * Insert the ROM range, tell REM and return successfully.
                     */
                    pRomNew->pNextR3 = pRom;
                    pRomNew->pNextR0 = pRom ? pRom->pSelfR0 : NIL_RTR0PTR;

                    if (pRomPrev)
                    {
                        pRomPrev->pNextR3 = pRomNew;
                        pRomPrev->pNextR0 = pRomNew->pSelfR0;
                    }
                    else
                    {
                        pVM->pgm.s.pRomRangesR3 = pRomNew;
                        pVM->pgm.s.pRomRangesR0 = pRomNew->pSelfR0;
                    }

                    pgmPhysInvalidatePageMapTLB(pVM);
#ifdef VBOX_WITH_PGM_NEM_MODE
                    if (!pVM->pgm.s.fNemMode)
#endif
                        GMMR3AllocatePagesCleanup(pReq);

#ifdef VBOX_WITH_NATIVE_NEM
                    /*
                     * Notify NEM again.
                     */
                    if (VM_IS_NEM_ENABLED(pVM))
                    {
                        u2NemState = UINT8_MAX;
                        rc = NEMR3NotifyPhysRomRegisterLate(pVM, GCPhys, cb, PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamNew, GCPhys),
                                                            fNemNotify, &u2NemState,
                                                            fRamExists ? &pRam->uNemRange : &pRamNew->uNemRange);
                        if (u2NemState != UINT8_MAX)
                            pgmPhysSetNemStateForPages(&pRamNew->aPages[idxFirstRamPage], cGuestPages, u2NemState);
                        if (RT_SUCCESS(rc))
                            return rc;
                    }
                    else
#endif
                        return rc;

                    /*
                     * bail out
                     */
#ifdef VBOX_WITH_NATIVE_NEM
                    /* unlink */
                    if (pRomPrev)
                    {
                        pRomPrev->pNextR3 = pRom;
                        pRomPrev->pNextR0 = pRom ? pRom->pSelfR0 : NIL_RTR0PTR;
                    }
                    else
                    {
                        pVM->pgm.s.pRomRangesR3 = pRom;
                        pVM->pgm.s.pRomRangesR0 = pRom ? pRom->pSelfR0 : NIL_RTR0PTR;
                    }

                    if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
                    {
# ifdef VBOX_WITH_PGM_NEM_MODE
                        if (pVM->pgm.s.fNemMode)
                            pVM->pgm.s.cPrivatePages -= cGuestPages;
                        else
# endif
                            pVM->pgm.s.cZeroPages -= cGuestPages;
                        pVM->pgm.s.cAllPages  -= cGuestPages;
                    }
#endif
                }
                else
                    rc = VERR_NO_MEMORY;
            }

            int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
            AssertRC(rc2);
        }

        if (!fRamExists)
            pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
        else
        {
            PPGMPAGE pRamPage = &pRam->aPages[idxFirstRamPage];
#ifdef VBOX_WITH_PGM_NEM_MODE
            if (pVM->pgm.s.fNemMode)
            {
                Assert(pvRam == NULL); Assert(pReq == NULL);
                for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
                {
                    Assert(PGM_PAGE_GET_HCPHYS(pRamPage) == UINT64_C(0x0000fffffffff000));
                    Assert(PGM_PAGE_GET_PAGEID(pRamPage) == NIL_GMM_PAGEID);
                    Assert(PGM_PAGE_GET_STATE(pRamPage)  == PGM_PAGE_STATE_ALLOCATED);
                    PGM_PAGE_SET_TYPE(pVM, pRamPage, PGMPAGETYPE_RAM);
                    PGM_PAGE_SET_STATE(pVM, pRamPage, PGM_PAGE_STATE_ALLOCATED);
                }
            }
            else
#endif
            {
                for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++)
                    PGM_PAGE_INIT_ZERO(pRamPage, pVM, PGMPAGETYPE_RAM);
                pVM->pgm.s.cZeroPages    += cGuestPages;
                pVM->pgm.s.cPrivatePages -= cGuestPages;
            }
        }

        SUPR3PageFreeEx(pRomNew, cRangePages);
    }

    /** @todo Purge the mapping cache or something... */
#ifdef VBOX_WITH_PGM_NEM_MODE
    if (pVM->pgm.s.fNemMode)
    {
        Assert(!pReq);
        if (pvRam)
            SUPR3PageFree(pvRam, cHostPages);
        if (pvAlt)
            SUPR3PageFree(pvAlt, cHostPages);
    }
    else
#endif
    {
        GMMR3FreeAllocatedPages(pVM, pReq);
        GMMR3AllocatePagesCleanup(pReq);
    }
    return rc;
}


/**
 * Registers a ROM image.
 *
 * Shadowed ROM images require double the amount of backing memory, so don't
 * use that unless you have to.  Shadowing of ROM images is a process where we
 * can select where the reads go and where the writes go.  On real hardware
 * the chipset provides means to configure this.  We provide
 * PGMR3PhysProtectROM() for this purpose.
 *
 * A read-only copy of the ROM image will always be kept around while we will
 * allocate RAM pages for the changes on demand (unless all memory is
 * configured to be preallocated).
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pDevIns         The device instance owning the ROM.
 * @param   GCPhys          First physical address in the range.
 *                          Must be page aligned!
 * @param   cb              The size of the range (in bytes).
 *                          Must be page aligned!
 * @param   pvBinary        Pointer to the binary data backing the ROM image.
 * @param   cbBinary        The size of the binary data pvBinary points to.
 *                          This must be less than or equal to @a cb.
 * @param   fFlags          Mask of flags, PGMPHYS_ROM_FLAGS_XXX.
 * @param   pszDesc         Pointer to description string.  This must not be freed.
 *
 * @remark  There is no way to remove the ROM yet, neither automatically on
 *          device cleanup nor manually from the device.  This isn't difficult
 *          in any way, it's just not something we expect to be necessary for
 *          a while.
 */
VMMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
                                    const void *pvBinary, uint32_t cbBinary, uint8_t fFlags, const char *pszDesc)
{
    Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p cbBinary=%#x fFlags=%#x pszDesc=%s\n",
         pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, cbBinary, fFlags, pszDesc));
    PGM_LOCK_VOID(pVM);
    int rc = pgmR3PhysRomRegisterLocked(pVM, pDevIns, GCPhys, cb, pvBinary, cbBinary, fFlags, pszDesc);
    PGM_UNLOCK(pVM);
    return rc;
}
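
/*
 * Illustrative sketch (not part of the original source): registering a
 * shadowed 128 KiB firmware image just below 4 GiB during device
 * construction.  The g_abMyFirmware blob and function name are hypothetical;
 * both address and size satisfy the page alignment requirements documented
 * above.
 */
#if 0 /* example only */
static int myDevConstructRom(PVM pVM, PPDMDEVINS pDevIns)
{
    return PGMR3PhysRomRegister(pVM, pDevIns, 0xfffe0000 /*GCPhys*/, 0x20000 /*cb*/,
                                g_abMyFirmware, sizeof(g_abMyFirmware),
                                PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY,
                                "MyDev Firmware");
}
#endif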


/**
 * Called by PGMR3MemSetup to reset the shadow, switch to the virgin, and verify
 * that the virgin part is untouched.
 *
 * This is done after the normal memory has been cleared.
 *
 * ASSUMES that the caller owns the PGM lock.
 *
 * @param   pVM         The cross context VM structure.
 */
int pgmR3PhysRomReset(PVM pVM)
{
    PGM_LOCK_ASSERT_OWNER(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        const uint32_t cGuestPages = pRom->cb >> GUEST_PAGE_SHIFT;

        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        {
            /*
             * Reset the physical handler.
             */
            int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
            AssertRCReturn(rc, rc);

            /*
             * What we do with the shadow pages depends on the memory
             * preallocation option.  If not enabled, we'll just throw
             * out all the dirty pages and replace them by the zero page.
             */
#ifdef VBOX_WITH_PGM_NEM_MODE
            if (pVM->pgm.s.fNemMode)
            {
                /* Clear all the shadow pages (currently using alternate backing). */
                RT_BZERO(pRom->pbR3Alternate, pRom->cb);
            }
            else
#endif
            if (!pVM->pgm.s.fRamPreAlloc)
            {
                /* Free the dirty pages. */
                uint32_t         cPendingPages = 0;
                PGMMFREEPAGESREQ pReq;
                rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
                AssertRCReturn(rc, rc);

                for (uint32_t iPage = 0; iPage < cGuestPages; iPage++)
                    if (   !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow)
                        && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow))
                    {
                        Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) == PGM_PAGE_STATE_ALLOCATED);
                        rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRom->aPages[iPage].Shadow,
                                             pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT),
                                             (PGMPAGETYPE)PGM_PAGE_GET_TYPE(&pRom->aPages[iPage].Shadow));
                        AssertLogRelRCReturn(rc, rc);
                    }

                if (cPendingPages)
                {
                    rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
                    AssertLogRelRCReturn(rc, rc);
                }
                GMMR3FreePagesCleanup(pReq);
            }
            else
            {
                /* clear all the shadow pages. */
                for (uint32_t iPage = 0; iPage < cGuestPages; iPage++)
                {
                    if (PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow))
                        continue;
                    Assert(!PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow));
                    void *pvDstPage;
                    const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT);
                    rc = pgmPhysPageMakeWritableAndMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pvDstPage);
                    if (RT_FAILURE(rc))
                        break;
                    RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
                }
                AssertRCReturn(rc, rc);
            }
        }

        /*
         * Restore the original ROM pages after a saved state load.
         * Also, in strict builds check that ROM pages remain unmodified.
         */
#ifndef VBOX_STRICT
        if (pVM->pgm.s.fRestoreRomPagesOnReset)
#endif
        {
            size_t         cbSrcLeft = pRom->cbOriginal;
            uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
            uint32_t       cRestored = 0;
            for (uint32_t iPage = 0; iPage < cGuestPages && cbSrcLeft > 0; iPage++, pbSrcPage += GUEST_PAGE_SIZE)
            {
                const RTGCPHYS GCPhys    = pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT);
                PPGMPAGE const pPage     = pgmPhysGetPage(pVM, GCPhys);
                void const    *pvDstPage = NULL;
                int rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvDstPage);
                if (RT_FAILURE(rc))
                    break;

                if (memcmp(pvDstPage, pbSrcPage, RT_MIN(cbSrcLeft, GUEST_PAGE_SIZE)))
                {
                    if (pVM->pgm.s.fRestoreRomPagesOnReset)
                    {
                        void *pvDstPageW = NULL;
                        rc = pgmPhysPageMap(pVM, pPage, GCPhys, &pvDstPageW);
                        AssertLogRelRCReturn(rc, rc);
                        memcpy(pvDstPageW, pbSrcPage, RT_MIN(cbSrcLeft, GUEST_PAGE_SIZE));
                        cRestored++;
                    }
                    else
                        LogRel(("pgmR3PhysRomReset: %RGp: ROM page changed (%s)\n", GCPhys, pRom->pszDesc));
                }
                cbSrcLeft -= RT_MIN(cbSrcLeft, GUEST_PAGE_SIZE);
            }
            if (cRestored > 0)
                LogRel(("PGM: ROM \"%s\": Reloaded %u of %u pages.\n", pRom->pszDesc, cRestored, cGuestPages));
        }
    }

    /* Clear the ROM restore flag now as we only need to do this once after
       loading saved state. */
    pVM->pgm.s.fRestoreRomPagesOnReset = false;

    return VINF_SUCCESS;
}


/**
 * Called by PGMR3Term to free resources.
 *
 * ASSUMES that the caller owns the PGM lock.
 *
 * @param   pVM         The cross context VM structure.
 */
void pgmR3PhysRomTerm(PVM pVM)
{
    /*
     * Free the heap copy of the original bits.
     */
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        if (   pRom->pvOriginal
            && !(pRom->fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY))
        {
            RTMemFree((void *)pRom->pvOriginal);
            pRom->pvOriginal = NULL;
        }
    }
}


/**
 * Change the shadowing of a range of ROM pages.
 *
 * This is intended for implementing chipset specific memory registers
 * and will not be very strict about the input.  It will silently ignore
 * any pages that are not part of a shadowed ROM.
 *
 * @returns VBox status code.
 * @retval  VINF_PGM_SYNC_CR3
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Where to start.  Page aligned.
 * @param   cb          How much to change.  Page aligned.
 * @param   enmProt     The new ROM protection.
 */
VMMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
{
    /*
     * Check input
     */
    if (!cb)
        return VINF_SUCCESS;
    AssertReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(!(cb & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);

    /*
     * Process the request.
     */
    PGM_LOCK_VOID(pVM);
    int  rc = VINF_SUCCESS;
    bool fFlushTLB = false;
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        if (    GCPhys <= pRom->GCPhysLast
            &&  GCPhysLast >= pRom->GCPhys
            &&  (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
        {
            /*
             * Iterate the relevant pages and make the necessary changes.
             */
#ifdef VBOX_WITH_NATIVE_NEM
            PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
            AssertPtrReturn(pRam, VERR_INTERNAL_ERROR_3);
#endif
            bool fChanges = false;
            uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
                                  ? pRom->cb >> GUEST_PAGE_SHIFT
                                  : (GCPhysLast - pRom->GCPhys + 1) >> GUEST_PAGE_SHIFT;
            for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
                 iPage < cPages;
                 iPage++)
            {
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
                {
                    fChanges = true;

                    /* flush references to the page. */
                    RTGCPHYS const GCPhysPage = pRom->GCPhys + (iPage << GUEST_PAGE_SHIFT);
                    PPGMPAGE pRamPage = pgmPhysGetPage(pVM, GCPhysPage);
                    int rc2 = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pRamPage, true /*fFlushPTEs*/, &fFlushTLB);
                    if (rc2 != VINF_SUCCESS && (rc == VINF_SUCCESS || RT_FAILURE(rc2)))
                        rc = rc2;
#ifdef VBOX_WITH_NATIVE_NEM
                    uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pRamPage);
#endif

                    PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
                    PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;

                    *pOld = *pRamPage;
                    *pRamPage = *pNew;
                    /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */

#ifdef VBOX_WITH_NATIVE_NEM
# ifdef VBOX_WITH_PGM_NEM_MODE
                    /* In simplified mode we have to switch the page data around too. */
                    if (pVM->pgm.s.fNemMode)
                    {
                        uint8_t abPage[GUEST_PAGE_SIZE];
                        uint8_t * const pbRamPage = PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage);
                        memcpy(abPage, &pRom->pbR3Alternate[(size_t)iPage << GUEST_PAGE_SHIFT], sizeof(abPage));
                        memcpy(&pRom->pbR3Alternate[(size_t)iPage << GUEST_PAGE_SHIFT], pbRamPage, sizeof(abPage));
                        memcpy(pbRamPage, abPage, sizeof(abPage));
                    }
# endif
                    /* Tell NEM about the backing and protection change. */
                    if (VM_IS_NEM_ENABLED(pVM))
                    {
                        PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pNew);
                        NEMHCNotifyPhysPageChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pOld), PGM_PAGE_GET_HCPHYS(pNew),
                                                   PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                                   pgmPhysPageCalcNemProtection(pRamPage, enmType), enmType, &u2State);
                        PGM_PAGE_SET_NEM_STATE(pRamPage, u2State);
                    }
#endif
                }
                pRomPage->enmProt = enmProt;
            }

            /*
             * Reset the access handler if we made changes, no need
             * to optimize this.
             */
            if (fChanges)
            {
                int rc2 = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
                if (RT_FAILURE(rc2))
                {
                    PGM_UNLOCK(pVM);
                    AssertRC(rc);
                    return rc2;
                }
            }

            /* Advance - cb isn't updated. */
            GCPhys = pRom->GCPhys + (cPages << GUEST_PAGE_SHIFT);
        }
    }
    PGM_UNLOCK(pVM);
    if (fFlushTLB)
        PGM_INVL_ALL_VCPU_TLBS(pVM);

    return rc;
}
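
/*
 * Illustrative sketch (not part of the original source): how chipset code
 * emulating PAM-style BIOS shadowing might drive this API.  The guest first
 * copies the ROM into the shadow pages (reads still hit the ROM, writes land
 * in RAM), then reads are flipped over to the shadow copy.  The C0000h range
 * and function name used here are just examples.
 */
#if 0 /* example only */
static int myChipsetShadowBiosRange(PVM pVM)
{
    /* Step 1: reads from the ROM, writes go to the shadow RAM. */
    int rc = PGMR3PhysRomProtect(pVM, 0xc0000, 0x8000, PGMROMPROT_READ_ROM_WRITE_RAM);
    AssertRCReturn(rc, rc);

    /* ... the guest copies the ROM onto itself here ... */

    /* Step 2: reads served from the shadow RAM, further writes ignored. */
    return PGMR3PhysRomProtect(pVM, 0xc0000, 0x8000, PGMROMPROT_READ_RAM_WRITE_IGNORE);
}
#endif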



/*********************************************************************************************************************************
*   Ballooning                                                                                                                   *
*********************************************************************************************************************************/

#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))

/**
 * Rendezvous callback used by PGMR3PhysChangeMemBalloon that changes the
 * memory balloon size.
 *
 * This is only called on one of the EMTs while the other ones are waiting for
 * it to complete this function.
 *
 * @returns VINF_SUCCESS (VBox strict status code).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.  Unused.
 * @param   pvUser      User parameter.
 */
static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysChangeMemBalloonRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    uintptr_t        *paUser        = (uintptr_t *)pvUser;
    bool              fInflate      = !!paUser[0];
    unsigned          cPages        = paUser[1];
    RTGCPHYS         *paPhysPage    = (RTGCPHYS *)paUser[2];
    uint32_t          cPendingPages = 0;
    PGMMFREEPAGESREQ  pReq;
    int               rc;

    Log(("pgmR3PhysChangeMemBalloonRendezvous: %s %x pages\n", (fInflate) ? "inflate" : "deflate", cPages));
    PGM_LOCK_VOID(pVM);

    if (fInflate)
    {
        /* Flush the PGM pool cache as we might have stale references to pages that we just freed. */
        pgmR3PoolClearAllRendezvous(pVM, pVCpu, NULL);

        /* Replace pages with ZERO pages. */
        rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
        if (RT_FAILURE(rc))
        {
            PGM_UNLOCK(pVM);
            AssertLogRelRC(rc);
            return rc;
        }

        /* Iterate the pages. */
        for (unsigned i = 0; i < cPages; i++)
        {
            PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
            if (    pPage == NULL
                ||  PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM)
            {
                Log(("pgmR3PhysChangeMemBalloonRendezvous: invalid physical page %RGp pPage->u3Type=%d\n", paPhysPage[i], pPage ? PGM_PAGE_GET_TYPE(pPage) : 0));
                break;
            }

            LogFlow(("balloon page: %RGp\n", paPhysPage[i]));

            /* Flush the shadow PT if this page was previously used as a guest page table. */
            pgmPoolFlushPageByGCPhys(pVM, paPhysPage[i]);

            rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, paPhysPage[i], (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
            if (RT_FAILURE(rc))
            {
                PGM_UNLOCK(pVM);
                AssertLogRelRC(rc);
                return rc;
            }
            Assert(PGM_PAGE_IS_ZERO(pPage));
            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
        }

        if (cPendingPages)
        {
            rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
            if (RT_FAILURE(rc))
            {
                PGM_UNLOCK(pVM);
                AssertLogRelRC(rc);
                return rc;
            }
        }
        GMMR3FreePagesCleanup(pReq);
    }
    else
    {
        /* Iterate the pages. */
        for (unsigned i = 0; i < cPages; i++)
        {
            PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
            AssertBreak(pPage && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);

            LogFlow(("Free ballooned page: %RGp\n", paPhysPage[i]));

            Assert(PGM_PAGE_IS_BALLOONED(pPage));

            /* Change back to zero page.  (NEM does not need to be informed.) */
            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
        }

        /* Note that we currently do not map any ballooned pages in our shadow page tables, so no need to flush the pgm pool. */
    }

    /* Notify GMM about the balloon change. */
    rc = GMMR3BalloonedPages(pVM, (fInflate) ? GMMBALLOONACTION_INFLATE : GMMBALLOONACTION_DEFLATE, cPages);
    if (RT_SUCCESS(rc))
    {
        if (!fInflate)
        {
            Assert(pVM->pgm.s.cBalloonedPages >= cPages);
            pVM->pgm.s.cBalloonedPages -= cPages;
        }
        else
            pVM->pgm.s.cBalloonedPages += cPages;
    }

    PGM_UNLOCK(pVM);

    /* Flush the recompiler's TLB as well. */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        CPUMSetChangedFlags(pVM->apCpusR3[i], CPUM_CHANGED_GLOBAL_TLB_FLUSH);

    AssertLogRelRC(rc);
    return rc;
}

/**
 * Worker for PGMR3PhysChangeMemBalloon: inflates or deflates the memory
 * balloon via an EMT rendezvous, replacing inflated pages with ZERO pages.
 *
 * @param   pVM         The cross context VM structure.
 * @param   fInflate    Inflate or deflate memory balloon
 * @param   cPages      Number of pages to free
 * @param   paPhysPage  Array of guest physical addresses
 */
static DECLCALLBACK(void) pgmR3PhysChangeMemBalloonHelper(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
{
    uintptr_t paUser[3];

    paUser[0] = fInflate;
    paUser[1] = cPages;
    paUser[2] = (uintptr_t)paPhysPage;
    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
    AssertRC(rc);

    /* Made a copy in PGMR3PhysChangeMemBalloon; free it here. */
    RTMemFree(paPhysPage);
}

#endif /* 64-bit host && (Windows || Solaris || Linux || FreeBSD) */

/**
 * Inflate or deflate a memory balloon.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   fInflate    Inflate or deflate memory balloon
 * @param   cPages      Number of pages to free
 * @param   paPhysPage  Array of guest physical addresses
 */
VMMR3DECL(int) PGMR3PhysChangeMemBalloon(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
{
    /* This must match GMMR0Init; currently we only support memory ballooning on all 64-bit hosts except Mac OS X. */
#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
    int rc;

    /* Older additions (ancient non-functioning balloon code) pass wrong physical addresses. */
    AssertReturn(!(paPhysPage[0] & 0xfff), VERR_INVALID_PARAMETER);

    /* We own the IOM lock here and could cause a deadlock by waiting for another VCPU that is blocking on the IOM lock.
     * In the SMP case we post a request packet to postpone the job.
     */
    if (pVM->cCpus > 1)
    {
        unsigned cbPhysPage = cPages * sizeof(paPhysPage[0]);
        RTGCPHYS *paPhysPageCopy = (RTGCPHYS *)RTMemAlloc(cbPhysPage);
        AssertReturn(paPhysPageCopy, VERR_NO_MEMORY);

        memcpy(paPhysPageCopy, paPhysPage, cbPhysPage);

        rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysChangeMemBalloonHelper, 4, pVM, fInflate, cPages, paPhysPageCopy);
        AssertRC(rc);
    }
    else
    {
        uintptr_t paUser[3];

        paUser[0] = fInflate;
        paUser[1] = cPages;
        paUser[2] = (uintptr_t)paPhysPage;
        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
        AssertRC(rc);
    }
    return rc;

#else
    NOREF(pVM); NOREF(fInflate); NOREF(cPages); NOREF(paPhysPage);
    return VERR_NOT_IMPLEMENTED;
#endif
}
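
/*
 * Illustrative sketch (not part of the original source): inflating the
 * balloon with a batch of pages reported by the guest balloon driver.  The
 * array contents and function name are hypothetical; every entry must be
 * page aligned.  On SMP VMs the API above copies the array and processes it
 * asynchronously via an EMT request, so the caller's buffer may be reused
 * after the call returns.
 */
#if 0 /* example only */
static int myBalloonInflateBatch(PVM pVM, RTGCPHYS *paPhysPage, unsigned cPages)
{
    return PGMR3PhysChangeMemBalloon(pVM, true /*fInflate*/, cPages, paPhysPage);
}
#endif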


/*********************************************************************************************************************************
*   Write Monitoring                                                                                                             *
*********************************************************************************************************************************/

/**
 * Rendezvous callback used by PGMR3PhysWriteProtectRAM that write protects all
 * physical RAM.
 *
 * This is only called on one of the EMTs while the other ones are waiting for
 * it to complete this function.
 *
 * @returns VINF_SUCCESS (VBox strict status code).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT. Unused.
 * @param   pvUser  User parameter, unused.
 */
static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysWriteProtectRAMRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    int rc = VINF_SUCCESS;
    NOREF(pvUser); NOREF(pVCpu);

    PGM_LOCK_VOID(pVM);
#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
    pgmPoolResetDirtyPages(pVM);
#endif

    /** @todo pointless to write protect the physical page pointed to by RSP. */

    for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
         pRam;
         pRam = pRam->CTX_SUFF(pNext))
    {
        uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            PPGMPAGE    pPage = &pRam->aPages[iPage];
            PGMPAGETYPE enmPageType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);

            if (   RT_LIKELY(enmPageType == PGMPAGETYPE_RAM)
                || enmPageType == PGMPAGETYPE_MMIO2)
            {
                /*
                 * A RAM or MMIO2 page.
                 */
                switch (PGM_PAGE_GET_STATE(pPage))
                {
                    case PGM_PAGE_STATE_ALLOCATED:
                        /** @todo Optimize this: Don't always re-enable write
                         *        monitoring if the page is known to be very busy. */
                        if (PGM_PAGE_IS_WRITTEN_TO(pPage))
                            PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);

                        pgmPhysPageWriteMonitor(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT));
                        break;

                    case PGM_PAGE_STATE_SHARED:
                        AssertFailed();
                        break;

                    case PGM_PAGE_STATE_WRITE_MONITORED:    /* nothing to change. */
                    default:
                        break;
                }
            }
        }
    }
    pgmR3PoolWriteProtectPages(pVM);
    PGM_INVL_ALL_VCPU_TLBS(pVM);
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        CPUMSetChangedFlags(pVM->apCpusR3[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);

    PGM_UNLOCK(pVM);
    return rc;
}

/**
 * Protects all physical RAM in order to monitor writes.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) PGMR3PhysWriteProtectRAM(PVM pVM)
{
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);

    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysWriteProtectRAMRendezvous, NULL);
    AssertRC(rc);
    return rc;
}

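/*
 * Illustrative sketch (not part of the build): a dirty-page tracker would arm
 * write monitoring like this from an EMT and later inspect the WRITTEN_TO bit
 * that write accesses set on modified pages.
 */
#if 0
    int rc = PGMR3PhysWriteProtectRAM(pVM);
    AssertRCReturn(rc, rc);
    /* ... resume the guest, then scan pages with PGM_PAGE_IS_WRITTEN_TO() ... */
#endif
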
/*********************************************************************************************************************************
*   Stats.                                                                                                                       *
*********************************************************************************************************************************/

/**
 * Query the amount of allocated, free, ballooned and shared memory inside VMMR0.
 *
 * @returns VBox status code.
 * @param   pUVM            The user mode VM handle.
 * @param   pcbAllocMem     Where to return the amount of memory allocated
 *                          by VMs.
 * @param   pcbFreeMem      Where to return the amount of memory that is
 *                          allocated from the host but not currently used
 *                          by any VMs.
 * @param   pcbBallonedMem  Where to return the sum of memory that is
 *                          currently ballooned by the VMs.
 * @param   pcbSharedMem    Where to return the amount of memory that is
 *                          currently shared.
 */
VMMR3DECL(int) PGMR3QueryGlobalMemoryStats(PUVM pUVM, uint64_t *pcbAllocMem, uint64_t *pcbFreeMem,
                                           uint64_t *pcbBallonedMem, uint64_t *pcbSharedMem)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);

    uint64_t cAllocPages   = 0;
    uint64_t cFreePages    = 0;
    uint64_t cBalloonPages = 0;
    uint64_t cSharedPages  = 0;
    if (!SUPR3IsDriverless())
    {
        int rc = GMMR3QueryHypervisorMemoryStats(pUVM->pVM, &cAllocPages, &cFreePages, &cBalloonPages, &cSharedPages);
        AssertRCReturn(rc, rc);
    }

    if (pcbAllocMem)
        *pcbAllocMem    = cAllocPages * _4K;

    if (pcbFreeMem)
        *pcbFreeMem     = cFreePages * _4K;

    if (pcbBallonedMem)
        *pcbBallonedMem = cBalloonPages * _4K;

    if (pcbSharedMem)
        *pcbSharedMem   = cSharedPages * _4K;

    Log(("PGMR3QueryGlobalMemoryStats: all=%llx free=%llx ballooned=%llx shared=%llx\n",
         cAllocPages, cFreePages, cBalloonPages, cSharedPages));
    return VINF_SUCCESS;
}

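/*
 * Illustrative sketch (not part of the build): all four out parameters are
 * optional, so callers pass NULL for the counts they do not need.
 */
#if 0
    uint64_t cbAlloc, cbFree, cbBallooned;
    int rc = PGMR3QueryGlobalMemoryStats(pUVM, &cbAlloc, &cbFree, &cbBallooned, NULL /*pcbSharedMem*/);
    if (RT_SUCCESS(rc))
        LogRel(("Host pools: %RU64 MiB allocated, %RU64 MiB free, %RU64 MiB ballooned\n",
                cbAlloc / _1M, cbFree / _1M, cbBallooned / _1M));
#endif
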
/**
 * Query memory stats for the VM.
 *
 * @returns VBox status code.
 * @param   pUVM            The user mode VM handle.
 * @param   pcbTotalMem     Where to return the total amount of memory the VM
 *                          may possibly use.
 * @param   pcbPrivateMem   Where to return the amount of private memory
 *                          currently allocated.
 * @param   pcbSharedMem    Where to return the amount of actually shared
 *                          memory currently used by the VM.
 * @param   pcbZeroMem      Where to return the amount of memory backed by
 *                          zero pages.
 *
 * @remarks The total mem is normally larger than the sum of the three
 *          components.  There are two reasons for this: first, the amount of
 *          shared memory is what we're sure is shared instead of what could
 *          possibly be shared with someone; secondly, the total may include
 *          some pure MMIO pages that don't go into any of the three
 *          sub-counts.
 *
 * @todo Why do we return reused shared pages instead of anything that could
 *       potentially be shared?  Doesn't this mean the first VM gets a much
 *       lower number of shared pages?
 */
VMMR3DECL(int) PGMR3QueryMemoryStats(PUVM pUVM, uint64_t *pcbTotalMem, uint64_t *pcbPrivateMem,
                                     uint64_t *pcbSharedMem, uint64_t *pcbZeroMem)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    if (pcbTotalMem)
        *pcbTotalMem   = (uint64_t)pVM->pgm.s.cAllPages          * GUEST_PAGE_SIZE;

    if (pcbPrivateMem)
        *pcbPrivateMem = (uint64_t)pVM->pgm.s.cPrivatePages      * GUEST_PAGE_SIZE;

    if (pcbSharedMem)
        *pcbSharedMem  = (uint64_t)pVM->pgm.s.cReusedSharedPages * GUEST_PAGE_SIZE;

    if (pcbZeroMem)
        *pcbZeroMem    = (uint64_t)pVM->pgm.s.cZeroPages         * GUEST_PAGE_SIZE;

    Log(("PGMR3QueryMemoryStats: all=%x private=%x reused=%x zero=%x\n",
         pVM->pgm.s.cAllPages, pVM->pgm.s.cPrivatePages, pVM->pgm.s.cReusedSharedPages, pVM->pgm.s.cZeroPages));
    return VINF_SUCCESS;
}



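/*
 * Illustrative sketch of the remark above (not part of the build): the three
 * sub-counts normally do not add up to the total; the difference covers pure
 * MMIO pages and pages that merely could have been shared.
 */
#if 0
    uint64_t cbTotal, cbPrivate, cbShared, cbZero;
    int rc = PGMR3QueryMemoryStats(pUVM, &cbTotal, &cbPrivate, &cbShared, &cbZero);
    if (RT_SUCCESS(rc))
        LogRel(("Unaccounted: %RU64 bytes\n", cbTotal - cbPrivate - cbShared - cbZero));
#endif
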
/*********************************************************************************************************************************
*   Chunk Mappings and Page Allocation                                                                                           *
*********************************************************************************************************************************/

/**
 * Tree enumeration callback for dealing with age rollover.
 * It will perform a simple compression of the current age.
 */
static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
{
    /* Age compression - ASSUMES iNow == 4. */
    PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
    if (pChunk->iLastUsed >= UINT32_C(0xffffff00))
        pChunk->iLastUsed = 3;
    else if (pChunk->iLastUsed >= UINT32_C(0xfffff000))
        pChunk->iLastUsed = 2;
    else if (pChunk->iLastUsed)
        pChunk->iLastUsed = 1;
    else /* iLastUsed = 0 */
        pChunk->iLastUsed = 4;

    NOREF(pvUser);
    return 0;
}

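/*
 * Worked example of the compression above (assuming iNow == 4, as the
 * callback itself notes): the 32-bit age space collapses into four buckets
 * that preserve the relative recency ordering.
 *
 *      iLastUsed >= 0xffffff00  ->  3   (recently used)
 *      iLastUsed >= 0xfffff000  ->  2
 *      iLastUsed >= 1           ->  1   (least recently used)
 *      iLastUsed == 0           ->  4   (touched right at the wrap, newest)
 */
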
/**
 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
 */
typedef struct PGMR3PHYSCHUNKUNMAPCB
{
    PVM             pVM;            /**< Pointer to the VM. */
    PPGMCHUNKR3MAP  pChunk;         /**< The chunk to unmap. */
} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;

/**
 * Callback used to find the mapping that's been unused for
 * the longest time.
 */
static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLU32NODECORE pNode, void *pvUser)
{
    PPGMCHUNKR3MAP          pChunk = (PPGMCHUNKR3MAP)pNode;
    PPGMR3PHYSCHUNKUNMAPCB  pArg   = (PPGMR3PHYSCHUNKUNMAPCB)pvUser;

    /*
     * Check for locks and compare when last used.
     */
    if (pChunk->cRefs)
        return 0;
    if (pChunk->cPermRefs)
        return 0;
    if (   pArg->pChunk
        && pChunk->iLastUsed >= pArg->pChunk->iLastUsed)
        return 0;

    /*
     * Check that it's not in any of the TLBs.
     */
    PVM pVM = pArg->pVM;
    if (   pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(pChunk->Core.Key)].idChunk
        == pChunk->Core.Key)
    {
        pChunk = NULL;
        return 0;
    }
#ifdef VBOX_STRICT
    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
    {
        Assert(pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk != pChunk);
        Assert(pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk != pChunk->Core.Key);
    }
#endif

    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
        if (pVM->pgm.s.PhysTlbR3.aEntries[i].pMap == pChunk)
            return 0;

    pArg->pChunk = pChunk;
    return 0;
}

/**
 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
 *
 * The candidate will not be part of any TLBs, so no need to flush
 * anything afterwards.
 *
 * @returns Chunk id, or INT32_MAX if no unmapping candidate was found.
 * @param   pVM     The cross context VM structure.
 */
static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Enumerate the age tree starting with the left most node.
     */
    STAM_PROFILE_START(&pVM->pgm.s.Stats.StatChunkFindCandidate, a);
    PGMR3PHYSCHUNKUNMAPCB Args;
    Args.pVM    = pVM;
    Args.pChunk = NULL;
    RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args);
    Assert(Args.pChunk);
    if (Args.pChunk)
    {
        Assert(Args.pChunk->cRefs == 0);
        Assert(Args.pChunk->cPermRefs == 0);
        STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatChunkFindCandidate, a);
        return Args.pChunk->Core.Key;
    }

    STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatChunkFindCandidate, a);
    return INT32_MAX;
}

/**
 * Rendezvous callback used by pgmR3PhysUnmapChunk that unmaps a chunk.
 *
 * This is only called on one of the EMTs while the other ones are waiting for
 * it to complete this function.
 *
 * @returns VINF_SUCCESS (VBox strict status code).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT. Unused.
 * @param   pvUser  User pointer. Unused.
 */
static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysUnmapChunkRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    int rc = VINF_SUCCESS;
    PGM_LOCK_VOID(pVM);
    NOREF(pVCpu); NOREF(pvUser);

    if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
    {
        /* Flush the pgm pool cache; call the internal rendezvous handler as we're already in a rendezvous handler here. */
        /** @todo also not really efficient to unmap a chunk that contains PD
         *        or PT pages. */
        pgmR3PoolClearAllRendezvous(pVM, pVM->apCpusR3[0], NULL /* no need to flush the REM TLB as we already did that above */);

        /*
         * Request the ring-0 part to unmap a chunk to make space in the mapping cache.
         */
        GMMMAPUNMAPCHUNKREQ Req;
        Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
        Req.Hdr.cbReq    = sizeof(Req);
        Req.pvR3         = NULL;
        Req.idChunkMap   = NIL_GMM_CHUNKID;
        Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
        if (Req.idChunkUnmap != INT32_MAX)
        {
            STAM_PROFILE_START(&pVM->pgm.s.Stats.StatChunkUnmap, a);
            rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
            STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatChunkUnmap, a);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Remove the unmapped one.
                 */
                PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
                AssertRelease(pUnmappedChunk);
                AssertRelease(!pUnmappedChunk->cRefs);
                AssertRelease(!pUnmappedChunk->cPermRefs);
                pUnmappedChunk->pv       = NULL;
                pUnmappedChunk->Core.Key = UINT32_MAX;
                MMR3HeapFree(pUnmappedChunk);
                pVM->pgm.s.ChunkR3Map.c--;
                pVM->pgm.s.cUnmappedChunks++;

                /*
                 * Flush dangling PGM pointers (R3 & R0 ptrs to GC physical addresses).
                 */
                /** @todo We should not flush chunks which include cr3 mappings. */
                for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
                {
                    PPGMCPU pPGM = &pVM->apCpusR3[idCpu]->pgm.s;

                    pPGM->pGst32BitPdR3   = NULL;
                    pPGM->pGstPaePdptR3   = NULL;
                    pPGM->pGstAmd64Pml4R3 = NULL;
                    pPGM->pGstEptPml4R3   = NULL;
                    pPGM->pGst32BitPdR0   = NIL_RTR0PTR;
                    pPGM->pGstPaePdptR0   = NIL_RTR0PTR;
                    pPGM->pGstAmd64Pml4R0 = NIL_RTR0PTR;
                    pPGM->pGstEptPml4R0   = NIL_RTR0PTR;
                    for (unsigned i = 0; i < RT_ELEMENTS(pPGM->apGstPaePDsR3); i++)
                    {
                        pPGM->apGstPaePDsR3[i] = NULL;
                        pPGM->apGstPaePDsR0[i] = NIL_RTR0PTR;
                    }

                    /* Flush REM TLBs. */
                    CPUMSetChangedFlags(pVM->apCpusR3[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
                }
            }
        }
    }
    PGM_UNLOCK(pVM);
    return rc;
}

/**
 * Unmaps a chunk to free up virtual address space (request packet handler for pgmR3PhysChunkMap).
 *
 * @param   pVM     The cross context VM structure.
 */
static DECLCALLBACK(void) pgmR3PhysUnmapChunk(PVM pVM)
{
    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysUnmapChunkRendezvous, NULL);
    AssertRC(rc);
}

/**
 * Maps the given chunk into the ring-3 mapping cache.
 *
 * This will call ring-0.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   idChunk The chunk in question.
 * @param   ppChunk Where to store the chunk tracking structure.
 *
 * @remarks Called from within the PGM critical section.
 * @remarks Can be called from any thread!
 */
int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
{
    int rc;

    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Move the chunk time forward.
     */
    pVM->pgm.s.ChunkR3Map.iNow++;
    if (pVM->pgm.s.ChunkR3Map.iNow == 0)
    {
        pVM->pgm.s.ChunkR3Map.iNow = 4;
        RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, NULL);
    }

    /*
     * Allocate a new tracking structure first.
     */
    PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAllocZ(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
    AssertReturn(pChunk, VERR_NO_MEMORY);
    pChunk->Core.Key  = idChunk;
    pChunk->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;

    /*
     * Request the ring-0 part to map the chunk in question.
     */
    GMMMAPUNMAPCHUNKREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq    = sizeof(Req);
    Req.pvR3         = NULL;
    Req.idChunkMap   = idChunk;
    Req.idChunkUnmap = NIL_GMM_CHUNKID;

    /* Must be callable from any thread, so can't use VMMR3CallR0. */
    STAM_PROFILE_START(&pVM->pgm.s.Stats.StatChunkMap, a);
    rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
    STAM_PROFILE_STOP(&pVM->pgm.s.Stats.StatChunkMap, a);
    if (RT_SUCCESS(rc))
    {
        pChunk->pv = Req.pvR3;

        /*
         * If we're running out of virtual address space, then we should
         * unmap another chunk.
         *
         * Currently, an unmap operation requires that all other virtual CPUs
         * are idling and not by chance making use of the memory we're
         * unmapping.  So, we create an async unmap operation here.
         *
         * Now, when creating or restoring a saved state this won't work very
         * well since we may want to restore all guest RAM + a little something.
         * So, we have to do the unmap synchronously.  Fortunately for us
         * though, during these operations the other virtual CPUs are inactive
         * and it should be safe to do this.
         */
        /** @todo Eventually we should lock all memory when used and do
         *        map+unmap as one kernel call without any rendezvous or
         *        other precautions. */
        if (pVM->pgm.s.ChunkR3Map.c + 1 >= pVM->pgm.s.ChunkR3Map.cMax)
        {
            switch (VMR3GetState(pVM))
            {
                case VMSTATE_LOADING:
                case VMSTATE_SAVING:
                {
                    PVMCPU pVCpu = VMMGetCpu(pVM);
                    if (   pVCpu
                        && pVM->pgm.s.cDeprecatedPageLocks == 0)
                    {
                        pgmR3PhysUnmapChunkRendezvous(pVM, pVCpu, NULL);
                        break;
                    }
                }
                RT_FALL_THRU();
                default:
                    rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysUnmapChunk, 1, pVM);
                    AssertRC(rc);
                    break;
            }
        }

        /*
         * Update the tree.  We must do this after any unmapping to make sure
         * the chunk we're going to return isn't unmapped by accident.
         */
        AssertPtr(Req.pvR3);
        bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
        AssertRelease(fRc);
        pVM->pgm.s.ChunkR3Map.c++;
        pVM->pgm.s.cMappedChunks++;
    }
    else
    {
        /** @todo this may fail because of /proc/sys/vm/max_map_count, so we
         *        should probably restrict ourselves on Linux. */
        AssertRC(rc);
        MMR3HeapFree(pChunk);
        pChunk = NULL;
    }

    *ppChunk = pChunk;
    return rc;
}

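/*
 * Illustrative call pattern (not part of the build; idChunk is a placeholder
 * value): a page-map TLB miss resolves the chunk while owning the PGM lock.
 */
#if 0
    PPGMCHUNKR3MAP pChunk = NULL;
    PGM_LOCK_VOID(pVM);
    int rc = pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
    if (RT_SUCCESS(rc))
        Log(("Chunk %#x mapped at %p\n", idChunk, pChunk->pv));
    PGM_UNLOCK(pVM);
#endif
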
/**
 * Invalidates the TLB for the ring-3 mapping cache.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
{
    PGM_LOCK_VOID(pVM);
    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
    {
        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk  = NULL;
    }
    /* The page map TLB references chunks, so invalidate that one too. */
    pgmPhysInvalidatePageMapTLB(pVM);
    PGM_UNLOCK(pVM);
}

/**
 * Response to VM_FF_PGM_NEED_HANDY_PAGES and helper for pgmPhysEnsureHandyPage.
 *
 * This function will also work the VM_FF_PGM_NO_MEMORY force action flag, to
 * signal and clear the out-of-memory condition.  It is called again when the
 * user wants to resume, to try clear that condition.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success.  FFs cleared.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory.  The FF is not cleared in
 *          this case and it gets accompanied by VM_FF_PGM_NO_MEMORY.
 *
 * @param   pVM     The cross context VM structure.
 *
 * @remarks The VINF_EM_NO_MEMORY status is for the benefit of the FF processing
 *          in EM.cpp and shouldn't be propagated outside TRPM, HM, EM and
 *          pgmPhysEnsureHandyPage.  There is one exception to this in the \#PF
 *          handler.
 */
VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
{
    PGM_LOCK_VOID(pVM);

    /*
     * Allocate more pages, noting down the index of the first new page.
     */
    uint32_t iClear = pVM->pgm.s.cHandyPages;
    AssertMsgReturn(iClear <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iClear), VERR_PGM_HANDY_PAGE_IPE);
    Log(("PGMR3PhysAllocateHandyPages: %d -> %d\n", iClear, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
    int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
    /** @todo we should split this up into an allocate and flush operation.  Sometimes you
     *        want to flush and not allocate more (which will trigger the vm account limit error). */
    if (   rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT
        && pVM->pgm.s.cHandyPages > 0)
    {
        /* Still handy pages left, so don't panic. */
        rc = VINF_SUCCESS;
    }

    if (RT_SUCCESS(rc))
    {
        AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
        Assert(pVM->pgm.s.cHandyPages > 0);
#ifdef VBOX_STRICT
        uint32_t i;
        for (i = iClear; i < pVM->pgm.s.cHandyPages; i++)
            if (   pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID
                || pVM->pgm.s.aHandyPages[i].idSharedPage != NIL_GMM_PAGEID
                || (pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & GUEST_PAGE_OFFSET_MASK))
                break;
        if (i != pVM->pgm.s.cHandyPages)
        {
            RTAssertMsg1Weak(NULL, __LINE__, __FILE__, __FUNCTION__);
            RTAssertMsg2Weak("i=%d iClear=%d cHandyPages=%d\n", i, iClear, pVM->pgm.s.cHandyPages);
            for (uint32_t j = iClear; j < pVM->pgm.s.cHandyPages; j++)
                RTAssertMsg2Add("%03d: idPage=%d HCPhysGCPhys=%RHp idSharedPage=%d%s\n", j,
                                pVM->pgm.s.aHandyPages[j].idPage,
                                pVM->pgm.s.aHandyPages[j].HCPhysGCPhys,
                                pVM->pgm.s.aHandyPages[j].idSharedPage,
                                j == i ? " <---" : "");
            RTAssertPanic();
        }
#endif
    }
    else
    {
        /*
         * We should never get here unless there is a genuine shortage of
         * memory (or some internal error).  Flag the error so the VM can be
         * suspended ASAP and the user informed.  If we're totally out of
         * handy pages we will return failure.
         */
        /* Report the failure. */
        LogRel(("PGM: Failed to procure handy pages; rc=%Rrc cHandyPages=%#x\n"
                "     cAllPages=%#x cPrivatePages=%#x cSharedPages=%#x cZeroPages=%#x\n",
                rc, pVM->pgm.s.cHandyPages,
                pVM->pgm.s.cAllPages, pVM->pgm.s.cPrivatePages, pVM->pgm.s.cSharedPages, pVM->pgm.s.cZeroPages));

        if (   rc != VERR_NO_MEMORY
            && rc != VERR_NO_PHYS_MEMORY
            && rc != VERR_LOCK_FAILED)
            for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
            {
                LogRel(("PGM: aHandyPages[#%#04x] = {.HCPhysGCPhys=%RHp, .idPage=%#08x, .idSharedPage=%#08x}\n",
                        i, pVM->pgm.s.aHandyPages[i].HCPhysGCPhys, pVM->pgm.s.aHandyPages[i].idPage,
                        pVM->pgm.s.aHandyPages[i].idSharedPage));
                uint32_t const idPage = pVM->pgm.s.aHandyPages[i].idPage;
                if (idPage != NIL_GMM_PAGEID)
                {
                    for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
                         pRam;
                         pRam = pRam->pNextR3)
                    {
                        uint32_t const cPages = pRam->cb >> GUEST_PAGE_SHIFT;
                        for (uint32_t iPage = 0; iPage < cPages; iPage++)
                            if (PGM_PAGE_GET_PAGEID(&pRam->aPages[iPage]) == idPage)
                                LogRel(("PGM: Used by %RGp %R[pgmpage] (%s)\n",
                                        pRam->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pRam->aPages[iPage], pRam->pszDesc));
                    }
                }
            }

        if (rc == VERR_NO_MEMORY)
        {
            uint64_t cbHostRamAvail = 0;
            int rc2 = RTSystemQueryAvailableRam(&cbHostRamAvail);
            if (RT_SUCCESS(rc2))
                LogRel(("Host RAM: %RU64MB available\n", cbHostRamAvail / _1M));
            else
                LogRel(("Cannot determine the amount of available host memory\n"));
        }

        /* Set the FFs and adjust rc. */
        VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
        VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
        if (   rc == VERR_NO_MEMORY
            || rc == VERR_NO_PHYS_MEMORY
            || rc == VERR_LOCK_FAILED)
            rc = VINF_EM_NO_MEMORY;
    }

    PGM_UNLOCK(pVM);
    return rc;
}

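/*
 * Illustrative sketch (not part of the build): roughly how the force-action
 * processing loop reacts to the flag this function serves; the real EM code
 * differs in detail.
 */
#if 0
    if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);
        if (rc == VINF_EM_NO_MEMORY)
            return rc; /* VM_FF_PGM_NO_MEMORY is now set; suspend the VM ASAP. */
    }
#endif
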
/*********************************************************************************************************************************
*   Other Stuff                                                                                                                  *
*********************************************************************************************************************************/

/**
 * Sets the Address Gate 20 state.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fEnable True if the gate should be enabled.
 *                  False if the gate should be disabled.
 */
VMMDECL(void) PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable)
{
    LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVCpu->pgm.s.fA20Enabled));
    if (pVCpu->pgm.s.fA20Enabled != fEnable)
    {
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
        PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
        if (   CPUMIsGuestInVmxRootMode(pCtx)
            && !fEnable)
        {
            Log(("Cannot enter A20M mode while in VMX root mode\n"));
            return;
        }
#endif
        pVCpu->pgm.s.fA20Enabled = fEnable;
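        /* Illustrative note on the mask below: with fEnable=true, !fEnable is 0,
           so the mask is ~(RTGCPHYS)0 and all address bits pass through.  With
           fEnable=false, !fEnable is 1 and ~(1 << 20) clears exactly address
           bit 20, giving the classic real-mode 1 MiB wrap-around behaviour. */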
        pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20);
        if (VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)))
            NEMR3NotifySetA20(pVCpu, fEnable);
#ifdef PGM_WITH_A20
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
        pgmR3RefreshShadowModeAfterA20Change(pVCpu);
        HMFlushTlb(pVCpu);
#endif
#if 0 /* PGMGetPage will apply the A20 mask to the GCPhys it returns, so we must invalidate both sides of the TLB. */
        IEMTlbInvalidateAllPhysical(pVCpu);
#else
        IEMTlbInvalidateAll(pVCpu);
#endif
        STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cA20Changes);
    }
}