VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMSavedState.cpp@ 24061

Last change on this file since 24061 was 23801, checked in by vboxsync, 15 years ago

Main,VMM,Frontends,++: Terminology. Added a bind address for the (target) teleporter.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 103.6 KB
Line 
1/* $Id: PGMSavedState.cpp 23801 2009-10-15 15:00:47Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, The Saved State Part.
4 */
5
6/*
7 * Copyright (C) 2006-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/pgm.h>
28#include <VBox/stam.h>
29#include <VBox/ssm.h>
30#include <VBox/pdm.h>
31#include "PGMInternal.h"
32#include <VBox/vm.h>
33
34#include <VBox/param.h>
35#include <VBox/err.h>
36
37#include <iprt/asm.h>
38#include <iprt/assert.h>
39#include <iprt/crc32.h>
40#include <iprt/mem.h>
41#include <iprt/sha.h>
42#include <iprt/string.h>
43#include <iprt/thread.h>
44
45
46/*******************************************************************************
47* Defined Constants And Macros *
48*******************************************************************************/
49/** Saved state data unit version. */
50#define PGM_SAVED_STATE_VERSION 10
51/** Saved state data unit version for 3.0 (pre teleportation). */
52#define PGM_SAVED_STATE_VERSION_3_0_0 9
53/** Saved state data unit version for 2.2.2 and later. */
54#define PGM_SAVED_STATE_VERSION_2_2_2 8
55/** Saved state data unit version for 2.2.0. */
56#define PGM_SAVED_STATE_VERSION_RR_DESC 7
57/** Saved state data unit version. */
58#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE 6
59
60
61/** @name Sparse state record types
62 * @{ */
63/** Zero page. No data. */
64#define PGM_STATE_REC_RAM_ZERO UINT8_C(0x00)
65/** Raw page. */
66#define PGM_STATE_REC_RAM_RAW UINT8_C(0x01)
67/** Raw MMIO2 page. */
68#define PGM_STATE_REC_MMIO2_RAW UINT8_C(0x02)
69/** Zero MMIO2 page. */
70#define PGM_STATE_REC_MMIO2_ZERO UINT8_C(0x03)
71/** Virgin ROM page. Followed by protection (8-bit) and the raw bits. */
72#define PGM_STATE_REC_ROM_VIRGIN UINT8_C(0x04)
/** Raw shadowed ROM page. The protection (8-bit) precedes the raw bits. */
74#define PGM_STATE_REC_ROM_SHW_RAW UINT8_C(0x05)
75/** Zero shadowed ROM page. The protection (8-bit) is the only payload. */
76#define PGM_STATE_REC_ROM_SHW_ZERO UINT8_C(0x06)
77/** ROM protection (8-bit). */
78#define PGM_STATE_REC_ROM_PROT UINT8_C(0x07)
79/** The last record type. */
80#define PGM_STATE_REC_LAST PGM_STATE_REC_ROM_PROT
81/** End marker. */
82#define PGM_STATE_REC_END UINT8_C(0xff)
/** Flag indicating that the data is preceded by the page address.
 * For RAW pages this is a RTGCPHYS. For MMIO2 and ROM pages this is an 8-bit
 * range ID and a 32-bit page index.
 */
87#define PGM_STATE_REC_FLAG_ADDR UINT8_C(0x80)
88/** @} */
89
90/** The CRC-32 for a zero page. */
91#define PGM_STATE_CRC32_ZERO_PAGE UINT32_C(0xc71c0011)
92/** The CRC-32 for a zero half page. */
93#define PGM_STATE_CRC32_ZERO_HALF_PAGE UINT32_C(0xf1e8ba9e)
94
95
96/*******************************************************************************
97* Structures and Typedefs *
98*******************************************************************************/
/** For loading old saved states. (pre-smp)
 * Mirrors the pre-SMP layout of the PGM state so s_aPGMFields_Old can map the
 * old on-disk format onto it; the field order and types must not change. */
typedef struct
{
    /** If set no conflict checks are required. (boolean) */
    bool            fMappingsFixed;
    /** Size of fixed mapping */
    uint32_t        cbMappingFixed;
    /** Base address (GC) of fixed mapping */
    RTGCPTR         GCPtrMappingFixed;
    /** A20 gate mask.
     * Our current approach to A20 emulation is to let REM do it and don't bother
     * anywhere else. The interesting Guests will be operating with it enabled anyway.
     * But should the need arise, we'll subject physical addresses to this mask. */
    RTGCPHYS        GCPhysA20Mask;
    /** A20 gate state - boolean! */
    bool            fA20Enabled;
    /** The guest paging mode. */
    PGMMODE         enmGuestMode;
} PGMOLD;
118
119
120/*******************************************************************************
121* Global Variables *
122*******************************************************************************/
/** PGM fields to save/load.
 * Per-VM PGM state serialized via SSMR3PutStructEx/GetStructEx. */
static const SSMFIELD s_aPGMFields[] =
{
    SSMFIELD_ENTRY(         PGM, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGM, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGM, cbMappingFixed),
    SSMFIELD_ENTRY_TERM()
};
131
/** Per-VCPU PGM fields to save/load (SMP-aware format, version 10+). */
static const SSMFIELD s_aPGMCpuFields[] =
{
    SSMFIELD_ENTRY(         PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};
139
/** Field descriptors for the pre-SMP saved state layout (loads into PGMOLD). */
static const SSMFIELD s_aPGMFields_Old[] =
{
    SSMFIELD_ENTRY(         PGMOLD, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGMOLD, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, cbMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMOLD, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMOLD, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};
150
151
152/**
153 * Find the ROM tracking structure for the given page.
154 *
155 * @returns Pointer to the ROM page structure. NULL if the caller didn't check
156 * that it's a ROM page.
157 * @param pVM The VM handle.
158 * @param GCPhys The address of the ROM page.
159 */
160static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */
161{
162 for (PPGMROMRANGE pRomRange = pVM->pgm.s.CTX_SUFF(pRomRanges);
163 pRomRange;
164 pRomRange = pRomRange->CTX_SUFF(pNext))
165 {
166 RTGCPHYS off = GCPhys - pRomRange->GCPhys;
167 if (GCPhys - pRomRange->GCPhys < pRomRange->cb)
168 return &pRomRange->aPages[off >> PAGE_SHIFT];
169 }
170 return NULL;
171}
172
173
174/**
175 * Prepares the ROM pages for a live save.
176 *
177 * @returns VBox status code.
178 * @param pVM The VM handle.
179 */
180static int pgmR3PrepRomPages(PVM pVM)
181{
182 /*
183 * Initialize the live save tracking in the ROM page descriptors.
184 */
185 pgmLock(pVM);
186 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
187 {
188 PPGMRAMRANGE pRamHint = NULL;;
189 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
190
191 for (uint32_t iPage = 0; iPage < cPages; iPage++)
192 {
193 pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)PGMROMPROT_INVALID;
194 pRom->aPages[iPage].LiveSave.fWrittenTo = false;
195 pRom->aPages[iPage].LiveSave.fDirty = true;
196 pRom->aPages[iPage].LiveSave.fDirtiedRecently = true;
197 if (!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
198 {
199 if (PGMROMPROT_IS_ROM(pRom->aPages[iPage].enmProt))
200 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow);
201 else
202 {
203 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
204 PPGMPAGE pPage;
205 int rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhys, &pPage, &pRamHint);
206 AssertLogRelMsgRC(rc, ("%Rrc GCPhys=%RGp\n", rc, GCPhys));
207 if (RT_SUCCESS(rc))
208 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(pPage);
209 else
210 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow);
211 }
212 }
213 }
214
215 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
216 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
217 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
218 }
219 pgmUnlock(pVM);
220
221 return VINF_SUCCESS;
222}
223
224
225/**
226 * Assigns IDs to the ROM ranges and saves them.
227 *
228 * @returns VBox status code.
229 * @param pVM The VM handle.
230 * @param pSSM Saved state handle.
231 */
232static int pgmR3SaveRomRanges(PVM pVM, PSSMHANDLE pSSM)
233{
234 pgmLock(pVM);
235 uint8_t id = 1;
236 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3, id++)
237 {
238 pRom->idSavedState = id;
239 SSMR3PutU8(pSSM, id);
240 SSMR3PutStrZ(pSSM, ""); /* device name */
241 SSMR3PutU32(pSSM, 0); /* device instance */
242 SSMR3PutU8(pSSM, 0); /* region */
243 SSMR3PutStrZ(pSSM, pRom->pszDesc);
244 SSMR3PutGCPhys(pSSM, pRom->GCPhys);
245 int rc = SSMR3PutGCPhys(pSSM, pRom->cb);
246 if (RT_FAILURE(rc))
247 break;
248 }
249 pgmUnlock(pVM);
250 return SSMR3PutU8(pSSM, UINT8_MAX);
251}
252
253
/**
 * Loads the ROM range ID assignments.
 *
 * Reads the record sequence written by pgmR3SaveRomRanges and matches each
 * record to a current ROM range by description, storing the saved-state id
 * in the range so later page records can reference it.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The VM handle.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3LoadRomRanges(PVM pVM, PSSMHANDLE pSSM)
{
    Assert(PGMIsLockOwner(pVM));

    /* Reset all assignments so we can detect ranges the saved state didn't cover. */
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
        pRom->idSavedState = UINT8_MAX;

    for (;;)
    {
        /*
         * Read the data.
         */
        uint8_t id;
        int rc = SSMR3GetU8(pSSM, &id);
        if (RT_FAILURE(rc))
            return rc;
        if (id == UINT8_MAX)
        {
            /* End marker: every range should have been assigned an id by now. */
            for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
                AssertLogRelMsg(pRom->idSavedState != UINT8_MAX, ("%s\n", pRom->pszDesc));
            return VINF_SUCCESS;        /* the end */
        }
        AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szDeviceName)];
        rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
        AssertLogRelRCReturn(rc, rc);

        uint32_t    uInstance;
        SSMR3GetU32(pSSM, &uInstance);
        uint8_t     iRegion;
        SSMR3GetU8(pSSM, &iRegion);

        char szDesc[64];
        rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
        AssertLogRelRCReturn(rc, rc);

        RTGCPHYS GCPhys;
        SSMR3GetGCPhys(pSSM, &GCPhys);
        RTGCPHYS cb;
        rc = SSMR3GetGCPhys(pSSM, &cb);
        if (RT_FAILURE(rc))
            return rc;
        AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK),     ("cb=%RGp %s\n", cb, szDesc),         VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /*
         * Locate a matching ROM range.
         */
        /* ROM records never carry device identification (see pgmR3SaveRomRanges). */
        AssertLogRelMsgReturn(   uInstance == 0
                              && iRegion == 0
                              && szDevName[0] == '\0',
                              ("GCPhys=%RGp %s\n", GCPhys, szDesc),
                              VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        PPGMROMRANGE pRom;
        for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
        {
            if (    pRom->idSavedState == UINT8_MAX
                &&  !strcmp(pRom->pszDesc, szDesc))
            {
                pRom->idSavedState = id;
                break;
            }
        }
        AssertLogRelMsgReturn(pRom, ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_LOAD_CONFIG_MISMATCH);
    } /* forever */
}
329
330
331/**
332 * Scan ROM pages.
333 *
334 * @param pVM The VM handle.
335 */
336static void pgmR3ScanRomPages(PVM pVM)
337{
338 /*
339 * The shadow ROMs.
340 */
341 pgmLock(pVM);
342 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
343 {
344 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
345 {
346 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
347 for (uint32_t iPage = 0; iPage < cPages; iPage++)
348 {
349 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
350 if (pRomPage->LiveSave.fWrittenTo)
351 {
352 pRomPage->LiveSave.fWrittenTo = false;
353 if (!pRomPage->LiveSave.fDirty)
354 {
355 pRomPage->LiveSave.fDirty = true;
356 pVM->pgm.s.LiveSave.Rom.cReadyPages--;
357 pVM->pgm.s.LiveSave.Rom.cDirtyPages++;
358 }
359 pRomPage->LiveSave.fDirtiedRecently = true;
360 }
361 else
362 pRomPage->LiveSave.fDirtiedRecently = false;
363 }
364 }
365 }
366 pgmUnlock(pVM);
367}
368
369
/**
 * Takes care of the virgin ROM pages in the first pass.
 *
 * This is an attempt at simplifying the handling of ROM pages a little bit.
 * This ASSUMES that no new ROM ranges will be added and that they won't be
 * relinked in any way.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether we're in a live save or not.
 */
static int pgmR3SaveRomVirginPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave)
{
    pgmLock(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            RTGCPHYS   GCPhys  = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
            PGMROMPROT enmProt = pRom->aPages[iPage].enmProt;

            /* Get the virgin page descriptor.
               When the ROM is mapped (not the shadow), the virgin bits live in
               the RAM range; otherwise they are kept aside in the ROM page. */
            PPGMPAGE pPage;
            if (PGMROMPROT_IS_ROM(enmProt))
                pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
            else
                pPage = &pRom->aPages[iPage].Virgin;

            /* Get the page bits. (Cannot use pgmPhysGCPhys2CCPtrInternalReadOnly here!) */
            int rc = VINF_SUCCESS;
            char abPage[PAGE_SIZE];
            if (!PGM_PAGE_IS_ZERO(pPage))
            {
                void const *pvPage;
                rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                if (RT_SUCCESS(rc))
                    memcpy(abPage, pvPage, PAGE_SIZE);
            }
            else
                ASMMemZeroPage(abPage);
            /* Drop the lock while writing to the saved state stream; abPage is
               a stable local copy so this is safe. */
            pgmUnlock(pVM);
            AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

            /* Save it. Only the first page of a range carries the explicit
               address; the rest are implicitly sequential. */
            if (iPage > 0)
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN);
            else
            {
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN | PGM_STATE_REC_FLAG_ADDR);
                SSMR3PutU8(pSSM, pRom->idSavedState);
                SSMR3PutU32(pSSM, iPage);
            }
            SSMR3PutU8(pSSM, (uint8_t)enmProt);
            rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
            if (RT_FAILURE(rc))
                return rc;

            /* Update state. */
            pgmLock(pVM);
            pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)enmProt;
            if (fLiveSave)
            {
                pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                pVM->pgm.s.LiveSave.Rom.cReadyPages++;
            }
        }
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
441
442
/**
 * Saves dirty pages in the shadowed ROM ranges.
 *
 * Used by pgmR3LiveExecPart2 and pgmR3SaveExecMemory.
 *
 * In a live save, a page is written once it is dirty and has quiesced (not
 * dirtied recently and not written to), or unconditionally in the final pass.
 * The final pass also emits protection-only records for pages whose saved
 * protection no longer matches the current one.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   fFinalPass  Whether this is the final pass or not.
 */
static int pgmR3SaveShadowedRomPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, bool fFinalPass)
{
    /*
     * The Shadowed ROMs.
     *
     * ASSUMES that the ROM ranges are fixed.
     * ASSUMES that all the ROM ranges are mapped.
     */
    pgmLock(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        {
            uint32_t const cPages    = pRom->cb >> PAGE_SHIFT;
            /* Tracks the last page index saved so sequential records can skip the address. */
            uint32_t       iPrevPage = cPages;
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                if (    !fLiveSave
                    ||  (   pRomPage->LiveSave.fDirty
                         && (   (   !pRomPage->LiveSave.fDirtiedRecently
                                 && !pRomPage->LiveSave.fWrittenTo)
                             || fFinalPass
                             )
                         )
                    )
                {
                    uint8_t     abPage[PAGE_SIZE];
                    PGMROMPROT  enmProt = pRomPage->enmProt;
                    RTGCPHYS    GCPhys  = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                    /* If the ROM is currently mapped, the shadow bits are kept aside;
                       otherwise they are live in the RAM range. */
                    PPGMPAGE    pPage   = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage(&pVM->pgm.s, GCPhys);
                    bool        fZero   = PGM_PAGE_IS_ZERO(pPage);
                    int         rc      = VINF_SUCCESS;
                    if (!fZero)
                    {
                        void const *pvPage;
                        rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                        if (RT_SUCCESS(rc))
                            memcpy(abPage, pvPage, PAGE_SIZE);
                    }
                    /* Update the counters before dropping the lock. */
                    if (fLiveSave && RT_SUCCESS(rc))
                    {
                        pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                        pRomPage->LiveSave.fDirty = false;
                        pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                        pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                    }
                    /* Drop the lock while writing to the stream; abPage is a local copy. */
                    pgmUnlock(pVM);
                    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

                    /* Only emit the address when not continuing right after the previous page. */
                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW));
                    else
                    {
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW) | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (!fZero)
                        rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;

                    pgmLock(pVM);
                    iPrevPage = iPage;
                }
                /*
                 * In the final pass, make sure the protection is in sync.
                 */
                else if (   fFinalPass
                         && pRomPage->LiveSave.u8Prot != pRomPage->enmProt)
                {
                    PGMROMPROT enmProt = pRomPage->enmProt;
                    pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                    pgmUnlock(pVM);

                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT);
                    else
                    {
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    int rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (RT_FAILURE(rc))
                        return rc;

                    pgmLock(pVM);
                    iPrevPage = iPage;
                }
            }
        }
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
552
553
554/**
555 * Cleans up ROM pages after a live save.
556 *
557 * @param pVM The VM handle.
558 */
559static void pgmR3DoneRomPages(PVM pVM)
560{
561 NOREF(pVM);
562}
563
564
/**
 * Prepares the MMIO2 pages for a live save.
 *
 * Allocates a PGMLIVESAVEMMIO2PAGE tracking array per MMIO2 range and marks
 * every page as a dirty zero page so the first scan/save passes pick them up.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
static int pgmR3PrepMmio2Pages(PVM pVM)
{
    /*
     * Initialize the live save tracking in the MMIO2 ranges.
     * ASSUME nothing changes here.
     */
    pgmLock(pVM);
    for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
    {
        uint32_t const cPages = pMmio2->RamRange.cb >> PAGE_SHIFT;
        /* Drop the lock while allocating; the range list is assumed stable (see above).
           NOTE(review): on VERR_NO_MEMORY we return without re-acquiring the lock. */
        pgmUnlock(pVM);

        PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMLIVESAVEMMIO2PAGE) * cPages);
        if (!paLSPages)
            return VERR_NO_MEMORY;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            /* Initialize it as a dirty zero page. */
            paLSPages[iPage].fDirty          = true;
            paLSPages[iPage].cUnchangedScans = 0;
            paLSPages[iPage].fZero           = true;
            paLSPages[iPage].u32CrcH1        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
            paLSPages[iPage].u32CrcH2        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
        }

        pgmLock(pVM);
        pMmio2->paLSPages = paLSPages;
        pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages;
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
603
604
605/**
606 * Assigns IDs to the MMIO2 ranges and saves them.
607 *
608 * @returns VBox status code.
609 * @param pVM The VM handle.
610 * @param pSSM Saved state handle.
611 */
612static int pgmR3SaveMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
613{
614 pgmLock(pVM);
615 uint8_t id = 1;
616 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3, id++)
617 {
618 pMmio2->idSavedState = id;
619 SSMR3PutU8(pSSM, id);
620 SSMR3PutStrZ(pSSM, pMmio2->pDevInsR3->pDevReg->szDeviceName);
621 SSMR3PutU32(pSSM, pMmio2->pDevInsR3->iInstance);
622 SSMR3PutU8(pSSM, pMmio2->iRegion);
623 SSMR3PutStrZ(pSSM, pMmio2->RamRange.pszDesc);
624 int rc = SSMR3PutGCPhys(pSSM, pMmio2->RamRange.cb);
625 if (RT_FAILURE(rc))
626 break;
627 }
628 pgmUnlock(pVM);
629 return SSMR3PutU8(pSSM, UINT8_MAX);
630}
631
632
633/**
634 * Loads the MMIO2 range ID assignments.
635 *
636 * @returns VBox status code.
637 *
638 * @param pVM The VM handle.
639 * @param pSSM The saved state handle.
640 */
641static int pgmR3LoadMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
642{
643 Assert(PGMIsLockOwner(pVM));
644
645 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
646 pMmio2->idSavedState = UINT8_MAX;
647
648 for (;;)
649 {
650 /*
651 * Read the data.
652 */
653 uint8_t id;
654 int rc = SSMR3GetU8(pSSM, &id);
655 if (RT_FAILURE(rc))
656 return rc;
657 if (id == UINT8_MAX)
658 {
659 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
660 AssertLogRelMsg(pMmio2->idSavedState != UINT8_MAX, ("%s\n", pMmio2->RamRange.pszDesc));
661 return VINF_SUCCESS; /* the end */
662 }
663 AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
664
665 char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szDeviceName)];
666 rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
667 AssertLogRelRCReturn(rc, rc);
668
669 uint32_t uInstance;
670 SSMR3GetU32(pSSM, &uInstance);
671 uint8_t iRegion;
672 SSMR3GetU8(pSSM, &iRegion);
673
674 char szDesc[64];
675 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
676 AssertLogRelRCReturn(rc, rc);
677
678 RTGCPHYS cb;
679 rc = SSMR3GetGCPhys(pSSM, &cb);
680 AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
681
682 /*
683 * Locate a matching MMIO2 range.
684 */
685 PPGMMMIO2RANGE pMmio2;
686 for (pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
687 {
688 if ( pMmio2->idSavedState == UINT8_MAX
689 && pMmio2->iRegion == iRegion
690 && pMmio2->pDevInsR3->iInstance == uInstance
691 && !strcmp(pMmio2->pDevInsR3->pDevReg->szDeviceName, szDevName))
692 {
693 pMmio2->idSavedState = id;
694 break;
695 }
696 }
697 AssertLogRelMsgReturn(pMmio2, ("%s/%u/%u: %s\n", szDevName, uInstance, iRegion, szDesc), VERR_SSM_LOAD_CONFIG_MISMATCH);
698 } /* forever */
699}
700
701
/**
 * Scans one MMIO2 page.
 *
 * Change detection is heuristic: the page is CRC-32'ed one half at a time;
 * only when the first half matches the stored CRC is the second half checked,
 * spreading the cost over successive scans. Zero pages get a cheap
 * ASMMemIsZeroPage fast path.
 *
 * @returns True if changed, false if unchanged.
 *
 * @param   pVM         The VM handle
 * @param   pbPage      The page bits.
 * @param   pLSPage     The live save tracking structure for the page.
 *
 */
DECLINLINE(bool) pgmR3ScanMmio2Page(PVM pVM, uint8_t const *pbPage, PPGMLIVESAVEMMIO2PAGE pLSPage)
{
    /*
     * Special handling of zero pages.
     */
    bool const fZero = pLSPage->fZero;
    if (fZero)
    {
        if (ASMMemIsZeroPage(pbPage))
        {
            /* Not modified. */
            if (pLSPage->fDirty)
                pLSPage->cUnchangedScans++;
            return false;
        }

        /* Went non-zero: cache the CRC of the first half; the second half is
           checked (and u32CrcH2 updated) on the next scan. */
        pLSPage->fZero    = false;
        pLSPage->u32CrcH1 = RTCrc32(pbPage, PAGE_SIZE / 2);
    }
    else
    {
        /*
         * CRC the first half, if it doesn't match the page is dirty and
         * we won't check the 2nd half (we'll do that next time).
         */
        uint32_t u32CrcH1 = RTCrc32(pbPage, PAGE_SIZE / 2);
        if (u32CrcH1 == pLSPage->u32CrcH1)
        {
            uint32_t u32CrcH2 = RTCrc32(pbPage + PAGE_SIZE / 2, PAGE_SIZE / 2);
            if (u32CrcH2 == pLSPage->u32CrcH2)
            {
                /* Probably not modified. */
                if (pLSPage->fDirty)
                    pLSPage->cUnchangedScans++;
                return false;
            }

            pLSPage->u32CrcH2 = u32CrcH2;
        }
        else
        {
            pLSPage->u32CrcH1 = u32CrcH1;
            /* Detect a transition back to an all-zero page. */
            if (    u32CrcH1 == PGM_STATE_CRC32_ZERO_HALF_PAGE
                &&  ASMMemIsZeroPage(pbPage))
            {
                pLSPage->u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
                pLSPage->fZero    = true;
            }
        }
    }

    /* dirty page path */
    pLSPage->cUnchangedScans = 0;
    if (!pLSPage->fDirty)
    {
        pLSPage->fDirty = true;
        pVM->pgm.s.LiveSave.Mmio2.cReadyPages--;
        pVM->pgm.s.LiveSave.Mmio2.cDirtyPages++;
        if (fZero)
            pVM->pgm.s.LiveSave.Mmio2.cZeroPages--;
    }
    return true;
}
775
776
777/**
778 * Scan for MMIO2 page modifications.
779 *
780 * @param pVM The VM handle.
781 * @param uPass The pass number.
782 */
783static void pgmR3ScanMmio2Pages(PVM pVM, uint32_t uPass)
784{
785 /*
786 * Since this is a bit expensive we lower the scan rate after a little while.
787 */
788 if ( ( (uPass & 3) != 0
789 && uPass > 10)
790 || uPass == SSM_PASS_FINAL)
791 return;
792
793 pgmLock(pVM); /* paranoia */
794 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
795 {
796 PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages;
797 uint8_t const *pbPage = (uint8_t const *)pMmio2->RamRange.pvR3;
798 uint32_t cPages = pMmio2->RamRange.cb >> PAGE_SHIFT;
799 pgmUnlock(pVM);
800
801 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
802 {
803 uint8_t const *pbPage = (uint8_t const *)pMmio2->pvR3 + iPage * PAGE_SIZE;
804 pgmR3ScanMmio2Page(pVM,pbPage, &paLSPages[iPage]);
805 }
806
807 pgmLock(pVM);
808 }
809 pgmUnlock(pVM);
810
811}
812
813
/**
 * Save quiescent MMIO2 pages.
 *
 * Two phases: during live passes (throttled after pass 10) only dirty pages
 * that have quiesced for a few scans are written, recording a SHA-1 of the
 * saved content; in the final pass everything still dirty or changed since
 * its last save is written.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   uPass       The pass number.
 */
static int pgmR3SaveMmio2Pages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
{
    /** @todo implement live saving of MMIO2 pages. (Need some way of telling the
     *        device that we wish to know about changes.) */

    int rc = VINF_SUCCESS;
    if (uPass == SSM_PASS_FINAL)
    {
        /*
         * The mop up round.
         */
        pgmLock(pVM);
        for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3;
             pMmio2 && RT_SUCCESS(rc);
             pMmio2 = pMmio2->pNextR3)
        {
            PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages;
            uint8_t const        *pbPage    = (uint8_t const *)pMmio2->RamRange.pvR3;
            uint32_t              cPages    = pMmio2->RamRange.cb >> PAGE_SHIFT;
            /* iPageLast tracks the previously written page so sequential
               records can omit the address. */
            uint32_t              iPageLast = cPages;
            for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
            {
                uint8_t u8Type;
                if (!fLiveSave)
                    u8Type = ASMMemIsZeroPage(pbPage) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                else
                {
                    /* Try figure if it's a clean page, compare the SHA-1 to be really sure. */
                    if (   !paLSPages[iPage].fDirty
                        && !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                    {
                        if (paLSPages[iPage].fZero)
                            continue;

                        uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
                        RTSha1(pbPage, PAGE_SIZE, abSha1Hash);
                        if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash)))
                            continue;
                    }
                    u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                }

                if (iPage != 0 && iPage == iPageLast + 1)
                    rc = SSMR3PutU8(pSSM, u8Type);
                else
                {
                    SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                    SSMR3PutU8(pSSM, pMmio2->idSavedState);
                    rc = SSMR3PutU32(pSSM, iPage);
                }
                if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                    rc = SSMR3PutMem(pSSM, pbPage, PAGE_SIZE);
                if (RT_FAILURE(rc))
                    break;
                iPageLast = iPage;
            }
        }
        pgmUnlock(pVM);
    }
    /*
     * Reduce the rate after a little while since the current MMIO2 approach is
     * a bit expensive.
     * We position it two passes after the scan pass to avoid saving busy pages.
     */
    else if (   uPass <= 10
             || (uPass & 3) == 2)
    {
        pgmLock(pVM);
        for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3;
             pMmio2 && RT_SUCCESS(rc);
             pMmio2 = pMmio2->pNextR3)
        {
            PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages;
            uint8_t const        *pbPage    = (uint8_t const *)pMmio2->RamRange.pvR3;
            uint32_t              cPages    = pMmio2->RamRange.cb >> PAGE_SHIFT;
            uint32_t              iPageLast = cPages;
            /* Drop the lock while streaming; the tracking array is stable. */
            pgmUnlock(pVM);

            for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
            {
                /* Skip clean pages and pages which hasn't quiesced. */
                if (!paLSPages[iPage].fDirty)
                    continue;
                if (paLSPages[iPage].cUnchangedScans < 3)
                    continue;
                if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                    continue;

                /* Save it. Take a stable copy and remember its SHA-1 so the
                   final pass can tell whether it changed afterwards. */
                bool const fZero = paLSPages[iPage].fZero;
                uint8_t    abPage[PAGE_SIZE];
                if (!fZero)
                {
                    memcpy(abPage, pbPage, PAGE_SIZE);
                    RTSha1(abPage, PAGE_SIZE, paLSPages[iPage].abSha1Saved);
                }

                uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                if (iPage != 0 && iPage == iPageLast + 1)
                    rc = SSMR3PutU8(pSSM, u8Type);
                else
                {
                    SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                    SSMR3PutU8(pSSM, pMmio2->idSavedState);
                    rc = SSMR3PutU32(pSSM, iPage);
                }
                if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                    rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                if (RT_FAILURE(rc))
                    break;

                /* Housekeeping. */
                paLSPages[iPage].fDirty = false;
                pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--;
                pVM->pgm.s.LiveSave.Mmio2.cReadyPages++;
                if (u8Type == PGM_STATE_REC_MMIO2_ZERO)
                    pVM->pgm.s.LiveSave.Mmio2.cZeroPages++;
                iPageLast = iPage;
            }

            pgmLock(pVM);
        }
        pgmUnlock(pVM);
    }

    return rc;
}
950
951
952/**
953 * Cleans up MMIO2 pages after a live save.
954 *
955 * @param pVM The VM handle.
956 */
957static void pgmR3DoneMmio2Pages(PVM pVM)
958{
959 /*
960 * Free the tracking structures for the MMIO2 pages.
961 * We do the freeing outside the lock in case the VM is running.
962 */
963 pgmLock(pVM);
964 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
965 {
966 void *pvMmio2ToFree = pMmio2->paLSPages;
967 if (pvMmio2ToFree)
968 {
969 pMmio2->paLSPages = NULL;
970 pgmUnlock(pVM);
971 MMR3HeapFree(pvMmio2ToFree);
972 pgmLock(pVM);
973 }
974 }
975 pgmUnlock(pVM);
976}
977
978
/**
 * Prepares the RAM pages for a live save.
 *
 * Allocates a PGMLIVESAVERAMPAGE tracking array for every non-ad-hoc RAM
 * range and initializes it from the current page states. RAM pages start out
 * dirty; ROM/MMIO pages are flagged ignored.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
static int pgmR3PrepRamPages(PVM pVM)
{

    /*
     * Try allocating tracking structures for the ram ranges.
     *
     * To avoid lock contention, we leave the lock every time we're allocating
     * a new array.  This means we'll have to ditch the allocation and start
     * all over again if the RAM range list changes in-between.
     *
     * Note! pgmR3SaveDone will always be called and it is therefore responsible
     *       for cleaning up.
     */
    PPGMRAMRANGE pCur;
    pgmLock(pVM);
    do
    {
        for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        {
            if (   !pCur->paLSPages
                && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                /* Remember the list generation so we can detect concurrent changes. */
                uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
                uint32_t const cPages = pCur->cb >> PAGE_SHIFT;
                pgmUnlock(pVM);
                PPGMLIVESAVERAMPAGE paLSPages = (PPGMLIVESAVERAMPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, cPages * sizeof(PGMLIVESAVERAMPAGE));
                if (!paLSPages)
                    return VERR_NO_MEMORY;
                pgmLock(pVM);
                if (pVM->pgm.s.idRamRangesGen != idRamRangesGen)
                {
                    /* The list changed while we were allocating; ditch the
                       array and restart the whole walk (outer do-while). */
                    pgmUnlock(pVM);
                    MMR3HeapFree(paLSPages);
                    pgmLock(pVM);
                    break;              /* try again */
                }
                pCur->paLSPages = paLSPages;

                /*
                 * Initialize the array.
                 */
                uint32_t iPage = cPages;
                while (iPage-- > 0)
                {
                    /** @todo yield critsect! (after moving this away from EMT0) */
                    PCPGMPAGE pPage = &pCur->aPages[iPage];
                    paLSPages[iPage].cDirtied               = 0;
                    paLSPages[iPage].fDirty                 = 1; /* everything is dirty at this time */
                    paLSPages[iPage].fWriteMonitored        = 0;
                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                    paLSPages[iPage].u2Reserved             = 0;
                    switch (PGM_PAGE_GET_TYPE(pPage))
                    {
                        case PGMPAGETYPE_RAM:
                            if (PGM_PAGE_IS_ZERO(pPage))
                            {
                                paLSPages[iPage].fZero   = 1;
                                paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc  = PGM_STATE_CRC32_ZERO_PAGE;
#endif
                            }
                            else if (PGM_PAGE_IS_SHARED(pPage))
                            {
                                paLSPages[iPage].fZero   = 0;
                                paLSPages[iPage].fShared = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            }
                            else
                            {
                                paLSPages[iPage].fZero   = 0;
                                paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            }
                            paLSPages[iPage].fIgnore     = 0;
                            pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                            break;

                        case PGMPAGETYPE_ROM_SHADOW:
                        case PGMPAGETYPE_ROM:
                        {
                            /* ROM pages are handled by the ROM save code, not here. */
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;
                        }

                        default:
                            AssertMsgFailed(("%R[pgmpage]", pPage));
                            /* fall thru - treat unknown types like MMIO2: ignored */
                        case PGMPAGETYPE_MMIO2:
                        case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;

                        case PGMPAGETYPE_MMIO:
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;
                    }
                }
            }
        }
    } while (pCur);
    pgmUnlock(pVM);

    return VINF_SUCCESS;
}
1114
1115#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1116
1117/**
1118 * Calculates the CRC-32 for a RAM page and updates the live save page tracking
1119 * info with it.
1120 *
1121 * @param pVM The VM handle.
1122 * @param pCur The current RAM range.
1123 * @param paLSPages The current array of live save page tracking
1124 * structures.
1125 * @param iPage The page index.
1126 */
1127static void pgmR3StateCalcCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
1128{
1129 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1130 void const *pvPage;
1131 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
1132 if (RT_SUCCESS(rc))
1133 paLSPages[iPage].u32Crc = RTCrc32(pvPage, PAGE_SIZE);
1134 else
1135 paLSPages[iPage].u32Crc = UINT32_MAX; /* Invalid */
1136}
1137
1138
1139/**
1140 * Verifies the CRC-32 for a page given it's raw bits.
1141 *
1142 * @param pvPage The page bits.
1143 * @param pCur The current RAM range.
1144 * @param paLSPages The current array of live save page tracking
1145 * structures.
1146 * @param iPage The page index.
1147 */
1148static void pgmR3StateVerifyCrc32ForPage(void const *pvPage, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
1149{
1150 if (paLSPages[iPage].u32Crc != UINT32_MAX)
1151 {
1152 uint32_t u32Crc = RTCrc32(pvPage, PAGE_SIZE);
1153 Assert(!PGM_PAGE_IS_ZERO(&pCur->aPages[iPage]) || u32Crc == PGM_STATE_CRC32_ZERO_PAGE);
1154 AssertMsg(paLSPages[iPage].u32Crc == u32Crc,
1155 ("%08x != %08x for %RGp %R[pgmpage]\n", paLSPages[iPage].u32Crc, u32Crc,
1156 pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pCur->aPages[iPage]));
1157 }
1158}
1159
1160
1161/**
1162 * Verfies the CRC-32 for a RAM page.
1163 *
1164 * @param pVM The VM handle.
1165 * @param pCur The current RAM range.
1166 * @param paLSPages The current array of live save page tracking
1167 * structures.
1168 * @param iPage The page index.
1169 */
1170static void pgmR3StateVerifyCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
1171{
1172 if (paLSPages[iPage].u32Crc != UINT32_MAX)
1173 {
1174 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1175 void const *pvPage;
1176 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
1177 if (RT_SUCCESS(rc))
1178 pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage);
1179 }
1180}
1181
1182#endif /* PGMLIVESAVERAMPAGE_WITH_CRC32 */
1183
1184/**
1185 * Scan for RAM page modifications and reprotect them.
1186 *
1187 * @param pVM The VM handle.
1188 * @param fFinalPass Whether this is the final pass or not.
1189 */
1190static void pgmR3ScanRamPages(PVM pVM, bool fFinalPass)
1191{
1192 /*
1193 * The RAM.
1194 */
1195 RTGCPHYS GCPhysCur = 0;
1196 PPGMRAMRANGE pCur;
1197 pgmLock(pVM);
1198 do
1199 {
1200 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1201 for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
1202 {
1203 if ( pCur->GCPhysLast > GCPhysCur
1204 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1205 {
1206 PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
1207 uint32_t cPages = pCur->cb >> PAGE_SHIFT;
1208 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
1209 GCPhysCur = 0;
1210 for (; iPage < cPages; iPage++)
1211 {
1212 /* Do yield first. */
1213 if ( !fFinalPass
1214#ifndef PGMLIVESAVERAMPAGE_WITH_CRC32
1215 && (iPage & 0x7ff) == 0x100
1216#endif
1217 && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
1218 && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1219 {
1220 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1221 break; /* restart */
1222 }
1223
1224 /* Skip already ignored pages. */
1225 if (paLSPages[iPage].fIgnore)
1226 continue;
1227
1228 if (RT_LIKELY(PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM))
1229 {
1230 /*
1231 * A RAM page.
1232 */
1233 switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
1234 {
1235 case PGM_PAGE_STATE_ALLOCATED:
1236 /** @todo Optimize this: Don't always re-enable write
1237 * monitoring if the page is known to be very busy. */
1238 if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
1239 {
1240 Assert(paLSPages[iPage].fWriteMonitored);
1241 PGM_PAGE_CLEAR_WRITTEN_TO(&pCur->aPages[iPage]);
1242 Assert(pVM->pgm.s.cWrittenToPages > 0);
1243 pVM->pgm.s.cWrittenToPages--;
1244 }
1245 else
1246 {
1247 Assert(!paLSPages[iPage].fWriteMonitored);
1248 pVM->pgm.s.LiveSave.Ram.cMonitoredPages++;
1249 }
1250
1251 if (!paLSPages[iPage].fDirty)
1252 {
1253 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1254 if (paLSPages[iPage].fZero)
1255 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1256 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1257 if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
1258 paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
1259 }
1260
1261 PGM_PAGE_SET_STATE(&pCur->aPages[iPage], PGM_PAGE_STATE_WRITE_MONITORED);
1262 pVM->pgm.s.cMonitoredPages++;
1263 paLSPages[iPage].fWriteMonitored = 1;
1264 paLSPages[iPage].fWriteMonitoredJustNow = 1;
1265 paLSPages[iPage].fDirty = 1;
1266 paLSPages[iPage].fZero = 0;
1267 paLSPages[iPage].fShared = 0;
1268#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1269 paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
1270#endif
1271 break;
1272
1273 case PGM_PAGE_STATE_WRITE_MONITORED:
1274 Assert(paLSPages[iPage].fWriteMonitored);
1275 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) == 0)
1276 {
1277#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1278 if (paLSPages[iPage].fWriteMonitoredJustNow)
1279 pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
1280 else
1281 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
1282#endif
1283 paLSPages[iPage].fWriteMonitoredJustNow = 0;
1284 }
1285 else
1286 {
1287 paLSPages[iPage].fWriteMonitoredJustNow = 1;
1288#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1289 paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
1290#endif
1291 if (!paLSPages[iPage].fDirty)
1292 {
1293 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1294 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1295 if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
1296 paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
1297 }
1298 }
1299 break;
1300
1301 case PGM_PAGE_STATE_ZERO:
1302 if (!paLSPages[iPage].fZero)
1303 {
1304 if (!paLSPages[iPage].fDirty)
1305 {
1306 paLSPages[iPage].fDirty = 1;
1307 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1308 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1309 }
1310 paLSPages[iPage].fZero = 1;
1311 paLSPages[iPage].fShared = 0;
1312#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1313 paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
1314#endif
1315 }
1316 break;
1317
1318 case PGM_PAGE_STATE_SHARED:
1319 if (!paLSPages[iPage].fShared)
1320 {
1321 if (!paLSPages[iPage].fDirty)
1322 {
1323 paLSPages[iPage].fDirty = 1;
1324 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1325 if (paLSPages[iPage].fZero)
1326 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1327 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1328 }
1329 paLSPages[iPage].fZero = 0;
1330 paLSPages[iPage].fShared = 1;
1331#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1332 pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
1333#endif
1334 }
1335 break;
1336 }
1337 }
1338 else
1339 {
1340 /*
1341 * All other types => Ignore the page.
1342 */
1343 Assert(!paLSPages[iPage].fIgnore); /* skipped before switch */
1344 paLSPages[iPage].fIgnore = 1;
1345 if (paLSPages[iPage].fWriteMonitored)
1346 {
1347 /** @todo this doesn't hold water when we start monitoring MMIO2 and ROM shadow
1348 * pages! */
1349 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
1350 {
1351 AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
1352 PGM_PAGE_SET_STATE(&pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
1353 Assert(pVM->pgm.s.cMonitoredPages > 0);
1354 pVM->pgm.s.cMonitoredPages--;
1355 }
1356 if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
1357 {
1358 PGM_PAGE_CLEAR_WRITTEN_TO(&pCur->aPages[iPage]);
1359 Assert(pVM->pgm.s.cWrittenToPages > 0);
1360 pVM->pgm.s.cWrittenToPages--;
1361 }
1362 pVM->pgm.s.LiveSave.Ram.cMonitoredPages--;
1363 }
1364
1365 /** @todo the counting doesn't quite work out here. fix later? */
1366 if (paLSPages[iPage].fDirty)
1367 pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
1368 else
1369 {
1370 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1371 if (paLSPages[iPage].fZero)
1372 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1373 }
1374 pVM->pgm.s.LiveSave.cIgnoredPages++;
1375 }
1376 } /* for each page in range */
1377
1378 if (GCPhysCur != 0)
1379 break; /* Yield + ramrange change */
1380 GCPhysCur = pCur->GCPhysLast;
1381 }
1382 } /* for each range */
1383 } while (pCur);
1384 pgmUnlock(pVM);
1385}
1386
1387
1388/**
1389 * Save quiescent RAM pages.
1390 *
1391 * @returns VBox status code.
1392 * @param pVM The VM handle.
1393 * @param pSSM The SSM handle.
1394 * @param fLiveSave Whether it's a live save or not.
1395 * @param uPass The pass number.
1396 */
1397static int pgmR3SaveRamPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
1398{
1399 /*
1400 * The RAM.
1401 */
1402 RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
1403 RTGCPHYS GCPhysCur = 0;
1404 PPGMRAMRANGE pCur;
1405 pgmLock(pVM);
1406 do
1407 {
1408 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1409 for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
1410 {
1411 if ( pCur->GCPhysLast > GCPhysCur
1412 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1413 {
1414 PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
1415 uint32_t cPages = pCur->cb >> PAGE_SHIFT;
1416 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
1417 GCPhysCur = 0;
1418 for (; iPage < cPages; iPage++)
1419 {
1420 /* Do yield first. */
1421 if ( uPass != SSM_PASS_FINAL
1422 && (iPage & 0x7ff) == 0x100
1423 && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
1424 && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1425 {
1426 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1427 break; /* restart */
1428 }
1429
1430 /*
1431 * Only save pages that hasn't changed since last scan and are dirty.
1432 */
1433 if ( uPass != SSM_PASS_FINAL
1434 && paLSPages)
1435 {
1436 if (!paLSPages[iPage].fDirty)
1437 continue;
1438 if (paLSPages[iPage].fWriteMonitoredJustNow)
1439 continue;
1440 if (paLSPages[iPage].fIgnore)
1441 continue;
1442 if (PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) != PGMPAGETYPE_RAM) /* in case of recent ramppings */
1443 continue;
1444 if ( PGM_PAGE_GET_STATE(&pCur->aPages[iPage])
1445 != ( paLSPages[iPage].fZero
1446 ? PGM_PAGE_STATE_ZERO
1447 : paLSPages[iPage].fShared
1448 ? PGM_PAGE_STATE_SHARED
1449 : PGM_PAGE_STATE_WRITE_MONITORED))
1450 continue;
1451 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) > 0)
1452 continue;
1453 }
1454 else
1455 {
1456 if ( paLSPages
1457 && !paLSPages[iPage].fDirty
1458 && !paLSPages[iPage].fIgnore)
1459 {
1460#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1461 if (PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) != PGMPAGETYPE_RAM)
1462 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
1463#endif
1464 continue;
1465 }
1466 if (PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) != PGMPAGETYPE_RAM)
1467 continue;
1468 }
1469
1470 /*
1471 * Do the saving outside the PGM critsect since SSM may block on I/O.
1472 */
1473 int rc;
1474 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1475 bool fZero = PGM_PAGE_IS_ZERO(&pCur->aPages[iPage]);
1476
1477 if (!fZero)
1478 {
1479 /*
1480 * Copy the page and then save it outside the lock (since any
1481 * SSM call may block).
1482 */
1483 uint8_t abPage[PAGE_SIZE];
1484 void const *pvPage;
1485 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
1486 if (RT_SUCCESS(rc))
1487 {
1488 memcpy(abPage, pvPage, PAGE_SIZE);
1489#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1490 if (paLSPages)
1491 pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage);
1492#endif
1493 }
1494 pgmUnlock(pVM);
1495 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
1496
1497 if (GCPhys == GCPhysLast + PAGE_SIZE)
1498 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
1499 else
1500 {
1501 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
1502 SSMR3PutGCPhys(pSSM, GCPhys);
1503 }
1504 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
1505 }
1506 else
1507 {
1508 /*
1509 * Dirty zero page.
1510 */
1511#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1512 if (paLSPages)
1513 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
1514#endif
1515 pgmUnlock(pVM);
1516
1517 if (GCPhys == GCPhysLast + PAGE_SIZE)
1518 rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO);
1519 else
1520 {
1521 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO | PGM_STATE_REC_FLAG_ADDR);
1522 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1523 }
1524 }
1525 if (RT_FAILURE(rc))
1526 return rc;
1527
1528 pgmLock(pVM);
1529 GCPhysLast = GCPhys;
1530 if (paLSPages)
1531 {
1532 paLSPages[iPage].fDirty = 0;
1533 pVM->pgm.s.LiveSave.Ram.cReadyPages++;
1534 if (fZero)
1535 pVM->pgm.s.LiveSave.Ram.cZeroPages++;
1536 pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
1537 }
1538 if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
1539 {
1540 GCPhysCur = GCPhys | PAGE_OFFSET_MASK;
1541 break; /* restart */
1542 }
1543
1544 } /* for each page in range */
1545
1546 if (GCPhysCur != 0)
1547 break; /* Yield + ramrange change */
1548 GCPhysCur = pCur->GCPhysLast;
1549 }
1550 } /* for each range */
1551 } while (pCur);
1552 pgmUnlock(pVM);
1553
1554 return VINF_SUCCESS;
1555}
1556
1557
1558/**
1559 * Cleans up RAM pages after a live save.
1560 *
1561 * @param pVM The VM handle.
1562 */
1563static void pgmR3DoneRamPages(PVM pVM)
1564{
1565 /*
1566 * Free the tracking arrays and disable write monitoring.
1567 *
1568 * Play nice with the PGM lock in case we're called while the VM is still
1569 * running. This means we have to delay the freeing since we wish to use
1570 * paLSPages as an indicator of which RAM ranges which we need to scan for
1571 * write monitored pages.
1572 */
1573 void *pvToFree = NULL;
1574 PPGMRAMRANGE pCur;
1575 uint32_t cMonitoredPages = 0;
1576 pgmLock(pVM);
1577 do
1578 {
1579 for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
1580 {
1581 if (pCur->paLSPages)
1582 {
1583 if (pvToFree)
1584 {
1585 uint32_t idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1586 pgmUnlock(pVM);
1587 MMR3HeapFree(pvToFree);
1588 pvToFree = NULL;
1589 pgmLock(pVM);
1590 if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
1591 break; /* start over again. */
1592 }
1593
1594 pvToFree = pCur->paLSPages;
1595 pCur->paLSPages = NULL;
1596
1597 uint32_t iPage = pCur->cb >> PAGE_SHIFT;
1598 while (iPage--)
1599 {
1600 PPGMPAGE pPage = &pCur->aPages[iPage];
1601 PGM_PAGE_CLEAR_WRITTEN_TO(pPage);
1602 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1603 {
1604 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1605 cMonitoredPages++;
1606 }
1607 }
1608 }
1609 }
1610 } while (pCur);
1611
1612 Assert(pVM->pgm.s.cMonitoredPages >= cMonitoredPages);
1613 if (pVM->pgm.s.cMonitoredPages < cMonitoredPages)
1614 pVM->pgm.s.cMonitoredPages = 0;
1615 else
1616 pVM->pgm.s.cMonitoredPages -= cMonitoredPages;
1617
1618 pgmUnlock(pVM);
1619
1620 MMR3HeapFree(pvToFree);
1621 pvToFree = NULL;
1622}
1623
1624
1625/**
1626 * Execute a live save pass.
1627 *
1628 * @returns VBox status code.
1629 *
1630 * @param pVM The VM handle.
1631 * @param pSSM The SSM handle.
1632 */
1633static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1634{
1635 int rc;
1636
1637 /*
1638 * Save the MMIO2 and ROM range IDs in pass 0.
1639 */
1640 if (uPass == 0)
1641 {
1642 rc = pgmR3SaveRomRanges(pVM, pSSM);
1643 if (RT_FAILURE(rc))
1644 return rc;
1645 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
1646 if (RT_FAILURE(rc))
1647 return rc;
1648 }
1649
1650 /*
1651 * Do the scanning.
1652 */
1653 pgmR3ScanRomPages(pVM);
1654 pgmR3ScanMmio2Pages(pVM, uPass);
1655 pgmR3ScanRamPages(pVM, false /*fFinalPass*/);
1656 pgmR3PoolClearAll(pVM); /** @todo this could perhaps be optimized a bit. */
1657
1658 /*
1659 * Save the pages.
1660 */
1661 if (uPass == 0)
1662 rc = pgmR3SaveRomVirginPages( pVM, pSSM, true /*fLiveSave*/);
1663 else
1664 rc = VINF_SUCCESS;
1665 if (RT_SUCCESS(rc))
1666 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, true /*fLiveSave*/, false /*fFinalPass*/);
1667 if (RT_SUCCESS(rc))
1668 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, uPass);
1669 if (RT_SUCCESS(rc))
1670 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, uPass);
1671 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes of it.) */
1672
1673 return rc;
1674}
1675
1676//#include <iprt/stream.h>
1677
1678/**
1679 * Votes on whether the live save phase is done or not.
1680 *
1681 * @returns VBox status code.
1682 *
1683 * @param pVM The VM handle.
1684 * @param pSSM The SSM handle.
1685 */
1686static DECLCALLBACK(int) pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM)
1687{
1688#if 0
1689 RTPrintf("# Rom[R/D/Z/M]=%03x/%03x/%03x/%03x Mmio2=%04x/%04x/%04x/%04x Ram=%06x/%06x/%06x/%06x Ignored=%03x\n",
1690 pVM->pgm.s.LiveSave.Rom.cReadyPages,
1691 pVM->pgm.s.LiveSave.Rom.cDirtyPages,
1692 pVM->pgm.s.LiveSave.Rom.cZeroPages,
1693 pVM->pgm.s.LiveSave.Rom.cMonitoredPages,
1694 pVM->pgm.s.LiveSave.Mmio2.cReadyPages,
1695 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages,
1696 pVM->pgm.s.LiveSave.Mmio2.cZeroPages,
1697 pVM->pgm.s.LiveSave.Mmio2.cMonitoredPages,
1698 pVM->pgm.s.LiveSave.Ram.cReadyPages,
1699 pVM->pgm.s.LiveSave.Ram.cDirtyPages,
1700 pVM->pgm.s.LiveSave.Ram.cZeroPages,
1701 pVM->pgm.s.LiveSave.Ram.cMonitoredPages,
1702 pVM->pgm.s.LiveSave.cIgnoredPages
1703 );
1704 static int s_iHack = 0;
1705 if ((++s_iHack % 42) == 0)
1706 return VINF_SUCCESS;
1707 RTThreadSleep(1000);
1708
1709#else
1710 if ( pVM->pgm.s.LiveSave.Rom.cDirtyPages
1711 + pVM->pgm.s.LiveSave.Mmio2.cDirtyPages
1712 + pVM->pgm.s.LiveSave.Ram.cDirtyPages
1713 < 256) /* semi random numbers. */
1714 return VINF_SUCCESS;
1715#endif
1716 return VINF_SSM_VOTE_FOR_ANOTHER_PASS;
1717}
1718
1719
1720/**
1721 * Prepare for a live save operation.
1722 *
1723 * This will attempt to allocate and initialize the tracking structures. It
1724 * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
1725 * pgmR3SaveDone will do the cleanups.
1726 *
1727 * @returns VBox status code.
1728 *
1729 * @param pVM The VM handle.
1730 * @param pSSM The SSM handle.
1731 */
1732static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
1733{
1734 /*
1735 * Indicate that we will be using the write monitoring.
1736 */
1737 pgmLock(pVM);
1738 /** @todo find a way of mediating this when more users are added. */
1739 if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
1740 {
1741 pgmUnlock(pVM);
1742 AssertLogRelFailedReturn(VERR_INTERNAL_ERROR_2);
1743 }
1744 pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
1745 pgmUnlock(pVM);
1746
1747 /*
1748 * Initialize the statistics.
1749 */
1750 pVM->pgm.s.LiveSave.Rom.cReadyPages = 0;
1751 pVM->pgm.s.LiveSave.Rom.cDirtyPages = 0;
1752 pVM->pgm.s.LiveSave.Mmio2.cReadyPages = 0;
1753 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages = 0;
1754 pVM->pgm.s.LiveSave.Ram.cReadyPages = 0;
1755 pVM->pgm.s.LiveSave.Ram.cDirtyPages = 0;
1756 pVM->pgm.s.LiveSave.cIgnoredPages = 0;
1757 pVM->pgm.s.LiveSave.fActive = true;
1758
1759 /*
1760 * Per page type.
1761 */
1762 int rc = pgmR3PrepRomPages(pVM);
1763 if (RT_SUCCESS(rc))
1764 rc = pgmR3PrepMmio2Pages(pVM);
1765 if (RT_SUCCESS(rc))
1766 rc = pgmR3PrepRamPages(pVM);
1767 return rc;
1768}
1769
1770
1771/**
1772 * Execute state save operation.
1773 *
1774 * @returns VBox status code.
1775 * @param pVM VM Handle.
1776 * @param pSSM SSM operation handle.
1777 */
1778static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
1779{
1780 int rc;
1781 unsigned i;
1782 PPGM pPGM = &pVM->pgm.s;
1783
1784 /*
1785 * Lock PGM and set the no-more-writes indicator.
1786 */
1787 pgmLock(pVM);
1788 pVM->pgm.s.fNoMorePhysWrites = true;
1789
1790 /*
1791 * Save basic data (required / unaffected by relocation).
1792 */
1793 SSMR3PutStruct(pSSM, pPGM, &s_aPGMFields[0]);
1794
1795 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1796 {
1797 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1798 SSMR3PutStruct(pSSM, &pVCpu->pgm.s, &s_aPGMCpuFields[0]);
1799 }
1800
1801 /*
1802 * The guest mappings.
1803 */
1804 i = 0;
1805 for (PPGMMAPPING pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3, i++)
1806 {
1807 SSMR3PutU32( pSSM, i);
1808 SSMR3PutStrZ( pSSM, pMapping->pszDesc); /* This is the best unique id we have... */
1809 SSMR3PutGCPtr( pSSM, pMapping->GCPtr);
1810 SSMR3PutGCUIntPtr(pSSM, pMapping->cPTs);
1811 }
1812 rc = SSMR3PutU32(pSSM, ~0); /* terminator. */
1813
1814 /*
1815 * Save the (remainder of the) memory.
1816 */
1817 if (RT_SUCCESS(rc))
1818 {
1819 if (pVM->pgm.s.LiveSave.fActive)
1820 {
1821 pgmR3ScanRomPages(pVM);
1822 pgmR3ScanMmio2Pages(pVM, SSM_PASS_FINAL);
1823 pgmR3ScanRamPages(pVM, true /*fFinalPass*/);
1824
1825 rc = pgmR3SaveShadowedRomPages( pVM, pSSM, true /*fLiveSave*/, true /*fFinalPass*/);
1826 if (RT_SUCCESS(rc))
1827 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
1828 if (RT_SUCCESS(rc))
1829 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
1830 }
1831 else
1832 {
1833 rc = pgmR3SaveRomRanges(pVM, pSSM);
1834 if (RT_SUCCESS(rc))
1835 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
1836 if (RT_SUCCESS(rc))
1837 rc = pgmR3SaveRomVirginPages( pVM, pSSM, false /*fLiveSave*/);
1838 if (RT_SUCCESS(rc))
1839 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, false /*fLiveSave*/, true /*fFinalPass*/);
1840 if (RT_SUCCESS(rc))
1841 rc = pgmR3SaveMmio2Pages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
1842 if (RT_SUCCESS(rc))
1843 rc = pgmR3SaveRamPages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
1844 }
1845 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes of it.) */
1846 }
1847
1848 pgmUnlock(pVM);
1849 return rc;
1850}
1851
1852
1853/**
1854 * Cleans up after an save state operation.
1855 *
1856 * @returns VBox status code.
1857 * @param pVM VM Handle.
1858 * @param pSSM SSM operation handle.
1859 */
1860static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
1861{
1862 /*
1863 * Do per page type cleanups first.
1864 */
1865 if (pVM->pgm.s.LiveSave.fActive)
1866 {
1867 pgmR3DoneRomPages(pVM);
1868 pgmR3DoneMmio2Pages(pVM);
1869 pgmR3DoneRamPages(pVM);
1870 }
1871
1872 /*
1873 * Clear the live save indicator and disengage write monitoring.
1874 */
1875 pgmLock(pVM);
1876 pVM->pgm.s.LiveSave.fActive = false;
1877 /** @todo this is blindly assuming that we're the only user of write
1878 * monitoring. Fix this when more users are added. */
1879 pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
1880 pgmUnlock(pVM);
1881
1882 return VINF_SUCCESS;
1883}
1884
1885
1886/**
1887 * Prepare state load operation.
1888 *
1889 * @returns VBox status code.
1890 * @param pVM VM Handle.
1891 * @param pSSM SSM operation handle.
1892 */
1893static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
1894{
1895 /*
1896 * Call the reset function to make sure all the memory is cleared.
1897 */
1898 PGMR3Reset(pVM);
1899 pVM->pgm.s.LiveSave.fActive = false;
1900 NOREF(pSSM);
1901 return VINF_SUCCESS;
1902}
1903
1904
1905/**
1906 * Load an ignored page.
1907 *
1908 * @returns VBox status code.
1909 * @param pSSM The saved state handle.
1910 */
1911static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
1912{
1913 uint8_t abPage[PAGE_SIZE];
1914 return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
1915}
1916
1917
1918/**
1919 * Loads a page without any bits in the saved state, i.e. making sure it's
1920 * really zero.
1921 *
1922 * @returns VBox status code.
1923 * @param pVM The VM handle.
1924 * @param uType The page type or PGMPAGETYPE_INVALID (old saved
1925 * state).
1926 * @param pPage The guest page tracking structure.
1927 * @param GCPhys The page address.
1928 * @param pRam The ram range (logging).
1929 */
1930static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1931{
1932 if ( PGM_PAGE_GET_TYPE(pPage) != uType
1933 && uType != PGMPAGETYPE_INVALID)
1934 return VERR_SSM_UNEXPECTED_DATA;
1935
1936 /* I think this should be sufficient. */
1937 if (!PGM_PAGE_IS_ZERO(pPage))
1938 return VERR_SSM_UNEXPECTED_DATA;
1939
1940 NOREF(pVM);
1941 NOREF(GCPhys);
1942 NOREF(pRam);
1943 return VINF_SUCCESS;
1944}
1945
1946
1947/**
1948 * Loads a page from the saved state.
1949 *
1950 * @returns VBox status code.
1951 * @param pVM The VM handle.
1952 * @param pSSM The SSM handle.
1953 * @param uType The page type or PGMPAGETYEP_INVALID (old saved
1954 * state).
1955 * @param pPage The guest page tracking structure.
1956 * @param GCPhys The page address.
1957 * @param pRam The ram range (logging).
1958 */
1959static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1960{
1961 /*
1962 * Match up the type, dealing with MMIO2 aliases (dropped).
1963 */
1964 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == uType
1965 || uType == PGMPAGETYPE_INVALID,
1966 ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
1967 VERR_SSM_UNEXPECTED_DATA);
1968
1969 /*
1970 * Load the page.
1971 */
1972 void *pvPage;
1973 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage);
1974 if (RT_SUCCESS(rc))
1975 rc = SSMR3GetMem(pSSM, pvPage, PAGE_SIZE);
1976
1977 return rc;
1978}
1979
1980
1981/**
1982 * Loads a page (counter part to pgmR3SavePage).
1983 *
1984 * @returns VBox status code, fully bitched errors.
1985 * @param pVM The VM handle.
1986 * @param pSSM The SSM handle.
1987 * @param uType The page type.
1988 * @param pPage The page.
1989 * @param GCPhys The page address.
1990 * @param pRam The RAM range (for error messages).
1991 */
1992static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1993{
1994 uint8_t uState;
1995 int rc = SSMR3GetU8(pSSM, &uState);
1996 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
1997 if (uState == 0 /* zero */)
1998 rc = pgmR3LoadPageZeroOld(pVM, uType, pPage, GCPhys, pRam);
1999 else if (uState == 1)
2000 rc = pgmR3LoadPageBitsOld(pVM, pSSM, uType, pPage, GCPhys, pRam);
2001 else
2002 rc = VERR_INTERNAL_ERROR;
2003 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uType=%d GCPhys=%RGp %s rc=%Rrc\n",
2004 pPage, uState, uType, GCPhys, pRam->pszDesc, rc),
2005 rc);
2006 return VINF_SUCCESS;
2007}
2008
2009
2010/**
2011 * Loads a shadowed ROM page.
2012 *
2013 * @returns VBox status code, errors are fully bitched.
2014 * @param pVM The VM handle.
2015 * @param pSSM The saved state handle.
2016 * @param pPage The page.
2017 * @param GCPhys The page address.
2018 * @param pRam The RAM range (for error messages).
2019 */
2020static int pgmR3LoadShadowedRomPageOld(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2021{
2022 /*
2023 * Load and set the protection first, then load the two pages, the first
2024 * one is the active the other is the passive.
2025 */
2026 PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
2027 AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_INTERNAL_ERROR);
2028
2029 uint8_t uProt;
2030 int rc = SSMR3GetU8(pSSM, &uProt);
2031 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
2032 PGMROMPROT enmProt = (PGMROMPROT)uProt;
2033 AssertLogRelMsgReturn( enmProt >= PGMROMPROT_INVALID
2034 && enmProt < PGMROMPROT_END,
2035 ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
2036 VERR_SSM_UNEXPECTED_DATA);
2037
2038 if (pRomPage->enmProt != enmProt)
2039 {
2040 rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
2041 AssertLogRelRCReturn(rc, rc);
2042 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_INTERNAL_ERROR);
2043 }
2044
2045 PPGMPAGE pPageActive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
2046 PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
2047 uint8_t u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM : PGMPAGETYPE_ROM_SHADOW;
2048 uint8_t u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;
2049
2050 /** @todo this isn't entirely correct as long as pgmPhysGCPhys2CCPtrInternal is
2051 * used down the line (will the 2nd page will be written to the first
2052 * one because of a false TLB hit since the TLB is using GCPhys and
2053 * doesn't check the HCPhys of the desired page). */
2054 rc = pgmR3LoadPageOld(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
2055 if (RT_SUCCESS(rc))
2056 {
2057 *pPageActive = *pPage;
2058 rc = pgmR3LoadPageOld(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
2059 }
2060 return rc;
2061}
2062
2063/**
2064 * Ram range flags and bits for older versions of the saved state.
2065 *
2066 * @returns VBox status code.
2067 *
2068 * @param pVM The VM handle
2069 * @param pSSM The SSM handle.
2070 * @param uVersion The saved state version.
2071 */
2072static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2073{
2074 PPGM pPGM = &pVM->pgm.s;
2075
2076 /*
2077 * Ram range flags and bits.
2078 */
2079 uint32_t i = 0;
2080 for (PPGMRAMRANGE pRam = pPGM->pRamRangesR3; ; pRam = pRam->pNextR3, i++)
2081 {
2082 /* Check the seqence number / separator. */
2083 uint32_t u32Sep;
2084 int rc = SSMR3GetU32(pSSM, &u32Sep);
2085 if (RT_FAILURE(rc))
2086 return rc;
2087 if (u32Sep == ~0U)
2088 break;
2089 if (u32Sep != i)
2090 {
2091 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2092 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2093 }
2094 AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2095
2096 /* Get the range details. */
2097 RTGCPHYS GCPhys;
2098 SSMR3GetGCPhys(pSSM, &GCPhys);
2099 RTGCPHYS GCPhysLast;
2100 SSMR3GetGCPhys(pSSM, &GCPhysLast);
2101 RTGCPHYS cb;
2102 SSMR3GetGCPhys(pSSM, &cb);
2103 uint8_t fHaveBits;
2104 rc = SSMR3GetU8(pSSM, &fHaveBits);
2105 if (RT_FAILURE(rc))
2106 return rc;
2107 if (fHaveBits & ~1)
2108 {
2109 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2110 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2111 }
2112 size_t cchDesc = 0;
2113 char szDesc[256];
2114 szDesc[0] = '\0';
2115 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2116 {
2117 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
2118 if (RT_FAILURE(rc))
2119 return rc;
2120 /* Since we've modified the description strings in r45878, only compare
2121 them if the saved state is more recent. */
2122 if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
2123 cchDesc = strlen(szDesc);
2124 }
2125
2126 /*
2127 * Match it up with the current range.
2128 *
2129 * Note there is a hack for dealing with the high BIOS mapping
2130 * in the old saved state format, this means we might not have
2131 * a 1:1 match on success.
2132 */
2133 if ( ( GCPhys != pRam->GCPhys
2134 || GCPhysLast != pRam->GCPhysLast
2135 || cb != pRam->cb
2136 || ( cchDesc
2137 && strcmp(szDesc, pRam->pszDesc)) )
2138 /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
2139 && ( uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
2140 || GCPhys != UINT32_C(0xfff80000)
2141 || GCPhysLast != UINT32_C(0xffffffff)
2142 || pRam->GCPhysLast != GCPhysLast
2143 || pRam->GCPhys < GCPhys
2144 || !fHaveBits)
2145 )
2146 {
2147 LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
2148 "State : %RGp-%RGp %RGp bytes %s %s\n",
2149 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc,
2150 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
2151 /*
2152 * If we're loading a state for debugging purpose, don't make a fuss if
2153 * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
2154 */
2155 if ( SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
2156 || GCPhys < 8 * _1M)
2157 AssertFailedReturn(VERR_SSM_LOAD_CONFIG_MISMATCH);
2158
2159 AssertMsgFailed(("debug skipping not implemented, sorry\n"));
2160 continue;
2161 }
2162
2163 uint32_t cPages = (GCPhysLast - GCPhys + 1) >> PAGE_SHIFT;
2164 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2165 {
2166 /*
2167 * Load the pages one by one.
2168 */
2169 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2170 {
2171 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2172 PPGMPAGE pPage = &pRam->aPages[iPage];
2173 uint8_t uType;
2174 rc = SSMR3GetU8(pSSM, &uType);
2175 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
2176 if (uType == PGMPAGETYPE_ROM_SHADOW)
2177 rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
2178 else
2179 rc = pgmR3LoadPageOld(pVM, pSSM, uType, pPage, GCPhysPage, pRam);
2180 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2181 }
2182 }
2183 else
2184 {
2185 /*
2186 * Old format.
2187 */
2188 AssertLogRelReturn(!pVM->pgm.s.fRamPreAlloc, VERR_NOT_SUPPORTED); /* can't be detected. */
2189
2190 /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
2191 The rest is generally irrelevant and wrong since the stuff have to match registrations. */
2192 uint32_t fFlags = 0;
2193 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2194 {
2195 uint16_t u16Flags;
2196 rc = SSMR3GetU16(pSSM, &u16Flags);
2197 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2198 fFlags |= u16Flags;
2199 }
2200
2201 /* Load the bits */
2202 if ( !fHaveBits
2203 && GCPhysLast < UINT32_C(0xe0000000))
2204 {
2205 /*
2206 * Dynamic chunks.
2207 */
2208 const uint32_t cPagesInChunk = (1*1024*1024) >> PAGE_SHIFT;
2209 AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
2210 ("cPages=%#x cPagesInChunk=%#x\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
2211 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2212
2213 for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
2214 {
2215 uint8_t fPresent;
2216 rc = SSMR3GetU8(pSSM, &fPresent);
2217 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2218 AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
2219 ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
2220 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2221
2222 for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
2223 {
2224 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2225 PPGMPAGE pPage = &pRam->aPages[iPage];
2226 if (fPresent)
2227 {
2228 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
2229 rc = pgmR3LoadPageToDevNullOld(pSSM);
2230 else
2231 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2232 }
2233 else
2234 rc = pgmR3LoadPageZeroOld(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2235 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2236 }
2237 }
2238 }
2239 else if (pRam->pvR3)
2240 {
2241 /*
2242 * MMIO2.
2243 */
2244 AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
2245 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2246 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2247 AssertLogRelMsgReturn(pRam->pvR3,
2248 ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
2249 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2250
2251 rc = SSMR3GetMem(pSSM, pRam->pvR3, pRam->cb);
2252 AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
2253 }
2254 else if (GCPhysLast < UINT32_C(0xfff80000))
2255 {
2256 /*
2257 * PCI MMIO, no pages saved.
2258 */
2259 }
2260 else
2261 {
2262 /*
2263 * Load the 0xfff80000..0xffffffff BIOS range.
2264 * It starts with X reserved pages that we have to skip over since
2265 * the RAMRANGE create by the new code won't include those.
2266 */
2267 AssertLogRelMsgReturn( !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
2268 && (fFlags & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
2269 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2270 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2271 AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
2272 ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
2273 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2274
2275 /* Skip wasted reserved pages before the ROM. */
2276 while (GCPhys < pRam->GCPhys)
2277 {
2278 rc = pgmR3LoadPageToDevNullOld(pSSM);
2279 GCPhys += PAGE_SIZE;
2280 }
2281
2282 /* Load the bios pages. */
2283 cPages = pRam->cb >> PAGE_SHIFT;
2284 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2285 {
2286 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2287 PPGMPAGE pPage = &pRam->aPages[iPage];
2288
2289 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
2290 ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, GCPhys),
2291 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2292 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
2293 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2294 }
2295 }
2296 }
2297 }
2298
2299 return VINF_SUCCESS;
2300}
2301
2302
/**
 * Worker for pgmR3Load and pgmR3LoadLocked.
 *
 * Processes the page records of the new (record-based) saved state format
 * until the PGM_STATE_REC_END terminator is hit.  Records address RAM pages
 * by physical address, and MMIO2/ROM pages by (range id, page index); when
 * the PGM_STATE_REC_FLAG_ADDR flag is clear, the address/index is implicit
 * (previous one + 1), which is why GCPhys/id/iPage persist across records.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The VM handle.
 * @param   pSSM    The SSM handle.
 * @param   uPass   The data pass (not currently used by this worker).
 *
 * @todo This needs splitting up if more record types or code twists are
 *       added...
 */
static int pgmR3LoadMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
{
    /*
     * Process page records until we hit the terminator.
     */
    RTGCPHYS        GCPhys   = NIL_RTGCPHYS;    /* implicit-address cursor for RAM records. */
    PPGMRAMRANGE    pRamHint = NULL;            /* speeds up consecutive RAM page lookups. */
    uint8_t         id       = UINT8_MAX;       /* current MMIO2/ROM range id. */
    uint32_t        iPage    = UINT32_MAX - 10; /* implicit page-index cursor; bogus until first explicit record. */
    PPGMROMRANGE    pRom     = NULL;            /* cached ROM range matching 'id'. */
    PPGMMMIO2RANGE  pMmio2   = NULL;            /* cached MMIO2 range matching 'id'. */
    for (;;)
    {
        /*
         * Get the record type and flags.
         */
        uint8_t u8;
        int rc = SSMR3GetU8(pSSM, &u8);
        if (RT_FAILURE(rc))
            return rc;
        if (u8 == PGM_STATE_REC_END)
            return VINF_SUCCESS;
        AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
        {
            /*
             * RAM page.
             */
            case PGM_STATE_REC_RAM_ZERO:
            case PGM_STATE_REC_RAM_RAW:
            {
                /*
                 * Get the address and resolve it into a page descriptor.
                 */
                if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
                    GCPhys += PAGE_SIZE;        /* implicit: next page after the previous record. */
                else
                {
                    rc = SSMR3GetGCPhys(pSSM, &GCPhys);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

                PPGMPAGE pPage;
                rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhys, &pPage, &pRamHint);
                AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);

                /*
                 * Take action according to the record type.
                 */
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_RAM_ZERO:
                    {
                        /* Already the zero page? Nothing to do. */
                        if (PGM_PAGE_IS_ZERO(pPage))
                            break;
                        /** @todo implement zero page replacing. */
                        AssertLogRelMsgReturn(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED, ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_INTERNAL_ERROR_5);
                        void *pvDstPage;
                        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
                        AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
                        ASMMemZeroPage(pvDstPage);
                        break;
                    }

                    case PGM_STATE_REC_RAM_RAW:
                    {
                        /* Read a full page of raw bits from the stream. */
                        void *pvDstPage;
                        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
                        AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
                        rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
                        if (RT_FAILURE(rc))
                            return rc;
                        break;
                    }

                    default:
                        AssertMsgFailedReturn(("%#x\n", u8), VERR_INTERNAL_ERROR);
                }
                id = UINT8_MAX; /* invalidate the range-id cursor; RAM records don't use it. */
                break;
            }

            /*
             * MMIO2 page.
             */
            case PGM_STATE_REC_MMIO2_RAW:
            case PGM_STATE_REC_MMIO2_ZERO:
            {
                /*
                 * Get the ID + page number and resolve that into a MMIO2 page.
                 */
                if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
                    iPage++;                    /* implicit: next page in the same range. */
                else
                {
                    SSMR3GetU8(pSSM, &id);
                    rc = SSMR3GetU32(pSSM, &iPage);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                if (    !pMmio2
                    ||  pMmio2->idSavedState != id)
                {
                    /* Cached range doesn't match; look the id up in the MMIO2 list. */
                    for (pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
                        if (pMmio2->idSavedState == id)
                            break;
                    AssertLogRelMsgReturn(pMmio2, ("id=%#u iPage=%#x\n", id, iPage), VERR_INTERNAL_ERROR);
                }
                AssertLogRelMsgReturn(iPage < (pMmio2->RamRange.cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pMmio2->RamRange.cb, pMmio2->RamRange.pszDesc), VERR_INTERNAL_ERROR);
                void *pvDstPage = (uint8_t *)pMmio2->RamRange.pvR3 + ((size_t)iPage << PAGE_SHIFT);

                /*
                 * Load the page bits.
                 */
                if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_MMIO2_ZERO)
                    ASMMemZeroPage(pvDstPage);
                else
                {
                    rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                GCPhys = NIL_RTGCPHYS; /* invalidate the RAM address cursor. */
                break;
            }

            /*
             * ROM pages.
             */
            case PGM_STATE_REC_ROM_VIRGIN:
            case PGM_STATE_REC_ROM_SHW_RAW:
            case PGM_STATE_REC_ROM_SHW_ZERO:
            case PGM_STATE_REC_ROM_PROT:
            {
                /*
                 * Get the ID + page number and resolve that into a ROM page descriptor.
                 */
                if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
                    iPage++;                    /* implicit: next page in the same range. */
                else
                {
                    SSMR3GetU8(pSSM, &id);
                    rc = SSMR3GetU32(pSSM, &iPage);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                if (    !pRom
                    ||  pRom->idSavedState != id)
                {
                    /* Cached range doesn't match; look the id up in the ROM list. */
                    for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
                        if (pRom->idSavedState == id)
                            break;
                    AssertLogRelMsgReturn(pRom, ("id=%#u iPage=%#x\n", id, iPage), VERR_INTERNAL_ERROR);
                }
                AssertLogRelMsgReturn(iPage < (pRom->cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pRom->cb, pRom->pszDesc), VERR_INTERNAL_ERROR);
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);

                /*
                 * Get and set the protection.
                 */
                uint8_t u8Prot;
                rc = SSMR3GetU8(pSSM, &u8Prot);
                if (RT_FAILURE(rc))
                    return rc;
                PGMROMPROT enmProt = (PGMROMPROT)u8Prot;
                AssertLogRelMsgReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, ("GCPhys=%RGp enmProt=%d\n", GCPhys, enmProt), VERR_INTERNAL_ERROR);

                if (enmProt != pRomPage->enmProt)
                {
                    /* Protection changes are only valid for shadowed ROM ranges. */
                    AssertLogRelMsgReturn(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED,
                                          ("GCPhys=%RGp enmProt=%d %s\n", GCPhys, enmProt, pRom->pszDesc),
                                          VERR_SSM_LOAD_CONFIG_MISMATCH);
                    rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
                    AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
                    AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_INTERNAL_ERROR);
                }
                if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_ROM_PROT)
                    break; /* done - a PROT record carries no page bits. */

                /*
                 * Get the right page descriptor.
                 */
                /* NULL means the page being loaded is the one currently mapped at
                   GCPhys, so it must be looked up in the RAM range instead. */
                PPGMPAGE pRealPage;
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_ROM_VIRGIN:
                        if (!PGMROMPROT_IS_ROM(enmProt))
                            pRealPage = &pRomPage->Virgin;
                        else
                            pRealPage = NULL;
                        break;

                    case PGM_STATE_REC_ROM_SHW_RAW:
                    case PGM_STATE_REC_ROM_SHW_ZERO:
                        AssertLogRelMsgReturn(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED,
                                              ("GCPhys=%RGp enmProt=%d %s\n", GCPhys, enmProt, pRom->pszDesc),
                                              VERR_SSM_LOAD_CONFIG_MISMATCH);
                        if (PGMROMPROT_IS_ROM(enmProt))
                            pRealPage = &pRomPage->Shadow;
                        else
                            pRealPage = NULL;
                        break;

                    default: AssertLogRelFailedReturn(VERR_INTERNAL_ERROR); /* shut up gcc */
                }
                if (!pRealPage)
                {
                    rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhys, &pRealPage, &pRamHint);
                    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
                }

                /*
                 * Make it writable and map it (if necessary).
                 */
                void *pvDstPage = NULL;
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_ROM_SHW_ZERO:
                        /* Already zero - no need to map it for zeroing. */
                        if (PGM_PAGE_IS_ZERO(pRealPage))
                            break;
                        /** @todo implement zero page replacing. */
                        /* fall thru */
                    case PGM_STATE_REC_ROM_VIRGIN:
                    case PGM_STATE_REC_ROM_SHW_RAW:
                    {
                        rc = pgmPhysPageMakeWritableAndMap(pVM, pRealPage, GCPhys, &pvDstPage);
                        AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
                        break;
                    }
                }

                /*
                 * Load the bits.
                 */
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_ROM_SHW_ZERO:
                        if (pvDstPage)
                            ASMMemZeroPage(pvDstPage);
                        break;

                    case PGM_STATE_REC_ROM_VIRGIN:
                    case PGM_STATE_REC_ROM_SHW_RAW:
                        rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
                        if (RT_FAILURE(rc))
                            return rc;
                        break;
                }
                GCPhys = NIL_RTGCPHYS; /* invalidate the RAM address cursor. */
                break;
            }

            /*
             * Unknown type.
             */
            default:
                AssertLogRelMsgFailedReturn(("%#x\n", u8), VERR_INTERNAL_ERROR);
        }
    } /* forever */
}
2578
2579
2580/**
2581 * Worker for pgmR3Load.
2582 *
2583 * @returns VBox status code.
2584 *
2585 * @param pVM The VM handle.
2586 * @param pSSM The SSM handle.
2587 * @param uVersion The saved state version.
2588 */
2589static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2590{
2591 PPGM pPGM = &pVM->pgm.s;
2592 int rc;
2593 uint32_t u32Sep;
2594
2595 /*
2596 * Load basic data (required / unaffected by relocation).
2597 */
2598 if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
2599 {
2600 rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFields[0]);
2601 AssertLogRelRCReturn(rc, rc);
2602
2603 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2604 {
2605 rc = SSMR3GetStruct(pSSM, &pVM->aCpus[i].pgm.s, &s_aPGMCpuFields[0]);
2606 AssertLogRelRCReturn(rc, rc);
2607 }
2608 }
2609 else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2610 {
2611 AssertRelease(pVM->cCpus == 1);
2612
2613 PGMOLD pgmOld;
2614 rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
2615 AssertLogRelRCReturn(rc, rc);
2616
2617 pPGM->fMappingsFixed = pgmOld.fMappingsFixed;
2618 pPGM->GCPtrMappingFixed = pgmOld.GCPtrMappingFixed;
2619 pPGM->cbMappingFixed = pgmOld.cbMappingFixed;
2620
2621 pVM->aCpus[0].pgm.s.fA20Enabled = pgmOld.fA20Enabled;
2622 pVM->aCpus[0].pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
2623 pVM->aCpus[0].pgm.s.enmGuestMode = pgmOld.enmGuestMode;
2624 }
2625 else
2626 {
2627 AssertRelease(pVM->cCpus == 1);
2628
2629 SSMR3GetBool(pSSM, &pPGM->fMappingsFixed);
2630 SSMR3GetGCPtr(pSSM, &pPGM->GCPtrMappingFixed);
2631 SSMR3GetU32(pSSM, &pPGM->cbMappingFixed);
2632
2633 uint32_t cbRamSizeIgnored;
2634 rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
2635 if (RT_FAILURE(rc))
2636 return rc;
2637 SSMR3GetGCPhys(pSSM, &pVM->aCpus[0].pgm.s.GCPhysA20Mask);
2638
2639 uint32_t u32 = 0;
2640 SSMR3GetUInt(pSSM, &u32);
2641 pVM->aCpus[0].pgm.s.fA20Enabled = !!u32;
2642 SSMR3GetUInt(pSSM, &pVM->aCpus[0].pgm.s.fSyncFlags);
2643 RTUINT uGuestMode;
2644 SSMR3GetUInt(pSSM, &uGuestMode);
2645 pVM->aCpus[0].pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
2646
2647 /* check separator. */
2648 SSMR3GetU32(pSSM, &u32Sep);
2649 if (RT_FAILURE(rc))
2650 return rc;
2651 if (u32Sep != (uint32_t)~0)
2652 {
2653 AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
2654 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2655 }
2656 }
2657
2658 /*
2659 * The guest mappings.
2660 */
2661 uint32_t i = 0;
2662 for (;; i++)
2663 {
2664 /* Check the seqence number / separator. */
2665 rc = SSMR3GetU32(pSSM, &u32Sep);
2666 if (RT_FAILURE(rc))
2667 return rc;
2668 if (u32Sep == ~0U)
2669 break;
2670 if (u32Sep != i)
2671 {
2672 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2673 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2674 }
2675
2676 /* get the mapping details. */
2677 char szDesc[256];
2678 szDesc[0] = '\0';
2679 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
2680 if (RT_FAILURE(rc))
2681 return rc;
2682 RTGCPTR GCPtr;
2683 SSMR3GetGCPtr(pSSM, &GCPtr);
2684 RTGCPTR cPTs;
2685 rc = SSMR3GetGCUIntPtr(pSSM, &cPTs);
2686 if (RT_FAILURE(rc))
2687 return rc;
2688
2689 /* find matching range. */
2690 PPGMMAPPING pMapping;
2691 for (pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3)
2692 if ( pMapping->cPTs == cPTs
2693 && !strcmp(pMapping->pszDesc, szDesc))
2694 break;
2695 AssertLogRelMsgReturn(pMapping, ("Couldn't find mapping: cPTs=%#x szDesc=%s (GCPtr=%RGv)\n",
2696 cPTs, szDesc, GCPtr),
2697 VERR_SSM_LOAD_CONFIG_MISMATCH);
2698
2699 /* relocate it. */
2700 if (pMapping->GCPtr != GCPtr)
2701 {
2702 AssertMsg((GCPtr >> X86_PD_SHIFT << X86_PD_SHIFT) == GCPtr, ("GCPtr=%RGv\n", GCPtr));
2703 pgmR3MapRelocate(pVM, pMapping, pMapping->GCPtr, GCPtr);
2704 }
2705 else
2706 Log(("pgmR3Load: '%s' needed no relocation (%RGv)\n", szDesc, GCPtr));
2707 }
2708
2709 /*
2710 * Load the RAM contents.
2711 */
2712 if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
2713 {
2714 if (!pVM->pgm.s.LiveSave.fActive)
2715 {
2716 rc = pgmR3LoadRomRanges(pVM, pSSM);
2717 if (RT_FAILURE(rc))
2718 return rc;
2719 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
2720 if (RT_FAILURE(rc))
2721 return rc;
2722 }
2723
2724 return pgmR3LoadMemory(pVM, pSSM, SSM_PASS_FINAL);
2725 }
2726 return pgmR3LoadMemoryOld(pVM, pSSM, uVersion);
2727}
2728
2729
2730/**
2731 * Execute state load operation.
2732 *
2733 * @returns VBox status code.
2734 * @param pVM VM Handle.
2735 * @param pSSM SSM operation handle.
2736 * @param uVersion Data layout version.
2737 * @param uPass The data pass.
2738 */
2739static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2740{
2741 int rc;
2742 PPGM pPGM = &pVM->pgm.s;
2743
2744 /*
2745 * Validate version.
2746 */
2747 if ( ( uPass != SSM_PASS_FINAL
2748 && uVersion != PGM_SAVED_STATE_VERSION)
2749 || ( uVersion != PGM_SAVED_STATE_VERSION
2750 && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
2751 && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
2752 && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
2753 && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
2754 )
2755 {
2756 AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
2757 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
2758 }
2759
2760 /*
2761 * Do the loading while owning the lock because a bunch of the functions
2762 * we're using requires this.
2763 */
2764 if (uPass != SSM_PASS_FINAL)
2765 {
2766 pgmLock(pVM);
2767 if (uPass != 0)
2768 rc = pgmR3LoadMemory(pVM, pSSM, uPass);
2769 else
2770 {
2771 pVM->pgm.s.LiveSave.fActive = true;
2772 rc = pgmR3LoadRomRanges(pVM, pSSM);
2773 if (RT_SUCCESS(rc))
2774 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
2775 if (RT_SUCCESS(rc))
2776 rc = pgmR3LoadMemory(pVM, pSSM, uPass);
2777 }
2778 pgmUnlock(pVM);
2779 }
2780 else
2781 {
2782 pgmLock(pVM);
2783 rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
2784 pVM->pgm.s.LiveSave.fActive = false;
2785 pgmUnlock(pVM);
2786 if (RT_SUCCESS(rc))
2787 {
2788 /*
2789 * We require a full resync now.
2790 */
2791 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2792 {
2793 PVMCPU pVCpu = &pVM->aCpus[i];
2794 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2795 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2796
2797 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
2798 }
2799
2800 pgmR3HandlerPhysicalUpdateAll(pVM);
2801
2802 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2803 {
2804 PVMCPU pVCpu = &pVM->aCpus[i];
2805
2806 /*
2807 * Change the paging mode.
2808 */
2809 rc = PGMR3ChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode);
2810
2811 /* Restore pVM->pgm.s.GCPhysCR3. */
2812 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
2813 RTGCPHYS GCPhysCR3 = CPUMGetGuestCR3(pVCpu);
2814 if ( pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE
2815 || pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE_NX
2816 || pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64
2817 || pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
2818 GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAE_PAGE_MASK);
2819 else
2820 GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAGE_MASK);
2821 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2822 }
2823 }
2824 }
2825
2826 return rc;
2827}
2828
2829
2830/**
2831 * Registers the saved state callbacks with SSM.
2832 *
2833 * @returns VBox status code.
2834 * @param pVM Pointer to VM structure.
2835 * @param cbRam The RAM size.
2836 */
2837int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
2838{
2839 return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
2840 pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
2841 NULL, pgmR3SaveExec, pgmR3SaveDone,
2842 pgmR3LoadPrep, pgmR3Load, NULL);
2843}
2844
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette