
source: vbox/trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp@74795

Last change on this file since 74795 was 73266, checked in by vboxsync, 6 years ago

PGM,HM: Made PGMR3ChangeMode work in ring-0 too. This required a kludge for the VT-x real-in-V86-mode stuff, as there are certain limitations on that mode which weren't checked as CR0.PE was cleared. The kludge isn't very smart, but it seems to do the job. Similar kludge for getting out of the mode. bugref:9044

/* $Id: PGMSavedState.cpp 73266 2018-07-20 14:27:20Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, The Saved State Part.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmdrv.h>
#include <VBox/vmm/pdmdev.h>
#include "PGMInternal.h"
#include <VBox/vmm/vm.h>
#include "PGMInline.h"

#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/vmm/ftm.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/crc.h>
#include <iprt/mem.h>
#include <iprt/sha.h>
#include <iprt/string.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Saved state data unit version. */
#define PGM_SAVED_STATE_VERSION                 14
/** Saved state data unit version before the PAE PDPE registers. */
#define PGM_SAVED_STATE_VERSION_PRE_PAE         13
/** Saved state data unit version after this includes ballooned page flags in
 *  the state (see @bugref{5515}). */
#define PGM_SAVED_STATE_VERSION_BALLOON_BROKEN  12
/** Saved state before the balloon change. */
#define PGM_SAVED_STATE_VERSION_PRE_BALLOON     11
/** Saved state data unit version used during 3.1 development, misses the RAM
 *  config. */
#define PGM_SAVED_STATE_VERSION_NO_RAM_CFG      10
/** Saved state data unit version for 3.0 (pre teleportation). */
#define PGM_SAVED_STATE_VERSION_3_0_0           9
/** Saved state data unit version for 2.2.2 and later. */
#define PGM_SAVED_STATE_VERSION_2_2_2           8
/** Saved state data unit version for 2.2.0. */
#define PGM_SAVED_STATE_VERSION_RR_DESC         7
/** Saved state data unit version. */
#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE   6


/** @name Sparse state record types
 * @{ */
/** Zero page. No data. */
#define PGM_STATE_REC_RAM_ZERO          UINT8_C(0x00)
/** Raw page. */
#define PGM_STATE_REC_RAM_RAW           UINT8_C(0x01)
/** Raw MMIO2 page. */
#define PGM_STATE_REC_MMIO2_RAW         UINT8_C(0x02)
/** Zero MMIO2 page. */
#define PGM_STATE_REC_MMIO2_ZERO        UINT8_C(0x03)
/** Virgin ROM page. Followed by protection (8-bit) and the raw bits. */
#define PGM_STATE_REC_ROM_VIRGIN        UINT8_C(0x04)
/** Raw shadowed ROM page. The protection (8-bit) precedes the raw bits. */
#define PGM_STATE_REC_ROM_SHW_RAW       UINT8_C(0x05)
/** Zero shadowed ROM page. The protection (8-bit) is the only payload. */
#define PGM_STATE_REC_ROM_SHW_ZERO      UINT8_C(0x06)
/** ROM protection (8-bit). */
#define PGM_STATE_REC_ROM_PROT          UINT8_C(0x07)
/** Ballooned page. No data. */
#define PGM_STATE_REC_RAM_BALLOONED     UINT8_C(0x08)
/** The last record type. */
#define PGM_STATE_REC_LAST              PGM_STATE_REC_RAM_BALLOONED
/** End marker. */
#define PGM_STATE_REC_END               UINT8_C(0xff)
/** Flag indicating that the data is preceded by the page address.
 * For RAW pages this is a RTGCPHYS.  For MMIO2 and ROM pages this is a 8-bit
 * range ID and a 32-bit page index.
 */
#define PGM_STATE_REC_FLAG_ADDR         UINT8_C(0x80)
/** @} */
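
/* Editor's note: a minimal sketch (not part of the original source) of how a
 * record byte from the sparse stream splits into its type and address flag:
 *
 *     uint8_t const u8Rec  = ...;  // byte read from the stream
 *     bool const    fAddr  = RT_BOOL(u8Rec & PGM_STATE_REC_FLAG_ADDR);
 *     uint8_t const u8Type = u8Rec & ~PGM_STATE_REC_FLAG_ADDR;
 *     AssertReturn(u8Type <= PGM_STATE_REC_LAST || u8Rec == PGM_STATE_REC_END,
 *                  VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
 *
 * When fAddr is clear, the record implicitly applies to the page following
 * the previously saved one, which is how the savers below run-length encode
 * consecutive pages.
 */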

/** The CRC-32 for a zero page. */
#define PGM_STATE_CRC32_ZERO_PAGE       UINT32_C(0xc71c0011)
/** The CRC-32 for a zero half page. */
#define PGM_STATE_CRC32_ZERO_HALF_PAGE  UINT32_C(0xf1e8ba9e)
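
/* Editor's note (assumption, not stated in the original source): judging by
 * the names and how the scanners below use them, these constants appear to be
 * RTCrc32() over PAGE_SIZE respectively PAGE_SIZE / 2 bytes of zeros, e.g.:
 *
 *     uint8_t abZero[PAGE_SIZE] = {0};
 *     Assert(RTCrc32(abZero, PAGE_SIZE)     == PGM_STATE_CRC32_ZERO_PAGE);
 *     Assert(RTCrc32(abZero, PAGE_SIZE / 2) == PGM_STATE_CRC32_ZERO_HALF_PAGE);
 *
 * Precomputing them lets the code classify zero pages without hashing a zero
 * buffer each time.
 */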



/** @name Old Page types used in older saved states.
 * @{ */
/** Old saved state: The usual invalid zero entry. */
#define PGMPAGETYPE_OLD_INVALID             0
/** Old saved state: RAM page. (RWX) */
#define PGMPAGETYPE_OLD_RAM                 1
/** Old saved state: MMIO2 page. (RWX) */
#define PGMPAGETYPE_OLD_MMIO2               1
/** Old saved state: MMIO2 page aliased over an MMIO page. (RWX)
 *  See PGMHandlerPhysicalPageAlias(). */
#define PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO    2
/** Old saved state: Shadowed ROM. (RWX) */
#define PGMPAGETYPE_OLD_ROM_SHADOW          3
/** Old saved state: ROM page. (R-X) */
#define PGMPAGETYPE_OLD_ROM                 4
/** Old saved state: MMIO page. (---) */
#define PGMPAGETYPE_OLD_MMIO                5
/** @} */


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/** For loading old saved states. (pre-smp) */
typedef struct
{
    /** If set no conflict checks are required. (boolean) */
    bool            fMappingsFixed;
    /** Size of fixed mapping */
    uint32_t        cbMappingFixed;
    /** Base address (GC) of fixed mapping */
    RTGCPTR         GCPtrMappingFixed;
    /** A20 gate mask.
     * Our current approach to A20 emulation is to let REM do it and don't bother
     * anywhere else. The interesting guests will be operating with it enabled anyway.
     * But should the need arise, we'll subject physical addresses to this mask. */
    RTGCPHYS        GCPhysA20Mask;
    /** A20 gate state - boolean! */
    bool            fA20Enabled;
    /** The guest paging mode. */
    PGMMODE         enmGuestMode;
} PGMOLD;


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** PGM fields to save/load. */
static const SSMFIELD s_aPGMFields[] =
{
    SSMFIELD_ENTRY(         PGM, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGM, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGM, cbMappingFixed),
    SSMFIELD_ENTRY(         PGM, cBalloonedPages),
    SSMFIELD_ENTRY_TERM()
};

static const SSMFIELD s_aPGMFieldsPreBalloon[] =
{
    SSMFIELD_ENTRY(         PGM, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGM, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGM, cbMappingFixed),
    SSMFIELD_ENTRY_TERM()
};

static const SSMFIELD s_aPGMCpuFields[] =
{
    SSMFIELD_ENTRY(         PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY(         PGMCPU, aGCPhysGstPaePDs[0]),
    SSMFIELD_ENTRY(         PGMCPU, aGCPhysGstPaePDs[1]),
    SSMFIELD_ENTRY(         PGMCPU, aGCPhysGstPaePDs[2]),
    SSMFIELD_ENTRY(         PGMCPU, aGCPhysGstPaePDs[3]),
    SSMFIELD_ENTRY_TERM()
};

static const SSMFIELD s_aPGMCpuFieldsPrePae[] =
{
    SSMFIELD_ENTRY(         PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};

static const SSMFIELD s_aPGMFields_Old[] =
{
    SSMFIELD_ENTRY(         PGMOLD, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGMOLD, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, cbMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMOLD, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMOLD, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};


/**
 * Find the ROM tracking structure for the given page.
 *
 * @returns Pointer to the ROM page structure. NULL if the caller didn't check
 *          that it's a ROM page.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The address of the ROM page.
 */
static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */
{
    for (PPGMROMRANGE pRomRange = pVM->pgm.s.CTX_SUFF(pRomRanges);
         pRomRange;
         pRomRange = pRomRange->CTX_SUFF(pNext))
    {
        RTGCPHYS off = GCPhys - pRomRange->GCPhys;
        if (off < pRomRange->cb)
            return &pRomRange->aPages[off >> PAGE_SHIFT];
    }
    return NULL;
}


/**
 * Prepares the ROM pages for a live save.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int pgmR3PrepRomPages(PVM pVM)
{
    /*
     * Initialize the live save tracking in the ROM page descriptors.
     */
    pgmLock(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        PPGMRAMRANGE    pRamHint = NULL;
        uint32_t const  cPages   = pRom->cb >> PAGE_SHIFT;

        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            pRom->aPages[iPage].LiveSave.u8Prot           = (uint8_t)PGMROMPROT_INVALID;
            pRom->aPages[iPage].LiveSave.fWrittenTo       = false;
            pRom->aPages[iPage].LiveSave.fDirty           = true;
            pRom->aPages[iPage].LiveSave.fDirtiedRecently = true;
            if (!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
            {
                if (PGMROMPROT_IS_ROM(pRom->aPages[iPage].enmProt))
                    pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
                else
                {
                    RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                    PPGMPAGE pPage;
                    int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
                    AssertLogRelMsgRC(rc, ("%Rrc GCPhys=%RGp\n", rc, GCPhys));
                    if (RT_SUCCESS(rc))
                        pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(pPage) && !PGM_PAGE_IS_BALLOONED(pPage);
                    else
                        pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
                }
            }
        }

        pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
            pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
    }
    pgmUnlock(pVM);

    return VINF_SUCCESS;
}


/**
 * Assigns IDs to the ROM ranges and saves them.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    Saved state handle.
 */
static int pgmR3SaveRomRanges(PVM pVM, PSSMHANDLE pSSM)
{
    pgmLock(pVM);
    uint8_t id = 1;
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3, id++)
    {
        pRom->idSavedState = id;
        SSMR3PutU8(pSSM, id);
        SSMR3PutStrZ(pSSM, "");         /* device name */
        SSMR3PutU32(pSSM, 0);           /* device instance */
        SSMR3PutU8(pSSM, 0);            /* region */
        SSMR3PutStrZ(pSSM, pRom->pszDesc);
        SSMR3PutGCPhys(pSSM, pRom->GCPhys);
        int rc = SSMR3PutGCPhys(pSSM, pRom->cb);
        if (RT_FAILURE(rc))
            break;
    }
    pgmUnlock(pVM);
    return SSMR3PutU8(pSSM, UINT8_MAX);
}
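
/* Editor's note: per the code above, each ROM range record in the saved state
 * has this shape (a descriptive sketch, not an authoritative format spec):
 *
 *     uint8_t  id;          // 1..N, UINT8_MAX terminates the list
 *     char     szDevName[]; // zero-terminated, always empty for ROM ranges
 *     uint32_t uInstance;   // always 0 here
 *     uint8_t  iRegion;     // always 0 here
 *     char     szDesc[];    // zero-terminated description used for matching
 *     RTGCPHYS GCPhys;      // guest physical base address
 *     RTGCPHYS cb;          // size of the range in bytes
 *
 * pgmR3LoadRomRanges() below parses exactly this sequence and matches ranges
 * by description.
 */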


/**
 * Loads the ROM range ID assignments.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3LoadRomRanges(PVM pVM, PSSMHANDLE pSSM)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
        pRom->idSavedState = UINT8_MAX;

    for (;;)
    {
        /*
         * Read the data.
         */
        uint8_t id;
        int rc = SSMR3GetU8(pSSM, &id);
        if (RT_FAILURE(rc))
            return rc;
        if (id == UINT8_MAX)
        {
            for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
                AssertLogRelMsg(pRom->idSavedState != UINT8_MAX,
                                ("The \"%s\" ROM was not found in the saved state. Probably due to some misconfiguration\n",
                                 pRom->pszDesc));
            return VINF_SUCCESS;        /* the end */
        }
        AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
        rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
        AssertLogRelRCReturn(rc, rc);

        uint32_t    uInstance;
        SSMR3GetU32(pSSM, &uInstance);
        uint8_t     iRegion;
        SSMR3GetU8(pSSM, &iRegion);

        char szDesc[64];
        rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
        AssertLogRelRCReturn(rc, rc);

        RTGCPHYS GCPhys;
        SSMR3GetGCPhys(pSSM, &GCPhys);
        RTGCPHYS cb;
        rc = SSMR3GetGCPhys(pSSM, &cb);
        if (RT_FAILURE(rc))
            return rc;
        AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK),     ("cb=%RGp %s\n", cb, szDesc),         VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /*
         * Locate a matching ROM range.
         */
        AssertLogRelMsgReturn(   uInstance == 0
                              && iRegion == 0
                              && szDevName[0] == '\0',
                              ("GCPhys=%RGp %s\n", GCPhys, szDesc),
                              VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        PPGMROMRANGE pRom;
        for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
        {
            if (    pRom->idSavedState == UINT8_MAX
                &&  !strcmp(pRom->pszDesc, szDesc))
            {
                pRom->idSavedState = id;
                break;
            }
        }
        if (!pRom)
            return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("ROM at %RGp by the name '%s' was not found"), GCPhys, szDesc);
    } /* forever */
}


/**
 * Scan ROM pages.
 *
 * @param   pVM     The cross context VM structure.
 */
static void pgmR3ScanRomPages(PVM pVM)
{
    /*
     * The shadow ROMs.
     */
    pgmLock(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        {
            uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                if (pRomPage->LiveSave.fWrittenTo)
                {
                    pRomPage->LiveSave.fWrittenTo = false;
                    if (!pRomPage->LiveSave.fDirty)
                    {
                        pRomPage->LiveSave.fDirty = true;
                        pVM->pgm.s.LiveSave.Rom.cReadyPages--;
                        pVM->pgm.s.LiveSave.Rom.cDirtyPages++;
                    }
                    pRomPage->LiveSave.fDirtiedRecently = true;
                }
                else
                    pRomPage->LiveSave.fDirtiedRecently = false;
            }
        }
    }
    pgmUnlock(pVM);
}


/**
 * Takes care of the virgin ROM pages in the first pass.
 *
 * This is an attempt at simplifying the handling of ROM pages a little bit.
 * This ASSUMES that no new ROM ranges will be added and that they won't be
 * relinked in any way.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether we're in a live save or not.
 */
static int pgmR3SaveRomVirginPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave)
{
    if (FTMIsDeltaLoadSaveActive(pVM))
        return VINF_SUCCESS;            /* nothing to do as nothing has changed here */

    pgmLock(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            RTGCPHYS   GCPhys  = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
            PGMROMPROT enmProt = pRom->aPages[iPage].enmProt;

            /* Get the virgin page descriptor. */
            PPGMPAGE pPage;
            if (PGMROMPROT_IS_ROM(enmProt))
                pPage = pgmPhysGetPage(pVM, GCPhys);
            else
                pPage = &pRom->aPages[iPage].Virgin;

            /* Get the page bits. (Cannot use pgmPhysGCPhys2CCPtrInternalReadOnly here!) */
            int rc = VINF_SUCCESS;
            char abPage[PAGE_SIZE];
            if (    !PGM_PAGE_IS_ZERO(pPage)
                &&  !PGM_PAGE_IS_BALLOONED(pPage))
            {
                void const *pvPage;
                rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                if (RT_SUCCESS(rc))
                    memcpy(abPage, pvPage, PAGE_SIZE);
            }
            else
                ASMMemZeroPage(abPage);
            pgmUnlock(pVM);
            AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

            /* Save it. */
            if (iPage > 0)
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN);
            else
            {
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN | PGM_STATE_REC_FLAG_ADDR);
                SSMR3PutU8(pSSM, pRom->idSavedState);
                SSMR3PutU32(pSSM, iPage);
            }
            SSMR3PutU8(pSSM, (uint8_t)enmProt);
            rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
            if (RT_FAILURE(rc))
                return rc;

            /* Update state. */
            pgmLock(pVM);
            pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)enmProt;
            if (fLiveSave)
            {
                pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                pVM->pgm.s.LiveSave.cSavedPages++;
            }
        }
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}


/**
 * Saves dirty pages in the shadowed ROM ranges.
 *
 * Used by pgmR3LiveExecPart2 and pgmR3SaveExecMemory.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   fFinalPass  Whether this is the final pass or not.
 */
static int pgmR3SaveShadowedRomPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, bool fFinalPass)
{
    if (FTMIsDeltaLoadSaveActive(pVM))
        return VINF_SUCCESS;            /* nothing to do as we deal with those pages separately */

    /*
     * The Shadowed ROMs.
     *
     * ASSUMES that the ROM ranges are fixed.
     * ASSUMES that all the ROM ranges are mapped.
     */
    pgmLock(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        {
            uint32_t const cPages    = pRom->cb >> PAGE_SHIFT;
            uint32_t       iPrevPage = cPages;
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                if (    !fLiveSave
                    ||  (   pRomPage->LiveSave.fDirty
                         && (   (   !pRomPage->LiveSave.fDirtiedRecently
                                 && !pRomPage->LiveSave.fWrittenTo)
                             || fFinalPass
                            )
                        )
                   )
                {
                    uint8_t     abPage[PAGE_SIZE];
                    PGMROMPROT  enmProt = pRomPage->enmProt;
                    RTGCPHYS    GCPhys  = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                    PPGMPAGE    pPage   = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage(pVM, GCPhys);
                    bool        fZero   = PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_BALLOONED(pPage);
                    Assert(!PGM_PAGE_IS_BALLOONED(pPage)); /* Shouldn't be ballooned. */
                    int         rc      = VINF_SUCCESS;
                    if (!fZero)
                    {
                        void const *pvPage;
                        rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                        if (RT_SUCCESS(rc))
                            memcpy(abPage, pvPage, PAGE_SIZE);
                    }
                    if (fLiveSave && RT_SUCCESS(rc))
                    {
                        pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                        pRomPage->LiveSave.fDirty = false;
                        pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                        pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                        pVM->pgm.s.LiveSave.cSavedPages++;
                    }
                    pgmUnlock(pVM);
                    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW));
                    else
                    {
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW) | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (!fZero)
                        rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;

                    pgmLock(pVM);
                    iPrevPage = iPage;
                }
                /*
                 * In the final pass, make sure the protection is in sync.
                 */
                else if (   fFinalPass
                         && pRomPage->LiveSave.u8Prot != pRomPage->enmProt)
                {
                    PGMROMPROT enmProt = pRomPage->enmProt;
                    pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                    pgmUnlock(pVM);

                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT);
                    else
                    {
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    int rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (RT_FAILURE(rc))
                        return rc;

                    pgmLock(pVM);
                    iPrevPage = iPage;
                }
            }
        }
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
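
/* Editor's note on the iPrevPage / PGM_STATE_REC_FLAG_ADDR dance above (an
 * explanatory sketch, not part of the original source): a full address record
 * (range ID + page index) is only written when the page being saved does not
 * immediately follow the previously saved one. A loader therefore tracks the
 * implicit position, roughly:
 *
 *     if (u8Rec & PGM_STATE_REC_FLAG_ADDR)
 *     {
 *         SSMR3GetU8(pSSM, &idRange);
 *         SSMR3GetU32(pSSM, &iPage);
 *     }
 *     else
 *         iPage++;    // consecutive page in the same range
 */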


/**
 * Cleans up ROM pages after a live save.
 *
 * @param   pVM     The cross context VM structure.
 */
static void pgmR3DoneRomPages(PVM pVM)
{
    NOREF(pVM);
}


/**
 * Prepares the MMIO2 pages for a live save.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int pgmR3PrepMmio2Pages(PVM pVM)
{
    /*
     * Initialize the live save tracking in the MMIO2 ranges.
     * ASSUME nothing changes here.
     */
    pgmLock(pVM);
    for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
    {
        if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
        {
            uint32_t const cPages = pRegMmio->RamRange.cb >> PAGE_SHIFT;
            pgmUnlock(pVM);

            PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMLIVESAVEMMIO2PAGE) * cPages);
            if (!paLSPages)
                return VERR_NO_MEMORY;
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                /* Initialize it as a dirty zero page. */
                paLSPages[iPage].fDirty          = true;
                paLSPages[iPage].cUnchangedScans = 0;
                paLSPages[iPage].fZero           = true;
                paLSPages[iPage].u32CrcH1        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
                paLSPages[iPage].u32CrcH2        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
            }

            pgmLock(pVM);
            pRegMmio->paLSPages = paLSPages;
            pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages;
        }
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}


/**
 * Assigns IDs to the MMIO2 ranges and saves them.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    Saved state handle.
 */
static int pgmR3SaveMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
{
    pgmLock(pVM);
    uint8_t id = 1;
    for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
    {
        if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
        {
            pRegMmio->idSavedState = id;
            SSMR3PutU8(pSSM, id);
            SSMR3PutStrZ(pSSM, pRegMmio->pDevInsR3->pReg->szName);
            SSMR3PutU32(pSSM, pRegMmio->pDevInsR3->iInstance);
            SSMR3PutU8(pSSM, pRegMmio->iRegion);
            SSMR3PutStrZ(pSSM, pRegMmio->RamRange.pszDesc);
            int rc = SSMR3PutGCPhys(pSSM, pRegMmio->RamRange.cb);
            if (RT_FAILURE(rc))
                break;
            id++;
        }
    }
    pgmUnlock(pVM);
    return SSMR3PutU8(pSSM, UINT8_MAX);
}
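
/* Editor's note: the MMIO2 range records mirror the ROM ones above, except
 * that the device name, instance and region are real values (used for
 * matching on load) and no GCPhys is saved; only the size is written, and it
 * is checked against the current configuration in pgmR3LoadMmio2Ranges()
 * below. */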


/**
 * Loads the MMIO2 range ID assignments.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3LoadMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

    for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
        if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
            pRegMmio->idSavedState = UINT8_MAX;

    for (;;)
    {
        /*
         * Read the data.
         */
        uint8_t id;
        int rc = SSMR3GetU8(pSSM, &id);
        if (RT_FAILURE(rc))
            return rc;
        if (id == UINT8_MAX)
        {
            for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
                AssertLogRelMsg(   pRegMmio->idSavedState != UINT8_MAX
                                || !(pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2),
                                ("%s\n", pRegMmio->RamRange.pszDesc));
            return VINF_SUCCESS;        /* the end */
        }
        AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
        rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
        AssertLogRelRCReturn(rc, rc);

        uint32_t    uInstance;
        SSMR3GetU32(pSSM, &uInstance);
        uint8_t     iRegion;
        SSMR3GetU8(pSSM, &iRegion);

        char szDesc[64];
        rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
        AssertLogRelRCReturn(rc, rc);

        RTGCPHYS cb;
        rc = SSMR3GetGCPhys(pSSM, &cb);
        AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /*
         * Locate a matching MMIO2 range.
         */
        PPGMREGMMIORANGE pRegMmio;
        for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
        {
            if (    pRegMmio->idSavedState == UINT8_MAX
                &&  pRegMmio->iRegion == iRegion
                &&  pRegMmio->pDevInsR3->iInstance == uInstance
                &&  (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
                &&  !strcmp(pRegMmio->pDevInsR3->pReg->szName, szDevName))
            {
                pRegMmio->idSavedState = id;
                break;
            }
        }
        if (!pRegMmio)
            return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to locate a MMIO2 range called '%s' owned by %s/%u, region %d"),
                                    szDesc, szDevName, uInstance, iRegion);

        /*
         * Validate the configuration, the size of the MMIO2 region should be
         * the same.
         */
        if (cb != pRegMmio->RamRange.cb)
        {
            LogRel(("PGM: MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp\n",
                    pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb));
            if (cb > pRegMmio->RamRange.cb) /* bad idea? */
                return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp"),
                                        pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb);
        }
    } /* forever */
}


/**
 * Scans one MMIO2 page.
 *
 * @returns True if changed, false if unchanged.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pbPage      The page bits.
 * @param   pLSPage     The live save tracking structure for the page.
 */
DECLINLINE(bool) pgmR3ScanMmio2Page(PVM pVM, uint8_t const *pbPage, PPGMLIVESAVEMMIO2PAGE pLSPage)
{
    /*
     * Special handling of zero pages.
     */
    bool const fZero = pLSPage->fZero;
    if (fZero)
    {
        if (ASMMemIsZeroPage(pbPage))
        {
            /* Not modified. */
            if (pLSPage->fDirty)
                pLSPage->cUnchangedScans++;
            return false;
        }

        pLSPage->fZero    = false;
        pLSPage->u32CrcH1 = RTCrc32(pbPage, PAGE_SIZE / 2);
    }
    else
    {
        /*
         * CRC the first half, if it doesn't match the page is dirty and
         * we won't check the 2nd half (we'll do that next time).
         */
        uint32_t u32CrcH1 = RTCrc32(pbPage, PAGE_SIZE / 2);
        if (u32CrcH1 == pLSPage->u32CrcH1)
        {
            uint32_t u32CrcH2 = RTCrc32(pbPage + PAGE_SIZE / 2, PAGE_SIZE / 2);
            if (u32CrcH2 == pLSPage->u32CrcH2)
            {
                /* Probably not modified. */
                if (pLSPage->fDirty)
                    pLSPage->cUnchangedScans++;
                return false;
            }

            pLSPage->u32CrcH2 = u32CrcH2;
        }
        else
        {
            pLSPage->u32CrcH1 = u32CrcH1;
            if (    u32CrcH1 == PGM_STATE_CRC32_ZERO_HALF_PAGE
                &&  ASMMemIsZeroPage(pbPage))
            {
                pLSPage->u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
                pLSPage->fZero    = true;
            }
        }
    }

    /* dirty page path */
    pLSPage->cUnchangedScans = 0;
    if (!pLSPage->fDirty)
    {
        pLSPage->fDirty = true;
        pVM->pgm.s.LiveSave.Mmio2.cReadyPages--;
        pVM->pgm.s.LiveSave.Mmio2.cDirtyPages++;
        if (fZero)
            pVM->pgm.s.LiveSave.Mmio2.cZeroPages--;
    }
    return true;
}
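
/* Editor's note (interpretation of the code above, not original text): the
 * page is hashed in two PAGE_SIZE / 2 halves so that a mismatch in the first
 * half can declare the page dirty after hashing only half of it; the second
 * half is then only re-hashed on the next scan. Change detection by CRC is
 * probabilistic, hence the "Probably not modified" remark above, and why the
 * final save pass double-checks candidates with a SHA-1 digest. */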


/**
 * Scan for MMIO2 page modifications.
 *
 * @param   pVM     The cross context VM structure.
 * @param   uPass   The pass number.
 */
static void pgmR3ScanMmio2Pages(PVM pVM, uint32_t uPass)
{
    /*
     * Since this is a bit expensive we lower the scan rate after a little while.
     */
    if (    (   (uPass & 3) != 0
             && uPass > 10)
        ||  uPass == SSM_PASS_FINAL)
        return;

    pgmLock(pVM);                       /* paranoia */
    for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
        if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
        {
            PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
            uint32_t              cPages    = pRegMmio->RamRange.cb >> PAGE_SHIFT;
            pgmUnlock(pVM);

            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                uint8_t const *pbPage = (uint8_t const *)pRegMmio->pvR3 + iPage * PAGE_SIZE;
                pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]);
            }

            pgmLock(pVM);
        }
    pgmUnlock(pVM);
}


/**
 * Save quiescent MMIO2 pages.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   uPass       The pass number.
 */
static int pgmR3SaveMmio2Pages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
{
    /** @todo implement live saving of MMIO2 pages. (Need some way of telling the
     *        device that we wish to know about changes.) */

    int rc = VINF_SUCCESS;
    if (uPass == SSM_PASS_FINAL)
    {
        /*
         * The mop up round.
         */
        pgmLock(pVM);
        for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3;
             pRegMmio && RT_SUCCESS(rc);
             pRegMmio = pRegMmio->pNextR3)
            if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
            {
                PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
                uint8_t const        *pbPage    = (uint8_t const *)pRegMmio->RamRange.pvR3;
                uint32_t              cPages    = pRegMmio->RamRange.cb >> PAGE_SHIFT;
                uint32_t              iPageLast = cPages;
                for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
                {
                    uint8_t u8Type;
                    if (!fLiveSave)
                        u8Type = ASMMemIsZeroPage(pbPage) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                    else
                    {
                        /* Try to figure out if it's a clean page; compare the SHA-1 to be really sure. */
                        if (    !paLSPages[iPage].fDirty
                            &&  !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                        {
                            if (paLSPages[iPage].fZero)
                                continue;

                            uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
                            RTSha1(pbPage, PAGE_SIZE, abSha1Hash);
                            if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash)))
                                continue;
                        }
                        u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                        pVM->pgm.s.LiveSave.cSavedPages++;
                    }

                    if (iPage != 0 && iPage == iPageLast + 1)
                        rc = SSMR3PutU8(pSSM, u8Type);
                    else
                    {
                        SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRegMmio->idSavedState);
                        rc = SSMR3PutU32(pSSM, iPage);
                    }
                    if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                        rc = SSMR3PutMem(pSSM, pbPage, PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        break;
                    iPageLast = iPage;
                }
            }
        pgmUnlock(pVM);
    }
    /*
     * Reduce the rate after a little while since the current MMIO2 approach is
     * a bit expensive.
     * We position it two passes after the scan pass to avoid saving busy pages.
     */
    else if (   uPass <= 10
             || (uPass & 3) == 2)
    {
        pgmLock(pVM);
        for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3;
             pRegMmio && RT_SUCCESS(rc);
             pRegMmio = pRegMmio->pNextR3)
            if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
            {
                PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
                uint8_t const        *pbPage    = (uint8_t const *)pRegMmio->RamRange.pvR3;
                uint32_t              cPages    = pRegMmio->RamRange.cb >> PAGE_SHIFT;
                uint32_t              iPageLast = cPages;
                pgmUnlock(pVM);

                for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
                {
                    /* Skip clean pages and pages which haven't quiesced. */
                    if (!paLSPages[iPage].fDirty)
                        continue;
                    if (paLSPages[iPage].cUnchangedScans < 3)
                        continue;
                    if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                        continue;

                    /* Save it. */
                    bool const fZero = paLSPages[iPage].fZero;
                    uint8_t abPage[PAGE_SIZE];
                    if (!fZero)
                    {
                        memcpy(abPage, pbPage, PAGE_SIZE);
                        RTSha1(abPage, PAGE_SIZE, paLSPages[iPage].abSha1Saved);
                    }

                    uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                    if (iPage != 0 && iPage == iPageLast + 1)
                        rc = SSMR3PutU8(pSSM, u8Type);
                    else
                    {
                        SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRegMmio->idSavedState);
                        rc = SSMR3PutU32(pSSM, iPage);
                    }
                    if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                        rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        break;

                    /* Housekeeping. */
                    paLSPages[iPage].fDirty = false;
                    pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--;
                    pVM->pgm.s.LiveSave.Mmio2.cReadyPages++;
                    if (u8Type == PGM_STATE_REC_MMIO2_ZERO)
                        pVM->pgm.s.LiveSave.Mmio2.cZeroPages++;
                    pVM->pgm.s.LiveSave.cSavedPages++;
                    iPageLast = iPage;
                }

                pgmLock(pVM);
            }
        pgmUnlock(pVM);
    }

    return rc;
}
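
/* Editor's note (reading of the pass scheduling above, not original text):
 * pgmR3ScanMmio2Pages() runs on passes 0..10 and thereafter on every pass
 * with (uPass & 3) == 0, while the saver above runs on passes 0..10 and
 * thereafter when (uPass & 3) == 2, i.e. two passes after a scan. Combined
 * with the cUnchangedScans >= 3 requirement this gives a page several quiet
 * scans before it is considered quiescent enough to save. */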


/**
 * Cleans up MMIO2 pages after a live save.
 *
 * @param   pVM     The cross context VM structure.
 */
static void pgmR3DoneMmio2Pages(PVM pVM)
{
    /*
     * Free the tracking structures for the MMIO2 pages.
     * We do the freeing outside the lock in case the VM is running.
     */
    pgmLock(pVM);
    for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
        if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
        {
            void *pvMmio2ToFree = pRegMmio->paLSPages;
            if (pvMmio2ToFree)
            {
                pRegMmio->paLSPages = NULL;
                pgmUnlock(pVM);
                MMR3HeapFree(pvMmio2ToFree);
                pgmLock(pVM);
            }
        }
    pgmUnlock(pVM);
}


/**
 * Prepares the RAM pages for a live save.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int pgmR3PrepRamPages(PVM pVM)
{

    /*
     * Try allocating tracking structures for the ram ranges.
     *
     * To avoid lock contention, we leave the lock every time we're allocating
     * a new array.  This means we'll have to ditch the allocation and start
     * all over again if the RAM range list changes in-between.
     *
     * Note! pgmR3SaveDone will always be called and it is therefore responsible
     *       for cleaning up.
     */
    PPGMRAMRANGE pCur;
    pgmLock(pVM);
    do
    {
        for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
        {
            if (   !pCur->paLSPages
                && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
                uint32_t const cPages = pCur->cb >> PAGE_SHIFT;
                pgmUnlock(pVM);
                PPGMLIVESAVERAMPAGE paLSPages = (PPGMLIVESAVERAMPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, cPages * sizeof(PGMLIVESAVERAMPAGE));
                if (!paLSPages)
                    return VERR_NO_MEMORY;
                pgmLock(pVM);
                if (pVM->pgm.s.idRamRangesGen != idRamRangesGen)
                {
                    pgmUnlock(pVM);
                    MMR3HeapFree(paLSPages);
                    pgmLock(pVM);
                    break;              /* try again */
                }
                pCur->paLSPages = paLSPages;

                /*
                 * Initialize the array.
                 */
                uint32_t iPage = cPages;
                while (iPage-- > 0)
                {
                    /** @todo yield critsect! (after moving this away from EMT0) */
                    PCPGMPAGE pPage = &pCur->aPages[iPage];
                    paLSPages[iPage].cDirtied               = 0;
                    paLSPages[iPage].fDirty                 = 1; /* everything is dirty at this time */
                    paLSPages[iPage].fWriteMonitored        = 0;
                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                    paLSPages[iPage].u2Reserved             = 0;
                    switch (PGM_PAGE_GET_TYPE(pPage))
                    {
                        case PGMPAGETYPE_RAM:
                            if (    PGM_PAGE_IS_ZERO(pPage)
                                ||  PGM_PAGE_IS_BALLOONED(pPage))
                            {
                                paLSPages[iPage].fZero   = 1;
                                paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc  = PGM_STATE_CRC32_ZERO_PAGE;
#endif
                            }
                            else if (PGM_PAGE_IS_SHARED(pPage))
                            {
                                paLSPages[iPage].fZero   = 0;
                                paLSPages[iPage].fShared = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            }
                            else
                            {
                                paLSPages[iPage].fZero   = 0;
                                paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            }
                            paLSPages[iPage].fIgnore = 0;
                            pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                            break;

                        case PGMPAGETYPE_ROM_SHADOW:
                        case PGMPAGETYPE_ROM:
                        {
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;
                        }

                        default:
                            AssertMsgFailed(("%R[pgmpage]", pPage));
                            RT_FALL_THRU();
                        case PGMPAGETYPE_MMIO2:
                        case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;

                        case PGMPAGETYPE_MMIO:
                        case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;
                    }
                }
            }
        }
    } while (pCur);
    pgmUnlock(pVM);

    return VINF_SUCCESS;
}


/**
 * Saves the RAM configuration.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3SaveRamConfig(PVM pVM, PSSMHANDLE pSSM)
{
    uint32_t cbRamHole = 0;
    int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
    AssertRCReturn(rc, rc);

    uint64_t cbRam = 0;
    rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
    AssertRCReturn(rc, rc);

    SSMR3PutU32(pSSM, cbRamHole);
    return SSMR3PutU64(pSSM, cbRam);
}
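
/* Editor's note: the RAM config record is thus just a uint32_t hole size
 * followed by a uint64_t RAM size, both taken from the CFGM tree; the loader
 * below reads them back in the same order and refuses the state on mismatch. */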


/**
 * Loads and verifies the RAM configuration.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3LoadRamConfig(PVM pVM, PSSMHANDLE pSSM)
{
    uint32_t cbRamHoleCfg = 0;
    int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHoleCfg, MM_RAM_HOLE_SIZE_DEFAULT);
    AssertRCReturn(rc, rc);

    uint64_t cbRamCfg = 0;
    rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRamCfg, 0);
    AssertRCReturn(rc, rc);

    uint32_t cbRamHoleSaved;
    SSMR3GetU32(pSSM, &cbRamHoleSaved);

    uint64_t cbRamSaved;
    rc = SSMR3GetU64(pSSM, &cbRamSaved);
    AssertRCReturn(rc, rc);

    if (    cbRamHoleCfg != cbRamHoleSaved
        ||  cbRamCfg != cbRamSaved)
        return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Ram config mismatch: saved=%RX64/%RX32 config=%RX64/%RX32 (RAM/Hole)"),
                                cbRamSaved, cbRamHoleSaved, cbRamCfg, cbRamHoleCfg);
    return VINF_SUCCESS;
}
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32

/**
 * Calculates the CRC-32 for a RAM page and updates the live save page tracking
 * info with it.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCur        The current RAM range.
 * @param   paLSPages   The current array of live save page tracking
 *                      structures.
 * @param   iPage       The page index.
 */
static void pgmR3StateCalcCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
{
    RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
    PGMPAGEMAPLOCK PgMpLck;
    void const *pvPage;
    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
    if (RT_SUCCESS(rc))
    {
        paLSPages[iPage].u32Crc = RTCrc32(pvPage, PAGE_SIZE);
        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    }
    else
        paLSPages[iPage].u32Crc = UINT32_MAX; /* Invalid */
}


/**
 * Verifies the CRC-32 for a page given its raw bits.
 *
 * @param   pvPage      The page bits.
 * @param   pCur        The current RAM range.
 * @param   paLSPages   The current array of live save page tracking
 *                      structures.
 * @param   iPage       The page index.
 * @param   pszWhere    Where this is called from, for the assertion message.
 */
static void pgmR3StateVerifyCrc32ForPage(void const *pvPage, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
{
    if (paLSPages[iPage].u32Crc != UINT32_MAX)
    {
        uint32_t u32Crc = RTCrc32(pvPage, PAGE_SIZE);
        Assert(   (   !PGM_PAGE_IS_ZERO(&pCur->aPages[iPage])
                   && !PGM_PAGE_IS_BALLOONED(&pCur->aPages[iPage]))
               || u32Crc == PGM_STATE_CRC32_ZERO_PAGE);
        AssertMsg(paLSPages[iPage].u32Crc == u32Crc,
                  ("%08x != %08x for %RGp %R[pgmpage] %s\n", paLSPages[iPage].u32Crc, u32Crc,
                   pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pCur->aPages[iPage], pszWhere));
    }
}


/**
 * Verifies the CRC-32 for a RAM page.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCur        The current RAM range.
 * @param   paLSPages   The current array of live save page tracking
 *                      structures.
 * @param   iPage       The page index.
 * @param   pszWhere    Where this is called from, for the assertion message.
 */
static void pgmR3StateVerifyCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
{
    if (paLSPages[iPage].u32Crc != UINT32_MAX)
    {
        RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
        PGMPAGEMAPLOCK PgMpLck;
        void const *pvPage;
        int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
        if (RT_SUCCESS(rc))
        {
            pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage, pszWhere);
            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
        }
    }
}

#endif /* PGMLIVESAVERAMPAGE_WITH_CRC32 */

/**
 * Scan for RAM page modifications and reprotect them.
 *
 * @param   pVM         The cross context VM structure.
 * @param   fFinalPass  Whether this is the final pass or not.
 */
static void pgmR3ScanRamPages(PVM pVM, bool fFinalPass)
{
    /*
     * The RAM.
     */
    RTGCPHYS GCPhysCur = 0;
    PPGMRAMRANGE pCur;
    pgmLock(pVM);
    do
    {
        uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
        for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
        {
            if (    pCur->GCPhysLast > GCPhysCur
                &&  !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
                uint32_t    cPages = pCur->cb >> PAGE_SHIFT;
                uint32_t    iPage  = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
                GCPhysCur = 0;
                for (; iPage < cPages; iPage++)
                {
                    /* Do yield first. */
                    if (    !fFinalPass
#ifndef PGMLIVESAVERAMPAGE_WITH_CRC32
                        &&  (iPage & 0x7ff) == 0x100
#endif
                        &&  PDMR3CritSectYield(&pVM->pgm.s.CritSectX)
                        &&  pVM->pgm.s.idRamRangesGen != idRamRangesGen)
                    {
                        GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                        break; /* restart */
                    }

                    /* Skip already ignored pages. */
                    if (paLSPages[iPage].fIgnore)
                        continue;

                    if (RT_LIKELY(PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM))
                    {
                        /*
                         * A RAM page.
                         */
                        switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
                        {
                            case PGM_PAGE_STATE_ALLOCATED:
                                /** @todo Optimize this: Don't always re-enable write
                                 *        monitoring if the page is known to be very busy. */
                                if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
                                {
                                    AssertMsg(paLSPages[iPage].fWriteMonitored,
                                              ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pCur->aPages[iPage]));
                                    PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
                                    Assert(pVM->pgm.s.cWrittenToPages > 0);
                                    pVM->pgm.s.cWrittenToPages--;
                                }
                                else
                                {
                                    AssertMsg(!paLSPages[iPage].fWriteMonitored,
                                              ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pCur->aPages[iPage]));
                                    pVM->pgm.s.LiveSave.Ram.cMonitoredPages++;
                                }

                                if (!paLSPages[iPage].fDirty)
                                {
                                    pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                    if (paLSPages[iPage].fZero)
                                        pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                                    pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
                                        paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
                                }

                                pgmPhysPageWriteMonitor(pVM, &pCur->aPages[iPage],
                                                        pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
                                paLSPages[iPage].fWriteMonitored        = 1;
                                paLSPages[iPage].fWriteMonitoredJustNow = 1;
                                paLSPages[iPage].fDirty                 = 1;
                                paLSPages[iPage].fZero                  = 0;
                                paLSPages[iPage].fShared                = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc                 = UINT32_MAX; /* invalid */
#endif
                                break;

                            case PGM_PAGE_STATE_WRITE_MONITORED:
                                Assert(paLSPages[iPage].fWriteMonitored);
                                if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) == 0)
                                {
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    if (paLSPages[iPage].fWriteMonitoredJustNow)
                                        pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
                                    else
                                        pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "scan");
#endif
                                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                                }
                                else
                                {
                                    paLSPages[iPage].fWriteMonitoredJustNow = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
#endif
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                        if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
                                            paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
                                    }
                                }
                                break;

                            case PGM_PAGE_STATE_ZERO:
                            case PGM_PAGE_STATE_BALLOONED:
                                if (!paLSPages[iPage].fZero)
                                {
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        paLSPages[iPage].fDirty = 1;
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    }
                                    paLSPages[iPage].fZero   = 1;
                                    paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    paLSPages[iPage].u32Crc  = PGM_STATE_CRC32_ZERO_PAGE;
#endif
                                }
                                break;

                            case PGM_PAGE_STATE_SHARED:
                                if (!paLSPages[iPage].fShared)
                                {
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        paLSPages[iPage].fDirty = 1;
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        if (paLSPages[iPage].fZero)
                                            pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    }
                                    paLSPages[iPage].fZero   = 0;
                                    paLSPages[iPage].fShared = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
#endif
                                }
                                break;
                        }
                    }
                    else
                    {
                        /*
                         * All other types => Ignore the page.
                         */
                        Assert(!paLSPages[iPage].fIgnore); /* skipped before switch */
                        paLSPages[iPage].fIgnore = 1;
                        if (paLSPages[iPage].fWriteMonitored)
                        {
                            /** @todo this doesn't hold water when we start monitoring MMIO2 and ROM shadow
                             *        pages! */
                            if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
                            {
                                AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
                                PGM_PAGE_SET_STATE(pVM, &pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
                                Assert(pVM->pgm.s.cMonitoredPages > 0);
                                pVM->pgm.s.cMonitoredPages--;
                            }
                            if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
                            {
                                PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
                                Assert(pVM->pgm.s.cWrittenToPages > 0);
                                pVM->pgm.s.cWrittenToPages--;
                            }
                            pVM->pgm.s.LiveSave.Ram.cMonitoredPages--;
                        }

                        /** @todo the counting doesn't quite work out here. fix later? */
                        if (paLSPages[iPage].fDirty)
                            pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
                        else
                        {
                            pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                            if (paLSPages[iPage].fZero)
                                pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                        }
                        pVM->pgm.s.LiveSave.cIgnoredPages++;
                    }
                } /* for each page in range */

                if (GCPhysCur != 0)
                    break; /* Yield + ramrange change */
                GCPhysCur = pCur->GCPhysLast;
            }
        } /* for each range */
    } while (pCur);
    pgmUnlock(pVM);
}


/**
 * Save quiescent RAM pages.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   uPass       The pass number.
 */
static int pgmR3SaveRamPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
{
    NOREF(fLiveSave);

    /*
     * The RAM.
     */
    RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
    RTGCPHYS GCPhysCur = 0;
    PPGMRAMRANGE pCur;
    bool fFTMDeltaSaveActive = FTMIsDeltaLoadSaveActive(pVM);

    pgmLock(pVM);
    do
    {
        uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
        for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
        {
            if (    pCur->GCPhysLast > GCPhysCur
                &&  !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
                uint32_t    cPages = pCur->cb >> PAGE_SHIFT;
                uint32_t    iPage  = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
                GCPhysCur = 0;
                for (; iPage < cPages; iPage++)
                {
                    /* Do yield first. */
                    if (    uPass != SSM_PASS_FINAL
                        &&  (iPage & 0x7ff) == 0x100
                        &&  PDMR3CritSectYield(&pVM->pgm.s.CritSectX)
                        &&  pVM->pgm.s.idRamRangesGen != idRamRangesGen)
                    {
                        GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                        break; /* restart */
                    }

                    PPGMPAGE pCurPage = &pCur->aPages[iPage];

                    /*
                     * Only save pages that haven't changed since last scan and are dirty.
                     */
                    if (    uPass != SSM_PASS_FINAL
                        &&  paLSPages)
                    {
                        if (!paLSPages[iPage].fDirty)
                            continue;
                        if (paLSPages[iPage].fWriteMonitoredJustNow)
                            continue;
                        if (paLSPages[iPage].fIgnore)
                            continue;
                        if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM) /* in case of recent remappings */
                            continue;
                        if (    PGM_PAGE_GET_STATE(pCurPage)
                            !=  (   paLSPages[iPage].fZero
                                 ?  PGM_PAGE_STATE_ZERO
                                 :  paLSPages[iPage].fShared
                                 ?  PGM_PAGE_STATE_SHARED
                                 :  PGM_PAGE_STATE_WRITE_MONITORED))
                            continue;
                        if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) > 0)
                            continue;
                    }
                    else
                    {
                        if (    paLSPages
                            &&  !paLSPages[iPage].fDirty
                            &&  !paLSPages[iPage].fIgnore)
                        {
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
                                pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#1");
#endif
                            continue;
                        }
                        if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
                            continue;
                    }

                    /*
                     * Do the saving outside the PGM critsect since SSM may block on I/O.
                     */
                    int         rc;
                    RTGCPHYS    GCPhys     = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                    bool        fZero      = PGM_PAGE_IS_ZERO(pCurPage);
                    bool        fBallooned = PGM_PAGE_IS_BALLOONED(pCurPage);
                    bool        fSkipped   = false;

                    if (!fZero && !fBallooned)
                    {
                        /*
                         * Copy the page and then save it outside the lock (since any
                         * SSM call may block).
                         */
                        uint8_t abPage[PAGE_SIZE];
                        PGMPAGEMAPLOCK PgMpLck;
                        void const *pvPage;
                        rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage, &PgMpLck);
                        if (RT_SUCCESS(rc))
                        {
                            memcpy(abPage, pvPage, PAGE_SIZE);
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            if (paLSPages)
                                pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage, "save#3");
#endif
                            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                        }
                        pgmUnlock(pVM);
                        AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

                        /* Try to save some memory when restoring. */
                        if (!ASMMemIsZeroPage(abPage)) /* check the local copy; the pvPage mapping was released above */
                        {
                            if (fFTMDeltaSaveActive)
                            {
                                if (    PGM_PAGE_IS_WRITTEN_TO(pCurPage)
                                    ||  PGM_PAGE_IS_FT_DIRTY(pCurPage))
                                {
                                    if (GCPhys == GCPhysLast + PAGE_SIZE)
                                        SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
                                    else
                                    {
                                        SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
                                        SSMR3PutGCPhys(pSSM, GCPhys);
                                    }
                                    rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                                    PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pCurPage);
                                    PGM_PAGE_CLEAR_FT_DIRTY(pCurPage);
                                }
                                /* else nothing changed, so skip it. */
                                else
                                    fSkipped = true;
                            }
                            else
                            {
                                if (GCPhys == GCPhysLast + PAGE_SIZE)
                                    SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
                                else
                                {
                                    SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
                                    SSMR3PutGCPhys(pSSM, GCPhys);
                                }
                                rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                            }
                        }
                        else
                        {
                            if (GCPhys == GCPhysLast + PAGE_SIZE)
                                rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO);
                            else
                            {
                                SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO | PGM_STATE_REC_FLAG_ADDR);
                                rc = SSMR3PutGCPhys(pSSM, GCPhys);
                            }
                        }
                    }
                    else
                    {
                        /*
                         * Dirty zero or ballooned page.
                         */
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                        if (paLSPages)
                            pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#2");
#endif
                        pgmUnlock(pVM);

                        uint8_t u8RecType = fBallooned ? PGM_STATE_REC_RAM_BALLOONED : PGM_STATE_REC_RAM_ZERO;
                        if (GCPhys == GCPhysLast + PAGE_SIZE)
                            rc = SSMR3PutU8(pSSM, u8RecType);
                        else
                        {
                            SSMR3PutU8(pSSM, u8RecType | PGM_STATE_REC_FLAG_ADDR);
                            rc = SSMR3PutGCPhys(pSSM, GCPhys);
                        }
                    }
                    if (RT_FAILURE(rc))
                        return rc;

                    pgmLock(pVM);
                    if (!fSkipped)
                        GCPhysLast = GCPhys;
                    if (paLSPages)
                    {
                        paLSPages[iPage].fDirty = 0;
                        pVM->pgm.s.LiveSave.Ram.cReadyPages++;
                        if (fZero)
                            pVM->pgm.s.LiveSave.Ram.cZeroPages++;
                        pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
                        pVM->pgm.s.LiveSave.cSavedPages++;
                    }
                    if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
                    {
                        GCPhysCur = GCPhys | PAGE_OFFSET_MASK;
                        break; /* restart */
                    }

                } /* for each page in range */

                if (GCPhysCur != 0)
                    break; /* Yield + ramrange change */
                GCPhysCur = pCur->GCPhysLast;
            }
        } /* for each range */
    } while (pCur);

    pgmUnlock(pVM);

    return VINF_SUCCESS;
}


/**
 * Cleans up RAM pages after a live save.
 *
 * @param   pVM     The cross context VM structure.
 */
static void pgmR3DoneRamPages(PVM pVM)
{
    /*
     * Free the tracking arrays and disable write monitoring.
     *
     * Play nice with the PGM lock in case we're called while the VM is still
     * running.  This means we have to delay the freeing since we wish to use
     * paLSPages as an indicator of which RAM ranges we need to scan for
     * write monitored pages.
     */
    void *pvToFree = NULL;
    PPGMRAMRANGE pCur;
    uint32_t cMonitoredPages = 0;
    pgmLock(pVM);
    do
    {
        for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
        {
            if (pCur->paLSPages)
            {
                if (pvToFree)
                {
                    uint32_t idRamRangesGen = pVM->pgm.s.idRamRangesGen;
                    pgmUnlock(pVM);
                    MMR3HeapFree(pvToFree);
                    pvToFree = NULL;
                    pgmLock(pVM);
                    if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
                        break;          /* start over again. */
                }

                pvToFree = pCur->paLSPages;
                pCur->paLSPages = NULL;

                uint32_t iPage = pCur->cb >> PAGE_SHIFT;
                while (iPage--)
                {
                    PPGMPAGE pPage = &pCur->aPages[iPage];
                    PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
                    if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
                    {
                        PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
                        cMonitoredPages++;
                    }
                }
            }
        }
    } while (pCur);

    Assert(pVM->pgm.s.cMonitoredPages >= cMonitoredPages);
    if (pVM->pgm.s.cMonitoredPages < cMonitoredPages)
        pVM->pgm.s.cMonitoredPages = 0;
    else
        pVM->pgm.s.cMonitoredPages -= cMonitoredPages;

    pgmUnlock(pVM);

    MMR3HeapFree(pvToFree);
    pvToFree = NULL;
}
1841
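/*
 * Generic, self-contained sketch of the defer-free-and-restart pattern used
 * by pgmR3DoneRamPages above: never call the heap while holding the lock,
 * and restart the list walk if the generation counter changed while the lock
 * was dropped. The EXNODE list and the pthread lock are illustrative
 * stand-ins, not the PGM structures.
 */
#include <stdlib.h>
#include <stdint.h>
#include <pthread.h>

typedef struct EXNODE { struct EXNODE *pNext; void *pvPayload; } EXNODE;

static void exFreeAllPayloads(pthread_mutex_t *pLock, EXNODE *pHead, uint32_t const volatile *puGen)
{
    void *pvToFree = NULL;
    EXNODE *pCur;
    pthread_mutex_lock(pLock);
    do
    {
        for (pCur = pHead; pCur; pCur = pCur->pNext)
            if (pCur->pvPayload)
            {
                if (pvToFree)
                {
                    uint32_t const uGen = *puGen;
                    pthread_mutex_unlock(pLock); /* free outside the lock */
                    free(pvToFree);
                    pvToFree = NULL;
                    pthread_mutex_lock(pLock);
                    if (uGen != *puGen)
                        break;                   /* list changed; start over */
                }
                pvToFree = pCur->pvPayload;      /* defer to the next unlock point */
                pCur->pvPayload = NULL;
            }
    } while (pCur);
    pthread_mutex_unlock(pLock);
    free(pvToFree);                              /* the last deferred payload */
}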
1842
1843/**
1844 * @callback_method_impl{FNSSMINTLIVEEXEC}
1845 */
1846static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1847{
1848 int rc;
1849
1850 /*
1851 * Save the MMIO2 and ROM range IDs in pass 0.
1852 */
1853 if (uPass == 0)
1854 {
1855 rc = pgmR3SaveRamConfig(pVM, pSSM);
1856 if (RT_FAILURE(rc))
1857 return rc;
1858 rc = pgmR3SaveRomRanges(pVM, pSSM);
1859 if (RT_FAILURE(rc))
1860 return rc;
1861 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
1862 if (RT_FAILURE(rc))
1863 return rc;
1864 }
1865 /*
1866 * Reset the page-per-second estimate to avoid inflation by the initial
1867 * load of zero pages. pgmR3LiveVote ASSUMES this is done at pass 7.
1868 */
1869 else if (uPass == 7)
1870 {
1871 pVM->pgm.s.LiveSave.cSavedPages = 0;
1872 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
1873 }
1874
1875 /*
1876 * Do the scanning.
1877 */
1878 pgmR3ScanRomPages(pVM);
1879 pgmR3ScanMmio2Pages(pVM, uPass);
1880 pgmR3ScanRamPages(pVM, false /*fFinalPass*/);
1881 pgmR3PoolClearAll(pVM, true /*fFlushRemTlb*/); /** @todo this could perhaps be optimized a bit. */
1882
1883 /*
1884 * Save the pages.
1885 */
1886 if (uPass == 0)
1887 rc = pgmR3SaveRomVirginPages( pVM, pSSM, true /*fLiveSave*/);
1888 else
1889 rc = VINF_SUCCESS;
1890 if (RT_SUCCESS(rc))
1891 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, true /*fLiveSave*/, false /*fFinalPass*/);
1892 if (RT_SUCCESS(rc))
1893 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, uPass);
1894 if (RT_SUCCESS(rc))
1895 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, uPass);
1896 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
1897
1898 return rc;
1899}
1900
1901
1902/**
1903 * @callback_method_impl{FNSSMINTLIVEVOTE}
1904 */
1905static DECLCALLBACK(int) pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1906{
1907 /*
1908 * Update and calculate parameters used in the decision making.
1909 */
1910 const uint32_t cHistoryEntries = RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory);
1911
1912 /* update history. */
1913 pgmLock(pVM);
1914 uint32_t const cWrittenToPages = pVM->pgm.s.cWrittenToPages;
1915 pgmUnlock(pVM);
1916 uint32_t const cDirtyNow = pVM->pgm.s.LiveSave.Rom.cDirtyPages
1917 + pVM->pgm.s.LiveSave.Mmio2.cDirtyPages
1918 + pVM->pgm.s.LiveSave.Ram.cDirtyPages
1919 + cWrittenToPages;
1920 uint32_t i = pVM->pgm.s.LiveSave.iDirtyPagesHistory;
1921 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = cDirtyNow;
1922 pVM->pgm.s.LiveSave.iDirtyPagesHistory = (i + 1) % cHistoryEntries;
1923
1924 /* calc shortterm average (4 passes). */
1925 AssertCompile(RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory) > 4);
1926 uint64_t cTotal = pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1927 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 1) % cHistoryEntries];
1928 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 2) % cHistoryEntries];
1929 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 3) % cHistoryEntries];
1930 uint32_t const cDirtyPagesShort = cTotal / 4;
1931 pVM->pgm.s.LiveSave.cDirtyPagesShort = cDirtyPagesShort;
1932
1933 /* calc longterm average. */
1934 cTotal = 0;
1935 if (uPass < cHistoryEntries)
1936 for (i = 0; i < cHistoryEntries && i <= uPass; i++)
1937 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1938 else
1939 for (i = 0; i < cHistoryEntries; i++)
1940 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1941 uint32_t const cDirtyPagesLong = cTotal / cHistoryEntries;
1942 pVM->pgm.s.LiveSave.cDirtyPagesLong = cDirtyPagesLong;
1943
1944 /* estimate the speed */
1945 uint64_t cNsElapsed = RTTimeNanoTS() - pVM->pgm.s.LiveSave.uSaveStartNS;
1946 uint32_t cPagesPerSecond = (uint32_t)( pVM->pgm.s.LiveSave.cSavedPages
1947 / ((long double)cNsElapsed / 1000000000.0) );
1948 pVM->pgm.s.LiveSave.cPagesPerSecond = cPagesPerSecond;
1949
1950 /*
1951 * Try to make a decision.
1952 */
1953 if ( cDirtyPagesShort <= cDirtyPagesLong
1954 && ( cDirtyNow <= cDirtyPagesShort
1955 || cDirtyNow - cDirtyPagesShort < RT_MIN(cDirtyPagesShort / 8, 16)
1956 )
1957 )
1958 {
1959 if (uPass > 10)
1960 {
1961 uint32_t cMsLeftShort = (uint32_t)(cDirtyPagesShort / (long double)cPagesPerSecond * 1000.0);
1962 uint32_t cMsLeftLong = (uint32_t)(cDirtyPagesLong / (long double)cPagesPerSecond * 1000.0);
1963 uint32_t cMsMaxDowntime = SSMR3HandleMaxDowntime(pSSM);
1964 if (cMsMaxDowntime < 32)
1965 cMsMaxDowntime = 32;
1966 if ( ( cMsLeftLong <= cMsMaxDowntime
1967 && cMsLeftShort < cMsMaxDowntime)
1968 || cMsLeftShort < cMsMaxDowntime / 2
1969 )
1970 {
1971 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u|%ums cDirtyPagesLong=%u|%ums cMsMaxDowntime=%u\n",
1972 uPass, cDirtyPagesShort, cMsLeftShort, cDirtyPagesLong, cMsLeftLong, cMsMaxDowntime));
1973 return VINF_SUCCESS;
1974 }
1975 }
1976 else
1977 {
1978 if ( ( cDirtyPagesShort <= 128
1979 && cDirtyPagesLong <= 1024)
1980 || cDirtyPagesLong <= 256
1981 )
1982 {
1983 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u cDirtyPagesLong=%u\n", uPass, cDirtyPagesShort, cDirtyPagesLong));
1984 return VINF_SUCCESS;
1985 }
1986 }
1987 }
1988
1989 /*
1990 * Come up with a completion percentage. Currently this is a simple
1991 * dirty page (long term) vs. total pages ratio + some pass trickery.
1992 */
1993 unsigned uPctDirty = (unsigned)( (long double)cDirtyPagesLong * 100
1994 / (pVM->pgm.s.cAllPages - pVM->pgm.s.LiveSave.cIgnoredPages - pVM->pgm.s.cZeroPages) );
1995 if (uPctDirty <= 100)
1996 SSMR3HandleReportLivePercent(pSSM, RT_MIN(100 - uPctDirty, uPass * 2));
1997 else
1998 AssertMsgFailed(("uPctDirty=%u cDirtyPagesLong=%#x cAllPages=%#x cIgnoredPages=%#x cZeroPages=%#x\n",
1999 uPctDirty, cDirtyPagesLong, pVM->pgm.s.cAllPages, pVM->pgm.s.LiveSave.cIgnoredPages, pVM->pgm.s.cZeroPages));
2000
2001 return VINF_SSM_VOTE_FOR_ANOTHER_PASS;
2002}
2003
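/*
 * Self-contained sketch of the dirty page history bookkeeping in
 * pgmR3LiveVote above: samples go into a small ring buffer, the short-term
 * average covers the last four passes and the long-term average the whole
 * buffer. (The real code additionally limits the long-term average to the
 * number of passes completed so far.) The names and buffer size are
 * illustrative, not the real PGM fields.
 */
#include <stdint.h>

#define EX_HISTORY_ENTRIES 8 /* assumed size; must be > 4 like the real array */

typedef struct EXHISTORY
{
    uint32_t acDirtyPages[EX_HISTORY_ENTRIES];
    uint32_t iNext; /* slot for the next sample */
} EXHISTORY;

static void exAddDirtySample(EXHISTORY *pHist, uint32_t cDirtyNow,
                             uint32_t *pcShortAvg, uint32_t *pcLongAvg)
{
    uint32_t const i = pHist->iNext;
    pHist->acDirtyPages[i] = cDirtyNow;
    pHist->iNext = (i + 1) % EX_HISTORY_ENTRIES;

    /* Short-term: this sample plus the three preceding ones. */
    uint64_t cTotal = 0;
    for (uint32_t j = 0; j < 4; j++)
        cTotal += pHist->acDirtyPages[(i + EX_HISTORY_ENTRIES - j) % EX_HISTORY_ENTRIES];
    *pcShortAvg = (uint32_t)(cTotal / 4);

    /* Long-term: everything in the buffer. */
    cTotal = 0;
    for (uint32_t j = 0; j < EX_HISTORY_ENTRIES; j++)
        cTotal += pHist->acDirtyPages[j];
    *pcLongAvg = (uint32_t)(cTotal / EX_HISTORY_ENTRIES);
}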
2004
2005/**
2006 * @callback_method_impl{FNSSMINTLIVEPREP}
2007 *
2008 * This will attempt to allocate and initialize the tracking structures. It
2009 * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
2010 * pgmR3SaveDone will do the cleanups.
2011 */
2012static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
2013{
2014 /*
2015 * Indicate that we will be using the write monitoring.
2016 */
2017 pgmLock(pVM);
2018 /** @todo find a way of mediating this when more users are added. */
2019 if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
2020 {
2021 pgmUnlock(pVM);
2022 AssertLogRelFailedReturn(VERR_PGM_WRITE_MONITOR_ENGAGED);
2023 }
2024 pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
2025 pgmUnlock(pVM);
2026
2027 /*
2028 * Initialize the statistics.
2029 */
2030 pVM->pgm.s.LiveSave.Rom.cReadyPages = 0;
2031 pVM->pgm.s.LiveSave.Rom.cDirtyPages = 0;
2032 pVM->pgm.s.LiveSave.Mmio2.cReadyPages = 0;
2033 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages = 0;
2034 pVM->pgm.s.LiveSave.Ram.cReadyPages = 0;
2035 pVM->pgm.s.LiveSave.Ram.cDirtyPages = 0;
2036 pVM->pgm.s.LiveSave.cIgnoredPages = 0;
2037 pVM->pgm.s.LiveSave.fActive = true;
2038 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory); i++)
2039 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = UINT32_MAX / 2;
2040 pVM->pgm.s.LiveSave.iDirtyPagesHistory = 0;
2041 pVM->pgm.s.LiveSave.cSavedPages = 0;
2042 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
2043 pVM->pgm.s.LiveSave.cPagesPerSecond = 8192;
2044
2045 /*
2046 * Per page type.
2047 */
2048 int rc = pgmR3PrepRomPages(pVM);
2049 if (RT_SUCCESS(rc))
2050 rc = pgmR3PrepMmio2Pages(pVM);
2051 if (RT_SUCCESS(rc))
2052 rc = pgmR3PrepRamPages(pVM);
2053
2054 NOREF(pSSM);
2055 return rc;
2056}
2057
2058
2059/**
2060 * @callback_method_impl{FNSSMINTSAVEEXEC}
2061 */
2062static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
2063{
2064 int rc = VINF_SUCCESS;
2065 PPGM pPGM = &pVM->pgm.s;
2066
2067 /*
2068 * Lock PGM and set the no-more-writes indicator.
2069 */
2070 pgmLock(pVM);
2071 pVM->pgm.s.fNoMorePhysWrites = true;
2072
2073 /*
2074 * Save basic data (required / unaffected by relocation).
2075 */
2076 bool const fMappingsFixed = pVM->pgm.s.fMappingsFixed;
2077 pVM->pgm.s.fMappingsFixed |= pVM->pgm.s.fMappingsFixedRestored;
2078 SSMR3PutStruct(pSSM, pPGM, &s_aPGMFields[0]);
2079 pVM->pgm.s.fMappingsFixed = fMappingsFixed;
2080
2081 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2082 rc = SSMR3PutStruct(pSSM, &pVM->aCpus[idCpu].pgm.s, &s_aPGMCpuFields[0]);
2083
2084 /*
2085 * Save the (remainder of the) memory.
2086 */
2087 if (RT_SUCCESS(rc))
2088 {
2089 if (pVM->pgm.s.LiveSave.fActive)
2090 {
2091 pgmR3ScanRomPages(pVM);
2092 pgmR3ScanMmio2Pages(pVM, SSM_PASS_FINAL);
2093 pgmR3ScanRamPages(pVM, true /*fFinalPass*/);
2094
2095 rc = pgmR3SaveShadowedRomPages( pVM, pSSM, true /*fLiveSave*/, true /*fFinalPass*/);
2096 if (RT_SUCCESS(rc))
2097 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2098 if (RT_SUCCESS(rc))
2099 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2100 }
2101 else
2102 {
2103 rc = pgmR3SaveRamConfig(pVM, pSSM);
2104 if (RT_SUCCESS(rc))
2105 rc = pgmR3SaveRomRanges(pVM, pSSM);
2106 if (RT_SUCCESS(rc))
2107 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
2108 if (RT_SUCCESS(rc))
2109 rc = pgmR3SaveRomVirginPages( pVM, pSSM, false /*fLiveSave*/);
2110 if (RT_SUCCESS(rc))
2111 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, false /*fLiveSave*/, true /*fFinalPass*/);
2112 if (RT_SUCCESS(rc))
2113 rc = pgmR3SaveMmio2Pages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2114 if (RT_SUCCESS(rc))
2115 rc = pgmR3SaveRamPages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2116 }
2117 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
2118 }
2119
2120 pgmUnlock(pVM);
2121 return rc;
2122}
2123
2124
2125/**
2126 * @callback_method_impl{FNSSMINTSAVEDONE}
2127 */
2128static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
2129{
2130 /*
2131 * Do per page type cleanups first.
2132 */
2133 if (pVM->pgm.s.LiveSave.fActive)
2134 {
2135 pgmR3DoneRomPages(pVM);
2136 pgmR3DoneMmio2Pages(pVM);
2137 pgmR3DoneRamPages(pVM);
2138 }
2139
2140 /*
2141 * Clear the live save indicator and disengage write monitoring.
2142 */
2143 pgmLock(pVM);
2144 pVM->pgm.s.LiveSave.fActive = false;
2145 /** @todo this is blindly assuming that we're the only user of write
2146 * monitoring. Fix this when more users are added. */
2147 pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
2148 pgmUnlock(pVM);
2149
2150 NOREF(pSSM);
2151 return VINF_SUCCESS;
2152}
2153
2154
2155/**
2156 * @callback_method_impl{FNSSMINTLOADPREP}
2157 */
2158static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
2159{
2160 /*
2161 * Call the reset function to make sure all the memory is cleared.
2162 */
2163 PGMR3Reset(pVM);
2164 pVM->pgm.s.LiveSave.fActive = false;
2165 NOREF(pSSM);
2166 return VINF_SUCCESS;
2167}
2168
2169
2170/**
2171 * Load an ignored page.
2172 *
2173 * @returns VBox status code.
2174 * @param pSSM The saved state handle.
2175 */
2176static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
2177{
2178 uint8_t abPage[PAGE_SIZE];
2179 return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
2180}
2181
2182
2183/**
2184 * Compares a page with an old save type value.
2185 *
2186 * @returns true if equal, false if not.
2187 * @param pPage The page to compare.
2188 * @param uOldType The old type value from the saved state.
2189 */
2190DECLINLINE(bool) pgmR3CompareNewAndOldPageTypes(PPGMPAGE pPage, uint8_t uOldType)
2191{
2192 uint8_t uOldPageType;
2193 switch (PGM_PAGE_GET_TYPE(pPage))
2194 {
2195 case PGMPAGETYPE_INVALID: uOldPageType = PGMPAGETYPE_OLD_INVALID; break;
2196 case PGMPAGETYPE_RAM: uOldPageType = PGMPAGETYPE_OLD_RAM; break;
2197 case PGMPAGETYPE_MMIO2: uOldPageType = PGMPAGETYPE_OLD_MMIO2; break;
2198 case PGMPAGETYPE_MMIO2_ALIAS_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO; break;
2199 case PGMPAGETYPE_ROM_SHADOW: uOldPageType = PGMPAGETYPE_OLD_ROM_SHADOW; break;
2200 case PGMPAGETYPE_ROM: uOldPageType = PGMPAGETYPE_OLD_ROM; break;
2201 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: RT_FALL_THRU();
2202 case PGMPAGETYPE_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO; break;
2203 default:
2204 AssertFailed();
2205 uOldPageType = PGMPAGETYPE_OLD_INVALID;
2206 break;
2207 }
2208 return uOldPageType == uOldType;
2209}
2210
2211
2212/**
2213 * Loads a page without any bits in the saved state, i.e. making sure it's
2214 * really zero.
2215 *
2216 * @returns VBox status code.
2217 * @param pVM The cross context VM structure.
2218 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2219 * state).
2220 * @param pPage The guest page tracking structure.
2221 * @param GCPhys The page address.
2222 * @param pRam The ram range (logging).
2223 */
2224static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2225{
2226 if ( uOldType != PGMPAGETYPE_OLD_INVALID
2227 && !pgmR3CompareNewAndOldPageTypes(pPage, uOldType))
2228 return VERR_SSM_UNEXPECTED_DATA;
2229
2230 /* I think this should be sufficient. */
2231 if ( !PGM_PAGE_IS_ZERO(pPage)
2232 && !PGM_PAGE_IS_BALLOONED(pPage))
2233 return VERR_SSM_UNEXPECTED_DATA;
2234
2235 NOREF(pVM);
2236 NOREF(GCPhys);
2237 NOREF(pRam);
2238 return VINF_SUCCESS;
2239}
2240
2241
2242/**
2243 * Loads a page from the saved state.
2244 *
2245 * @returns VBox status code.
2246 * @param pVM The cross context VM structure.
2247 * @param pSSM The SSM handle.
2248 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2249 * state).
2250 * @param pPage The guest page tracking structure.
2251 * @param GCPhys The page address.
2252 * @param pRam The ram range (logging).
2253 */
2254static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2255{
2256 /*
2257 * Match up the type, dealing with MMIO2 aliases (dropped).
2258 */
2259 AssertLogRelMsgReturn( uOldType == PGMPAGETYPE_INVALID
2260 || pgmR3CompareNewAndOldPageTypes(pPage, uOldType)
2261 /* kludge for the expanded PXE bios (r67885) - @bugref{5687}: */
2262 || ( uOldType == PGMPAGETYPE_OLD_RAM
2263 && GCPhys >= 0xed000
2264 && GCPhys <= 0xeffff
2265 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM)
2266 ,
2267 ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
2268 VERR_SSM_UNEXPECTED_DATA);
2269
2270 /*
2271 * Load the page.
2272 */
2273 PGMPAGEMAPLOCK PgMpLck;
2274 void *pvPage;
2275 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
2276 if (RT_SUCCESS(rc))
2277 {
2278 rc = SSMR3GetMem(pSSM, pvPage, PAGE_SIZE);
2279 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2280 }
2281
2282 return rc;
2283}
2284
2285
2286/**
2287 * Loads a page (counterpart to pgmR3SavePage).
2288 *
2289 * @returns VBox status code, fully bitched errors.
2290 * @param pVM The cross context VM structure.
2291 * @param pSSM The SSM handle.
2292 * @param uOldType The page type.
2293 * @param pPage The page.
2294 * @param GCPhys The page address.
2295 * @param pRam The RAM range (for error messages).
2296 */
2297static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2298{
2299 uint8_t uState;
2300 int rc = SSMR3GetU8(pSSM, &uState);
2301 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
2302 if (uState == 0 /* zero */)
2303 rc = pgmR3LoadPageZeroOld(pVM, uOldType, pPage, GCPhys, pRam);
2304 else if (uState == 1)
2305 rc = pgmR3LoadPageBitsOld(pVM, pSSM, uOldType, pPage, GCPhys, pRam);
2306 else
2307 rc = VERR_PGM_INVALID_SAVED_PAGE_STATE;
2308 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uOldType=%d GCPhys=%RGp %s rc=%Rrc\n",
2309 pPage, uState, uOldType, GCPhys, pRam->pszDesc, rc),
2310 rc);
2311 return VINF_SUCCESS;
2312}
2313
2314
2315/**
2316 * Loads a shadowed ROM page.
2317 *
2318 * @returns VBox status code, errors are fully bitched.
2319 * @param pVM The cross context VM structure.
2320 * @param pSSM The saved state handle.
2321 * @param pPage The page.
2322 * @param GCPhys The page address.
2323 * @param pRam The RAM range (for error messages).
2324 */
2325static int pgmR3LoadShadowedRomPageOld(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2326{
2327 /*
2328 * Load and set the protection first, then load the two pages: the first
2329 * one is the active page, the other the passive one.
2330 */
2331 PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
2332 AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
2333
2334 uint8_t uProt;
2335 int rc = SSMR3GetU8(pSSM, &uProt);
2336 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
2337 PGMROMPROT enmProt = (PGMROMPROT)uProt;
2338 AssertLogRelMsgReturn( enmProt >= PGMROMPROT_INVALID
2339 && enmProt < PGMROMPROT_END,
2340 ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
2341 VERR_SSM_UNEXPECTED_DATA);
2342
2343 if (pRomPage->enmProt != enmProt)
2344 {
2345 rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
2346 AssertLogRelRCReturn(rc, rc);
2347 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
2348 }
2349
2350 PPGMPAGE pPageActive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
2351 PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
2352 uint8_t u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM : PGMPAGETYPE_ROM_SHADOW;
2353 uint8_t u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;
2354
2355 /** @todo this isn't entirely correct as long as pgmPhysGCPhys2CCPtrInternal is
2356 * used down the line (the 2nd page will be written to the first
2357 * one because of a false TLB hit since the TLB is using GCPhys and
2358 * doesn't check the HCPhys of the desired page). */
2359 rc = pgmR3LoadPageOld(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
2360 if (RT_SUCCESS(rc))
2361 {
2362 *pPageActive = *pPage;
2363 rc = pgmR3LoadPageOld(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
2364 }
2365 return rc;
2366}
2367
2368/**
2369 * Loads the RAM range flags and page bits for older versions of the saved state.
2370 *
2371 * @returns VBox status code.
2372 *
2373 * @param pVM The cross context VM structure.
2374 * @param pSSM The SSM handle.
2375 * @param uVersion The saved state version.
2376 */
2377static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2378{
2379 PPGM pPGM = &pVM->pgm.s;
2380
2381 /*
2382 * Ram range flags and bits.
2383 */
2384 uint32_t i = 0;
2385 for (PPGMRAMRANGE pRam = pPGM->pRamRangesXR3; ; pRam = pRam->pNextR3, i++)
2386 {
2387 /* Check the sequence number / separator. */
2388 uint32_t u32Sep;
2389 int rc = SSMR3GetU32(pSSM, &u32Sep);
2390 if (RT_FAILURE(rc))
2391 return rc;
2392 if (u32Sep == ~0U)
2393 break;
2394 if (u32Sep != i)
2395 {
2396 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2397 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2398 }
2399 AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2400
2401 /* Get the range details. */
2402 RTGCPHYS GCPhys;
2403 SSMR3GetGCPhys(pSSM, &GCPhys);
2404 RTGCPHYS GCPhysLast;
2405 SSMR3GetGCPhys(pSSM, &GCPhysLast);
2406 RTGCPHYS cb;
2407 SSMR3GetGCPhys(pSSM, &cb);
2408 uint8_t fHaveBits;
2409 rc = SSMR3GetU8(pSSM, &fHaveBits);
2410 if (RT_FAILURE(rc))
2411 return rc;
2412 if (fHaveBits & ~1)
2413 {
2414 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2415 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2416 }
2417 size_t cchDesc = 0;
2418 char szDesc[256];
2419 szDesc[0] = '\0';
2420 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2421 {
2422 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
2423 if (RT_FAILURE(rc))
2424 return rc;
2425 /* Since we've modified the description strings in r45878, only compare
2426 them if the saved state is more recent. */
2427 if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
2428 cchDesc = strlen(szDesc);
2429 }
2430
2431 /*
2432 * Match it up with the current range.
2433 *
2434 * Note there is a hack for dealing with the high BIOS mapping
2435 * in the old saved state format, which means we might not have
2436 * a 1:1 match on success.
2437 */
2438 if ( ( GCPhys != pRam->GCPhys
2439 || GCPhysLast != pRam->GCPhysLast
2440 || cb != pRam->cb
2441 || ( cchDesc
2442 && strcmp(szDesc, pRam->pszDesc)) )
2443 /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
2444 && ( uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
2445 || GCPhys != UINT32_C(0xfff80000)
2446 || GCPhysLast != UINT32_C(0xffffffff)
2447 || pRam->GCPhysLast != GCPhysLast
2448 || pRam->GCPhys < GCPhys
2449 || !fHaveBits)
2450 )
2451 {
2452 LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
2453 "State : %RGp-%RGp %RGp bytes %s %s\n",
2454 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc,
2455 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
2456 /*
2457 * If we're loading a state for debugging purposes, don't make a fuss if
2458 * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
2459 */
2460 if ( SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
2461 || GCPhys < 8 * _1M)
2462 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2463 N_("RAM range mismatch; saved={%RGp-%RGp %RGp bytes %s %s} config={%RGp-%RGp %RGp bytes %s %s}"),
2464 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc,
2465 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc);
2466
2467 AssertMsgFailed(("debug skipping not implemented, sorry\n"));
2468 continue;
2469 }
2470
2471 uint32_t cPages = (GCPhysLast - GCPhys + 1) >> PAGE_SHIFT;
2472 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2473 {
2474 /*
2475 * Load the pages one by one.
2476 */
2477 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2478 {
2479 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2480 PPGMPAGE pPage = &pRam->aPages[iPage];
2481 uint8_t uOldType;
2482 rc = SSMR3GetU8(pSSM, &uOldType);
2483 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
2484 if (uOldType == PGMPAGETYPE_OLD_ROM_SHADOW)
2485 rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
2486 else
2487 rc = pgmR3LoadPageOld(pVM, pSSM, uOldType, pPage, GCPhysPage, pRam);
2488 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2489 }
2490 }
2491 else
2492 {
2493 /*
2494 * Old format.
2495 */
2496
2497 /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
2498 The rest is generally irrelevant and wrong since the stuff has to match registrations. */
2499 uint32_t fFlags = 0;
2500 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2501 {
2502 uint16_t u16Flags;
2503 rc = SSMR3GetU16(pSSM, &u16Flags);
2504 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2505 fFlags |= u16Flags;
2506 }
2507
2508 /* Load the bits */
2509 if ( !fHaveBits
2510 && GCPhysLast < UINT32_C(0xe0000000))
2511 {
2512 /*
2513 * Dynamic chunks.
2514 */
2515 const uint32_t cPagesInChunk = (1*1024*1024) >> PAGE_SHIFT;
2516 AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
2517 ("cPages=%#x cPagesInChunk=%#x GCPhys=%RGp %s\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
2518 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2519
2520 for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
2521 {
2522 uint8_t fPresent;
2523 rc = SSMR3GetU8(pSSM, &fPresent);
2524 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2525 AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
2526 ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
2527 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2528
2529 for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
2530 {
2531 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2532 PPGMPAGE pPage = &pRam->aPages[iPage];
2533 if (fPresent)
2534 {
2535 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO
2536 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
2537 rc = pgmR3LoadPageToDevNullOld(pSSM);
2538 else
2539 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2540 }
2541 else
2542 rc = pgmR3LoadPageZeroOld(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2543 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2544 }
2545 }
2546 }
2547 else if (pRam->pvR3)
2548 {
2549 /*
2550 * MMIO2.
2551 */
2552 AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
2553 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2554 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2555 AssertLogRelMsgReturn(pRam->pvR3,
2556 ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
2557 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2558
2559 rc = SSMR3GetMem(pSSM, pRam->pvR3, pRam->cb);
2560 AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
2561 }
2562 else if (GCPhysLast < UINT32_C(0xfff80000))
2563 {
2564 /*
2565 * PCI MMIO, no pages saved.
2566 */
2567 }
2568 else
2569 {
2570 /*
2571 * Load the 0xfff80000..0xffffffff BIOS range.
2572 * It starts with X reserved pages that we have to skip over since
2573 * the RAMRANGE created by the new code won't include those.
2574 */
2575 AssertLogRelMsgReturn( !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
2576 && (fFlags & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
2577 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2578 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2579 AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
2580 ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
2581 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2582
2583 /* Skip wasted reserved pages before the ROM. */
2584 while (GCPhys < pRam->GCPhys)
2585 {
2586 rc = pgmR3LoadPageToDevNullOld(pSSM);
2587 GCPhys += PAGE_SIZE;
2588 }
2589
2590 /* Load the bios pages. */
2591 cPages = pRam->cb >> PAGE_SHIFT;
2592 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2593 {
2594 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2595 PPGMPAGE pPage = &pRam->aPages[iPage];
2596
2597 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
2598 ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, GCPhys),
2599 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2600 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
2601 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2602 }
2603 }
2604 }
2605 }
2606
2607 return VINF_SUCCESS;
2608}
2609
2610
2611/**
2612 * Worker for pgmR3Load and pgmR3LoadLocked.
2613 *
2614 * @returns VBox status code.
2615 *
2616 * @param pVM The cross context VM structure.
2617 * @param pSSM The SSM handle.
2618 * @param uVersion The PGM saved state unit version.
2619 * @param uPass The pass number.
2620 *
2621 * @todo This needs splitting up if more record types or code twists are
2622 * added...
2623 */
2624static int pgmR3LoadMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2625{
2626 NOREF(uPass);
2627
2628 /*
2629 * Process page records until we hit the terminator.
2630 */
2631 RTGCPHYS GCPhys = NIL_RTGCPHYS;
2632 PPGMRAMRANGE pRamHint = NULL;
2633 uint8_t id = UINT8_MAX;
2634 uint32_t iPage = UINT32_MAX - 10;
2635 PPGMROMRANGE pRom = NULL;
2636 PPGMREGMMIORANGE pRegMmio = NULL;
2637
2638 /*
2639 * We batch up pages that should be freed instead of calling GMM for
2640 * each and every one of them. Note that we'll lose the pages in most
2641 * failure paths - this should probably be addressed one day.
2642 */
2643 uint32_t cPendingPages = 0;
2644 PGMMFREEPAGESREQ pReq;
2645 int rc = GMMR3FreePagesPrepare(pVM, &pReq, 128 /* batch size */, GMMACCOUNT_BASE);
2646 AssertLogRelRCReturn(rc, rc);
2647
2648 for (;;)
2649 {
2650 /*
2651 * Get the record type and flags.
2652 */
2653 uint8_t u8;
2654 rc = SSMR3GetU8(pSSM, &u8);
2655 if (RT_FAILURE(rc))
2656 return rc;
2657 if (u8 == PGM_STATE_REC_END)
2658 {
2659 /*
2660 * Finish off any pages pending freeing.
2661 */
2662 if (cPendingPages)
2663 {
2664 Log(("pgmR3LoadMemory: GMMR3FreePagesPerform pVM=%p cPendingPages=%u\n", pVM, cPendingPages));
2665 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2666 AssertLogRelRCReturn(rc, rc);
2667 }
2668 GMMR3FreePagesCleanup(pReq);
2669 return VINF_SUCCESS;
2670 }
2671 AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2672 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2673 {
2674 /*
2675 * RAM page.
2676 */
2677 case PGM_STATE_REC_RAM_ZERO:
2678 case PGM_STATE_REC_RAM_RAW:
2679 case PGM_STATE_REC_RAM_BALLOONED:
2680 {
2681 /*
2682 * Get the address and resolve it into a page descriptor.
2683 */
2684 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2685 GCPhys += PAGE_SIZE;
2686 else
2687 {
2688 rc = SSMR3GetGCPhys(pSSM, &GCPhys);
2689 if (RT_FAILURE(rc))
2690 return rc;
2691 }
2692 AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2693
2694 PPGMPAGE pPage;
2695 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
2696 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
2697
2698 /*
2699 * Take action according to the record type.
2700 */
2701 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2702 {
2703 case PGM_STATE_REC_RAM_ZERO:
2704 {
2705 if (PGM_PAGE_IS_ZERO(pPage))
2706 break;
2707
2708 /* Ballooned pages must be unmarked (live snapshot and
2709 teleportation scenarios). */
2710 if (PGM_PAGE_IS_BALLOONED(pPage))
2711 {
2712 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2713 if (uVersion == PGM_SAVED_STATE_VERSION_BALLOON_BROKEN)
2714 break;
2715 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
2716 break;
2717 }
2718
2719 AssertLogRelMsgReturn(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED, ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_UNEXPECTED_PAGE_STATE);
2720
2721 /* If this is a ROM page, we must clear it and not try to
2722 * free it. Ditto if the VM is using RamPreAlloc (see
2723 * @bugref{6318}). */
2724 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM
2725 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM_SHADOW
2726 || pVM->pgm.s.fRamPreAlloc)
2727 {
2728 PGMPAGEMAPLOCK PgMpLck;
2729 void *pvDstPage;
2730 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2731 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2732
2733 ASMMemZeroPage(pvDstPage);
2734 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2735 }
2736 /* Free it only if it's not part of a previously
2737 allocated large page (no need to clear the page). */
2738 else if ( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2739 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED)
2740 {
2741 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
2742 AssertRCReturn(rc, rc);
2743 }
2744 /** @todo handle large pages (see @bugref{5545}) */
2745 break;
2746 }
2747
2748 case PGM_STATE_REC_RAM_BALLOONED:
2749 {
2750 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2751 if (PGM_PAGE_IS_BALLOONED(pPage))
2752 break;
2753
2754 /* We don't map ballooned pages in our shadow page tables, so just
2755 free the page if it's allocated and mark it as ballooned. See @bugref{5515}. */
2756 if (PGM_PAGE_IS_ALLOCATED(pPage))
2757 {
2758 /** @todo handle large pages + ballooning when it works. (see @bugref{5515},
2759 * @bugref{5545}). */
2760 AssertLogRelMsgReturn( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2761 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED,
2762 ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_LOAD_UNEXPECTED_PAGE_TYPE);
2763
2764 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
2765 AssertRCReturn(rc, rc);
2766 }
2767 Assert(PGM_PAGE_IS_ZERO(pPage));
2768 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
2769 break;
2770 }
2771
2772 case PGM_STATE_REC_RAM_RAW:
2773 {
2774 PGMPAGEMAPLOCK PgMpLck;
2775 void *pvDstPage;
2776 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2777 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2778 rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
2779 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2780 if (RT_FAILURE(rc))
2781 return rc;
2782 break;
2783 }
2784
2785 default:
2786 AssertMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
2787 }
2788 id = UINT8_MAX;
2789 break;
2790 }
2791
2792 /*
2793 * MMIO2 page.
2794 */
2795 case PGM_STATE_REC_MMIO2_RAW:
2796 case PGM_STATE_REC_MMIO2_ZERO:
2797 {
2798 /*
2799 * Get the ID + page number and resolve that into an MMIO2 page.
2800 */
2801 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2802 iPage++;
2803 else
2804 {
2805 SSMR3GetU8(pSSM, &id);
2806 rc = SSMR3GetU32(pSSM, &iPage);
2807 if (RT_FAILURE(rc))
2808 return rc;
2809 }
2810 if ( !pRegMmio
2811 || pRegMmio->idSavedState != id)
2812 {
2813 for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
2814 if ( pRegMmio->idSavedState == id
2815 && (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2))
2816 break;
2817 AssertLogRelMsgReturn(pRegMmio, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_MMIO2_RANGE_NOT_FOUND);
2818 }
2819 AssertLogRelMsgReturn(iPage < (pRegMmio->RamRange.cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pRegMmio->RamRange.cb, pRegMmio->RamRange.pszDesc), VERR_PGM_SAVED_MMIO2_PAGE_NOT_FOUND);
2820 void *pvDstPage = (uint8_t *)pRegMmio->RamRange.pvR3 + ((size_t)iPage << PAGE_SHIFT);
2821
2822 /*
2823 * Load the page bits.
2824 */
2825 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_MMIO2_ZERO)
2826 ASMMemZeroPage(pvDstPage);
2827 else
2828 {
2829 rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
2830 if (RT_FAILURE(rc))
2831 return rc;
2832 }
2833 GCPhys = NIL_RTGCPHYS;
2834 break;
2835 }
2836
2837 /*
2838 * ROM pages.
2839 */
2840 case PGM_STATE_REC_ROM_VIRGIN:
2841 case PGM_STATE_REC_ROM_SHW_RAW:
2842 case PGM_STATE_REC_ROM_SHW_ZERO:
2843 case PGM_STATE_REC_ROM_PROT:
2844 {
2845 /*
2846 * Get the ID + page number and resolve that into a ROM page descriptor.
2847 */
2848 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2849 iPage++;
2850 else
2851 {
2852 SSMR3GetU8(pSSM, &id);
2853 rc = SSMR3GetU32(pSSM, &iPage);
2854 if (RT_FAILURE(rc))
2855 return rc;
2856 }
2857 if ( !pRom
2858 || pRom->idSavedState != id)
2859 {
2860 for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2861 if (pRom->idSavedState == id)
2862 break;
2863 AssertLogRelMsgReturn(pRom, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_ROM_RANGE_NOT_FOUND);
2864 }
2865 AssertLogRelMsgReturn(iPage < (pRom->cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pRom->cb, pRom->pszDesc), VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
2866 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2867 GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
2868
2869 /*
2870 * Get and set the protection.
2871 */
2872 uint8_t u8Prot;
2873 rc = SSMR3GetU8(pSSM, &u8Prot);
2874 if (RT_FAILURE(rc))
2875 return rc;
2876 PGMROMPROT enmProt = (PGMROMPROT)u8Prot;
2877 AssertLogRelMsgReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, ("GCPhys=%RGp enmProt=%d\n", GCPhys, enmProt), VERR_PGM_SAVED_ROM_PAGE_PROT);
2878
2879 if (enmProt != pRomPage->enmProt)
2880 {
2881 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
2882 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2883 N_("Protection change of unshadowed ROM page: GCPhys=%RGp enmProt=%d %s"),
2884 GCPhys, enmProt, pRom->pszDesc);
2885 rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
2886 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
2887 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
2888 }
2889 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_ROM_PROT)
2890 break; /* done */
2891
2892 /*
2893 * Get the right page descriptor.
2894 */
2895 PPGMPAGE pRealPage;
2896 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2897 {
2898 case PGM_STATE_REC_ROM_VIRGIN:
2899 if (!PGMROMPROT_IS_ROM(enmProt))
2900 pRealPage = &pRomPage->Virgin;
2901 else
2902 pRealPage = NULL;
2903 break;
2904
2905 case PGM_STATE_REC_ROM_SHW_RAW:
2906 case PGM_STATE_REC_ROM_SHW_ZERO:
2907 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
2908 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2909 N_("Shadowed / non-shadowed page type mismatch: GCPhys=%RGp enmProt=%d %s"),
2910 GCPhys, enmProt, pRom->pszDesc);
2911 if (PGMROMPROT_IS_ROM(enmProt))
2912 pRealPage = &pRomPage->Shadow;
2913 else
2914 pRealPage = NULL;
2915 break;
2916
2917 default: AssertLogRelFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE); /* shut up gcc */
2918 }
2919 if (!pRealPage)
2920 {
2921 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pRealPage, &pRamHint);
2922 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
2923 }
2924
2925 /*
2926 * Make it writable and map it (if necessary).
2927 */
2928 void *pvDstPage = NULL;
2929 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2930 {
2931 case PGM_STATE_REC_ROM_SHW_ZERO:
2932 if ( PGM_PAGE_IS_ZERO(pRealPage)
2933 || PGM_PAGE_IS_BALLOONED(pRealPage))
2934 break;
2935 /** @todo implement zero page replacing. */
2936 RT_FALL_THRU();
2937 case PGM_STATE_REC_ROM_VIRGIN:
2938 case PGM_STATE_REC_ROM_SHW_RAW:
2939 {
2940 rc = pgmPhysPageMakeWritableAndMap(pVM, pRealPage, GCPhys, &pvDstPage);
2941 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
2942 break;
2943 }
2944 }
2945
2946 /*
2947 * Load the bits.
2948 */
2949 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2950 {
2951 case PGM_STATE_REC_ROM_SHW_ZERO:
2952 if (pvDstPage)
2953 ASMMemZeroPage(pvDstPage);
2954 break;
2955
2956 case PGM_STATE_REC_ROM_VIRGIN:
2957 case PGM_STATE_REC_ROM_SHW_RAW:
2958 rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
2959 if (RT_FAILURE(rc))
2960 return rc;
2961 break;
2962 }
2963 GCPhys = NIL_RTGCPHYS;
2964 break;
2965 }
2966
2967 /*
2968 * Unknown type.
2969 */
2970 default:
2971 AssertLogRelMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
2972 }
2973 } /* forever */
2974}
2975
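/*
 * Minimal, self-contained counterpart of the record decoding loop in
 * pgmR3LoadMemory above: read a type byte, stop on the end marker, take an
 * explicit address when the ADDR flag is set and otherwise advance by one
 * page. The EX_* names/values and the flat pbRam buffer are illustrative
 * stand-ins for the PGM_STATE_REC_* definitions and the real page lookup.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EX_PAGE_SIZE     4096u
#define EX_REC_ZERO      0x00
#define EX_REC_RAW       0x01 /* illustrative value */
#define EX_REC_END       0x7f /* illustrative value */
#define EX_REC_FLAG_ADDR 0x80 /* illustrative value */

static int exLoadPages(FILE *pStream, uint8_t *pbRam, uint64_t cbRam)
{
    uint64_t GCPhys = 0;
    int fHaveAddr = 0; /* the first record must carry an explicit address */
    for (;;)
    {
        int const ch = fgetc(pStream);
        if (ch == EOF)
            return -1;
        uint8_t const u8 = (uint8_t)ch;
        if (u8 == EX_REC_END)
            return 0;

        if (u8 & EX_REC_FLAG_ADDR)
        {
            if (fread(&GCPhys, sizeof(GCPhys), 1, pStream) != 1)
                return -1;
            fHaveAddr = 1;
        }
        else if (fHaveAddr)
            GCPhys += EX_PAGE_SIZE; /* implicit: page after the previous one */
        else
            return -1;
        if (GCPhys % EX_PAGE_SIZE || cbRam < EX_PAGE_SIZE || GCPhys > cbRam - EX_PAGE_SIZE)
            return -1;

        switch (u8 & ~EX_REC_FLAG_ADDR)
        {
            case EX_REC_ZERO:
                memset(pbRam + GCPhys, 0, EX_PAGE_SIZE);
                break;
            case EX_REC_RAW:
                if (fread(pbRam + GCPhys, 1, EX_PAGE_SIZE, pStream) != EX_PAGE_SIZE)
                    return -1;
                break;
            default:
                return -1; /* unknown record type */
        }
    }
}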
2976
2977/**
2978 * Worker for pgmR3Load.
2979 *
2980 * @returns VBox status code.
2981 *
2982 * @param pVM The cross context VM structure.
2983 * @param pSSM The SSM handle.
2984 * @param uVersion The saved state version.
2985 */
2986static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2987{
2988 PPGM pPGM = &pVM->pgm.s;
2989 int rc;
2990 uint32_t u32Sep;
2991
2992 /*
2993 * Load basic data (required / unaffected by relocation).
2994 */
2995 if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
2996 {
2997 if (uVersion > PGM_SAVED_STATE_VERSION_PRE_BALLOON)
2998 rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFields[0]);
2999 else
3000 rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFieldsPreBalloon[0]);
3001
3002 AssertLogRelRCReturn(rc, rc);
3003
3004 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3005 {
3006 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
3007 rc = SSMR3GetStruct(pSSM, &pVM->aCpus[i].pgm.s, &s_aPGMCpuFieldsPrePae[0]);
3008 else
3009 rc = SSMR3GetStruct(pSSM, &pVM->aCpus[i].pgm.s, &s_aPGMCpuFields[0]);
3010 AssertLogRelRCReturn(rc, rc);
3011 }
3012 }
3013 else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
3014 {
3015 AssertRelease(pVM->cCpus == 1);
3016
3017 PGMOLD pgmOld;
3018 rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
3019 AssertLogRelRCReturn(rc, rc);
3020
3021 pPGM->fMappingsFixed = pgmOld.fMappingsFixed;
3022 pPGM->GCPtrMappingFixed = pgmOld.GCPtrMappingFixed;
3023 pPGM->cbMappingFixed = pgmOld.cbMappingFixed;
3024
3025 pVM->aCpus[0].pgm.s.fA20Enabled = pgmOld.fA20Enabled;
3026 pVM->aCpus[0].pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
3027 pVM->aCpus[0].pgm.s.enmGuestMode = pgmOld.enmGuestMode;
3028 }
3029 else
3030 {
3031 AssertRelease(pVM->cCpus == 1);
3032
3033 SSMR3GetBool(pSSM, &pPGM->fMappingsFixed);
3034 SSMR3GetGCPtr(pSSM, &pPGM->GCPtrMappingFixed);
3035 SSMR3GetU32(pSSM, &pPGM->cbMappingFixed);
3036
3037 uint32_t cbRamSizeIgnored;
3038 rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
3039 if (RT_FAILURE(rc))
3040 return rc;
3041 SSMR3GetGCPhys(pSSM, &pVM->aCpus[0].pgm.s.GCPhysA20Mask);
3042
3043 uint32_t u32 = 0;
3044 SSMR3GetUInt(pSSM, &u32);
3045 pVM->aCpus[0].pgm.s.fA20Enabled = !!u32;
3046 SSMR3GetUInt(pSSM, &pVM->aCpus[0].pgm.s.fSyncFlags);
3047 RTUINT uGuestMode;
3048 SSMR3GetUInt(pSSM, &uGuestMode);
3049 pVM->aCpus[0].pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
3050
3051 /* check separator. */
3052 rc = SSMR3GetU32(pSSM, &u32Sep);
3053 if (RT_FAILURE(rc))
3054 return rc;
3055 if (u32Sep != (uint32_t)~0)
3056 {
3057 AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
3058 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
3059 }
3060 }
3061
3062 /*
3063 * Fix the A20 mask.
3064 */
3065 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3066 {
3067 PVMCPU pVCpu = &pVM->aCpus[i];
3068 pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!pVCpu->pgm.s.fA20Enabled << 20);
3069 pgmR3RefreshShadowModeAfterA20Change(pVCpu);
3070 }
3071
3072 /*
3073 * The guest mappings - skipped now, see re-fixation in the caller.
3074 */
3075 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
3076 {
3077 for (uint32_t i = 0; ; i++)
3078 {
3079 rc = SSMR3GetU32(pSSM, &u32Sep); /* sequence number */
3080 if (RT_FAILURE(rc))
3081 return rc;
3082 if (u32Sep == ~0U)
3083 break;
3084 AssertMsgReturn(u32Sep == i, ("u32Sep=%#x i=%#x\n", u32Sep, i), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
3085
3086 char szDesc[256];
3087 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
3088 if (RT_FAILURE(rc))
3089 return rc;
3090 RTGCPTR GCPtrIgnore;
3091 SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* GCPtr */
3092 rc = SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* cPTs */
3093 if (RT_FAILURE(rc))
3094 return rc;
3095 }
3096 }
3097
3098 /*
3099 * Load the RAM contents.
3100 */
3101 if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
3102 {
3103 if (!pVM->pgm.s.LiveSave.fActive)
3104 {
3105 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3106 {
3107 rc = pgmR3LoadRamConfig(pVM, pSSM);
3108 if (RT_FAILURE(rc))
3109 return rc;
3110 }
3111 rc = pgmR3LoadRomRanges(pVM, pSSM);
3112 if (RT_FAILURE(rc))
3113 return rc;
3114 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3115 if (RT_FAILURE(rc))
3116 return rc;
3117 }
3118
3119 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, SSM_PASS_FINAL);
3120 }
3121 else
3122 rc = pgmR3LoadMemoryOld(pVM, pSSM, uVersion);
3123
3124 /* Refresh balloon accounting. */
3125 if (pVM->pgm.s.cBalloonedPages)
3126 {
3127 Log(("pgmR3LoadFinalLocked: pVM=%p cBalloonedPages=%#x\n", pVM, pVM->pgm.s.cBalloonedPages));
3128 rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_INFLATE, pVM->pgm.s.cBalloonedPages);
3129 AssertRCReturn(rc, rc);
3130 }
3131 return rc;
3132}
3133
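/*
 * Worked example of the A20 mask expression used in pgmR3LoadFinalLocked
 * above: ~((RTGCPHYS)!fA20Enabled << 20) yields an all-ones mask when A20 is
 * enabled and a mask with only bit 20 cleared when it is disabled. This is a
 * standalone sketch using plain uint64_t instead of RTGCPHYS.
 */
#include <stdint.h>
#include <assert.h>

static uint64_t exA20Mask(int fA20Enabled)
{
    return ~((uint64_t)!fA20Enabled << 20);
}

int main(void)
{
    assert(exA20Mask(1) == UINT64_MAX);          /* no bits masked */
    assert(exA20Mask(0) == ~UINT64_C(0x100000)); /* only bit 20 cleared */
    assert((UINT64_C(0x123456) & exA20Mask(0)) == UINT64_C(0x023456));
    return 0;
}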
3134
3135/**
3136 * @callback_method_impl{FNSSMINTLOADEXEC}
3137 */
3138static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3139{
3140 int rc;
3141
3142 /*
3143 * Validate version.
3144 */
3145 if ( ( uPass != SSM_PASS_FINAL
3146 && uVersion != PGM_SAVED_STATE_VERSION
3147 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3148 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3149 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3150 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3151 || ( uVersion != PGM_SAVED_STATE_VERSION
3152 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3153 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3154 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3155 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG
3156 && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
3157 && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
3158 && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
3159 && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
3160 )
3161 {
3162 AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
3163 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3164 }
3165
3166 /*
3167 * Do the loading while owning the lock because a bunch of the functions
3168 * we're using require this.
3169 */
3170 if (uPass != SSM_PASS_FINAL)
3171 {
3172 pgmLock(pVM);
3173 if (uPass != 0)
3174 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3175 else
3176 {
3177 pVM->pgm.s.LiveSave.fActive = true;
3178 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3179 rc = pgmR3LoadRamConfig(pVM, pSSM);
3180 else
3181 rc = VINF_SUCCESS;
3182 if (RT_SUCCESS(rc))
3183 rc = pgmR3LoadRomRanges(pVM, pSSM);
3184 if (RT_SUCCESS(rc))
3185 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3186 if (RT_SUCCESS(rc))
3187 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3188 }
3189 pgmUnlock(pVM);
3190 }
3191 else
3192 {
3193 pgmLock(pVM);
3194 rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
3195 pVM->pgm.s.LiveSave.fActive = false;
3196 pgmUnlock(pVM);
3197 if (RT_SUCCESS(rc))
3198 {
3199 /*
3200 * We require a full resync now.
3201 */
3202 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3203 {
3204 PVMCPU pVCpu = &pVM->aCpus[i];
3205 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
3206 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3207 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
3208 /** @todo For guest PAE, we might get the wrong
3209 * aGCPhysGstPaePDs values now. We should use the
3210 * saved ones... Postponing this since it's nothing new
3211 * and PAE/PDPTR needs some general readjusting, see
3212 * @bugref{5880}. */
3213 }
3214
3215 pgmR3HandlerPhysicalUpdateAll(pVM);
3216
3217 /*
3218 * Change the paging mode (indirectly restores PGMCPU::GCPhysCR3).
3219 * (Requires the CPUM state to be restored already!)
3220 */
3221 if (CPUMR3IsStateRestorePending(pVM))
3222 return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
3223 N_("PGM was unexpectedly restored before CPUM"));
3224
3225 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3226 {
3227 PVMCPU pVCpu = &pVM->aCpus[i];
3228
3229 rc = PGMHCChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode);
3230 AssertLogRelRCReturn(rc, rc);
3231
3232 /* Update the PSE, NX flags and validity masks. */
3233 pVCpu->pgm.s.fGst32BitPageSizeExtension = CPUMIsGuestPageSizeExtEnabled(pVCpu);
3234 PGMNotifyNxeChanged(pVCpu, CPUMIsGuestNXEnabled(pVCpu));
3235 }
3236
3237 /*
3238 * Try to re-fixate the guest mappings.
3239 */
3240 pVM->pgm.s.fMappingsFixedRestored = false;
3241 if ( pVM->pgm.s.fMappingsFixed
3242 && pgmMapAreMappingsEnabled(pVM))
3243 {
3244#ifndef PGM_WITHOUT_MAPPINGS
3245 RTGCPTR GCPtrFixed = pVM->pgm.s.GCPtrMappingFixed;
3246 uint32_t cbFixed = pVM->pgm.s.cbMappingFixed;
3247 pVM->pgm.s.fMappingsFixed = false;
3248
3249 uint32_t cbRequired;
3250 int rc2 = PGMR3MappingsSize(pVM, &cbRequired); AssertRC(rc2);
3251 if ( RT_SUCCESS(rc2)
3252 && cbRequired > cbFixed)
3253 rc2 = VERR_OUT_OF_RANGE;
3254 if (RT_SUCCESS(rc2))
3255 rc2 = pgmR3MappingsFixInternal(pVM, GCPtrFixed, cbFixed);
3256 if (RT_FAILURE(rc2))
3257 {
3258 LogRel(("PGM: Unable to re-fixate the guest mappings at %RGv-%RGv: rc=%Rrc (cbRequired=%#x)\n",
3259 GCPtrFixed, GCPtrFixed + cbFixed, rc2, cbRequired));
3260 pVM->pgm.s.fMappingsFixed = false;
3261 pVM->pgm.s.fMappingsFixedRestored = true;
3262 pVM->pgm.s.GCPtrMappingFixed = GCPtrFixed;
3263 pVM->pgm.s.cbMappingFixed = cbFixed;
3264 }
3265#else
3266 AssertFailed();
3267#endif
3268 }
3269 else
3270 {
3271 /* We used to set fixed + disabled while we only use disabled now,
3272 so wipe the state to avoid any confusion. */
3273 pVM->pgm.s.fMappingsFixed = false;
3274 pVM->pgm.s.GCPtrMappingFixed = NIL_RTGCPTR;
3275 pVM->pgm.s.cbMappingFixed = 0;
3276 }
3277
3278 /*
3279 * If we have floating mappings, do a CR3 sync now to make sure the HMA
3280 * doesn't conflict with guest code / data and thereby cause trouble
3281 * when restoring other components like PATM.
3282 */
3283 if (pgmMapAreMappingsFloating(pVM))
3284 {
3285 PVMCPU pVCpu = &pVM->aCpus[0];
3286 rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), true);
3287 if (RT_FAILURE(rc))
3288 return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
3289 N_("PGMSyncCR3 failed unexpectedly with rc=%Rrc"), rc);
3290
3291 /* Make sure to re-sync before executing code. */
3292 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
3293 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3294 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
3295 }
3296 }
3297 }
3298
3299 return rc;
3300}
3301
3302
3303/**
3304 * @callback_method_impl{FNSSMINTLOADDONE}
3305 */
3306static DECLCALLBACK(int) pgmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
3307{
3308 pVM->pgm.s.fRestoreRomPagesOnReset = true;
3309 NOREF(pSSM);
3310 return VINF_SUCCESS;
3311}
3312
3313
3314/**
3315 * Registers the saved state callbacks with SSM.
3316 *
3317 * @returns VBox status code.
3318 * @param pVM The cross context VM structure.
3319 * @param cbRam The RAM size.
3320 */
3321int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
3322{
3323 return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
3324 pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
3325 NULL, pgmR3SaveExec, pgmR3SaveDone,
3326 pgmR3LoadPrep, pgmR3Load, pgmR3LoadDone);
3327}
3328