VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp

Last change on this file was 108843, checked in by vboxsync, 4 weeks ago

VMM/PGM,NEM: Some early page table management infrastructure for ARMv8, bugref:10388

/* $Id: PGMSavedState.cpp 108843 2025-04-04 08:36:32Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, The Saved State Part.
 */

/*
 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmdrv.h>
#include <VBox/vmm/pdmdev.h>
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "PGMInline.h"

#include <VBox/param.h>
#include <VBox/err.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/crc.h>
#include <iprt/mem.h>
#include <iprt/sha.h>
#include <iprt/string.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#if defined(VBOX_VMM_TARGET_X86)
/** Saved state data unit version. */
# define PGM_SAVED_STATE_VERSION                14
/** Saved state data unit version before the PAE PDPE registers. */
# define PGM_SAVED_STATE_VERSION_PRE_PAE        13
/** Saved state data unit version after this includes ballooned page flags in
 *  the state (see @bugref{5515}). */
# define PGM_SAVED_STATE_VERSION_BALLOON_BROKEN 12
/** Saved state before the balloon change. */
# define PGM_SAVED_STATE_VERSION_PRE_BALLOON    11
/** Saved state data unit version used during 3.1 development, misses the RAM
 *  config. */
# define PGM_SAVED_STATE_VERSION_NO_RAM_CFG     10
/** Saved state data unit version for 3.0 (pre teleportation). */
# define PGM_SAVED_STATE_VERSION_3_0_0          9
/** Saved state data unit version for 2.2.2 and later. */
# define PGM_SAVED_STATE_VERSION_2_2_2          8
/** Saved state data unit version for 2.2.0. */
# define PGM_SAVED_STATE_VERSION_RR_DESC        7
/** Saved state data unit version. */
# define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE  6
#elif defined(VBOX_VMM_TARGET_ARMV8)
/** Saved state data unit version. */
# define PGM_SAVED_STATE_VERSION                1
#endif
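
/* Illustrative sketch, not part of the original file: a load callback would
 * typically begin by white-listing the unit versions it understands, along
 * these (x86) lines. This is an assumption about how the version macros are
 * consumed, not the actual pgmR3Load code. */
#if 0 /* example only */
    if (   uVersion != PGM_SAVED_STATE_VERSION
        && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
        && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
        && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
        && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG
        && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
        && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
        && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
        && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
#endif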


/** @name Sparse state record types
 * @{ */
/** Zero page. No data. */
#define PGM_STATE_REC_RAM_ZERO      UINT8_C(0x00)
/** Raw page. */
#define PGM_STATE_REC_RAM_RAW       UINT8_C(0x01)
/** Raw MMIO2 page. */
#define PGM_STATE_REC_MMIO2_RAW     UINT8_C(0x02)
/** Zero MMIO2 page. */
#define PGM_STATE_REC_MMIO2_ZERO    UINT8_C(0x03)
/** Virgin ROM page. Followed by protection (8-bit) and the raw bits. */
#define PGM_STATE_REC_ROM_VIRGIN    UINT8_C(0x04)
/** Raw shadowed ROM page. The protection (8-bit) precedes the raw bits. */
#define PGM_STATE_REC_ROM_SHW_RAW   UINT8_C(0x05)
/** Zero shadowed ROM page. The protection (8-bit) is the only payload. */
#define PGM_STATE_REC_ROM_SHW_ZERO  UINT8_C(0x06)
/** ROM protection (8-bit). */
#define PGM_STATE_REC_ROM_PROT      UINT8_C(0x07)
/** Ballooned page. No data. */
#define PGM_STATE_REC_RAM_BALLOONED UINT8_C(0x08)
/** The last record type. */
#define PGM_STATE_REC_LAST          PGM_STATE_REC_RAM_BALLOONED
/** End marker. */
#define PGM_STATE_REC_END           UINT8_C(0xff)
/** Flag indicating that the data is preceded by the page address.
 * For RAW pages this is an RTGCPHYS. For MMIO2 and ROM pages this is an 8-bit
 * range ID and a 32-bit page index.
 */
#define PGM_STATE_REC_FLAG_ADDR     UINT8_C(0x80)
/** @} */
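
/* Illustrative sketch, not part of the original file: how a consumer of the
 * sparse record stream defined above might decode a record header. The helper
 * name is hypothetical; only the layout follows the comments above (RAM
 * records carry an RTGCPHYS, MMIO2/ROM records an 8-bit range ID plus a
 * 32-bit page index, and records without PGM_STATE_REC_FLAG_ADDR apply to the
 * page right after the previous one). */
#if 0 /* example only */
static int pgmR3ExampleReadRecordHeader(PSSMHANDLE pSSM)
{
    uint8_t u8;
    int rc = SSMR3GetU8(pSSM, &u8);
    if (RT_FAILURE(rc))
        return rc;
    if (u8 == PGM_STATE_REC_END)
        return VINF_SUCCESS;                    /* end of the page records */

    uint8_t const u8Type = u8 & ~PGM_STATE_REC_FLAG_ADDR;
    AssertReturn(u8Type <= PGM_STATE_REC_LAST, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

    if (u8 & PGM_STATE_REC_FLAG_ADDR)
    {
        if (   u8Type == PGM_STATE_REC_RAM_ZERO
            || u8Type == PGM_STATE_REC_RAM_RAW
            || u8Type == PGM_STATE_REC_RAM_BALLOONED)
        {
            RTGCPHYS GCPhys;
            rc = SSMR3GetGCPhys(pSSM, &GCPhys); /* RAM records: guest physical address. */
        }
        else
        {
            uint8_t  idRange;
            uint32_t iPage;
            SSMR3GetU8(pSSM, &idRange);         /* MMIO2/ROM records: range ID... */
            rc = SSMR3GetU32(pSSM, &iPage);     /* ...and page index within the range. */
        }
    }
    /* else: the record continues at the page following the previous record. */
    return rc;
}
#endif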

/** The CRC-32 for a zero page. */
#define PGM_STATE_CRC32_ZERO_PAGE       UINT32_C(0xc71c0011)
/** The CRC-32 for a zero half page. */
#define PGM_STATE_CRC32_ZERO_HALF_PAGE  UINT32_C(0xf1e8ba9e)
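
/* Illustrative sketch, not part of the original file: the two constants above
 * are simply RTCrc32 over a fully and a half zeroed guest page, so they can
 * be sanity-checked like this (assuming the usual 4 KiB GUEST_PAGE_SIZE the
 * precomputed values correspond to). */
#if 0 /* example only */
static void pgmR3ExampleCheckZeroPageCrcs(void)
{
    static uint8_t const s_abZeroPg[GUEST_PAGE_SIZE] = { 0 };
    Assert(RTCrc32(s_abZeroPg, GUEST_PAGE_SIZE)     == PGM_STATE_CRC32_ZERO_PAGE);
    Assert(RTCrc32(s_abZeroPg, GUEST_PAGE_SIZE / 2) == PGM_STATE_CRC32_ZERO_HALF_PAGE);
}
#endif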



/** @name Old Page types used in older saved states.
 * @{ */
/** Old saved state: The usual invalid zero entry. */
#define PGMPAGETYPE_OLD_INVALID             0
/** Old saved state: RAM page. (RWX) */
#define PGMPAGETYPE_OLD_RAM                 1
/** Old saved state: MMIO2 page. (RWX) */
#define PGMPAGETYPE_OLD_MMIO2               2
/** Old saved state: MMIO2 page aliased over an MMIO page. (RWX)
 * See PGMHandlerPhysicalPageAlias(). */
#define PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO    3
/** Old saved state: Shadowed ROM. (RWX) */
#define PGMPAGETYPE_OLD_ROM_SHADOW          4
/** Old saved state: ROM page. (R-X) */
#define PGMPAGETYPE_OLD_ROM                 5
/** Old saved state: MMIO page. (---) */
#define PGMPAGETYPE_OLD_MMIO                6
/** @} */


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/** For loading old saved states. (pre-smp) */
typedef struct
{
    /** If set no conflict checks are required. (boolean) */
    bool            fMappingsFixed;
    /** Size of fixed mapping */
    uint32_t        cbMappingFixed;
    /** Base address (GC) of fixed mapping */
    RTGCPTR         GCPtrMappingFixed;
    /** A20 gate mask.
     * Our current approach to A20 emulation is to let REM do it and don't bother
     * anywhere else. The interesting guests will be operating with it enabled anyway.
     * But should the need arise, we'll subject physical addresses to this mask. */
    RTGCPHYS        GCPhysA20Mask;
    /** A20 gate state - boolean! */
    bool            fA20Enabled;
    /** The guest paging mode. */
    PGMMODE         enmGuestMode;
} PGMOLD;


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** PGM fields to save/load. */
static const SSMFIELD s_aPGMFields[] =
{
    SSMFIELD_ENTRY_OLD(         fMappingsFixed, sizeof(bool)),
    SSMFIELD_ENTRY_OLD_GCPTR(   GCPtrMappingFixed),
    SSMFIELD_ENTRY_OLD(         cbMappingFixed, sizeof(uint32_t)),
    SSMFIELD_ENTRY(             PGM, cBalloonedPages),
    SSMFIELD_ENTRY_TERM()
};
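
/* Illustrative sketch, not part of the original file: SSMFIELD tables like
 * the one above are consumed by the SSM struct (de)serializer; loading the
 * PGM fields would look roughly like this (the surrounding load callback is
 * assumed, not shown). */
#if 0 /* example only */
    int rc = SSMR3GetStructEx(pSSM, &pVM->pgm.s, sizeof(pVM->pgm.s), 0 /*fFlags*/, &s_aPGMFields[0], NULL /*pvUser*/);
    AssertLogRelRCReturn(rc, rc);
#endif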

#if defined(VBOX_VMM_TARGET_X86)
static const SSMFIELD s_aPGMFieldsPreBalloon[] =
{
    SSMFIELD_ENTRY_OLD(         fMappingsFixed, sizeof(bool)),
    SSMFIELD_ENTRY_OLD_GCPTR(   GCPtrMappingFixed),
    SSMFIELD_ENTRY_OLD(         cbMappingFixed, sizeof(uint32_t)),
    SSMFIELD_ENTRY_TERM()
};

static const SSMFIELD s_aPGMCpuFields[] =
{
    SSMFIELD_ENTRY(             PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(      PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(             PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY(             PGMCPU, aGCPhysGstPaePDs[0]),
    SSMFIELD_ENTRY(             PGMCPU, aGCPhysGstPaePDs[1]),
    SSMFIELD_ENTRY(             PGMCPU, aGCPhysGstPaePDs[2]),
    SSMFIELD_ENTRY(             PGMCPU, aGCPhysGstPaePDs[3]),
    SSMFIELD_ENTRY_TERM()
};

static const SSMFIELD s_aPGMCpuFieldsPrePae[] =
{
    SSMFIELD_ENTRY(             PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(      PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(             PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};

static const SSMFIELD s_aPGMFields_Old[] =
{
    SSMFIELD_ENTRY(             PGMOLD, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(       PGMOLD, GCPtrMappingFixed),
    SSMFIELD_ENTRY(             PGMOLD, cbMappingFixed),
    SSMFIELD_ENTRY(             PGMOLD, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(      PGMOLD, GCPhysA20Mask),
    SSMFIELD_ENTRY(             PGMOLD, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};
#elif defined(VBOX_VMM_TARGET_ARMV8)
static const SSMFIELD s_aPGMCpuFields[] =
{
    SSMFIELD_ENTRY_TERM()
};
#else
# error "Port me"
#endif


#if defined(VBOX_VMM_TARGET_X86)
/**
 * Find the ROM tracking structure for the given page.
 *
 * @returns Pointer to the ROM page structure, or NULL if the address doesn't
 *          fall within a ROM range (i.e. it isn't a ROM page).
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The address of the ROM page.
 */
static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */
{
    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
    {
        PPGMROMRANGE const pRomRange = pVM->pgm.s.apRomRanges[idx];
        RTGCPHYS const     off       = GCPhys - pRomRange->GCPhys;
        if (off < pRomRange->cb)
            return &pRomRange->aPages[off >> GUEST_PAGE_SHIFT];
    }
    return NULL;
}
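
/* Illustrative sketch, not part of the original file: a hypothetical caller
 * has to treat a NULL return as "GCPhys is not inside any ROM range" since
 * pgmR3GetRomPage() does a plain linear range search. */
#if 0 /* example only */
    PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
    if (!pRomPage)
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED; /* e.g. a record referenced a non-ROM page */
#endif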
#endif


/**
 * Prepares the ROM pages for a live save.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int pgmR3PrepRomPages(PVM pVM)
{
    /*
     * Initialize the live save tracking in the ROM page descriptors.
     */
    PGM_LOCK_VOID(pVM);
    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
    {
        PPGMROMRANGE const pRom     = pVM->pgm.s.apRomRanges[idx];
        uint32_t const     cPages   = pRom->cb >> GUEST_PAGE_SHIFT;
        PPGMRAMRANGE       pRamHint = NULL;

        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            pRom->aPages[iPage].LiveSave.u8Prot           = (uint8_t)PGMROMPROT_INVALID;
            pRom->aPages[iPage].LiveSave.fWrittenTo       = false;
            pRom->aPages[iPage].LiveSave.fDirty           = true;
            pRom->aPages[iPage].LiveSave.fDirtiedRecently = true;
            if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED) /* fWrittenTo only matters for shadowed ROM. */
            {
                if (PGMROMPROT_IS_ROM(pRom->aPages[iPage].enmProt))
                    pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow)
                                                           && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
                else
                {
                    RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
                    PPGMPAGE pPage;
                    int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
                    AssertLogRelMsgRC(rc, ("%Rrc GCPhys=%RGp\n", rc, GCPhys));
                    if (RT_SUCCESS(rc))
                        pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(pPage) && !PGM_PAGE_IS_BALLOONED(pPage);
                    else
                        pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow)
                                                               && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
                }
            }
        }

        pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
            pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
    }
    PGM_UNLOCK(pVM);

    return VINF_SUCCESS;
}


/**
 * Assigns IDs to the ROM ranges and saves them.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    Saved state handle.
 */
static int pgmR3SaveRomRanges(PVM pVM, PSSMHANDLE pSSM)
{
    PGM_LOCK_VOID(pVM);
    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
    {
        PPGMROMRANGE const pRom         = pVM->pgm.s.apRomRanges[idx];
        uint8_t const      idSavedState = (uint8_t)(idx + 1);
        pRom->idSavedState = idSavedState;
        SSMR3PutU8(pSSM, idSavedState);
        SSMR3PutStrZ(pSSM, "");         /* device name */
        SSMR3PutU32(pSSM, 0);           /* device instance */
        SSMR3PutU8(pSSM, 0);            /* region */
        SSMR3PutStrZ(pSSM, pRom->pszDesc);
        SSMR3PutGCPhys(pSSM, pRom->GCPhys);
        int rc = SSMR3PutGCPhys(pSSM, pRom->cb);
        if (RT_FAILURE(rc))
            break;
    }
    PGM_UNLOCK(pVM);
    return SSMR3PutU8(pSSM, UINT8_MAX);
}


/**
 * Loads the ROM range ID assignments.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3LoadRomRanges(PVM pVM, PSSMHANDLE pSSM)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
        pVM->pgm.s.apRomRanges[idx]->idSavedState = UINT8_MAX;

    for (;;)
    {
        /*
         * Read the data.
         */
        uint8_t id;
        int rc = SSMR3GetU8(pSSM, &id);
        if (RT_FAILURE(rc))
            return rc;
        if (id == UINT8_MAX)
        {
            /*
             * End of ROM ranges. Check that all are accounted for.
             */
            for (uint32_t idx = 0; idx < cRomRanges; idx++)
            {
                PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
                if (pRom->idSavedState != UINT8_MAX)
                { /* likely */ }
                else if (pRom->fFlags & PGMPHYS_ROM_FLAGS_MAYBE_MISSING_FROM_STATE)
                    LogRel(("PGM: The '%s' ROM was not found in the saved state, but it is marked as maybe-missing, so that's probably okay.\n",
                            pRom->pszDesc));
                else
                    AssertLogRelMsg(pRom->idSavedState != UINT8_MAX,
                                    ("The '%s' ROM was not found in the saved state. Probably due to some misconfiguration\n",
                                     pRom->pszDesc));
            }
            return VINF_SUCCESS;        /* the end */
        }
        AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
        rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
        AssertLogRelRCReturn(rc, rc);

        uint32_t uInstance;
        SSMR3GetU32(pSSM, &uInstance);
        uint8_t iRegion;
        SSMR3GetU8(pSSM, &iRegion);

        char szDesc[64];
        rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
        AssertLogRelRCReturn(rc, rc);

        RTGCPHYS GCPhys;
        SSMR3GetGCPhys(pSSM, &GCPhys);
        RTGCPHYS cb;
        rc = SSMR3GetGCPhys(pSSM, &cb);
        if (RT_FAILURE(rc))
            return rc;
        AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        AssertLogRelMsgReturn(!(cb & GUEST_PAGE_OFFSET_MASK),     ("cb=%RGp %s\n", cb, szDesc),         VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /*
         * Locate a matching ROM range.
         */
        AssertLogRelMsgReturn(   uInstance == 0
                              && iRegion == 0
                              && szDevName[0] == '\0',
                              ("GCPhys=%RGp LB %RGp %s\n", GCPhys, cb, szDesc),
                              VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        uint32_t idx;
        for (idx = 0; idx < cRomRanges; idx++)
        {
            PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
            if (   pRom->idSavedState == UINT8_MAX
                && !strcmp(pRom->pszDesc, szDesc))
            {
                pRom->idSavedState = id;
                break;
            }
        }
        if (idx >= cRomRanges)
            return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("ROM at %RGp LB %RGp by the name '%s' was not found"),
                                    GCPhys, cb, szDesc);
    } /* forever */
}


/**
 * Scan ROM pages.
 *
 * @param   pVM     The cross context VM structure.
 */
static void pgmR3ScanRomPages(PVM pVM)
{
    /*
     * The shadow ROMs.
     */
    PGM_LOCK_VOID(pVM);
    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
    {
        PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        {
            uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                if (pRomPage->LiveSave.fWrittenTo)
                {
                    pRomPage->LiveSave.fWrittenTo = false;
                    if (!pRomPage->LiveSave.fDirty)
                    {
                        pRomPage->LiveSave.fDirty = true;
                        pVM->pgm.s.LiveSave.Rom.cReadyPages--;
                        pVM->pgm.s.LiveSave.Rom.cDirtyPages++;
                    }
                    pRomPage->LiveSave.fDirtiedRecently = true;
                }
                else
                    pRomPage->LiveSave.fDirtiedRecently = false;
            }
        }
    }
    PGM_UNLOCK(pVM);
}


/**
 * Takes care of the virgin ROM pages in the first pass.
 *
 * This is an attempt at simplifying the handling of ROM pages a little bit.
 * This ASSUMES that no new ROM ranges will be added and that they won't be
 * relinked in any way.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether we're in a live save or not.
 */
static int pgmR3SaveRomVirginPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave)
{
    PGM_LOCK_VOID(pVM);
    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
    {
        PPGMROMRANGE const pRom   = pVM->pgm.s.apRomRanges[idx];
        uint32_t const     cPages = pRom->cb >> GUEST_PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            RTGCPHYS   GCPhys  = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
            PGMROMPROT enmProt = pRom->aPages[iPage].enmProt;

            /* Get the virgin page descriptor. */
            PPGMPAGE pPage;
            if (PGMROMPROT_IS_ROM(enmProt))
                pPage = pgmPhysGetPage(pVM, GCPhys);
            else
                pPage = &pRom->aPages[iPage].Virgin;

            /* Get the page bits. (Cannot use pgmPhysGCPhys2CCPtrInternalReadOnly here!) */
            int  rc = VINF_SUCCESS;
            char abPage[GUEST_PAGE_SIZE];
            if (   !PGM_PAGE_IS_ZERO(pPage)
                && !PGM_PAGE_IS_BALLOONED(pPage))
            {
                void const *pvPage;
#ifdef VBOX_WITH_PGM_NEM_MODE
                if (!PGMROMPROT_IS_ROM(enmProt) && PGM_IS_IN_NEM_MODE(pVM))
                    pvPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT];
                else
#endif
                    rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                if (RT_SUCCESS(rc))
                    memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
            }
            else
                RT_ZERO(abPage);
            PGM_UNLOCK(pVM);
            AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

            /* Save it. */
            if (iPage > 0)
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN);
            else
            {
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN | PGM_STATE_REC_FLAG_ADDR);
                SSMR3PutU8(pSSM, pRom->idSavedState);
                SSMR3PutU32(pSSM, iPage);
            }
            SSMR3PutU8(pSSM, (uint8_t)enmProt);
            rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
            if (RT_FAILURE(rc))
                return rc;

            /* Update state. */
            PGM_LOCK_VOID(pVM);
            pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)enmProt;
            if (fLiveSave)
            {
                pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                pVM->pgm.s.LiveSave.cSavedPages++;
            }
        }
    }
    PGM_UNLOCK(pVM);
    return VINF_SUCCESS;
}


/**
 * Saves dirty pages in the shadowed ROM ranges.
 *
 * Used by pgmR3LiveExecPart2 and pgmR3SaveExecMemory.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   fFinalPass  Whether this is the final pass or not.
 */
static int pgmR3SaveShadowedRomPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, bool fFinalPass)
{
    /*
     * The Shadowed ROMs.
     *
     * ASSUMES that the ROM ranges are fixed.
     * ASSUMES that all the ROM ranges are mapped.
     */
    PGM_LOCK_VOID(pVM);
    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
    {
        PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        {
            uint32_t const cPages    = pRom->cb >> GUEST_PAGE_SHIFT;
            uint32_t       iPrevPage = cPages;
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                if (    !fLiveSave
                    ||  (   pRomPage->LiveSave.fDirty
                         && (   (   !pRomPage->LiveSave.fDirtiedRecently
                                 && !pRomPage->LiveSave.fWrittenTo)
                             || fFinalPass
                            )
                        )
                    )
                {
                    uint8_t     abPage[GUEST_PAGE_SIZE];
                    PGMROMPROT  enmProt = pRomPage->enmProt;
                    RTGCPHYS    GCPhys  = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
                    PPGMPAGE    pPage   = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage(pVM, GCPhys);
                    bool        fZero   = PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_BALLOONED(pPage);
                    Assert(!PGM_PAGE_IS_BALLOONED(pPage)); /* Shouldn't be ballooned. */
                    int         rc      = VINF_SUCCESS;
                    if (!fZero)
                    {
                        void const *pvPage;
#ifdef VBOX_WITH_PGM_NEM_MODE
                        if (PGMROMPROT_IS_ROM(enmProt) && PGM_IS_IN_NEM_MODE(pVM))
                            pvPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT];
                        else
#endif
                            rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                        if (RT_SUCCESS(rc))
                            memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
                    }
                    if (fLiveSave && RT_SUCCESS(rc))
                    {
                        pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                        pRomPage->LiveSave.fDirty = false;
                        pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                        pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                        pVM->pgm.s.LiveSave.cSavedPages++;
                    }
                    PGM_UNLOCK(pVM);
                    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW));
                    else
                    {
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW) | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (!fZero)
                        rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;

                    PGM_LOCK_VOID(pVM);
                    iPrevPage = iPage;
                }
                /*
                 * In the final pass, make sure the protection is in sync.
                 */
                else if (   fFinalPass
                         && pRomPage->LiveSave.u8Prot != pRomPage->enmProt)
                {
                    PGMROMPROT enmProt = pRomPage->enmProt;
                    pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                    PGM_UNLOCK(pVM);

                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT);
                    else
                    {
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    int rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (RT_FAILURE(rc))
                        return rc;

                    PGM_LOCK_VOID(pVM);
                    iPrevPage = iPage;
                }
            }
        }
    }
    PGM_UNLOCK(pVM);
    return VINF_SUCCESS;
}


/**
 * Cleans up ROM pages after a live save.
 *
 * @param   pVM     The cross context VM structure.
 */
static void pgmR3DoneRomPages(PVM pVM)
{
    NOREF(pVM);
}


/**
 * Prepares the MMIO2 pages for a live save.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int pgmR3PrepMmio2Pages(PVM pVM)
{
    /*
     * Initialize the live save tracking in the MMIO2 ranges.
     * ASSUME nothing changes here.
     */
    PGM_LOCK_VOID(pVM);
    uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
    for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
    {
        PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
        PPGMRAMRANGE const      pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
        uint32_t const          cPages    = pRamRange->cb >> GUEST_PAGE_SHIFT;
        PGM_UNLOCK(pVM);

        PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM,
                                                                                sizeof(PGMLIVESAVEMMIO2PAGE) * cPages);
        if (!paLSPages)
            return VERR_NO_MEMORY;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            /* Initialize it as a dirty zero page. */
            paLSPages[iPage].fDirty          = true;
            paLSPages[iPage].cUnchangedScans = 0;
            paLSPages[iPage].fZero           = true;
            paLSPages[iPage].u32CrcH1        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
            paLSPages[iPage].u32CrcH2        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
        }

        PGM_LOCK_VOID(pVM);
        pRegMmio2->paLSPages = paLSPages;
        pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages;
    }
    PGM_UNLOCK(pVM);
    return VINF_SUCCESS;
}


/**
 * Assigns IDs to the MMIO2 ranges and saves them.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    Saved state handle.
 */
static int pgmR3SaveMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
{
    PGM_LOCK_VOID(pVM);
    uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
    for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
    {
        PPGMREGMMIO2RANGE const pRegMmio2    = &pVM->pgm.s.aMmio2Ranges[idx];
        PPGMRAMRANGE const      pRamRange    = pVM->pgm.s.apMmio2RamRanges[idx];
        uint8_t const           idSavedState = (uint8_t)(idx + 1);
        pRegMmio2->idSavedState = idSavedState;
        SSMR3PutU8(pSSM, idSavedState);
        SSMR3PutStrZ(pSSM, pRegMmio2->pDevInsR3->pReg->szName);
        SSMR3PutU32(pSSM, pRegMmio2->pDevInsR3->iInstance);
        SSMR3PutU8(pSSM, pRegMmio2->iRegion);
        SSMR3PutStrZ(pSSM, pRamRange->pszDesc);
        int rc = SSMR3PutGCPhys(pSSM, pRamRange->cb);
        if (RT_FAILURE(rc))
            break;
    }
    PGM_UNLOCK(pVM);
    return SSMR3PutU8(pSSM, UINT8_MAX);
}


/**
 * Loads the MMIO2 range ID assignments.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3LoadMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

    uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
    for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
        pVM->pgm.s.aMmio2Ranges[idx].idSavedState = UINT8_MAX;

    for (;;)
    {
        /*
         * Read the data.
         */
        uint8_t id;
        int rc = SSMR3GetU8(pSSM, &id);
        if (RT_FAILURE(rc))
            return rc;
        if (id == UINT8_MAX)
        {
            /*
             * End of MMIO2 ranges. Check that all are accounted for.
             */
            for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
                AssertLogRelMsg(pVM->pgm.s.aMmio2Ranges[idx].idSavedState != UINT8_MAX,
                                ("%s\n", pVM->pgm.s.apMmio2RamRanges[idx]->pszDesc));
            return VINF_SUCCESS;        /* the end */
        }
        AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
        rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
        AssertLogRelRCReturn(rc, rc);

        uint32_t uInstance;
        SSMR3GetU32(pSSM, &uInstance);
        uint8_t iRegion;
        SSMR3GetU8(pSSM, &iRegion);

        char szDesc[64];
        rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
        AssertLogRelRCReturn(rc, rc);

        RTGCPHYS cb;
        rc = SSMR3GetGCPhys(pSSM, &cb);
        AssertLogRelRCReturn(rc, rc);
        AssertLogRelMsgReturn(!(cb & GUEST_PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /*
         * Locate a matching MMIO2 range.
         */
        uint32_t idx;
        for (idx = 0; idx < cMmio2Ranges; idx++)
        {
            PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
            if (   pRegMmio2->idSavedState == UINT8_MAX
                && pRegMmio2->iRegion == iRegion
                && pRegMmio2->pDevInsR3->iInstance == uInstance
                && !strcmp(pRegMmio2->pDevInsR3->pReg->szName, szDevName))
            {
                pRegMmio2->idSavedState = id;
                break;
            }
        }
        if (idx >= cMmio2Ranges)
            return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to locate a MMIO2 range called '%s' owned by %s/%u, region %d"),
                                    szDesc, szDevName, uInstance, iRegion);

        /*
         * Validate the configuration, the size of the MMIO2 region should be
         * the same.
         */
        PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
        if (cb != pRamRange->cb)
        {
            LogRel(("PGM: MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp\n", pRamRange->pszDesc, cb, pRamRange->cb));
            if (cb > pRamRange->cb) /* bad idea? */
                return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp"),
                                        pRamRange->pszDesc, cb, pRamRange->cb);
        }
    } /* forever */
}


/**
 * Scans one MMIO2 page.
 *
 * @returns True if changed, false if unchanged.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pbPage      The page bits.
 * @param   pLSPage     The live save tracking structure for the page.
 */
DECLINLINE(bool) pgmR3ScanMmio2Page(PVM pVM, uint8_t const *pbPage, PPGMLIVESAVEMMIO2PAGE pLSPage)
{
    /*
     * Special handling of zero pages.
     */
    bool const fZero = pLSPage->fZero;
    if (fZero)
    {
        if (ASMMemIsZero(pbPage, GUEST_PAGE_SIZE))
        {
            /* Not modified. */
            if (pLSPage->fDirty)
                pLSPage->cUnchangedScans++;
            return false;
        }

        pLSPage->fZero    = false;
        pLSPage->u32CrcH1 = RTCrc32(pbPage, GUEST_PAGE_SIZE / 2);
    }
    else
    {
        /*
         * CRC the first half, if it doesn't match the page is dirty and
         * we won't check the 2nd half (we'll do that next time).
         */
        uint32_t u32CrcH1 = RTCrc32(pbPage, GUEST_PAGE_SIZE / 2);
        if (u32CrcH1 == pLSPage->u32CrcH1)
        {
            uint32_t u32CrcH2 = RTCrc32(pbPage + GUEST_PAGE_SIZE / 2, GUEST_PAGE_SIZE / 2);
            if (u32CrcH2 == pLSPage->u32CrcH2)
            {
                /* Probably not modified. */
                if (pLSPage->fDirty)
                    pLSPage->cUnchangedScans++;
                return false;
            }

            pLSPage->u32CrcH2 = u32CrcH2;
        }
        else
        {
            pLSPage->u32CrcH1 = u32CrcH1;
            if (    u32CrcH1 == PGM_STATE_CRC32_ZERO_HALF_PAGE
                &&  ASMMemIsZero(pbPage, GUEST_PAGE_SIZE))
            {
                pLSPage->u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
                pLSPage->fZero    = true;
            }
        }
    }

    /* dirty page path */
    pLSPage->cUnchangedScans = 0;
    if (!pLSPage->fDirty)
    {
        pLSPage->fDirty = true;
        pVM->pgm.s.LiveSave.Mmio2.cReadyPages--;
        pVM->pgm.s.LiveSave.Mmio2.cDirtyPages++;
        if (fZero)
            pVM->pgm.s.LiveSave.Mmio2.cZeroPages--;
    }
    return true;
}


/**
 * Scan for MMIO2 page modifications.
 *
 * @param   pVM     The cross context VM structure.
 * @param   uPass   The pass number.
 */
static void pgmR3ScanMmio2Pages(PVM pVM, uint32_t uPass)
{
    /*
     * Since this is a bit expensive we lower the scan rate after a little while.
     */
    if (    (   (uPass & 3) != 0
             && uPass > 10)
        ||  uPass == SSM_PASS_FINAL)
        return;

    PGM_LOCK_VOID(pVM);                 /* paranoia */
    uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
    for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
    {
        PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
        PPGMLIVESAVEMMIO2PAGE   paLSPages = pRegMmio2->paLSPages;
        uint32_t                cPages    = pVM->pgm.s.apMmio2RamRanges[idx]->cb >> GUEST_PAGE_SHIFT;
        PGM_UNLOCK(pVM);

        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            uint8_t const *pbPage = &pRegMmio2->pbR3[iPage * GUEST_PAGE_SIZE];
            pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]);
        }

        PGM_LOCK_VOID(pVM);
    }
    PGM_UNLOCK(pVM);
}


/**
 * Save quiescent MMIO2 pages.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   uPass       The pass number.
 */
static int pgmR3SaveMmio2Pages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
{
    /** @todo implement live saving of MMIO2 pages. (Need some way of telling the
     *        device that we wish to know about changes.) */

    int rc = VINF_SUCCESS;
    if (uPass == SSM_PASS_FINAL)
    {
        /*
         * The mop up round.
         */
        PGM_LOCK_VOID(pVM);
        uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
        for (uint32_t idx = 0; idx < cMmio2Ranges && RT_SUCCESS(rc); idx++)
        {
            PPGMREGMMIO2RANGE const     pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
            PPGMRAMRANGE const          pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
            PPGMLIVESAVEMMIO2PAGE const paLSPages = pRegMmio2->paLSPages;
            uint32_t const              cPages    = pRamRange->cb >> GUEST_PAGE_SHIFT;
            uint8_t const              *pbPage    = pRamRange->pbR3;
            uint32_t                    iPageLast = cPages;
            for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += GUEST_PAGE_SIZE)
            {
                uint8_t u8Type;
                if (!fLiveSave)
                    u8Type = ASMMemIsZero(pbPage, GUEST_PAGE_SIZE) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                else
                {
                    /* Try to figure out if it's a clean page; compare the SHA-1 to be really sure. */
                    if (   !paLSPages[iPage].fDirty
                        && !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                    {
                        if (paLSPages[iPage].fZero)
                            continue;

                        uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
                        RTSha1(pbPage, GUEST_PAGE_SIZE, abSha1Hash);
                        if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash)))
                            continue;
                    }
                    u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                    pVM->pgm.s.LiveSave.cSavedPages++;
                }

                if (iPage != 0 && iPage == iPageLast + 1)
                    rc = SSMR3PutU8(pSSM, u8Type);
                else
                {
                    SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                    SSMR3PutU8(pSSM, pRegMmio2->idSavedState);
                    rc = SSMR3PutU32(pSSM, iPage);
                }
                if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                    rc = SSMR3PutMem(pSSM, pbPage, GUEST_PAGE_SIZE);
                if (RT_FAILURE(rc))
                    break;
                iPageLast = iPage;
            }
        }
        PGM_UNLOCK(pVM);
    }
    /*
     * Reduce the rate after a little while since the current MMIO2 approach is
     * a bit expensive.
     * We position it two passes after the scan pass to avoid saving busy pages.
     */
    else if (   uPass <= 10
             || (uPass & 3) == 2)
    {
        PGM_LOCK_VOID(pVM);
        uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
        for (uint32_t idx = 0; idx < cMmio2Ranges && RT_SUCCESS(rc); idx++)
        {
            PPGMREGMMIO2RANGE const     pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
            PPGMRAMRANGE const          pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
            PPGMLIVESAVEMMIO2PAGE const paLSPages = pRegMmio2->paLSPages;
            uint32_t const              cPages    = pRamRange->cb >> GUEST_PAGE_SHIFT;
            uint8_t const              *pbPage    = pRamRange->pbR3;
            uint32_t                    iPageLast = cPages;
            PGM_UNLOCK(pVM);

            for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += GUEST_PAGE_SIZE)
            {
                /* Skip clean pages and pages which haven't quiesced. */
                if (!paLSPages[iPage].fDirty)
                    continue;
                if (paLSPages[iPage].cUnchangedScans < 3)
                    continue;
                if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                    continue;

                /* Save it. */
                bool const fZero = paLSPages[iPage].fZero;
                uint8_t abPage[GUEST_PAGE_SIZE];
                if (!fZero)
                {
                    memcpy(abPage, pbPage, GUEST_PAGE_SIZE);
                    RTSha1(abPage, GUEST_PAGE_SIZE, paLSPages[iPage].abSha1Saved);
                }

                uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                if (iPage != 0 && iPage == iPageLast + 1)
                    rc = SSMR3PutU8(pSSM, u8Type);
                else
                {
                    SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                    SSMR3PutU8(pSSM, pRegMmio2->idSavedState);
                    rc = SSMR3PutU32(pSSM, iPage);
                }
                if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                    rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
                if (RT_FAILURE(rc))
                    break;

                /* Housekeeping. */
                paLSPages[iPage].fDirty = false;
                pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--;
                pVM->pgm.s.LiveSave.Mmio2.cReadyPages++;
                if (u8Type == PGM_STATE_REC_MMIO2_ZERO)
                    pVM->pgm.s.LiveSave.Mmio2.cZeroPages++;
                pVM->pgm.s.LiveSave.cSavedPages++;
                iPageLast = iPage;
            }

            PGM_LOCK_VOID(pVM);
        }
        PGM_UNLOCK(pVM);
    }

    return rc;
}


/**
 * Cleans up MMIO2 pages after a live save.
 *
 * @param   pVM     The cross context VM structure.
 */
static void pgmR3DoneMmio2Pages(PVM pVM)
{
    /*
     * Free the tracking structures for the MMIO2 pages.
     * We do the freeing outside the lock in case the VM is running.
     */
    PGM_LOCK_VOID(pVM);
    uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
    for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
    {
        PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
        void *pvMmio2ToFree = pRegMmio2->paLSPages;
        if (pvMmio2ToFree)
        {
            pRegMmio2->paLSPages = NULL;
            PGM_UNLOCK(pVM);
            MMR3HeapFree(pvMmio2ToFree);
            PGM_LOCK_VOID(pVM);
        }
    }
    PGM_UNLOCK(pVM);
}


/**
 * Prepares the RAM pages for a live save.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int pgmR3PrepRamPages(PVM pVM)
{
    /*
     * Try allocating tracking structures for the ram ranges.
     *
     * To avoid lock contention, we leave the lock every time we're allocating
     * a new array. This means we'll have to ditch the allocation and start
     * all over again if the RAM range list changes in-between.
     *
     * Note! pgmR3SaveDone will always be called and it is therefore responsible
     *       for cleaning up.
     */
    PGM_LOCK_VOID(pVM);
    uint32_t idRamRange;
    do
    {
        uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U);
        for (idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++)
        {
            PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange];
            Assert(pCur || idRamRange == 0);
            if (!pCur)
                continue;
            Assert(pCur->idRange == idRamRange);

            if (   !pCur->paLSPages
                && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                uint32_t const idRamRangesGen = pVM->pgm.s.RamRangeUnion.idGeneration;
                uint32_t const cPages         = pCur->cb >> GUEST_PAGE_SHIFT;
                PGM_UNLOCK(pVM);
                PPGMLIVESAVERAMPAGE paLSPages = (PPGMLIVESAVERAMPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, cPages * sizeof(PGMLIVESAVERAMPAGE));
                if (!paLSPages)
                    return VERR_NO_MEMORY;
                PGM_LOCK_VOID(pVM);
                if (pVM->pgm.s.RamRangeUnion.idGeneration != idRamRangesGen)
                {
                    PGM_UNLOCK(pVM);
                    MMR3HeapFree(paLSPages);
                    PGM_LOCK_VOID(pVM);
                    break;              /* try again */
                }
                pCur->paLSPages = paLSPages;

                /*
                 * Initialize the array.
                 */
                uint32_t iPage = cPages;
                while (iPage-- > 0)
                {
                    /** @todo yield critsect! (after moving this away from EMT0) */
                    PCPGMPAGE pPage = &pCur->aPages[iPage];
                    paLSPages[iPage].cDirtied               = 0;
                    paLSPages[iPage].fDirty                 = 1; /* everything is dirty at this time */
                    paLSPages[iPage].fWriteMonitored        = 0;
                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                    paLSPages[iPage].u2Reserved             = 0;
                    switch (PGM_PAGE_GET_TYPE(pPage))
                    {
                        case PGMPAGETYPE_RAM:
                            if (    PGM_PAGE_IS_ZERO(pPage)
                                ||  PGM_PAGE_IS_BALLOONED(pPage))
                            {
                                paLSPages[iPage].fZero   = 1;
                                paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc  = PGM_STATE_CRC32_ZERO_PAGE;
#endif
                            }
                            else if (PGM_PAGE_IS_SHARED(pPage))
                            {
                                paLSPages[iPage].fZero   = 0;
                                paLSPages[iPage].fShared = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            }
                            else
                            {
                                paLSPages[iPage].fZero   = 0;
                                paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            }
                            paLSPages[iPage].fIgnore = 0;
                            pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                            break;

                        case PGMPAGETYPE_ROM_SHADOW:
                        case PGMPAGETYPE_ROM:
                        {
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;
                        }

                        default:
                            AssertMsgFailed(("%R[pgmpage]", pPage));
                            RT_FALL_THRU();
                        case PGMPAGETYPE_MMIO2:
                        case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;

                        case PGMPAGETYPE_MMIO:
                        case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;
                    }
                }
            }
        }
    } while (idRamRange <= RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U));
    PGM_UNLOCK(pVM);

    return VINF_SUCCESS;
}


/**
 * Saves the RAM configuration.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3SaveRamConfig(PVM pVM, PSSMHANDLE pSSM)
{
    uint32_t cbRamHole = 0;
    int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
    AssertRCReturn(rc, rc);

    uint64_t cbRam = 0;
    rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
    AssertRCReturn(rc, rc);

    SSMR3PutU32(pSSM, cbRamHole);
    return SSMR3PutU64(pSSM, cbRam);
}


/**
 * Loads and verifies the RAM configuration.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3LoadRamConfig(PVM pVM, PSSMHANDLE pSSM)
{
    uint32_t cbRamHoleCfg = 0;
    int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHoleCfg, MM_RAM_HOLE_SIZE_DEFAULT);
    AssertRCReturn(rc, rc);

    uint64_t cbRamCfg = 0;
    rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRamCfg, 0);
    AssertRCReturn(rc, rc);

    uint32_t cbRamHoleSaved;
    SSMR3GetU32(pSSM, &cbRamHoleSaved);

    uint64_t cbRamSaved;
    rc = SSMR3GetU64(pSSM, &cbRamSaved);
    AssertRCReturn(rc, rc);

    if (   cbRamHoleCfg != cbRamHoleSaved
        || cbRamCfg     != cbRamSaved)
        return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Ram config mismatch: saved=%RX64/%RX32 config=%RX64/%RX32 (RAM/Hole)"),
                                cbRamSaved, cbRamHoleSaved, cbRamCfg, cbRamHoleCfg);
    return VINF_SUCCESS;
}

#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32

/**
 * Calculates the CRC-32 for a RAM page and updates the live save page tracking
 * info with it.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCur        The current RAM range.
 * @param   paLSPages   The current array of live save page tracking
 *                      structures.
 * @param   iPage       The page index.
 */
static void pgmR3StateCalcCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
{
    RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
    PGMPAGEMAPLOCK PgMpLck;
    void const    *pvPage;
    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
    if (RT_SUCCESS(rc))
    {
        paLSPages[iPage].u32Crc = RTCrc32(pvPage, GUEST_PAGE_SIZE);
        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    }
    else
        paLSPages[iPage].u32Crc = UINT32_MAX; /* Invalid */
}


/**
 * Verifies the CRC-32 for a page given its raw bits.
 *
 * @param   pvPage      The page bits.
 * @param   pCur        The current RAM range.
 * @param   paLSPages   The current array of live save page tracking
 *                      structures.
 * @param   iPage       The page index.
 * @param   pszWhere    Where the check is performed, for the assertion message.
 */
static void pgmR3StateVerifyCrc32ForPage(void const *pvPage, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
{
    if (paLSPages[iPage].u32Crc != UINT32_MAX)
    {
        uint32_t u32Crc = RTCrc32(pvPage, GUEST_PAGE_SIZE);
        Assert(   (   !PGM_PAGE_IS_ZERO(&pCur->aPages[iPage])
                   && !PGM_PAGE_IS_BALLOONED(&pCur->aPages[iPage]))
               || u32Crc == PGM_STATE_CRC32_ZERO_PAGE);
        AssertMsg(paLSPages[iPage].u32Crc == u32Crc,
                  ("%08x != %08x for %RGp %R[pgmpage] %s\n", paLSPages[iPage].u32Crc, u32Crc,
                   pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage], pszWhere));
    }
}


/**
 * Verifies the CRC-32 for a RAM page.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCur        The current RAM range.
 * @param   paLSPages   The current array of live save page tracking
 *                      structures.
 * @param   iPage       The page index.
 * @param   pszWhere    Where the check is performed, for the assertion message.
 */
static void pgmR3StateVerifyCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
{
    if (paLSPages[iPage].u32Crc != UINT32_MAX)
    {
        RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
        PGMPAGEMAPLOCK PgMpLck;
        void const    *pvPage;
        int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
        if (RT_SUCCESS(rc))
        {
            pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage, pszWhere);
            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
        }
    }
}

#endif /* PGMLIVESAVERAMPAGE_WITH_CRC32 */

/**
 * Scan for RAM page modifications and reprotect them.
 *
 * @param   pVM         The cross context VM structure.
 * @param   fFinalPass  Whether this is the final pass or not.
 */
static void pgmR3ScanRamPages(PVM pVM, bool fFinalPass)
{
    /*
     * The RAM.
     */
    RTGCPHYS GCPhysCur = 0;
    uint32_t idxLookup;
    uint32_t cLookupEntries;
    PGM_LOCK_VOID(pVM);
    do
    {
        PGM::PGMRAMRANGEGENANDLOOKUPCOUNT const RamRangeUnion = pVM->pgm.s.RamRangeUnion;
        Assert(pVM->pgm.s.RamRangeUnion.cLookupEntries < RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
        cLookupEntries = pVM->pgm.s.RamRangeUnion.cLookupEntries;
        for (idxLookup = 0; idxLookup < cLookupEntries; idxLookup++)
        {
            uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
            AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
            PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange];
            AssertContinue(pCur);
            Assert(pCur->GCPhys == PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]));

            if (   pCur->GCPhysLast > GCPhysCur
                && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
                uint32_t            cPages    = pCur->cb >> GUEST_PAGE_SHIFT;
                uint32_t            iPage     = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> GUEST_PAGE_SHIFT;
                GCPhysCur = 0;
                for (; iPage < cPages; iPage++)
                {
                    /* Do yield first. */
                    if (   !fFinalPass
#ifndef PGMLIVESAVERAMPAGE_WITH_CRC32
                        && (iPage & 0x7ff) == 0x100
#endif
                        && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX)
                        && pVM->pgm.s.RamRangeUnion.u64Combined != RamRangeUnion.u64Combined)
                    {
                        GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
                        break;          /* restart */
                    }

                    /* Skip already ignored pages. */
                    if (paLSPages[iPage].fIgnore)
                        continue;

                    if (RT_LIKELY(PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM))
                    {
                        /*
                         * A RAM page.
                         */
                        switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
                        {
                            case PGM_PAGE_STATE_ALLOCATED:
                                /** @todo Optimize this: Don't always re-enable write
                                 *        monitoring if the page is known to be very busy. */
                                if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
                                {
                                    AssertMsg(paLSPages[iPage].fWriteMonitored,
                                              ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage]));
                                    PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
                                    Assert(pVM->pgm.s.cWrittenToPages > 0);
                                    pVM->pgm.s.cWrittenToPages--;
                                }
                                else
                                {
                                    AssertMsg(!paLSPages[iPage].fWriteMonitored,
                                              ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage]));
                                    pVM->pgm.s.LiveSave.Ram.cMonitoredPages++;
                                }

                                if (!paLSPages[iPage].fDirty)
                                {
                                    pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                    if (paLSPages[iPage].fZero)
                                        pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                                    pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
                                        paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
                                }

                                pgmPhysPageWriteMonitor(pVM, &pCur->aPages[iPage],
                                                        pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT));
                                paLSPages[iPage].fWriteMonitored        = 1;
                                paLSPages[iPage].fWriteMonitoredJustNow = 1;
                                paLSPages[iPage].fDirty                 = 1;
                                paLSPages[iPage].fZero                  = 0;
                                paLSPages[iPage].fShared                = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc                 = UINT32_MAX; /* invalid */
#endif
                                break;

                            case PGM_PAGE_STATE_WRITE_MONITORED:
                                Assert(paLSPages[iPage].fWriteMonitored);
                                if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) == 0)
                                {
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    if (paLSPages[iPage].fWriteMonitoredJustNow)
                                        pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
                                    else
                                        pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "scan");
#endif
                                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                                }
                                else
                                {
                                    paLSPages[iPage].fWriteMonitoredJustNow = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
#endif
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                        if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
                                            paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
                                    }
                                }
                                break;

                            case PGM_PAGE_STATE_ZERO:
                            case PGM_PAGE_STATE_BALLOONED:
                                if (!paLSPages[iPage].fZero)
                                {
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        paLSPages[iPage].fDirty = 1;
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    }
                                    paLSPages[iPage].fZero   = 1;
                                    paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    paLSPages[iPage].u32Crc  = PGM_STATE_CRC32_ZERO_PAGE;
#endif
                                }
                                break;

                            case PGM_PAGE_STATE_SHARED:
                                if (!paLSPages[iPage].fShared)
                                {
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        paLSPages[iPage].fDirty = 1;
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        if (paLSPages[iPage].fZero)
                                            pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    }
                                    paLSPages[iPage].fZero   = 0;
                                    paLSPages[iPage].fShared = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
#endif
                                }
                                break;
                        }
                    }
                    else
                    {
                        /*
                         * All other types => Ignore the page.
                         */
                        Assert(!paLSPages[iPage].fIgnore); /* skipped before switch */
                        paLSPages[iPage].fIgnore = 1;
                        if (paLSPages[iPage].fWriteMonitored)
                        {
                            /** @todo this doesn't hold water when we start monitoring MMIO2 and ROM shadow
                             *        pages! */
                            if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
                            {
                                AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
                                PGM_PAGE_SET_STATE(pVM, &pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
                                Assert(pVM->pgm.s.cMonitoredPages > 0);
                                pVM->pgm.s.cMonitoredPages--;
                            }
                            if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
                            {
                                PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
                                Assert(pVM->pgm.s.cWrittenToPages > 0);
                                pVM->pgm.s.cWrittenToPages--;
                            }
                            pVM->pgm.s.LiveSave.Ram.cMonitoredPages--;
                        }

                        /** @todo the counting doesn't quite work out here. fix later? */
                        if (paLSPages[iPage].fDirty)
                            pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
                        else
                        {
                            pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                            if (paLSPages[iPage].fZero)
                                pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                        }
                        pVM->pgm.s.LiveSave.cIgnoredPages++;
                    }
                } /* for each page in range */

                if (GCPhysCur != 0)
                    break;              /* Yield + ramrange change */
                GCPhysCur = pCur->GCPhysLast;
            }
        } /* for each range */

        /* We must use the starting lookup count here to determine whether we've
           been through all ranges or not, since using the current count could
           lead us to skip the final range if one was unmapped while we yielded
           the lock. */
    } while (idxLookup < cLookupEntries);
    PGM_UNLOCK(pVM);
}
1632
1633
1634/**
1635 * Save quiescent RAM pages.
1636 *
1637 * @returns VBox status code.
1638 * @param pVM The cross context VM structure.
1639 * @param pSSM The SSM handle.
1640 * @param fLiveSave Whether it's a live save or not.
1641 * @param uPass The pass number.
1642 */
1643static int pgmR3SaveRamPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
1644{
1645 NOREF(fLiveSave);
1646
1647 /*
1648 * The RAM.
1649 */
1650 RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
1651 RTGCPHYS GCPhysCur = 0;
1652 uint32_t idxLookup;
1653 uint32_t cRamRangeLookupEntries;
1654
1655 PGM_LOCK_VOID(pVM);
1656 do
1657 {
1658 uint32_t const idRamRangesGen = pVM->pgm.s.RamRangeUnion.idGeneration;
1659 cRamRangeLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
1660 for (idxLookup = 0; idxLookup < cRamRangeLookupEntries; idxLookup++)
1661 {
1662 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
1663 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
1664 PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange];
1665 AssertContinue(pCur);
1666 Assert(pCur->GCPhys == PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]));
1667
1668 if ( pCur->GCPhysLast > GCPhysCur
1669 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1670 {
1671 PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
1672 uint32_t cPages = pCur->cb >> GUEST_PAGE_SHIFT;
1673 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> GUEST_PAGE_SHIFT;
1674 GCPhysCur = 0;
1675 for (; iPage < cPages; iPage++)
1676 {
1677 /* Do yield first. */
1678 if ( uPass != SSM_PASS_FINAL
1679 && (iPage & 0x7ff) == 0x100
1680 && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX)
1681 && pVM->pgm.s.RamRangeUnion.idGeneration != idRamRangesGen)
1682 {
1683 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
1684 break; /* restart */
1685 }
1686
1687 PPGMPAGE pCurPage = &pCur->aPages[iPage];
1688
1689 /*
1690 * Only save pages that haven't changed since last scan and are dirty.
1691 */
1692 if ( uPass != SSM_PASS_FINAL
1693 && paLSPages)
1694 {
1695 if (!paLSPages[iPage].fDirty)
1696 continue;
1697 if (paLSPages[iPage].fWriteMonitoredJustNow)
1698 continue;
1699 if (paLSPages[iPage].fIgnore)
1700 continue;
1701 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM) /* in case of recent remappings */
1702 continue;
1703 if ( PGM_PAGE_GET_STATE(pCurPage)
1704 != ( paLSPages[iPage].fZero
1705 ? PGM_PAGE_STATE_ZERO
1706 : paLSPages[iPage].fShared
1707 ? PGM_PAGE_STATE_SHARED
1708 : PGM_PAGE_STATE_WRITE_MONITORED))
1709 continue;
1710 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) > 0)
1711 continue;
1712 }
1713 else
1714 {
1715 if ( paLSPages
1716 && !paLSPages[iPage].fDirty
1717 && !paLSPages[iPage].fIgnore)
1718 {
1719#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1720 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
1721 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#1");
1722#endif
1723 continue;
1724 }
1725 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
1726 continue;
1727 }
1728
1729 /*
1730 * Do the saving outside the PGM critsect since SSM may block on I/O.
1731 */
1732 int rc;
1733 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
1734 bool fZero = PGM_PAGE_IS_ZERO(pCurPage);
1735 bool fBallooned = PGM_PAGE_IS_BALLOONED(pCurPage);
1736 bool fSkipped = false;
1737
1738 if (!fZero && !fBallooned)
1739 {
1740 /*
1741 * Copy the page and then save it outside the lock (since any
1742 * SSM call may block).
1743 */
1744 uint8_t abPage[GUEST_PAGE_SIZE];
1745 PGMPAGEMAPLOCK PgMpLck;
1746 void const *pvPage;
1747 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage, &PgMpLck);
1748 if (RT_SUCCESS(rc))
1749 {
1750 memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
1751#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1752 if (paLSPages)
1753 pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage, "save#3");
1754#endif
1755 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1756 }
1757 PGM_UNLOCK(pVM);
1758 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
1759
1760 /* Try to save some memory when restoring (check the stack copy, as the mapping was released above). */
1761 if (!ASMMemIsZero(abPage, GUEST_PAGE_SIZE))
1762 {
1763 if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
1764 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
1765 else
1766 {
1767 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
1768 SSMR3PutGCPhys(pSSM, GCPhys);
1769 }
1770 rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
1771 }
1772 else
1773 {
1774 if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
1775 rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO);
1776 else
1777 {
1778 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO | PGM_STATE_REC_FLAG_ADDR);
1779 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1780 }
1781 }
1782 }
1783 else
1784 {
1785 /*
1786 * Dirty zero or ballooned page.
1787 */
1788#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1789 if (paLSPages)
1790 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#2");
1791#endif
1792 PGM_UNLOCK(pVM);
1793
1794 uint8_t u8RecType = fBallooned ? PGM_STATE_REC_RAM_BALLOONED : PGM_STATE_REC_RAM_ZERO;
1795 if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
1796 rc = SSMR3PutU8(pSSM, u8RecType);
1797 else
1798 {
1799 SSMR3PutU8(pSSM, u8RecType | PGM_STATE_REC_FLAG_ADDR);
1800 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1801 }
1802 }
1803 if (RT_FAILURE(rc))
1804 return rc;
1805
1806 PGM_LOCK_VOID(pVM);
1807 if (!fSkipped)
1808 GCPhysLast = GCPhys;
1809 if (paLSPages)
1810 {
1811 paLSPages[iPage].fDirty = 0;
1812 pVM->pgm.s.LiveSave.Ram.cReadyPages++;
1813 if (fZero)
1814 pVM->pgm.s.LiveSave.Ram.cZeroPages++;
1815 pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
1816 pVM->pgm.s.LiveSave.cSavedPages++;
1817 }
1818 if (idRamRangesGen != pVM->pgm.s.RamRangeUnion.idGeneration)
1819 {
1820 GCPhysCur = GCPhys | GUEST_PAGE_OFFSET_MASK;
1821 break; /* restart */
1822 }
1823
1824 } /* for each page in range */
1825
1826 if (GCPhysCur != 0)
1827 break; /* Yield + ramrange change */
1828 GCPhysCur = pCur->GCPhysLast;
1829 }
1830 } /* for each range */
1831
1832 /* We must use the starting lookup count here to determine whether we've
1833 been thru all or not, since using the current count could lead us to
1834 skip the final range if one was unmapped while we yielded the lock. */
1835 } while (idxLookup < cRamRangeLookupEntries);
1836
1837 PGM_UNLOCK(pVM);
1838
1839 return VINF_SUCCESS;
1840}
1841
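/*
 * Illustrative sketch (not part of the build; the helper name is invented)
 * of the record framing pgmR3SaveRamPages produces above: one type byte,
 * an explicit guest physical address only when the page does not directly
 * follow the previously saved one, and raw page data only for RAW records.
 */
#if 0 /* illustration only */
static int pgmR3DemoPutRamRecord(PSSMHANDLE pSSM, uint8_t uType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
                                 void const *pvPage)
{
    if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
        SSMR3PutU8(pSSM, uType);                /* Implicit address: previous page + 4K. */
    else
    {
        SSMR3PutU8(pSSM, uType | PGM_STATE_REC_FLAG_ADDR);
        SSMR3PutGCPhys(pSSM, GCPhys);           /* Explicit address follows the type byte. */
    }
    if (uType == PGM_STATE_REC_RAM_RAW)         /* ZERO and BALLOONED records carry no page data. */
        return SSMR3PutMem(pSSM, pvPage, GUEST_PAGE_SIZE);
    return VINF_SUCCESS;
}
#endif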
1842
1843/**
1844 * Cleans up RAM pages after a live save.
1845 *
1846 * @param pVM The cross context VM structure.
1847 */
1848static void pgmR3DoneRamPages(PVM pVM)
1849{
1850 /*
1851 * Free the tracking arrays and disable write monitoring.
1852 *
1853 * Play nice with the PGM lock in case we're called while the VM is still
1854 * running. This means we have to delay the freeing since we wish to use
1855 * paLSPages as an indicator of which RAM ranges we need to scan for
1856 * write monitored pages.
1857 */
1858 void *pvToFree = NULL;
1859 uint32_t cMonitoredPages = 0;
1860 uint32_t idRamRangeMax;
1861 uint32_t idRamRange;
1862 PGM_LOCK_VOID(pVM);
1863 do
1864 {
1865 idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U);
1866 for (idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++)
1867 {
1868 PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange];
1869 Assert(pCur || idRamRange == 0);
1870 if (!pCur) continue;
1871 Assert(pCur->idRange == idRamRange);
1872
1873 if (pCur->paLSPages)
1874 {
1875 if (pvToFree)
1876 {
1877 uint32_t const idRamRangesGen = pVM->pgm.s.RamRangeUnion.idGeneration;
1878 PGM_UNLOCK(pVM);
1879 MMR3HeapFree(pvToFree);
1880 pvToFree = NULL;
1881 PGM_LOCK_VOID(pVM);
1882 if (idRamRangesGen != pVM->pgm.s.RamRangeUnion.idGeneration)
1883 break; /* start over again. */
1884 }
1885
1886 pvToFree = pCur->paLSPages;
1887 pCur->paLSPages = NULL;
1888
1889 uint32_t iPage = pCur->cb >> GUEST_PAGE_SHIFT;
1890 while (iPage--)
1891 {
1892 PPGMPAGE pPage = &pCur->aPages[iPage];
1893 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
1894 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1895 {
1896 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1897 cMonitoredPages++;
1898 }
1899 }
1900 }
1901 }
1902 } while (idRamRange <= idRamRangeMax);
1903
1904 Assert(pVM->pgm.s.cMonitoredPages >= cMonitoredPages);
1905 if (pVM->pgm.s.cMonitoredPages < cMonitoredPages)
1906 pVM->pgm.s.cMonitoredPages = 0;
1907 else
1908 pVM->pgm.s.cMonitoredPages -= cMonitoredPages;
1909
1910 PGM_UNLOCK(pVM);
1911
1912 MMR3HeapFree(pvToFree);
1913 pvToFree = NULL;
1914}
1915
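/*
 * Note on the deferred-free dance above: the PGM lock is never held across
 * the MMR3HeapFree call, so the tracking array of range N is freed while
 * range N+1 is being visited. If the RAM range generation changes during
 * that unlocked window, the outer loop restarts from range 0; ranges
 * already processed have paLSPages set to NULL and are skipped cheaply on
 * the second pass.
 */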
1916
1917/**
1918 * @callback_method_impl{FNSSMINTLIVEEXEC}
1919 */
1920static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1921{
1922 int rc;
1923
1924 /*
1925 * Save the MMIO2 and ROM range IDs in pass 0.
1926 */
1927 if (uPass == 0)
1928 {
1929 rc = pgmR3SaveRamConfig(pVM, pSSM);
1930 if (RT_FAILURE(rc))
1931 return rc;
1932 rc = pgmR3SaveRomRanges(pVM, pSSM);
1933 if (RT_FAILURE(rc))
1934 return rc;
1935 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
1936 if (RT_FAILURE(rc))
1937 return rc;
1938 }
1939 /*
1940 * Reset the page-per-second estimate to avoid inflation by the initial
1941 * load of zero pages. pgmR3LiveVote ASSUMES this is done at pass 7.
1942 */
1943 else if (uPass == 7)
1944 {
1945 pVM->pgm.s.LiveSave.cSavedPages = 0;
1946 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
1947 }
1948
1949 /*
1950 * Do the scanning.
1951 */
1952 pgmR3ScanRomPages(pVM);
1953 pgmR3ScanMmio2Pages(pVM, uPass);
1954 pgmR3ScanRamPages(pVM, false /*fFinalPass*/);
1955#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
1956 pgmR3PoolClearAll(pVM, true /*fFlushRemTlb*/); /** @todo this could perhaps be optimized a bit. */
1957#endif
1958
1959 /*
1960 * Save the pages.
1961 */
1962 if (uPass == 0)
1963 rc = pgmR3SaveRomVirginPages( pVM, pSSM, true /*fLiveSave*/);
1964 else
1965 rc = VINF_SUCCESS;
1966 if (RT_SUCCESS(rc))
1967 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, true /*fLiveSave*/, false /*fFinalPass*/);
1968 if (RT_SUCCESS(rc))
1969 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, uPass);
1970 if (RT_SUCCESS(rc))
1971 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, uPass);
1972 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
1973
1974 return rc;
1975}
1976
1977
1978/**
1979 * @callback_method_impl{FNSSMINTLIVEVOTE}
1980 */
1981static DECLCALLBACK(int) pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1982{
1983 /*
1984 * Update and calculate parameters used in the decision making.
1985 */
1986 const uint32_t cHistoryEntries = RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory);
1987
1988 /* update history. */
1989 PGM_LOCK_VOID(pVM);
1990 uint32_t const cWrittenToPages = pVM->pgm.s.cWrittenToPages;
1991 PGM_UNLOCK(pVM);
1992 uint32_t const cDirtyNow = pVM->pgm.s.LiveSave.Rom.cDirtyPages
1993 + pVM->pgm.s.LiveSave.Mmio2.cDirtyPages
1994 + pVM->pgm.s.LiveSave.Ram.cDirtyPages
1995 + cWrittenToPages;
1996 uint32_t i = pVM->pgm.s.LiveSave.iDirtyPagesHistory;
1997 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = cDirtyNow;
1998 pVM->pgm.s.LiveSave.iDirtyPagesHistory = (i + 1) % cHistoryEntries;
1999
2000 /* calc shortterm average (4 passes). */
2001 AssertCompile(RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory) > 4);
2002 uint64_t cTotal = pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
2003 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 1) % cHistoryEntries];
2004 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 2) % cHistoryEntries];
2005 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 3) % cHistoryEntries];
2006 uint32_t const cDirtyPagesShort = cTotal / 4;
2007 pVM->pgm.s.LiveSave.cDirtyPagesShort = cDirtyPagesShort;
2008
2009 /* calc longterm average. */
2010 cTotal = 0;
2011 if (uPass < cHistoryEntries)
2012 for (i = 0; i < cHistoryEntries && i <= uPass; i++)
2013 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
2014 else
2015 for (i = 0; i < cHistoryEntries; i++)
2016 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
2017 uint32_t const cDirtyPagesLong = cTotal / cHistoryEntries;
2018 pVM->pgm.s.LiveSave.cDirtyPagesLong = cDirtyPagesLong;
2019
2020 /* estimate the speed */
2021 uint64_t cNsElapsed = RTTimeNanoTS() - pVM->pgm.s.LiveSave.uSaveStartNS;
2022 uint32_t cPagesPerSecond = (uint32_t)( (long double)pVM->pgm.s.LiveSave.cSavedPages
2023 / ((long double)cNsElapsed / 1000000000.0) );
2024 pVM->pgm.s.LiveSave.cPagesPerSecond = cPagesPerSecond;
2025
2026 /*
2027 * Try to make a decision.
2028 */
2029 if ( cDirtyPagesShort <= cDirtyPagesLong
2030 && ( cDirtyNow <= cDirtyPagesShort
2031 || cDirtyNow - cDirtyPagesShort < RT_MIN(cDirtyPagesShort / 8, 16)
2032 )
2033 )
2034 {
2035 if (uPass > 10)
2036 {
2037 uint32_t cMsLeftShort = (uint32_t)(cDirtyPagesShort / (long double)cPagesPerSecond * 1000.0);
2038 uint32_t cMsLeftLong = (uint32_t)(cDirtyPagesLong / (long double)cPagesPerSecond * 1000.0);
2039 uint32_t cMsMaxDowntime = SSMR3HandleMaxDowntime(pSSM);
2040 if (cMsMaxDowntime < 32)
2041 cMsMaxDowntime = 32;
2042 if ( ( cMsLeftLong <= cMsMaxDowntime
2043 && cMsLeftShort < cMsMaxDowntime)
2044 || cMsLeftShort < cMsMaxDowntime / 2
2045 )
2046 {
2047 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u|%ums cDirtyPagesLong=%u|%ums cMsMaxDowntime=%u\n",
2048 uPass, cDirtyPagesShort, cMsLeftShort, cDirtyPagesLong, cMsLeftLong, cMsMaxDowntime));
2049 return VINF_SUCCESS;
2050 }
2051 }
2052 else
2053 {
2054 if ( ( cDirtyPagesShort <= 128
2055 && cDirtyPagesLong <= 1024)
2056 || cDirtyPagesLong <= 256
2057 )
2058 {
2059 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u cDirtyPagesLong=%u\n", uPass, cDirtyPagesShort, cDirtyPagesLong));
2060 return VINF_SUCCESS;
2061 }
2062 }
2063 }
2064
2065 /*
2066 * Come up with a completion percentage. Currently this is a simple
2067 * dirty page (long term) vs. total pages ratio + some pass trickery.
2068 */
2069 unsigned uPctDirty = (unsigned)( (long double)cDirtyPagesLong
2070 / (pVM->pgm.s.cAllPages - pVM->pgm.s.LiveSave.cIgnoredPages - pVM->pgm.s.cZeroPages) );
2071 if (uPctDirty <= 100)
2072 SSMR3HandleReportLivePercent(pSSM, RT_MIN(100 - uPctDirty, uPass * 2));
2073 else
2074 AssertMsgFailed(("uPctDirty=%u cDirtyPagesLong=%#x cAllPages=%#x cIgnoredPages=%#x cZeroPages=%#x\n",
2075 uPctDirty, cDirtyPagesLong, pVM->pgm.s.cAllPages, pVM->pgm.s.LiveSave.cIgnoredPages, pVM->pgm.s.cZeroPages));
2076
2077 return VINF_SSM_VOTE_FOR_ANOTHER_PASS;
2078}
2079
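/*
 * Worked example of the downtime estimate above (illustrative numbers):
 * with cDirtyPagesShort = 2048 and cPagesPerSecond = 65536 the short term
 * estimate is 2048 / 65536 * 1000 = 31.25 -> 31 ms. That is below the
 * default 32 ms cMsMaxDowntime floor, so once uPass > 10 and the long term
 * estimate also fits, the function votes VINF_SUCCESS and SSM can proceed
 * to the final pass.
 */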
2080
2081/**
2082 * @callback_method_impl{FNSSMINTLIVEPREP}
2083 *
2084 * This will attempt to allocate and initialize the tracking structures. It
2085 * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
2086 * pgmR3SaveDone will do the cleanups.
2087 */
2088static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
2089{
2090 /*
2091 * Indicate that we will be using the write monitoring.
2092 */
2093 PGM_LOCK_VOID(pVM);
2094 /** @todo find a way of mediating this when more users are added. */
2095 if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
2096 {
2097 PGM_UNLOCK(pVM);
2098 AssertLogRelFailedReturn(VERR_PGM_WRITE_MONITOR_ENGAGED);
2099 }
2100 pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
2101 PGM_UNLOCK(pVM);
2102
2103 /*
2104 * Initialize the statistics.
2105 */
2106 pVM->pgm.s.LiveSave.Rom.cReadyPages = 0;
2107 pVM->pgm.s.LiveSave.Rom.cDirtyPages = 0;
2108 pVM->pgm.s.LiveSave.Mmio2.cReadyPages = 0;
2109 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages = 0;
2110 pVM->pgm.s.LiveSave.Ram.cReadyPages = 0;
2111 pVM->pgm.s.LiveSave.Ram.cDirtyPages = 0;
2112 pVM->pgm.s.LiveSave.cIgnoredPages = 0;
2113 pVM->pgm.s.LiveSave.fActive = true;
2114 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory); i++)
2115 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = UINT32_MAX / 2;
2116 pVM->pgm.s.LiveSave.iDirtyPagesHistory = 0;
2117 pVM->pgm.s.LiveSave.cSavedPages = 0;
2118 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
2119 pVM->pgm.s.LiveSave.cPagesPerSecond = 8192;
2120
2121 /*
2122 * Per page type.
2123 */
2124 int rc = pgmR3PrepRomPages(pVM);
2125 if (RT_SUCCESS(rc))
2126 rc = pgmR3PrepMmio2Pages(pVM);
2127 if (RT_SUCCESS(rc))
2128 rc = pgmR3PrepRamPages(pVM);
2129
2130 NOREF(pSSM);
2131 return rc;
2132}
2133
2134
2135/**
2136 * @callback_method_impl{FNSSMINTSAVEEXEC}
2137 */
2138static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
2139{
2140 PPGM pPGM = &pVM->pgm.s;
2141
2142 /*
2143 * Lock PGM and set the no-more-writes indicator.
2144 */
2145 PGM_LOCK_VOID(pVM);
2146 pVM->pgm.s.fNoMorePhysWrites = true;
2147
2148 /*
2149 * Save basic data (required / unaffected by relocation).
2150 */
2151 int rc = SSMR3PutStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFields[0], NULL /*pvUser*/);
2152
2153 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus && RT_SUCCESS(rc); idCpu++)
2154 rc = SSMR3PutStruct(pSSM, &pVM->apCpusR3[idCpu]->pgm.s, &s_aPGMCpuFields[0]);
2155
2156 /*
2157 * Save the (remainder of the) memory.
2158 */
2159 if (RT_SUCCESS(rc))
2160 {
2161 if (pVM->pgm.s.LiveSave.fActive)
2162 {
2163 pgmR3ScanRomPages(pVM);
2164 pgmR3ScanMmio2Pages(pVM, SSM_PASS_FINAL);
2165 pgmR3ScanRamPages(pVM, true /*fFinalPass*/);
2166
2167 rc = pgmR3SaveShadowedRomPages( pVM, pSSM, true /*fLiveSave*/, true /*fFinalPass*/);
2168 if (RT_SUCCESS(rc))
2169 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2170 if (RT_SUCCESS(rc))
2171 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2172 }
2173 else
2174 {
2175 rc = pgmR3SaveRamConfig(pVM, pSSM);
2176 if (RT_SUCCESS(rc))
2177 rc = pgmR3SaveRomRanges(pVM, pSSM);
2178 if (RT_SUCCESS(rc))
2179 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
2180 if (RT_SUCCESS(rc))
2181 rc = pgmR3SaveRomVirginPages( pVM, pSSM, false /*fLiveSave*/);
2182 if (RT_SUCCESS(rc))
2183 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, false /*fLiveSave*/, true /*fFinalPass*/);
2184 if (RT_SUCCESS(rc))
2185 rc = pgmR3SaveMmio2Pages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2186 if (RT_SUCCESS(rc))
2187 rc = pgmR3SaveRamPages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2188 }
2189 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
2190 }
2191
2192 PGM_UNLOCK(pVM);
2193 return rc;
2194}
2195
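/*
 * For reference, the unit layout a non-live save produces above:
 *   1. the PGM and per-VCPU structures (s_aPGMFields / s_aPGMCpuFields),
 *   2. the RAM config, ROM ranges and MMIO2 ranges,
 *   3. virgin ROM pages, then shadowed ROM pages,
 *   4. MMIO2 pages, then RAM pages,
 *   5. the PGM_STATE_REC_END terminator.
 * A live save emits step 2 and the virgin ROM pages in pass 0 of
 * pgmR3LiveExec instead and only writes the remaining dirty pages here.
 */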
2196
2197/**
2198 * @callback_method_impl{FNSSMINTSAVEDONE}
2199 */
2200static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
2201{
2202 /*
2203 * Do per page type cleanups first.
2204 */
2205 if (pVM->pgm.s.LiveSave.fActive)
2206 {
2207 pgmR3DoneRomPages(pVM);
2208 pgmR3DoneMmio2Pages(pVM);
2209 pgmR3DoneRamPages(pVM);
2210 }
2211
2212 /*
2213 * Clear the live save indicator and disengage write monitoring.
2214 */
2215 PGM_LOCK_VOID(pVM);
2216 pVM->pgm.s.LiveSave.fActive = false;
2217 /** @todo this is blindly assuming that we're the only user of write
2218 * monitoring. Fix this when more users are added. */
2219 pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
2220 PGM_UNLOCK(pVM);
2221
2222 NOREF(pSSM);
2223 return VINF_SUCCESS;
2224}
2225
2226
2227/**
2228 * @callback_method_impl{FNSSMINTLOADPREP}
2229 */
2230static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
2231{
2232 /*
2233 * Call the reset function to make sure all the memory is cleared.
2234 */
2235 PGMR3Reset(pVM);
2236 pVM->pgm.s.LiveSave.fActive = false;
2237 NOREF(pSSM);
2238 return VINF_SUCCESS;
2239}
2240
2241
2242#if defined(VBOX_VMM_TARGET_X86)
2243/**
2244 * Load an ignored page.
2245 *
2246 * @returns VBox status code.
2247 * @param pSSM The saved state handle.
2248 */
2249static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
2250{
2251 uint8_t abPage[GUEST_PAGE_SIZE];
2252 return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
2253}
2254
2255
2256/**
2257 * Compares a page with an old save type value.
2258 *
2259 * @returns true if equal, false if not.
2260 * @param pPage The page to compare.
2261 * @param uOldType The old type value from the saved state.
2262 */
2263DECLINLINE(bool) pgmR3CompareNewAndOldPageTypes(PPGMPAGE pPage, uint8_t uOldType)
2264{
2265 uint8_t uOldPageType;
2266 switch (PGM_PAGE_GET_TYPE(pPage))
2267 {
2268 case PGMPAGETYPE_INVALID: uOldPageType = PGMPAGETYPE_OLD_INVALID; break;
2269 case PGMPAGETYPE_RAM: uOldPageType = PGMPAGETYPE_OLD_RAM; break;
2270 case PGMPAGETYPE_MMIO2: uOldPageType = PGMPAGETYPE_OLD_MMIO2; break;
2271 case PGMPAGETYPE_MMIO2_ALIAS_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO; break;
2272 case PGMPAGETYPE_ROM_SHADOW: uOldPageType = PGMPAGETYPE_OLD_ROM_SHADOW; break;
2273 case PGMPAGETYPE_ROM: uOldPageType = PGMPAGETYPE_OLD_ROM; break;
2274 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: RT_FALL_THRU();
2275 case PGMPAGETYPE_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO; break;
2276 default:
2277 AssertFailed();
2278 uOldPageType = PGMPAGETYPE_OLD_INVALID;
2279 break;
2280 }
2281 return uOldPageType == uOldType;
2282}
2283
2284
2285/**
2286 * Loads a page without any bits in the saved state, i.e. making sure it's
2287 * really zero.
2288 *
2289 * @returns VBox status code.
2290 * @param pVM The cross context VM structure.
2291 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2292 * state).
2293 * @param pPage The guest page tracking structure.
2294 * @param GCPhys The page address.
2295 * @param pRam The ram range (logging).
2296 */
2297static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2298{
2299 if ( uOldType != PGMPAGETYPE_OLD_INVALID
2300 && !pgmR3CompareNewAndOldPageTypes(pPage, uOldType))
2301 return VERR_SSM_UNEXPECTED_DATA;
2302
2303 /* I think this should be sufficient. */
2304 if ( !PGM_PAGE_IS_ZERO(pPage)
2305 && !PGM_PAGE_IS_BALLOONED(pPage))
2306 return VERR_SSM_UNEXPECTED_DATA;
2307
2308 NOREF(pVM);
2309 NOREF(GCPhys);
2310 NOREF(pRam);
2311 return VINF_SUCCESS;
2312}
2313
2314
2315/**
2316 * Loads a page from the saved state.
2317 *
2318 * @returns VBox status code.
2319 * @param pVM The cross context VM structure.
2320 * @param pSSM The SSM handle.
2321 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2322 * state).
2323 * @param pPage The guest page tracking structure.
2324 * @param GCPhys The page address.
2325 * @param pRam The ram range (logging).
2326 */
2327static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2328{
2329 /*
2330 * Match up the type, dealing with MMIO2 aliases (dropped).
2331 */
2332 AssertLogRelMsgReturn( uOldType == PGMPAGETYPE_INVALID
2333 || pgmR3CompareNewAndOldPageTypes(pPage, uOldType)
2334 /* kludge for the expanded PXE bios (r67885) - @bugref{5687}: */
2335 || ( uOldType == PGMPAGETYPE_OLD_RAM
2336 && GCPhys >= 0xed000
2337 && GCPhys <= 0xeffff
2338 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM)
2339 ,
2340 ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
2341 VERR_SSM_UNEXPECTED_DATA);
2342
2343 /*
2344 * Load the page.
2345 */
2346 PGMPAGEMAPLOCK PgMpLck;
2347 void *pvPage;
2348 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
2349 if (RT_SUCCESS(rc))
2350 {
2351 rc = SSMR3GetMem(pSSM, pvPage, GUEST_PAGE_SIZE);
2352 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2353 }
2354
2355 return rc;
2356}
2357
2358
2359/**
2360 * Loads a page (counter part to pgmR3SavePage).
2361 *
2362 * @returns VBox status code, fully bitched errors.
2363 * @param pVM The cross context VM structure.
2364 * @param pSSM The SSM handle.
2365 * @param uOldType The page type.
2366 * @param pPage The page.
2367 * @param GCPhys The page address.
2368 * @param pRam The RAM range (for error messages).
2369 */
2370static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2371{
2372 uint8_t uState;
2373 int rc = SSMR3GetU8(pSSM, &uState);
2374 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
2375 if (uState == 0 /* zero */)
2376 rc = pgmR3LoadPageZeroOld(pVM, uOldType, pPage, GCPhys, pRam);
2377 else if (uState == 1)
2378 rc = pgmR3LoadPageBitsOld(pVM, pSSM, uOldType, pPage, GCPhys, pRam);
2379 else
2380 rc = VERR_PGM_INVALID_SAVED_PAGE_STATE;
2381 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uOldType=%d GCPhys=%RGp %s rc=%Rrc\n",
2382 pPage, uState, uOldType, GCPhys, pRam->pszDesc, rc),
2383 rc);
2384 return VINF_SUCCESS;
2385}
2386
2387
2388/**
2389 * Loads a shadowed ROM page.
2390 *
2391 * @returns VBox status code, errors are fully bitched.
2392 * @param pVM The cross context VM structure.
2393 * @param pSSM The saved state handle.
2394 * @param pPage The page.
2395 * @param GCPhys The page address.
2396 * @param pRam The RAM range (for error messages).
2397 */
2398static int pgmR3LoadShadowedRomPageOld(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2399{
2400 /*
2401 * Load and set the protection first, then load the two pages; the first
2402 * one is the active and the other is the passive.
2403 */
2404 PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
2405 AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
2406
2407 uint8_t uProt;
2408 int rc = SSMR3GetU8(pSSM, &uProt);
2409 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
2410 PGMROMPROT enmProt = (PGMROMPROT)uProt;
2411 AssertLogRelMsgReturn( enmProt >= PGMROMPROT_INVALID
2412 && enmProt < PGMROMPROT_END,
2413 ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
2414 VERR_SSM_UNEXPECTED_DATA);
2415
2416 if (pRomPage->enmProt != enmProt)
2417 {
2418 rc = PGMR3PhysRomProtect(pVM, GCPhys, GUEST_PAGE_SIZE, enmProt);
2419 AssertLogRelRCReturn(rc, rc);
2420 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
2421 }
2422
2423 PPGMPAGE pPageActive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
2424 PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
2425 uint8_t u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM : PGMPAGETYPE_ROM_SHADOW;
2426 uint8_t u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;
2427
2428 /** @todo this isn't entirely correct as long as pgmPhysGCPhys2CCPtrInternal is
2429 * used down the line (the 2nd page will be written to the first
2430 * one because of a false TLB hit since the TLB is using GCPhys and
2431 * doesn't check the HCPhys of the desired page). */
2432 rc = pgmR3LoadPageOld(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
2433 if (RT_SUCCESS(rc))
2434 {
2435 *pPageActive = *pPage;
2436 rc = pgmR3LoadPageOld(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
2437 }
2438 return rc;
2439}
2440
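/*
 * Example of the active/passive selection above (assuming the usual
 * PGMROMPROT semantics): for PGMROMPROT_READ_ROM_WRITE_IGNORE the Virgin
 * copy is the active page and is loaded into the visible page, while the
 * Shadow copy is loaded as the passive one; for a READ_RAM protection the
 * roles are swapped.
 */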
2441
2442/**
2443 * Ram range flags and bits for older versions of the saved state.
2444 *
2445 * @returns VBox status code.
2446 *
2447 * @param pVM The cross context VM structure.
2448 * @param pSSM The SSM handle.
2449 * @param uVersion The saved state version.
2450 */
2451static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2452{
2453 /*
2454 * Ram range flags and bits.
2455 */
2456 uint32_t iSeqNo = 0;
2457 uint32_t const cRamRangeLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries,
2458 RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
2459 for (uint32_t idxLookup = 0; idxLookup < cRamRangeLookupEntries; idxLookup++)
2460 {
2461 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
2462 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
2463 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange];
2464 AssertContinue(pRam);
2465
2466 /* Check the sequence number / separator. */
2467 uint32_t u32Sep;
2468 int rc = SSMR3GetU32(pSSM, &u32Sep);
2469 if (RT_FAILURE(rc))
2470 return rc;
2471 if (u32Sep == ~0U)
2472 break;
2473 if (u32Sep != iSeqNo)
2474 {
2475 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2476 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2477 }
2478 AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2479
2480 /* Get the range details. */
2481 RTGCPHYS GCPhys;
2482 SSMR3GetGCPhys(pSSM, &GCPhys);
2483 RTGCPHYS GCPhysLast;
2484 SSMR3GetGCPhys(pSSM, &GCPhysLast);
2485 RTGCPHYS cb;
2486 SSMR3GetGCPhys(pSSM, &cb);
2487 uint8_t fHaveBits;
2488 rc = SSMR3GetU8(pSSM, &fHaveBits);
2489 if (RT_FAILURE(rc))
2490 return rc;
2491 if (fHaveBits & ~1)
2492 {
2493 AssertMsgFailed(("fHaveBits=%#x\n", fHaveBits));
2494 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2495 }
2496 size_t cchDesc = 0;
2497 char szDesc[256];
2498 szDesc[0] = '\0';
2499 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2500 {
2501 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
2502 if (RT_FAILURE(rc))
2503 return rc;
2504 /* Since we've modified the description strings in r45878, only compare
2505 them if the saved state is more recent. */
2506 if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
2507 cchDesc = strlen(szDesc);
2508 }
2509
2510 /*
2511 * Match it up with the current range.
2512 *
2513 * Note there is a hack for dealing with the high BIOS mapping
2514 * in the old saved state format, this means we might not have
2515 * a 1:1 match on success.
2516 */
2517 if ( ( GCPhys != pRam->GCPhys
2518 || GCPhysLast != pRam->GCPhysLast
2519 || cb != pRam->cb
2520 || ( cchDesc
2521 && strcmp(szDesc, pRam->pszDesc)) )
2522 /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
2523 && ( uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
2524 || GCPhys != UINT32_C(0xfff80000)
2525 || GCPhysLast != UINT32_C(0xffffffff)
2526 || pRam->GCPhysLast != GCPhysLast
2527 || pRam->GCPhys < GCPhys
2528 || !fHaveBits)
2529 )
2530 {
2531 LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
2532 "State : %RGp-%RGp %RGp bytes %s %s\n",
2533 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pbR3 ? "bits" : "nobits", pRam->pszDesc,
2534 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
2535 /*
2536 * If we're loading a state for debugging purpose, don't make a fuss if
2537 * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
2538 */
2539 if ( SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
2540 || GCPhys < 8 * _1M)
2541 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2542 N_("RAM range mismatch; saved={%RGp-%RGp %RGp bytes %s %s} config={%RGp-%RGp %RGp bytes %s %s}"),
2543 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc,
2544 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pbR3 ? "bits" : "nobits", pRam->pszDesc);
2545
2546 AssertMsgFailed(("debug skipping not implemented, sorry\n"));
2547 iSeqNo++;
2548 continue;
2549 }
2550
2551 uint32_t cPages = (GCPhysLast - GCPhys + 1) >> GUEST_PAGE_SHIFT;
2552 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2553 {
2554 /*
2555 * Load the pages one by one.
2556 */
2557 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2558 {
2559 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
2560 PPGMPAGE pPage = &pRam->aPages[iPage];
2561 uint8_t uOldType;
2562 rc = SSMR3GetU8(pSSM, &uOldType);
2563 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
2564 if (uOldType == PGMPAGETYPE_OLD_ROM_SHADOW)
2565 rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
2566 else
2567 rc = pgmR3LoadPageOld(pVM, pSSM, uOldType, pPage, GCPhysPage, pRam);
2568 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2569 }
2570 }
2571 else
2572 {
2573 /*
2574 * Old format.
2575 */
2576
2577 /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
2578 The rest is generally irrelevant and wrong since the stuff has to match registrations. */
2579 uint32_t fFlags = 0;
2580 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2581 {
2582 uint16_t u16Flags;
2583 rc = SSMR3GetU16(pSSM, &u16Flags);
2584 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2585 fFlags |= u16Flags;
2586 }
2587
2588 /* Load the bits */
2589 if ( !fHaveBits
2590 && GCPhysLast < UINT32_C(0xe0000000))
2591 {
2592 /*
2593 * Dynamic chunks.
2594 */
2595 const uint32_t cPagesInChunk = (1*1024*1024) >> GUEST_PAGE_SHIFT;
2596 AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
2597 ("cPages=%#x cPagesInChunk=%#x GCPhys=%RGp %s\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
2598 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2599
2600 for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
2601 {
2602 uint8_t fPresent;
2603 rc = SSMR3GetU8(pSSM, &fPresent);
2604 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2605 AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
2606 ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
2607 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2608
2609 for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
2610 {
2611 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
2612 PPGMPAGE pPage = &pRam->aPages[iPage];
2613 if (fPresent)
2614 {
2615 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO
2616 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
2617 rc = pgmR3LoadPageToDevNullOld(pSSM);
2618 else
2619 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2620 }
2621 else
2622 rc = pgmR3LoadPageZeroOld(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2623 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2624 }
2625 }
2626 }
2627 else if (pRam->pbR3)
2628 {
2629 /*
2630 * MMIO2.
2631 */
2632 AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
2633 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2634 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2635 AssertLogRelMsgReturn(pRam->pbR3,
2636 ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
2637 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2638
2639 rc = SSMR3GetMem(pSSM, pRam->pbR3, pRam->cb);
2640 AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
2641 }
2642 else if (GCPhysLast < UINT32_C(0xfff80000))
2643 {
2644 /*
2645 * PCI MMIO, no pages saved.
2646 */
2647 }
2648 else
2649 {
2650 /*
2651 * Load the 0xfff80000..0xffffffff BIOS range.
2652 * It starts with X reserved pages that we have to skip over since
2653 * the RAMRANGE created by the new code won't include those.
2654 */
2655 AssertLogRelMsgReturn( !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
2656 && (fFlags & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
2657 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2658 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2659 AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
2660 ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
2661 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2662
2663 /* Skip wasted reserved pages before the ROM. */
2664 while (GCPhys < pRam->GCPhys)
2665 {
2666 rc = pgmR3LoadPageToDevNullOld(pSSM);
2667 AssertLogRelRCReturn(rc, rc);
2668 GCPhys += GUEST_PAGE_SIZE;
2669 }
2670
2671 /* Load the bios pages. */
2672 cPages = pRam->cb >> GUEST_PAGE_SHIFT;
2673 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2674 {
2675 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
2676 PPGMPAGE pPage = &pRam->aPages[iPage];
2677
2678 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
2679 ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, GCPhys),
2680 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2681 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
2682 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2683 }
2684 }
2685 }
2686
2687 iSeqNo++;
2688 }
2689
2690 return VINF_SUCCESS;
2691}
2692#endif
2693
2694
2695/**
2696 * Worker for pgmR3Load and pgmR3LoadLocked.
2697 *
2698 * @returns VBox status code.
2699 *
2700 * @param pVM The cross context VM structure.
2701 * @param pSSM The SSM handle.
2702 * @param uVersion The PGM saved state unit version.
2703 * @param uPass The pass number.
2704 *
2705 * @todo This needs splitting up if more record types or code twists are
2706 * added...
2707 */
2708static int pgmR3LoadMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2709{
2710 NOREF(uPass);
2711
2712#if defined(VBOX_VMM_TARGET_ARMV8)
2713 RT_NOREF(uVersion);
2714#endif
2715
2716 /*
2717 * Process page records until we hit the terminator.
2718 */
2719 RTGCPHYS GCPhys = NIL_RTGCPHYS;
2720 PPGMRAMRANGE pRamHint = NULL;
2721 uint8_t id = UINT8_MAX;
2722 uint32_t iPage = UINT32_MAX - 10;
2723 PPGMROMRANGE pRom = NULL;
2724 PPGMREGMMIO2RANGE pRegMmio2 = NULL;
2725 PPGMRAMRANGE pMmio2RamRange = NULL;
2726
2727 /*
2728 * We batch up pages that should be freed instead of calling GMM for
2729 * each and every one of them. Note that we'll lose the pages in most
2730 * failure paths - this should probably be addressed one day.
2731 */
2732 uint32_t cPendingPages = 0;
2733 PGMMFREEPAGESREQ pReq;
2734 int rc = GMMR3FreePagesPrepare(pVM, &pReq, 128 /* batch size */, GMMACCOUNT_BASE);
2735 AssertLogRelRCReturn(rc, rc);
2736
2737 for (;;)
2738 {
2739 /*
2740 * Get the record type and flags.
2741 */
2742 uint8_t u8;
2743 rc = SSMR3GetU8(pSSM, &u8);
2744 if (RT_FAILURE(rc))
2745 return rc;
2746 if (u8 == PGM_STATE_REC_END)
2747 {
2748 /*
2749 * Finish off any pages pending freeing.
2750 */
2751 if (cPendingPages)
2752 {
2753 Log(("pgmR3LoadMemory: GMMR3FreePagesPerform pVM=%p cPendingPages=%u\n", pVM, cPendingPages));
2754 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2755 AssertLogRelRCReturn(rc, rc);
2756 }
2757 GMMR3FreePagesCleanup(pReq);
2758 return VINF_SUCCESS;
2759 }
2760 AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2761 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2762 {
2763 /*
2764 * RAM page.
2765 */
2766 case PGM_STATE_REC_RAM_ZERO:
2767 case PGM_STATE_REC_RAM_RAW:
2768 case PGM_STATE_REC_RAM_BALLOONED:
2769 {
2770 /*
2771 * Get the address and resolve it into a page descriptor.
2772 */
2773 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2774 GCPhys += GUEST_PAGE_SIZE;
2775 else
2776 {
2777 rc = SSMR3GetGCPhys(pSSM, &GCPhys);
2778 if (RT_FAILURE(rc))
2779 return rc;
2780 }
2781 AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2782
2783 PPGMPAGE pPage;
2784 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
2785 if (RT_SUCCESS(rc))
2786 { /* likely */ }
2787 else if ( rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS
2788 && GCPhys < _1M
2789 && GCPhys >= 640U*_1K
2790 && (u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_RAM_ZERO)
2791 {
2792 rc = VINF_SUCCESS; /* We've kicked out unused pages between 640K and 1MB, but older states may include them. */
2793 id = UINT8_MAX;
2794 break;
2795 }
2796 else
2797 AssertLogRelMsgFailedReturn(("rc=%Rrc %RGp u8=%#x\n", rc, GCPhys, u8), rc);
2798
2799 /*
2800 * Take action according to the record type.
2801 */
2802 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2803 {
2804 case PGM_STATE_REC_RAM_ZERO:
2805 {
2806 if (PGM_PAGE_IS_ZERO(pPage))
2807 break;
2808
2809 /* Ballooned pages must be unmarked (live snapshot and
2810 teleportation scenarios). */
2811 if (PGM_PAGE_IS_BALLOONED(pPage))
2812 {
2813 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2814#if defined(VBOX_VMM_TARGET_X86)
2815 if (uVersion == PGM_SAVED_STATE_VERSION_BALLOON_BROKEN)
2816 break;
2817#endif
2818 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
2819 break;
2820 }
2821
2822 AssertLogRelMsgReturn(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED, ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_UNEXPECTED_PAGE_STATE);
2823
2824 /* If this is a ROM page, we must clear it and not try to
2825 * free it. Ditto if the VM is using RamPreAlloc (see
2826 * @bugref{6318}). */
2827 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM
2828 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM_SHADOW
2829 || PGM_IS_IN_NEM_MODE(pVM)
2830 || pVM->pgm.s.fRamPreAlloc)
2831 {
2832 PGMPAGEMAPLOCK PgMpLck;
2833 void *pvDstPage;
2834 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2835 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2836
2837 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
2838 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2839 }
2840 /* Free it only if it's not part of a previously
2841 allocated large page (no need to clear the page). */
2842 else if ( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2843 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED)
2844 {
2845 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
2846 AssertRCReturn(rc, rc);
2847 }
2848 /** @todo handle large pages (see @bugref{5545}) */
2849 break;
2850 }
2851
2852 case PGM_STATE_REC_RAM_BALLOONED:
2853 {
2854 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2855 if (PGM_PAGE_IS_BALLOONED(pPage))
2856 break;
2857
2858 /* We don't map ballooned pages in our shadow page tables, let's
2859 just free it if allocated and mark as ballooned. See @bugref{5515}. */
2860 if (PGM_PAGE_IS_ALLOCATED(pPage))
2861 {
2862 /** @todo handle large pages + ballooning when it works. (see @bugref{5515},
2863 * @bugref{5545}). */
2864 AssertLogRelMsgReturn( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2865 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED,
2866 ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_LOAD_UNEXPECTED_PAGE_TYPE);
2867
2868 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
2869 AssertRCReturn(rc, rc);
2870 }
2871 Assert(PGM_PAGE_IS_ZERO(pPage));
2872 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
2873 break;
2874 }
2875
2876 case PGM_STATE_REC_RAM_RAW:
2877 {
2878 PGMPAGEMAPLOCK PgMpLck;
2879 void *pvDstPage;
2880 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2881 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2882 rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
2883 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2884 if (RT_FAILURE(rc))
2885 return rc;
2886 break;
2887 }
2888
2889 default:
2890 AssertMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
2891 }
2892 id = UINT8_MAX;
2893 break;
2894 }
2895
2896 /*
2897 * MMIO2 page.
2898 */
2899 case PGM_STATE_REC_MMIO2_RAW:
2900 case PGM_STATE_REC_MMIO2_ZERO:
2901 {
2902 /*
2903 * Get the ID + page number and resolve that into an MMIO2 page.
2904 */
2905 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2906 iPage++;
2907 else
2908 {
2909 SSMR3GetU8(pSSM, &id);
2910 rc = SSMR3GetU32(pSSM, &iPage);
2911 if (RT_FAILURE(rc))
2912 return rc;
2913 }
2914 if ( !pRegMmio2
2915 || pRegMmio2->idSavedState != id)
2916 {
2917 pMmio2RamRange = NULL;
2918 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
2919 for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
2920 if (pVM->pgm.s.aMmio2Ranges[idx].idSavedState == id)
2921 {
2922 pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
2923 pMmio2RamRange = pVM->pgm.s.apMmio2RamRanges[idx];
2924 break;
2925 }
2926 AssertLogRelMsgReturn(pRegMmio2 && pMmio2RamRange, ("id=%#u iPage=%#x\n", id, iPage),
2927 VERR_PGM_SAVED_MMIO2_RANGE_NOT_FOUND);
2928 }
2929 AssertLogRelMsgReturn(iPage < (pMmio2RamRange->cb >> GUEST_PAGE_SHIFT),
2930 ("iPage=%#x cb=%RGp %s\n", iPage, pMmio2RamRange->cb, pMmio2RamRange->pszDesc),
2931 VERR_PGM_SAVED_MMIO2_PAGE_NOT_FOUND);
2932 void * const pvDstPage = &pMmio2RamRange->pbR3[(size_t)iPage << GUEST_PAGE_SHIFT];
2933
2934 /*
2935 * Load the page bits.
2936 */
2937 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_MMIO2_ZERO)
2938 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
2939 else
2940 {
2941 rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
2942 if (RT_FAILURE(rc))
2943 return rc;
2944 }
2945 GCPhys = NIL_RTGCPHYS;
2946 break;
2947 }
2948
2949 /*
2950 * ROM pages.
2951 */
2952 case PGM_STATE_REC_ROM_VIRGIN:
2953 case PGM_STATE_REC_ROM_SHW_RAW:
2954 case PGM_STATE_REC_ROM_SHW_ZERO:
2955 case PGM_STATE_REC_ROM_PROT:
2956 {
2957 /*
2958 * Get the ID + page number and resolve that into a ROM page descriptor.
2959 */
2960 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2961 iPage++;
2962 else
2963 {
2964 SSMR3GetU8(pSSM, &id);
2965 rc = SSMR3GetU32(pSSM, &iPage);
2966 if (RT_FAILURE(rc))
2967 return rc;
2968 }
2969 if ( !pRom
2970 || pRom->idSavedState != id)
2971 {
2972 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
2973 uint32_t idx;
2974 for (idx = 0; idx < cRomRanges; idx++)
2975 {
2976 pRom = pVM->pgm.s.apRomRanges[idx];
2977 if (pRom->idSavedState == id)
2978 break;
2979 }
2980 AssertLogRelMsgReturn(idx < cRomRanges, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_ROM_RANGE_NOT_FOUND);
2981 }
2982 AssertLogRelMsgReturn(iPage < (pRom->cb >> GUEST_PAGE_SHIFT),
2983 ("iPage=%#x cb=%RGp %s\n", iPage, pRom->cb, pRom->pszDesc),
2984 VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
2985 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2986 GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
2987
2988 /*
2989 * Get and set the protection.
2990 */
2991 uint8_t u8Prot;
2992 rc = SSMR3GetU8(pSSM, &u8Prot);
2993 if (RT_FAILURE(rc))
2994 return rc;
2995 PGMROMPROT enmProt = (PGMROMPROT)u8Prot;
2996 AssertLogRelMsgReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, ("GCPhys=%RGp enmProt=%d\n", GCPhys, enmProt), VERR_PGM_SAVED_ROM_PAGE_PROT);
2997
2998 if (enmProt != pRomPage->enmProt)
2999 {
3000 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
3001 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
3002 N_("Protection change of unshadowed ROM page: GCPhys=%RGp enmProt=%d %s"),
3003 GCPhys, enmProt, pRom->pszDesc);
3004 rc = PGMR3PhysRomProtect(pVM, GCPhys, GUEST_PAGE_SIZE, enmProt);
3005 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
3006 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
3007 }
3008 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_ROM_PROT)
3009 break; /* done */
3010
3011 /*
3012 * Get the right page descriptor.
3013 */
3014 PPGMPAGE pRealPage;
3015 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
3016 {
3017 case PGM_STATE_REC_ROM_VIRGIN:
3018 if (!PGMROMPROT_IS_ROM(enmProt))
3019 pRealPage = &pRomPage->Virgin;
3020 else
3021 pRealPage = NULL;
3022 break;
3023
3024 case PGM_STATE_REC_ROM_SHW_RAW:
3025 case PGM_STATE_REC_ROM_SHW_ZERO:
3026 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
3027 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
3028 N_("Shadowed / non-shadowed page type mismatch: GCPhys=%RGp enmProt=%d %s"),
3029 GCPhys, enmProt, pRom->pszDesc);
3030 if (PGMROMPROT_IS_ROM(enmProt))
3031 pRealPage = &pRomPage->Shadow;
3032 else
3033 pRealPage = NULL;
3034 break;
3035
3036 default: AssertLogRelFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE); /* shut up gcc */
3037 }
3038#ifdef VBOX_WITH_PGM_NEM_MODE
3039 bool const fAltPage = pRealPage != NULL;
3040#endif
3041 if (!pRealPage)
3042 {
3043 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pRealPage, &pRamHint);
3044 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
3045 }
3046
3047 /*
3048 * Make it writable and map it (if necessary).
3049 */
3050 void *pvDstPage = NULL;
3051 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
3052 {
3053 case PGM_STATE_REC_ROM_SHW_ZERO:
3054 if ( PGM_PAGE_IS_ZERO(pRealPage)
3055 || PGM_PAGE_IS_BALLOONED(pRealPage))
3056 break;
3057 /** @todo implement zero page replacing. */
3058 RT_FALL_THRU();
3059 case PGM_STATE_REC_ROM_VIRGIN:
3060 case PGM_STATE_REC_ROM_SHW_RAW:
3061#ifdef VBOX_WITH_PGM_NEM_MODE
3062 if (fAltPage && PGM_IS_IN_NEM_MODE(pVM))
3063 pvDstPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT];
3064 else
3065#endif
3066 {
3067 rc = pgmPhysPageMakeWritableAndMap(pVM, pRealPage, GCPhys, &pvDstPage);
3068 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
3069 }
3070 break;
3071 }
3072
3073 /*
3074 * Load the bits.
3075 */
3076 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
3077 {
3078 case PGM_STATE_REC_ROM_SHW_ZERO:
3079 if (pvDstPage)
3080 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
3081 break;
3082
3083 case PGM_STATE_REC_ROM_VIRGIN:
3084 case PGM_STATE_REC_ROM_SHW_RAW:
3085 rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
3086 if (RT_FAILURE(rc))
3087 return rc;
3088 break;
3089 }
3090 GCPhys = NIL_RTGCPHYS;
3091 break;
3092 }
3093
3094 /*
3095 * Unknown type.
3096 */
3097 default:
3098 AssertLogRelMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
3099 }
3100 } /* forever */
3101}
3102
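/*
 * Sketch (illustration only, helper name invented) of the (id, iPage)
 * addressing shared by the MMIO2 and ROM records handled above: the pair
 * is stored explicitly only when PGM_STATE_REC_FLAG_ADDR is set, otherwise
 * the page index simply advances by one within the current range.
 */
#if 0 /* illustration only */
static int pgmR3DemoGetRomMmio2Addr(PSSMHANDLE pSSM, uint8_t u8, uint8_t *pid, uint32_t *piPage)
{
    if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
    {
        *piPage += 1;                   /* Implicit: the next page in the same range. */
        return VINF_SUCCESS;
    }
    SSMR3GetU8(pSSM, pid);              /* The range ID assigned at save time... */
    return SSMR3GetU32(pSSM, piPage);   /* ...followed by the page index within it. */
}
#endif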
3103
3104/**
3105 * Worker for pgmR3Load.
3106 *
3107 * @returns VBox status code.
3108 *
3109 * @param pVM The cross context VM structure.
3110 * @param pSSM The SSM handle.
3111 * @param uVersion The saved state version.
3112 */
3113static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
3114{
3115 PPGM pPGM = &pVM->pgm.s;
3116 int rc;
3117
3118 /*
3119 * Load basic data (required / unaffected by relocation).
3120 */
3121#if defined(VBOX_VMM_TARGET_X86)
3122 uint32_t u32Sep;
3123
3124 if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
3125 {
3126 if (uVersion > PGM_SAVED_STATE_VERSION_PRE_BALLOON)
3127 rc = SSMR3GetStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFields[0], NULL /*pvUser*/);
3128 else
3129 rc = SSMR3GetStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFieldsPreBalloon[0], NULL /*pvUser*/);
3130
3131 AssertLogRelRCReturn(rc, rc);
3132
3133 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3134 {
3135 if (uVersion > PGM_SAVED_STATE_VERSION_PRE_PAE)
3136 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFields[0]);
3137 else
3138 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFieldsPrePae[0]);
3139 AssertLogRelRCReturn(rc, rc);
3140 }
3141 }
3142 else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
3143 {
3144 AssertRelease(pVM->cCpus == 1);
3145
3146 PGMOLD pgmOld;
3147 rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
3148 AssertLogRelRCReturn(rc, rc);
3149
3150 PVMCPU pVCpu0 = pVM->apCpusR3[0];
3151 pVCpu0->pgm.s.fA20Enabled = pgmOld.fA20Enabled;
3152 pVCpu0->pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
3153 pVCpu0->pgm.s.enmGuestMode = pgmOld.enmGuestMode;
3154 }
3155 else
3156 {
3157 AssertRelease(pVM->cCpus == 1);
3158
3159 SSMR3Skip(pSSM, sizeof(bool));
3160 RTGCPTR GCPtrIgn;
3161 SSMR3GetGCPtr(pSSM, &GCPtrIgn);
3162 SSMR3Skip(pSSM, sizeof(uint32_t));
3163
3164 uint32_t cbRamSizeIgnored;
3165 rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
3166 if (RT_FAILURE(rc))
3167 return rc;
3168 PVMCPU pVCpu0 = pVM->apCpusR3[0];
3169 SSMR3GetGCPhys(pSSM, &pVCpu0->pgm.s.GCPhysA20Mask);
3170
3171 uint32_t u32 = 0;
3172 SSMR3GetUInt(pSSM, &u32);
3173 pVCpu0->pgm.s.fA20Enabled = !!u32;
3174 SSMR3GetUInt(pSSM, &pVCpu0->pgm.s.fSyncFlags);
3175 RTUINT uGuestMode;
3176 SSMR3GetUInt(pSSM, &uGuestMode);
3177 pVCpu0->pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
3178
3179 /* check separator. */
3180 rc = SSMR3GetU32(pSSM, &u32Sep);
3181 if (RT_FAILURE(rc))
3182 return rc;
3183 if (u32Sep != (uint32_t)~0)
3184 {
3185 AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
3186 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
3187 }
3188 }
3189
3190 /*
3191 * Fix the A20 mask.
3192 */
3193 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3194 {
3195 PVMCPU pVCpu = pVM->apCpusR3[i];
3196 pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!pVCpu->pgm.s.fA20Enabled << 20);
3197# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
3198 pgmR3RefreshShadowModeAfterA20Change(pVCpu);
3199# endif
3200 }
3201
3202 /*
3203 * The guest mappings - skipped now, see re-fixation in the caller.
3204 */
3205 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
3206 {
3207 for (uint32_t i = 0; ; i++)
3208 {
3209 rc = SSMR3GetU32(pSSM, &u32Sep); /* sequence number */
3210 if (RT_FAILURE(rc))
3211 return rc;
3212 if (u32Sep == ~0U)
3213 break;
3214 AssertMsgReturn(u32Sep == i, ("u32Sep=%#x i=%#x\n", u32Sep, i), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
3215
3216 char szDesc[256];
3217 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
3218 if (RT_FAILURE(rc))
3219 return rc;
3220 RTGCPTR GCPtrIgnore;
3221 SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* GCPtr */
3222 rc = SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* cPTs */
3223 if (RT_FAILURE(rc))
3224 return rc;
3225 }
3226 }
3227
3228 /*
3229 * Load the RAM contents.
3230 */
3231 if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
3232 {
3233 if (!pVM->pgm.s.LiveSave.fActive)
3234 {
3235 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3236 {
3237 rc = pgmR3LoadRamConfig(pVM, pSSM);
3238 if (RT_FAILURE(rc))
3239 return rc;
3240 }
3241 rc = pgmR3LoadRomRanges(pVM, pSSM);
3242 if (RT_FAILURE(rc))
3243 return rc;
3244 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3245 if (RT_FAILURE(rc))
3246 return rc;
3247 }
3248
3249 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, SSM_PASS_FINAL);
3250 }
3251 else
3252 rc = pgmR3LoadMemoryOld(pVM, pSSM, uVersion);
3253
3254#elif defined(VBOX_VMM_TARGET_ARMV8)
3255 rc = SSMR3GetStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFields[0], NULL /*pvUser*/);
3256 AssertLogRelRCReturn(rc, rc);
3257
3258 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3259 {
3260 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFields[0]);
3261 AssertLogRelRCReturn(rc, rc);
3262 }
3263
3264 if (!pVM->pgm.s.LiveSave.fActive)
3265 {
3266 rc = pgmR3LoadRamConfig(pVM, pSSM);
3267 if (RT_FAILURE(rc))
3268 return rc;
3269 rc = pgmR3LoadRomRanges(pVM, pSSM);
3270 if (RT_FAILURE(rc))
3271 return rc;
3272 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3273 if (RT_FAILURE(rc))
3274 return rc;
3275 }
3276
3277 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, SSM_PASS_FINAL);
3278
3279#else
3280# error "Port me"
3281#endif /* VBOX_VMM_TARGET_X86 */
3282
3283#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
3284 /* Refresh balloon accounting. */
3285 if (pVM->pgm.s.cBalloonedPages)
3286 {
3287 Log(("pgmR3LoadFinalLocked: pVM=%p cBalloonedPages=%#x\n", pVM, pVM->pgm.s.cBalloonedPages));
3288 rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_INFLATE, pVM->pgm.s.cBalloonedPages);
3289 AssertRCReturn(rc, rc);
3290 }
3291#endif
3292 return rc;
3293}
3294
3295
3296/**
3297 * @callback_method_impl{FNSSMINTLOADEXEC}
3298 */
3299static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3300{
3301 int rc;
3302
3303 /*
3304 * Validate version.
3305 */
3306#if defined(VBOX_VMM_TARGET_X86)
3307 if ( ( uPass != SSM_PASS_FINAL
3308 && uVersion != PGM_SAVED_STATE_VERSION
3309 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3310 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3311 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3312 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3313 || ( uVersion != PGM_SAVED_STATE_VERSION
3314 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3315 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3316 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3317 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG
3318 && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
3319 && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
3320 && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
3321 && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
3322 )
3323 {
3324 AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
3325 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3326 }
3327#elif defined(VBOX_VMM_TARGET_ARMV8)
3328 if (uVersion != PGM_SAVED_STATE_VERSION)
3329 {
3330 AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
3331 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3332 }
3333#else
3334# error "Port me"
3335#endif
3336
3337
3338 /*
3339 * Do the loading while owning the lock because a bunch of the functions
3340 * we're using requires this.
3341 */
3342 if (uPass != SSM_PASS_FINAL)
3343 {
3344 PGM_LOCK_VOID(pVM);
3345 if (uPass != 0)
3346 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3347 else
3348 {
3349 pVM->pgm.s.LiveSave.fActive = true;
3350 rc = pgmR3LoadRamConfig(pVM, pSSM);
3351 if (RT_SUCCESS(rc))
3352 rc = pgmR3LoadRomRanges(pVM, pSSM);
3353 if (RT_SUCCESS(rc))
3354 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3355 if (RT_SUCCESS(rc))
3356 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3357 }
3358 PGM_UNLOCK(pVM);
3359 }
3360 else
3361 {
3362 PGM_LOCK_VOID(pVM);
3363 rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
3364 pVM->pgm.s.LiveSave.fActive = false;
3365 PGM_UNLOCK(pVM);
3366 if (RT_SUCCESS(rc))
3367 {
3368#if defined(VBOX_VMM_TARGET_X86)
3369 /*
3370 * We require a full resync now.
3371 */
3372 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3373 {
3374 PVMCPU pVCpu = pVM->apCpusR3[i];
3375 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
3376 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3377 /** @todo For guest PAE, we might get the wrong
3378 * aGCPhysGstPaePDs values now. We should use the
3379 * saved ones... Postponing this since it's nothing new
3380 * and PAE/PDPTR needs some general readjusting, see
3381 * @bugref{5880}. */
3382 }
3383#endif
3384
3385 pgmR3HandlerPhysicalUpdateAll(pVM);
3386
3387 /*
3388 * Change the paging mode (indirectly restores PGMCPU::GCPhysCR3).
3389 * (Requires the CPUM state to be restored already!)
3390 */
3391 if (CPUMR3IsStateRestorePending(pVM))
3392 return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
3393 N_("PGM was unexpectedly restored before CPUM"));
3394
3395 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3396 {
3397 PVMCPU pVCpu = pVM->apCpusR3[i];
3398
3399#ifdef VBOX_VMM_TARGET_X86
3400 /** @todo ARM VMs may have an invalid value here, since PGMMODE_NONE was
3401 * moved from 12 to 31. Thus far, though, this is a complete NOOP on
3402 * ARM and we still have very limited PGM functionality there (the
3403 * saved state is mostly X86-isms). */
3404 rc = PGMHCChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode, false /* fForce */);
3405 AssertLogRelRCReturn(rc, rc);
3406
3407 /* Update the PSE, NX flags and validity masks. */
3408 pVCpu->pgm.s.fGst32BitPageSizeExtension = CPUMIsGuestPageSizeExtEnabled(pVCpu);
3409 PGMNotifyNxeChanged(pVCpu, CPUMIsGuestNXEnabled(pVCpu));
3410#elif defined(VBOX_VMM_TARGET_ARMV8)
3411 RT_NOREF(pVCpu); /** @todo */
3412#else
3413# error "Port me"
3414#endif
3415 }
3416 }
3417 }
3418
3419 return rc;
3420}
3421
3422
3423/**
3424 * @callback_method_impl{FNSSMINTLOADDONE}
3425 */
3426static DECLCALLBACK(int) pgmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
3427{
3428 pVM->pgm.s.fRestoreRomPagesOnReset = true;
3429 NOREF(pSSM);
3430 return VINF_SUCCESS;
3431}
3432
3433
3434/**
3435 * Registers the saved state callbacks with SSM.
3436 *
3437 * @returns VBox status code.
3438 * @param pVM The cross context VM structure.
3439 * @param cbRam The RAM size.
3440 */
3441int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
3442{
3443 return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
3444 pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
3445 NULL, pgmR3SaveExec, pgmR3SaveDone,
3446 pgmR3LoadPrep, pgmR3Load, pgmR3LoadDone);
3447}
3448
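/*
 * Callback wiring above, matching the SSMR3RegisterInternal parameter
 * order: pgmR3LivePrep / pgmR3LiveExec / pgmR3LiveVote drive the live
 * passes, pgmR3SaveExec / pgmR3SaveDone the final save (there is no save
 * prep callback), and pgmR3LoadPrep / pgmR3Load / pgmR3LoadDone the
 * restore side.
 */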