VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGM-armv8.cpp@107171

Last change on this file since 107171 was 107171, checked in by vboxsync, 5 months ago

VMM/PGM: Introducing VBOX_WITH_ONLY_PGM_NEM_MODE to disable lots of unused code on *.arm64 and darwin. jiraref:VBP-1466

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 32.4 KB
/* $Id: PGM-armv8.cpp 107171 2024-11-28 10:38:10Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, ARMv8 variant. (Mixing stuff here, not good?)
 */

/*
 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/** @page pg_pgm_armv8 PGM - The Page Manager and Monitor (ARMv8 variant)
 *
 * For now this is just a stub for bringing up the ARMv8 hypervisor. We'll see
 * how much we really need here later on and whether it makes sense to merge
 * this with the original PGM.cpp (avoiding \#ifdef hell with this as I'm not
 * confident enough to fiddle around with PGM too much at this point).
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/cpum-armv8.h>
#include <VBox/vmm/iom.h>
#include <VBox/sup.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/hm.h>
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/uvm.h>
#include "PGMInline.h"

#include <VBox/dbg.h>
#include <VBox/param.h>
#include <VBox/err.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/env.h>
#include <iprt/file.h>
#include <iprt/mem.h>
#include <iprt/rand.h>
#include <iprt/string.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
#ifdef VBOX_STRICT
static FNVMATSTATE pgmR3ResetNoMorePhysWritesFlag;
#endif


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#ifndef VBOX_WITH_PGM_NEM_MODE
# error "This requires VBOX_WITH_PGM_NEM_MODE to be set at all times!"
#endif

/**
 * Interface that NEM uses to switch PGM into simplified memory management mode.
 *
 * This call occurs before PGMR3Init.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) PGMR3EnableNemMode(PVM pVM)
{
    AssertFatal(!PDMCritSectIsInitialized(&pVM->pgm.s.CritSectX));
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    pVM->pgm.s.fNemMode = true;
#endif
}
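
/* Illustrative call order (a sketch, not actual NEM code): per the doc comment
 * above, the native execution manager flips PGM into the simplified mode
 * during its early init, i.e. before PGMR3Init below is reached:
 *
 *     PGMR3EnableNemMode(pVM);             // critical section not created yet
 *     ...
 *     rc = PGMR3Init(pVM);                 // init proceeds in NEM mode
 *     Assert(PGMR3IsNemModeEnabled(pVM));
 */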


/**
 * Checks whether the simplified memory management mode for NEM is enabled.
 *
 * @returns true if enabled, false if not.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(bool) PGMR3IsNemModeEnabled(PVM pVM)
{
    RT_NOREF(pVM);
    return PGM_IS_IN_NEM_MODE(pVM);
}


/**
 * Initiates the paging of the VM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) PGMR3Init(PVM pVM)
{
    LogFlow(("PGMR3Init:\n"));

    /*
     * Assert alignment and sizes.
     */
    AssertCompile(sizeof(pVM->pgm.s) <= sizeof(pVM->pgm.padding));
    AssertCompile(sizeof(pVM->apCpusR3[0]->pgm.s) <= sizeof(pVM->apCpusR3[0]->pgm.padding));
    AssertCompileMemberAlignment(PGM, CritSectX, sizeof(uintptr_t));

    bool const fDriverless = SUPR3IsDriverless();

    /*
     * Init the structure.
     */
    /*pVM->pgm.s.fRestoreRomPagesAtReset = false;*/

    /* We always use the simplified memory mode on arm. */
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    pVM->pgm.s.fNemMode = true;
#endif

    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
    {
        pVM->pgm.s.aHandyPages[i].HCPhysGCPhys = NIL_GMMPAGEDESC_PHYS;
        pVM->pgm.s.aHandyPages[i].fZeroed      = false;
        pVM->pgm.s.aHandyPages[i].idPage       = NIL_GMM_PAGEID;
        pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
    }

    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLargeHandyPage); i++)
    {
        pVM->pgm.s.aLargeHandyPage[i].HCPhysGCPhys = NIL_GMMPAGEDESC_PHYS;
        pVM->pgm.s.aLargeHandyPage[i].fZeroed      = false;
        pVM->pgm.s.aLargeHandyPage[i].idPage       = NIL_GMM_PAGEID;
        pVM->pgm.s.aLargeHandyPage[i].idSharedPage = NIL_GMM_PAGEID;
    }

    AssertReleaseReturn(pVM->pgm.s.cPhysHandlerTypes == 0, VERR_WRONG_ORDER);
    for (size_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aPhysHandlerTypes); i++)
    {
#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
        if (fDriverless)
#endif
            pVM->pgm.s.aPhysHandlerTypes[i].hType = i | (RTRandU64() & ~(uint64_t)PGMPHYSHANDLERTYPE_IDX_MASK);
        pVM->pgm.s.aPhysHandlerTypes[i].enmKind    = PGMPHYSHANDLERKIND_INVALID;
        pVM->pgm.s.aPhysHandlerTypes[i].pfnHandler = pgmR3HandlerPhysicalHandlerInvalid;
    }

#if 0
    /* Init the per-CPU part. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU  pVCpu = pVM->apCpusR3[idCpu];
        PPGMCPU pPGM  = &pVCpu->pgm.s;
    }
#endif

    /*
     * Read the configuration.
     */
    PCFGMNODE const pCfgPGM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/PGM");

    int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "RamPreAlloc", &pVM->pgm.s.fRamPreAlloc,
#ifdef VBOX_WITH_PREALLOC_RAM_BY_DEFAULT
                                true
#else
                                false
#endif
                               );
    AssertLogRelRCReturn(rc, rc);

    rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, UINT32_MAX);
    AssertLogRelRCReturn(rc, rc);
    for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;

    /*
     * Get the configured RAM size - to estimate saved state size.
     */
    uint64_t cbRam;
    rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        cbRam = 0;
    else if (RT_SUCCESS(rc))
    {
        if (cbRam < GUEST_PAGE_SIZE)
            cbRam = 0;
        cbRam = RT_ALIGN_64(cbRam, GUEST_PAGE_SIZE);
    }
    else
    {
        AssertMsgFailed(("Configuration error: Failed to query integer \"RamSize\", rc=%Rrc.\n", rc));
        return rc;
    }
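
    /* Worked example (assuming 4KiB guest pages): a configured RamSize of
     * 0x40000801 bytes is rounded up by RT_ALIGN_64 to 0x40001000, while any
     * value below GUEST_PAGE_SIZE has already been collapsed to zero above. */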

    /** @cfgm{/PGM/ZeroRamPagesOnReset, boolean, true}
     * Whether to clear RAM pages on (hard) reset. */
    rc = CFGMR3QueryBoolDef(pCfgPGM, "ZeroRamPagesOnReset", &pVM->pgm.s.fZeroRamPagesOnReset, true);
    AssertLogRelRCReturn(rc, rc);

    /*
     * Register callbacks, string formatters and the saved state data unit.
     */
#ifdef VBOX_STRICT
    VMR3AtStateRegister(pVM->pUVM, pgmR3ResetNoMorePhysWritesFlag, NULL);
#endif
    PGMRegisterStringFormatTypes();

    rc = pgmR3InitSavedState(pVM, cbRam);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Initialize the PGM critical section and flush the phys TLBs
     */
    rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSectX, RT_SRC_POS, "PGM");
    AssertRCReturn(rc, rc);

    pgmR3PhysChunkInvalidateTLB(pVM, false /*fInRendezvous*/); /* includes pgmPhysInvalidatePageMapTLB call */

    /*
     * For the time being we sport a full set of handy pages in addition to the base
     * memory to simplify things.
     */
    rc = MMR3ReserveHandyPages(pVM, RT_ELEMENTS(pVM->pgm.s.aHandyPages)); /** @todo this should be changed to PGM_HANDY_PAGES_MIN but this needs proper testing... */
    AssertRCReturn(rc, rc);

    /*
     * Setup the zero page (HCPhysZeroPg is set by ring-0).
     */
    RT_ZERO(pVM->pgm.s.abZeroPg); /* paranoia */
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    if (fDriverless)
        pVM->pgm.s.HCPhysZeroPg = _4G - GUEST_PAGE_SIZE * 2 /* fake to avoid PGM_PAGE_INIT_ZERO assertion */;
    AssertRelease(pVM->pgm.s.HCPhysZeroPg != NIL_RTHCPHYS);
    AssertRelease(pVM->pgm.s.HCPhysZeroPg != 0);
#endif

    /*
     * Setup the invalid MMIO page (HCPhysMmioPg is set by ring-0).
     * (The invalid bits in HCPhysInvMmioPg are set later on init complete.)
     */
    ASMMemFill32(pVM->pgm.s.abMmioPg, sizeof(pVM->pgm.s.abMmioPg), 0xfeedface);
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    if (fDriverless)
        pVM->pgm.s.HCPhysMmioPg = _4G - GUEST_PAGE_SIZE * 3 /* fake to avoid PGM_PAGE_INIT_ZERO assertion */;
    AssertRelease(pVM->pgm.s.HCPhysMmioPg != NIL_RTHCPHYS);
    AssertRelease(pVM->pgm.s.HCPhysMmioPg != 0);
    pVM->pgm.s.HCPhysInvMmioPg = pVM->pgm.s.HCPhysMmioPg;
#endif

    /*
     * Initialize physical access handlers.
     */
    /** @cfgm{/PGM/MaxPhysicalAccessHandlers, uint32_t, 32, 65536, 6144}
     * Number of physical access handlers allowed (subject to rounding). This is
     * managed as a one-time allocation during initialization. The default is
     * lower for a driverless setup. */
    /** @todo can lower it for nested paging too, at least when there is no
     *        nested guest involved. */
    uint32_t cAccessHandlers = 0;
    rc = CFGMR3QueryU32Def(pCfgPGM, "MaxPhysicalAccessHandlers", &cAccessHandlers, !fDriverless ? 6144 : 640);
    AssertLogRelRCReturn(rc, rc);
    AssertLogRelMsgStmt(cAccessHandlers >= 32, ("cAccessHandlers=%#x, min 32\n", cAccessHandlers), cAccessHandlers = 32);
    AssertLogRelMsgStmt(cAccessHandlers <= _64K, ("cAccessHandlers=%#x, max 65536\n", cAccessHandlers), cAccessHandlers = _64K);
#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    if (!fDriverless)
    {
        rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_PHYS_HANDLER_INIT, cAccessHandlers, NULL);
        AssertRCReturn(rc, rc);
        AssertPtr(pVM->pgm.s.pPhysHandlerTree);
        AssertPtr(pVM->pgm.s.PhysHandlerAllocator.m_paNodes);
        AssertPtr(pVM->pgm.s.PhysHandlerAllocator.m_pbmAlloc);
    }
    else
#endif
    {
        uint32_t cbTreeAndBitmap = 0;
        uint32_t const cbTotalAligned = pgmHandlerPhysicalCalcTableSizes(&cAccessHandlers, &cbTreeAndBitmap);
        uint8_t *pb = NULL;
        rc = SUPR3PageAlloc(cbTotalAligned >> HOST_PAGE_SHIFT, 0, (void **)&pb);
        AssertLogRelRCReturn(rc, rc);

        pVM->pgm.s.PhysHandlerAllocator.initSlabAllocator(cAccessHandlers, (PPGMPHYSHANDLER)&pb[cbTreeAndBitmap],
                                                          (uint64_t *)&pb[sizeof(PGMPHYSHANDLERTREE)]);
        pVM->pgm.s.pPhysHandlerTree = (PPGMPHYSHANDLERTREE)pb;
        pVM->pgm.s.pPhysHandlerTree->initWithAllocator(&pVM->pgm.s.PhysHandlerAllocator);
    }
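
    /* Sketch of the single driverless allocation set up above (layout as used
     * by this function; the exact rounding comes from
     * pgmHandlerPhysicalCalcTableSizes):
     *
     *     pb + 0:                           PGMPHYSHANDLERTREE instance
     *     pb + sizeof(PGMPHYSHANDLERTREE):  allocation bitmap words
     *     pb + cbTreeAndBitmap:             cAccessHandlers PGMPHYSHANDLER nodes
     */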

    /*
     * Register the physical access handler protecting ROMs.
     */
    if (RT_SUCCESS(rc))
        /** @todo why isn't pgmPhysRomWriteHandler registered for ring-0? */
        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, 0 /*fFlags*/, pgmPhysRomWriteHandler,
                                              "ROM write protection", &pVM->pgm.s.hRomPhysHandlerType);

    /*
     * Register the physical access handler doing dirty MMIO2 tracing.
     */
    if (RT_SUCCESS(rc))
        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, PGMPHYSHANDLER_F_KEEP_PGM_LOCK,
                                              pgmPhysMmio2WriteHandler, "MMIO2 dirty page tracing",
                                              &pVM->pgm.s.hMmio2DirtyPhysHandlerType);

    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* Almost no cleanup necessary, MM frees all memory. */
    PDMR3CritSectDelete(pVM, &pVM->pgm.s.CritSectX);

    return rc;
}


/**
 * Ring-3 init finalizing (not required here).
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) PGMR3InitFinalize(PVM pVM)
{
    RT_NOREF(pVM);
    int rc = VINF_SUCCESS;
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    if (pVM->pgm.s.fRamPreAlloc)
        rc = pgmR3PhysRamPreAllocate(pVM);
#endif

    //pgmLogState(pVM);
    LogRel(("PGM: PGMR3InitFinalize done: %Rrc\n", rc));
    return rc;
}


/**
 * Init phase completed callback.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmWhat     What has been completed.
 * @thread  EMT(0)
 */
VMMR3_INT_DECL(int) PGMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
{
    switch (enmWhat)
    {
        case VMINITCOMPLETED_HM:
            AssertLogRelReturn(!pVM->pgm.s.fPciPassthrough, VERR_PGM_PCI_PASSTHRU_MISCONFIG);
            break;

        default:
            /* shut up gcc */
            break;
    }

    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this component.
 *
 * This function will be called at init and whenever the VMM needs to relocate
 * itself inside the GC.
 *
 * @param   pVM         The cross context VM structure.
 * @param   offDelta    Relocation delta relative to old location.
 */
VMMR3DECL(void) PGMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
    LogFlow(("PGMR3Relocate: offDelta=%RGv\n", offDelta));
    RT_NOREF(pVM, offDelta);
}


/**
 * Resets a virtual CPU when unplugged.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMR3DECL(void) PGMR3ResetCpu(PVM pVM, PVMCPU pVCpu)
{
    RT_NOREF(pVM, pVCpu);
}


/**
 * The VM is being reset.
 *
 * For the PGM component this means that any PD write monitors
 * need to be removed.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) PGMR3Reset(PVM pVM)
{
    LogFlow(("PGMR3Reset:\n"));
    VM_ASSERT_EMT(pVM);

    PGM_LOCK_VOID(pVM);

#ifdef DEBUG
    DBGFR3_INFO_LOG_SAFE(pVM, "mappings", NULL);
    DBGFR3_INFO_LOG_SAFE(pVM, "handlers", "all nostat");
#endif

    //pgmLogState(pVM);
    PGM_UNLOCK(pVM);
}


/**
 * Memory setup after VM construction or reset.
 *
 * @param   pVM         The cross context VM structure.
 * @param   fAtReset    Indicates the context, after reset if @c true or after
 *                      construction if @c false.
 */
VMMR3_INT_DECL(void) PGMR3MemSetup(PVM pVM, bool fAtReset)
{
    if (fAtReset)
    {
        PGM_LOCK_VOID(pVM);

        int rc = pgmR3PhysRamZeroAll(pVM);
        AssertReleaseRC(rc);

        rc = pgmR3PhysRomReset(pVM);
        AssertReleaseRC(rc);

        PGM_UNLOCK(pVM);
    }
}


#ifdef VBOX_STRICT
/**
 * VM state change callback for clearing fNoMorePhysWrites after
 * a snapshot has been created.
 */
static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PUVM pUVM, PCVMMR3VTABLE pVMM, VMSTATE enmState,
                                                         VMSTATE enmOldState, void *pvUser)
{
    if (   enmState == VMSTATE_RUNNING
        || enmState == VMSTATE_RESUMING)
        pUVM->pVM->pgm.s.fNoMorePhysWrites = false;
    RT_NOREF(pVMM, enmOldState, pvUser);
}
#endif

/**
 * Private API to reset fNoMorePhysWrites.
 */
VMMR3_INT_DECL(void) PGMR3ResetNoMorePhysWritesFlag(PVM pVM)
{
    pVM->pgm.s.fNoMorePhysWrites = false;
}

/**
 * Terminates the PGM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) PGMR3Term(PVM pVM)
{
    /* Must free shared pages here. */
    PGM_LOCK_VOID(pVM);
    pgmR3PhysRamTerm(pVM);
    pgmR3PhysRomTerm(pVM);
    PGM_UNLOCK(pVM);

    PGMDeregisterStringFormatTypes();
    return PDMR3CritSectDelete(pVM, &pVM->pgm.s.CritSectX);
}


/**
 * Perform an integrity check on the PGM component.
 *
 * @returns VINF_SUCCESS if everything is fine.
 * @returns VBox error status after asserting on integrity breach.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) PGMR3CheckIntegrity(PVM pVM)
{
    RT_NOREF(pVM);
    return VINF_SUCCESS;
}


VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
{
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
#else
    RT_NOREF(pVM);
    return false;
#endif
}


VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
{
    return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
}


VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    pVM->pgm.s.fUseLargePages = fUseLargePages;
    return VINF_SUCCESS;
}


#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
#else
int pgmLock(PVMCC pVM, bool fVoid)
#endif
{
#if defined(VBOX_STRICT)
    int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
#else
    int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
#endif
    if (RT_SUCCESS(rc))
        return rc;
    if (fVoid)
        PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
    else
        AssertRC(rc);
    return rc;
}


void pgmUnlock(PVMCC pVM)
{
    uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
    pVM->pgm.s.cDeprecatedPageLocks = 0;
    int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
    if (rc == VINF_SEM_NESTED)
        pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
}
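
/* Usage sketch: PGM code normally takes this lock through the PGM_LOCK_VOID /
 * PGM_LOCK / PGM_UNLOCK wrappers seen elsewhere in this file rather than by
 * calling pgmLock/pgmUnlock directly:
 *
 *     PGM_LOCK_VOID(pVM);
 *     ...access or update PGM state...
 *     PGM_UNLOCK(pVM);
 */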


#if !defined(IN_R0) || defined(LOG_ENABLED)

/** Format handler for PGMPAGE.
 * @copydoc FNRTSTRFORMATTYPE */
static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
                                                     const char *pszType, void const *pvValue,
                                                     int cchWidth, int cchPrecision, unsigned fFlags,
                                                     void *pvUser)
{
    size_t    cch;
    PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
    if (RT_VALID_PTR(pPage))
    {
        char szTmp[64+80];

        cch = 0;

        /* The single char state stuff. */
        static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
        szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];

# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
        if (IS_PART_INCLUDED(5))
        {
            static const char s_achHandlerStates[4*2] = { '-', 't', 'w', 'a', '_', 'T', 'W', 'A' };
            szTmp[cch++] = s_achHandlerStates[  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)
                                              | ((uint8_t)PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage) << 2)];
        }

        /* The type. */
        if (IS_PART_INCLUDED(4))
        {
            szTmp[cch++] = ':';
            static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
        }

        /* The numbers. */
        if (IS_PART_INCLUDED(3))
        {
            szTmp[cch++] = ':';
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
        }

        if (IS_PART_INCLUDED(2))
        {
            szTmp[cch++] = ':';
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
        }

        if (IS_PART_INCLUDED(6))
        {
            szTmp[cch++] = ':';
            static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
            szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
        }
# undef IS_PART_INCLUDED

        cch = pfnOutput(pvArgOutput, szTmp, cch);
    }
    else
        cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
    NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
    return cch;
}


/** Format handler for PGMRAMRANGE.
 * @copydoc FNRTSTRFORMATTYPE */
static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
                                                         const char *pszType, void const *pvValue,
                                                         int cchWidth, int cchPrecision, unsigned fFlags,
                                                         void *pvUser)
{
    size_t              cch;
    PGMRAMRANGE const  *pRam = (PGMRAMRANGE const *)pvValue;
    if (RT_VALID_PTR(pRam))
    {
        char szTmp[80];
        cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
        cch = pfnOutput(pvArgOutput, szTmp, cch);
    }
    else
        cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
    NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
    return cch;
}

/** Format type handlers to be registered/deregistered. */
static const struct
{
    char                szType[24];
    PFNRTSTRFORMATTYPE  pfnHandler;
} g_aPgmFormatTypes[] =
{
    { "pgmpage",     pgmFormatTypeHandlerPage },
    { "pgmramrange", pgmFormatTypeHandlerRamRange }
};
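
/* Usage sketch: once registered, the types above are consumed through IPRT's
 * %R[type] custom format mechanism, e.g. (hypothetical log statement):
 *
 *     Log(("page=%R[pgmpage] range=%R[pgmramrange]\n", pPage, pRam));
 */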

#endif /* !IN_R0 || LOG_ENABLED */


VMMDECL(int) PGMRegisterStringFormatTypes(void)
{
#if !defined(IN_R0) || defined(LOG_ENABLED)
    int      rc = VINF_SUCCESS;
    unsigned i;
    for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
    {
        rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
# ifdef IN_RING0
        if (rc == VERR_ALREADY_EXISTS)
        {
            /* in case of cleanup failure in ring-0 */
            RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
            rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
        }
# endif
    }
    if (RT_FAILURE(rc))
        while (i-- > 0)
            RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);

    return rc;
#else
    return VINF_SUCCESS;
#endif
}


VMMDECL(void) PGMDeregisterStringFormatTypes(void)
{
#if !defined(IN_R0) || defined(LOG_ENABLED)
    for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
        RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
#endif
}


VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Validate input.
     */
    Assert(cb);

    LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
    RT_NOREF(pVCpu, GCPtr, cb, fFlags, fMask);

    AssertReleaseFailed();
    return VERR_NOT_IMPLEMENTED;
}


VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    bool fMmuEnabled = CPUMGetGuestMmuEnabled(pVCpu);
    if (!fMmuEnabled)
        return PGMMODE_NONE;

    CPUMMODE enmCpuMode = CPUMGetGuestMode(pVCpu);
    return enmCpuMode == CPUMMODE_ARMV8_AARCH64
         ? PGMMODE_VMSA_V8_64
         : PGMMODE_VMSA_V8_32;
}


VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
{
    RT_NOREF(pVCpu);
    return PGMMODE_NONE; /* NEM doesn't need any shadow paging. */
}


DECLINLINE(int) pgmGstWalkReturnNotPresent(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
{
    NOREF(pVCpu);
    pWalk->fNotPresent = true;
    pWalk->uLevel      = uLevel;
    pWalk->fFailed     = PGM_WALKFAIL_NOT_PRESENT
                       | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) pgmGstWalkReturnBadPhysAddr(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel, int rc)
{
    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
    pWalk->fBadPhysAddr = true;
    pWalk->uLevel       = uLevel;
    pWalk->fFailed      = PGM_WALKFAIL_BAD_PHYSICAL_ADDRESS
                        | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


DECLINLINE(int) pgmGstWalkReturnRsvdError(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
{
    NOREF(pVCpu);
    pWalk->fRsvdError = true;
    pWalk->uLevel     = uLevel;
    pWalk->fFailed    = PGM_WALKFAIL_RESERVED_BITS
                      | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(pWalk);

    pWalk->fSucceeded = false;

    RTGCPHYS GCPhysPt = CPUMGetEffectiveTtbr(pVCpu, GCPtr);
    if (GCPhysPt == RTGCPHYS_MAX) /* MMU disabled? */
    {
        pWalk->GCPtr      = GCPtr;
        pWalk->fSucceeded = true;
        pWalk->GCPhys     = GCPtr;
        return VINF_SUCCESS;
    }

    /* Do the translation. */
    /** @todo This is just a sketch to get something working for debugging, assumes 4KiB granules and 48-bit output address.
     *        Needs to be moved to PGMAllGst like on x86 and implemented for 16KiB and 64KiB granule sizes. */
    uint64_t u64TcrEl1 = CPUMGetTcrEl1(pVCpu);
    uint8_t  u8TxSz    = (GCPtr & RT_BIT_64(55))
                       ? ARMV8_TCR_EL1_AARCH64_T1SZ_GET(u64TcrEl1)
                       : ARMV8_TCR_EL1_AARCH64_T0SZ_GET(u64TcrEl1);
    uint8_t  uLookupLvl;
    RTGCPHYS fLookupMask;

    /*
     * From: https://github.com/codingbelief/arm-architecture-reference-manual-for-armv8-a/blob/master/en/chapter_d4/d42_2_controlling_address_translation_stages.md
     * For all translation stages
     *     The maximum TxSZ value is 39. If TxSZ is programmed to a value larger than 39 then it is IMPLEMENTATION DEFINED whether:
     *         - The implementation behaves as if the field is programmed to 39 for all purposes other than reading back the value of the field.
     *         - Any use of the TxSZ value generates a Level 0 Translation fault for the stage of translation at which TxSZ is used.
     *
     * For a stage 1 translation
     *     The minimum TxSZ value is 16. If TxSZ is programmed to a value smaller than 16 then it is IMPLEMENTATION DEFINED whether:
     *         - The implementation behaves as if the field were programmed to 16 for all purposes other than reading back the value of the field.
     *         - Any use of the TxSZ value generates a stage 1 Level 0 Translation fault.
     *
     * We currently choose the former for both.
     */
    if (/*u8TxSz >= 16 &&*/ u8TxSz <= 24)
    {
        uLookupLvl  = 0;
        fLookupMask = RT_BIT_64(24 - u8TxSz + 1) - 1;
    }
    else if (u8TxSz >= 25 && u8TxSz <= 33)
    {
        uLookupLvl  = 1;
        fLookupMask = RT_BIT_64(33 - u8TxSz + 1) - 1;
    }
    else /*if (u8TxSz >= 34 && u8TxSz <= 39)*/
    {
        uLookupLvl  = 2;
        fLookupMask = RT_BIT_64(39 - u8TxSz + 1) - 1;
    }
    /*else
        return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 0, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);*/ /** @todo Better status (Invalid TCR config). */
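
    /* Worked example (a sketch under the 4KiB granule assumption above): with
     * TCR_EL1.T0SZ = 25 the VA space is 64 - 25 = 39 bits wide, so the walk
     * starts at level 1 and the initial index covers VA bits [38:30]:
     *
     *     uLookupLvl  = 1;
     *     fLookupMask = RT_BIT_64(33 - 25 + 1) - 1;    // 0x1ff, nine bits
     *     idx         = (GCPtr >> 30) & fLookupMask;   // level 1 lookup below
     */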

    uint64_t *pu64Pt = NULL;
    uint64_t  uPt;
    int       rc;
    if (uLookupLvl == 0)
    {
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 0, rc);

        uPt = pu64Pt[(GCPtr >> 39) & fLookupMask];
        if (uPt & RT_BIT_64(0)) { /* probable */ }
        else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 0);

        if (uPt & RT_BIT_64(1)) { /* probable */ }
        else return pgmGstWalkReturnRsvdError(pVCpu, pWalk, 0); /** @todo Only supported if TCR_EL1.DS is set. */

        /* All nine bits from now on. */
        fLookupMask = RT_BIT_64(9) - 1;
        GCPhysPt    = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
    }

    if (uLookupLvl <= 1)
    {
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 1, rc);

        uPt = pu64Pt[(GCPtr >> 30) & fLookupMask];
        if (uPt & RT_BIT_64(0)) { /* probable */ }
        else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 1);

        if (uPt & RT_BIT_64(1)) { /* probable */ }
        else
        {
            /* Block descriptor (1G page). */
            pWalk->GCPtr       = GCPtr;
            pWalk->fSucceeded  = true;
            pWalk->GCPhys      = (RTGCPHYS)(uPt & UINT64_C(0xffffc0000000)) | (GCPtr & (RTGCPTR)(_1G - 1));
            pWalk->fGigantPage = true;
            return VINF_SUCCESS;
        }

        /* All nine bits from now on. */
        fLookupMask = RT_BIT_64(9) - 1;
        GCPhysPt    = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
    }

    if (uLookupLvl <= 2)
    {
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 2, rc);

        uPt = pu64Pt[(GCPtr >> 21) & fLookupMask];
        if (uPt & RT_BIT_64(0)) { /* probable */ }
        else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 2);

        if (uPt & RT_BIT_64(1)) { /* probable */ }
        else
        {
            /* Block descriptor (2M page). */
            pWalk->GCPtr      = GCPtr;
            pWalk->fSucceeded = true;
            pWalk->GCPhys     = (RTGCPHYS)(uPt & UINT64_C(0xffffffe00000)) | (GCPtr & (RTGCPTR)(_2M - 1));
            pWalk->fBigPage   = true;
            return VINF_SUCCESS;
        }

        /* All nine bits from now on. */
        fLookupMask = RT_BIT_64(9) - 1;
        GCPhysPt    = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
    }

    Assert(uLookupLvl <= 3);

    /* Next level. */
    rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
    if (RT_SUCCESS(rc)) { /* probable */ }
    else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 3, rc);

    uPt = pu64Pt[(GCPtr & UINT64_C(0x1ff000)) >> 12];
    if (uPt & RT_BIT_64(0)) { /* probable */ }
    else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 3);

    if (uPt & RT_BIT_64(1)) { /* probable */ }
    else return pgmGstWalkReturnRsvdError(pVCpu, pWalk, 3); /* No block descriptors. */

    pWalk->GCPtr      = GCPtr;
    pWalk->fSucceeded = true;
    pWalk->GCPhys     = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000)) | (GCPtr & (RTGCPTR)(_4K - 1));
    return VINF_SUCCESS;
}
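
/* Usage sketch (hypothetical caller, e.g. a debugger helper): translating a
 * guest virtual address with the walker above:
 *
 *     PGMPTWALK Walk;
 *     int rc = PGMGstGetPage(pVCpu, GCPtr, &Walk);
 *     if (RT_SUCCESS(rc))
 *         Log(("GCPtr=%RGv -> GCPhys=%RGp\n", Walk.GCPtr, Walk.GCPhys));
 *     else
 *         Log(("translation failed at level %u (fFailed=%#x)\n", Walk.uLevel, Walk.fFailed));
 */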


VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    AssertReleaseFailed();
    RT_NOREF(pVCpu, GCPtr, fOpFlags);
    return VERR_NOT_IMPLEMENTED;
}


VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    AssertReleaseFailed();
    RT_NOREF(pVCpu, GCPtr, fOpFlags);
    return VERR_NOT_IMPLEMENTED;
}


VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    AssertReleaseFailed();
    RT_NOREF(pVCpu, GCPtr, fOpFlags);
    return VERR_NOT_IMPLEMENTED;
}


VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode, bool fForce)
{
    //AssertReleaseFailed(); /** @todo Called by the PGM saved state code. */
    RT_NOREF(pVM, pVCpu, enmGuestMode, fForce);
    return VINF_SUCCESS;
}


VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    AssertReleaseFailed();
    RT_NOREF(pVCpu, GCPtr, pfFlags, pHCPhys);
    return VERR_NOT_SUPPORTED;
}


int pgmR3ExitShadowModeBeforePoolFlush(PVMCPU pVCpu)
{
    RT_NOREF(pVCpu);
    return VINF_SUCCESS;
}


int pgmR3ReEnterShadowModeAfterPoolFlush(PVM pVM, PVMCPU pVCpu)
{
    RT_NOREF(pVM, pVCpu);
    return VINF_SUCCESS;
}


void pgmR3RefreshShadowModeAfterA20Change(PVMCPU pVCpu)
{
    RT_NOREF(pVCpu);
}


int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
{
    VMCPU_ASSERT_EMT(pVCpu);
    RT_NOREF(pGstWalk);
    return PGMGstGetPage(pVCpu, GCPtr, pWalk);
}


int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
{
    VMCPU_ASSERT_EMT(pVCpu);
    return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk); /** @todo Always do full walk for now. */
}