VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp@ 13762

Last change on this file since 13762 was 13755, checked in by vboxsync, 16 years ago

Started with VM request API changes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 84.5 KB
/* $Id: PGMPhys.cpp 13755 2008-11-03 15:49:06Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, Physical Memory Addressing.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include <VBox/cpum.h>
#include <VBox/iom.h>
#include <VBox/sup.h>
#include <VBox/mm.h>
#include <VBox/stam.h>
#include <VBox/rem.h>
#include <VBox/csam.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <VBox/dbg.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <iprt/alloc.h>
#include <iprt/asm.h>
#include <VBox/log.h>
#include <iprt/thread.h>
#include <iprt/string.h>

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
/*static - shut up warning */
DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);



/*
 * PGMR3PhysReadU8-64
 * PGMR3PhysWriteU8-64
 */
#define PGMPHYSFN_READNAME  PGMR3PhysReadU8
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
#define PGMPHYS_DATASIZE    1
#define PGMPHYS_DATATYPE    uint8_t
#include "PGMPhysRWTmpl.h"

#define PGMPHYSFN_READNAME  PGMR3PhysReadU16
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
#define PGMPHYS_DATASIZE    2
#define PGMPHYS_DATATYPE    uint16_t
#include "PGMPhysRWTmpl.h"

#define PGMPHYSFN_READNAME  PGMR3PhysReadU32
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
#define PGMPHYS_DATASIZE    4
#define PGMPHYS_DATATYPE    uint32_t
#include "PGMPhysRWTmpl.h"

#define PGMPHYSFN_READNAME  PGMR3PhysReadU64
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
#define PGMPHYS_DATASIZE    8
#define PGMPHYS_DATATYPE    uint64_t
#include "PGMPhysRWTmpl.h"


/**
 * Links a new RAM range into the list.
 *
 * @param   pVM     Pointer to the shared VM structure.
 * @param   pNew    Pointer to the new list entry.
 * @param   pPrev   Pointer to the previous list entry. If NULL, insert as head.
 */
static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
{
    pgmLock(pVM);

    PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
    pNew->pNextR3 = pRam;
    pNew->pNextR0 = pRam ? MMHyperCCToR0(pVM, pRam) : NIL_RTR0PTR;
    pNew->pNextRC = pRam ? MMHyperCCToRC(pVM, pRam) : NIL_RTRCPTR;

    if (pPrev)
    {
        pPrev->pNextR3 = pNew;
        pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
        pPrev->pNextRC = MMHyperCCToRC(pVM, pNew);
    }
    else
    {
        pVM->pgm.s.pRamRangesR3 = pNew;
        pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
        pVM->pgm.s.pRamRangesRC = MMHyperCCToRC(pVM, pNew);
    }

    pgmUnlock(pVM);
}

/**
 * Unlinks an existing RAM range from the list.
 *
 * @param   pVM     Pointer to the shared VM structure.
 * @param   pRam    Pointer to the list entry to unlink.
 * @param   pPrev   Pointer to the previous list entry. If NULL, pRam is the list head.
 */
static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
{
    Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);

    pgmLock(pVM);

    PPGMRAMRANGE pNext = pRam->pNextR3;
    if (pPrev)
    {
        pPrev->pNextR3 = pNext;
        pPrev->pNextR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
        pPrev->pNextRC = pNext ? MMHyperCCToRC(pVM, pNext) : NIL_RTRCPTR;
    }
    else
    {
        Assert(pVM->pgm.s.pRamRangesR3 == pRam);
        pVM->pgm.s.pRamRangesR3 = pNext;
        pVM->pgm.s.pRamRangesR0 = pNext ? MMHyperCCToR0(pVM, pNext) : NIL_RTR0PTR;
        pVM->pgm.s.pRamRangesRC = pNext ? MMHyperCCToRC(pVM, pNext) : NIL_RTRCPTR;
    }

    pgmUnlock(pVM);
}

/**
 * Unlinks an existing RAM range from the list.
 *
 * @param   pVM     Pointer to the shared VM structure.
 * @param   pRam    Pointer to the list entry to unlink.
 */
static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
{
    /* find prev. */
    PPGMRAMRANGE pPrev = NULL;
    PPGMRAMRANGE pCur  = pVM->pgm.s.pRamRangesR3;
    while (pCur != pRam)
    {
        pPrev = pCur;
        pCur  = pCur->pNextR3;
    }
    AssertFatal(pCur);

    pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
}


/**
 * Sets up a RAM range.
 *
 * This will check for conflicting registrations, make a resource
 * reservation for the memory (with GMM), and setup the per-page
 * tracking structures (PGMPAGE).
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the shared VM structure.
 * @param   GCPhys  The physical address of the RAM.
 * @param   cb      The size of the RAM.
 * @param   pszDesc The description - not copied, so, don't free or change it.
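 *
 * @par Example
 *      A minimal usage sketch; the zero base address and 128 MB size are
 *      illustrative assumptions, not values mandated by this API:
 * @code
 *      rc = PGMR3PhysRegisterRam(pVM, 0 /* GCPhys */, 128 * _1M, "Base RAM");
 *      AssertRCReturn(rc, rc);
 * @endcode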
 */
VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
{
    /*
     * Validate input.
     */
    Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
    AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
    AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);

    /*
     * Find range location and check for conflicts.
     * (We don't lock here because the locking by EMT is only required on update.)
     */
    PPGMRAMRANGE pPrev = NULL;
    PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
    while (pRam && GCPhysLast >= pRam->GCPhys)
    {
        if (    GCPhysLast >= pRam->GCPhys
            &&  GCPhys     <= pRam->GCPhysLast)
            AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
                                         GCPhys, GCPhysLast, pszDesc,
                                         pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
                                        VERR_PGM_RAM_CONFLICT);

        /* next */
        pPrev = pRam;
        pRam = pRam->pNextR3;
    }

    /*
     * Register it with GMM (the API is picky about this).
     */
    const RTGCPHYS cPages = cb >> PAGE_SHIFT;
    int rc = MMR3IncreaseBaseReservation(pVM, cPages);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Allocate RAM range.
     */
    const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
    PPGMRAMRANGE pNew;
    rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
    AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);

    /*
     * Initialize the range.
     */
    pNew->GCPhys        = GCPhys;
    pNew->GCPhysLast    = GCPhysLast;
    pNew->pszDesc       = pszDesc;
    pNew->cb            = cb;
    pNew->fFlags        = 0;

    pNew->pvR3          = NULL;
    pNew->paChunkR3Ptrs = NULL;

#ifndef VBOX_WITH_NEW_PHYS_CODE
    /* Allocate memory for chunk to HC ptr lookup array. */
    rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
    AssertRCReturn(rc, rc);
    pNew->fFlags |= MM_RAM_FLAGS_DYNAMIC_ALLOC;

#endif
    RTGCPHYS iPage = cPages;
    while (iPage-- > 0)
        PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);

    /*
     * Insert the new RAM range.
     */
    pgmR3PhysLinkRamRange(pVM, pNew, pPrev);

    /*
     * Notify REM.
     */
#ifdef VBOX_WITH_NEW_PHYS_CODE
    REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, 0);
#else
    REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, MM_RAM_FLAGS_DYNAMIC_ALLOC);
#endif

    return VINF_SUCCESS;
}


/**
 * Resets (zeros) the RAM.
 *
 * ASSUMES that the caller owns the PGM lock.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the shared VM structure.
 */
int pgmR3PhysRamReset(PVM pVM)
{
#ifdef VBOX_WITH_NEW_PHYS_CODE
    int rc = VINF_SUCCESS;
#endif

    /*
     * Walk the ram ranges.
     */
    for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3; pRam; pRam = pRam->pNextR3)
    {
        uint32_t iPage = pRam->cb >> PAGE_SHIFT; Assert((RTGCPHYS)iPage << PAGE_SHIFT == pRam->cb);
#ifdef VBOX_WITH_NEW_PHYS_CODE
        if (!pVM->pgm.s.fRamPreAlloc)
        {
            /* Replace all RAM pages by ZERO pages. */
            while (iPage-- > 0)
            {
                PPGMPAGE pPage = &pRam->aPages[iPage];
                switch (PGM_PAGE_GET_TYPE(pPage))
                {
                    case PGMPAGETYPE_RAM:
                        if (!PGM_PAGE_IS_ZERO(pPage))
                            pgmPhysFreePage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
                        break;

                    case PGMPAGETYPE_MMIO2:
                    case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
                    case PGMPAGETYPE_ROM:
                    case PGMPAGETYPE_MMIO:
                        break;
                    default:
                        AssertFailed();
                }
            } /* for each page */
        }
        else
#endif
        {
            /* Zero the memory. */
            while (iPage-- > 0)
            {
                PPGMPAGE pPage = &pRam->aPages[iPage];
                switch (PGM_PAGE_GET_TYPE(pPage))
                {
#ifndef VBOX_WITH_NEW_PHYS_CODE
                    case PGMPAGETYPE_INVALID:
                    case PGMPAGETYPE_RAM:
                        if (pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)) /** @todo PAGE FLAGS */
                        {
                            /* shadow ram is reloaded elsewhere. */
                            Log4(("PGMR3Reset: not clearing phys page %RGp due to flags %RHp\n", pRam->GCPhys + (iPage << PAGE_SHIFT), pRam->aPages[iPage].HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO))); /** @todo PAGE FLAGS */
                            continue;
                        }
                        if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
                        {
                            unsigned iChunk = iPage >> (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
                            if (pRam->paChunkR3Ptrs[iChunk])
                                ASMMemZero32((char *)pRam->paChunkR3Ptrs[iChunk] + ((iPage << PAGE_SHIFT) & PGM_DYNAMIC_CHUNK_OFFSET_MASK), PAGE_SIZE);
                        }
                        else
                            ASMMemZero32((char *)pRam->pvR3 + (iPage << PAGE_SHIFT), PAGE_SIZE);
                        break;
#else /* VBOX_WITH_NEW_PHYS_CODE */
                    case PGMPAGETYPE_RAM:
                        switch (PGM_PAGE_GET_STATE(pPage))
                        {
                            case PGM_PAGE_STATE_ZERO:
                                break;
                            case PGM_PAGE_STATE_SHARED:
                            case PGM_PAGE_STATE_WRITE_MONITORED:
                                rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
                                AssertLogRelRCReturn(rc, rc);
                                /* fall thru */
                            case PGM_PAGE_STATE_ALLOCATED:
                            {
                                void *pvPage;
                                PPGMPAGEMAP pMapIgnored;
                                rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pMapIgnored, &pvPage);
                                AssertLogRelRCReturn(rc, rc);
                                ASMMemZeroPage(pvPage);
                                break;
                            }
                        }
                        break;
#endif /* VBOX_WITH_NEW_PHYS_CODE */

                    case PGMPAGETYPE_MMIO2:
                    case PGMPAGETYPE_ROM_SHADOW:
                    case PGMPAGETYPE_ROM:
                    case PGMPAGETYPE_MMIO:
                        break;
                    default:
                        AssertFailed();

                }
            } /* for each page */
        }

    }

    return VINF_SUCCESS;
}


/**
 * This is the interface IOM is using to register an MMIO region.
 *
 * It will check for conflicts and ensure that a RAM range structure
 * is present before calling the PGMR3HandlerPhysicalRegister API to
 * register the callbacks.
 *
 * @returns VBox status code.
 *
 * @param   pVM             Pointer to the shared VM structure.
 * @param   GCPhys          The start of the MMIO region.
 * @param   cb              The size of the MMIO region.
 * @param   pfnHandlerR3    The address of the ring-3 handler. (IOMR3MMIOHandler)
 * @param   pvUserR3        The user argument for R3.
 * @param   pfnHandlerR0    The address of the ring-0 handler. (IOMMMIOHandler)
 * @param   pvUserR0        The user argument for R0.
 * @param   pfnHandlerRC    The address of the RC handler. (IOMMMIOHandler)
 * @param   pvUserRC        The user argument for RC.
 * @param   pszDesc         The description of the MMIO region.
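 *
 * @par Example
 *      A hedged sketch of a call site; GCPhysMMIO, pRange and the ring-0/RC
 *      handler values are hypothetical placeholders, not IOM's actual wiring:
 * @code
 *      rc = PGMR3PhysMMIORegister(pVM, GCPhysMMIO, 4 * PAGE_SIZE,
 *                                 IOMR3MMIOHandler, pRange,
 *                                 pfnHandlerR0, pvUserR0,
 *                                 pfnHandlerRC, pvUserRC, "Device MMIO");
 * @endcode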
 */
VMMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb,
                                     R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
                                     R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
                                     RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
                                     R3PTRTYPE(const char *) pszDesc)
{
    /*
     * Assert on some assumption.
     */
    VM_ASSERT_EMT(pVM);
    AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);

    /*
     * Make sure there's a RAM range structure for the region.
     */
    int rc;
    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    bool fRamExists = false;
    PPGMRAMRANGE pRamPrev = NULL;
    PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
    while (pRam && GCPhysLast >= pRam->GCPhys)
    {
        if (    GCPhysLast >= pRam->GCPhys
            &&  GCPhys     <= pRam->GCPhysLast)
        {
            /* Simplification: all within the same range. */
            AssertLogRelMsgReturn(   GCPhys     >= pRam->GCPhys
                                  && GCPhysLast <= pRam->GCPhysLast,
                                  ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
                                   GCPhys, GCPhysLast, pszDesc,
                                   pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
                                  VERR_PGM_RAM_CONFLICT);

            /* Check that it's all RAM or MMIO pages. */
            PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
            uint32_t cLeft = cb >> PAGE_SHIFT;
            while (cLeft-- > 0)
            {
                AssertLogRelMsgReturn(   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
                                      || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
                                      ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
                                       GCPhys, GCPhysLast, pszDesc, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
                                      VERR_PGM_RAM_CONFLICT);
                pPage++;
            }

            /* Looks good. */
            fRamExists = true;
            break;
        }

        /* next */
        pRamPrev = pRam;
        pRam = pRam->pNextR3;
    }
    PPGMRAMRANGE pNew;
    if (fRamExists)
        pNew = NULL;
    else
    {
        /*
         * No RAM range, insert an ad-hoc one.
         *
         * Note that we don't have to tell REM about this range because
         * PGMHandlerPhysicalRegisterEx will do that for us.
         */
        Log(("PGMR3PhysMMIORegister: Adding ad-hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));

        const uint32_t cPages = cb >> PAGE_SHIFT;
        const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
        rc = MMHyperAlloc(pVM, cbRamRange, 16, MM_TAG_PGM_PHYS, (void **)&pNew);
        AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);

        /* Initialize the range. */
        pNew->GCPhys        = GCPhys;
        pNew->GCPhysLast    = GCPhysLast;
        pNew->pszDesc       = pszDesc;
        pNew->cb            = cb;
        pNew->fFlags        = 0; /* Some MMIO flag here? */

        pNew->pvR3          = NULL;
        pNew->paChunkR3Ptrs = NULL;

        uint32_t iPage = cPages;
        while (iPage-- > 0)
            PGM_PAGE_INIT_ZERO_REAL(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
        Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);

        /* link it */
        pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
    }

    /*
     * Register the access handler.
     */
    rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_MMIO, GCPhys, GCPhysLast,
                                      pfnHandlerR3, pvUserR3,
                                      pfnHandlerR0, pvUserR0,
                                      pfnHandlerRC, pvUserRC, pszDesc);
    if (    RT_FAILURE(rc)
        &&  !fRamExists)
    {
        /* remove the ad-hoc range. */
        pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
        pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
        MMHyperFree(pVM, pNew);
    }

    return rc;
}


/**
 * This is the interface IOM is using to deregister an MMIO region.
 *
 * It will take care of calling PGMHandlerPhysicalDeregister and clean up
 * any ad-hoc PGMRAMRANGE left behind.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the shared VM structure.
 * @param   GCPhys          The start of the MMIO region.
 * @param   cb              The size of the MMIO region.
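 *
 * @par Example
 *      A minimal sketch undoing the registration above (same hypothetical
 *      address and size):
 * @code
 *      rc = PGMR3PhysMMIODeregister(pVM, GCPhysMMIO, 4 * PAGE_SIZE);
 *      AssertRC(rc);
 * @endcode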
 */
VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
{
    VM_ASSERT_EMT(pVM);

    /*
     * First deregister the handler, then check if we should remove the ram range.
     */
    int rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
    if (RT_SUCCESS(rc))
    {
        RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
        PPGMRAMRANGE pRamPrev = NULL;
        PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
        while (pRam && GCPhysLast >= pRam->GCPhys)
        {
            /*if (    GCPhysLast >= pRam->GCPhys
                  &&  GCPhys     <= pRam->GCPhysLast) - later */
            if (    GCPhysLast == pRam->GCPhysLast
                &&  GCPhys     == pRam->GCPhys)
            {
                Assert(pRam->cb == cb);

                /*
                 * See if all the pages are dead MMIO pages.
                 */
                bool fAllMMIO = true;
                PPGMPAGE pPage = &pRam->aPages[0];
                uint32_t cLeft = cb >> PAGE_SHIFT;
                while (cLeft-- > 0)
                {
                    if (    PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO
                        /*|| not-out-of-action later */)
                    {
                        fAllMMIO = false;
                        break;
                    }
                    pPage++;
                }

                /*
                 * Unlink it and free if it's all MMIO.
                 */
                if (fAllMMIO)
                {
                    Log(("PGMR3PhysMMIODeregister: Freeing ad-hoc MMIO range for %RGp-%RGp %s\n",
                         GCPhys, GCPhysLast, pRam->pszDesc));

                    pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
                    pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
                    MMHyperFree(pVM, pRam);
                }
                break;
            }

            /* next */
            pRamPrev = pRam;
            pRam = pRam->pNextR3;
        }
    }

    return rc;
}


/**
 * Locate a MMIO2 range.
 *
 * @returns Pointer to the MMIO2 range.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pDevIns     The device instance owning the region.
 * @param   iRegion     The region.
 */
DECLINLINE(PPGMMMIO2RANGE) pgmR3PhysMMIO2Find(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
{
    /*
     * Search the list.
     */
    for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
        if (    pCur->pDevInsR3 == pDevIns
            &&  pCur->iRegion == iRegion)
            return pCur;
    return NULL;
}


/**
 * Allocate and register an MMIO2 region.
 *
 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's
 * RAM associated with a device. It is also non-shared memory with a
 * permanent ring-3 mapping and page backing (presently).
 *
 * A MMIO2 range may overlap with base memory if a lot of RAM
 * is configured for the VM, in which case we'll drop the base
 * memory pages. Presently we will make no attempt to preserve
 * anything that happens to be present in the base memory that
 * is replaced; this is of course incorrect, but it's too much
 * effort.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the memory.
 * @retval  VERR_ALREADY_EXISTS if the region already exists.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pDevIns     The device instance owning the region.
 * @param   iRegion     The region number. If the MMIO2 memory is a PCI I/O region
 *                      this number has to be the number of that region. Otherwise
 *                      it can be any number up to UINT8_MAX.
 * @param   cb          The size of the region. Must be page aligned.
 * @param   fFlags      Reserved for future use, must be zero.
 * @param   ppv         Where to store the pointer to the ring-3 mapping of the memory.
 * @param   pszDesc     The description.
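 *
 * @par Example
 *      A sketch of how a graphics device might allocate its frame buffer;
 *      the 4 MB size and region number are assumptions:
 * @code
 *      void *pvVRam;
 *      rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0 /* iRegion */, 4 * _1M,
 *                                  0 /* fFlags */, &pvVRam, "VGA VRam");
 * @endcode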
 */
VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc)
{
    /*
     * Validate input.
     */
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
    AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
    AssertPtrReturn(ppv, VERR_INVALID_POINTER);
    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
    AssertReturn(pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion) == NULL, VERR_ALREADY_EXISTS);
    AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cb, VERR_INVALID_PARAMETER);
    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);

    const uint32_t cPages = cb >> PAGE_SHIFT;
    AssertLogRelReturn((RTGCPHYS)cPages << PAGE_SHIFT == cb, VERR_INVALID_PARAMETER);
    AssertLogRelReturn(cPages <= INT32_MAX / 2, VERR_NO_MEMORY);

    /*
     * Try reserve and allocate the backing memory first as this is what is
     * most likely to fail.
     */
    int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
    if (RT_FAILURE(rc))
        return rc;

    void *pvPages;
    PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
    if (RT_UNLIKELY(!paPages))
        rc = VERR_NO_TMP_MEMORY;
    if (RT_SUCCESS(rc))
        rc = SUPPageAllocLockedEx(cPages, &pvPages, paPages);
    if (RT_SUCCESS(rc))
    {
        /*
         * Create the MMIO2 range record for it.
         */
        const size_t cbRange = RT_OFFSETOF(PGMMMIO2RANGE, RamRange.aPages[cPages]);
        PPGMMMIO2RANGE pNew;
        rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
        AssertLogRelMsgRC(rc, ("cbRange=%zu\n", cbRange));
        if (RT_SUCCESS(rc))
        {
            pNew->pDevInsR3 = pDevIns;
            pNew->pvR3 = pvPages;
            //pNew->pNext = NULL;
            //pNew->fMapped = false;
            //pNew->fOverlapping = false;
            pNew->iRegion = iRegion;
            pNew->RamRange.GCPhys = NIL_RTGCPHYS;
            pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
            pNew->RamRange.pszDesc = pszDesc;
            pNew->RamRange.cb = cb;
            //pNew->RamRange.fFlags = 0;

            pNew->RamRange.pvR3 = pvPages;          ///@todo remove this [new phys code]
            pNew->RamRange.paChunkR3Ptrs = NULL;    ///@todo remove this [new phys code]

            uint32_t iPage = cPages;
            while (iPage-- > 0)
            {
                PGM_PAGE_INIT(&pNew->RamRange.aPages[iPage],
                              paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
                              PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
            }

            /*
             * Link it into the list.
             * Since there is no particular order, just push it.
             */
            pNew->pNextR3 = pVM->pgm.s.pMmio2RangesR3;
            pVM->pgm.s.pMmio2RangesR3 = pNew;

            *ppv = pvPages;
            RTMemTmpFree(paPages);
            return VINF_SUCCESS;
        }

        SUPPageFreeLocked(pvPages, cPages);
    }
    RTMemTmpFree(paPages);
    MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pszDesc);
    return rc;
}


/**
 * Deregisters and frees an MMIO2 region.
 *
 * Any physical (and virtual) access handlers registered for the region must
 * be deregistered before calling this function.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pDevIns     The device instance owning the region.
 * @param   iRegion     The region. If it's UINT32_MAX it'll be a wildcard match.
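 *
 * @par Example
 *      A device destructor might drop all of its regions using the wildcard
 *      (sketch; error handling elided):
 * @code
 *      rc = PGMR3PhysMMIO2Deregister(pVM, pDevIns, UINT32_MAX);
 * @endcode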
 */
VMMR3DECL(int) PGMR3PhysMMIO2Deregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion)
{
    /*
     * Validate input.
     */
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
    AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);

    int rc = VINF_SUCCESS;
    unsigned cFound = 0;
    PPGMMMIO2RANGE pPrev = NULL;
    PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3;
    while (pCur)
    {
        if (    pCur->pDevInsR3 == pDevIns
            &&  (   iRegion == UINT32_MAX
                 || pCur->iRegion == iRegion))
        {
            cFound++;

            /*
             * Unmap it if it's mapped.
             */
            if (pCur->fMapped)
            {
                int rc2 = PGMR3PhysMMIO2Unmap(pVM, pCur->pDevInsR3, pCur->iRegion, pCur->RamRange.GCPhys);
                AssertRC(rc2);
                if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                    rc = rc2;
            }

            /*
             * Unlink it
             */
            PPGMMMIO2RANGE pNext = pCur->pNextR3;
            if (pPrev)
                pPrev->pNextR3 = pNext;
            else
                pVM->pgm.s.pMmio2RangesR3 = pNext;
            pCur->pNextR3 = NULL;

            /*
             * Free the memory.
             */
            int rc2 = SUPPageFreeLocked(pCur->pvR3, pCur->RamRange.cb >> PAGE_SHIFT);
            AssertRC(rc2);
            if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                rc = rc2;

            rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)(pCur->RamRange.cb >> PAGE_SHIFT), pCur->RamRange.pszDesc);
            AssertRC(rc2);
            if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                rc = rc2;

            /* we're leaking hyper memory here if done at runtime. */
            Assert(   VMR3GetState(pVM) == VMSTATE_OFF
                   || VMR3GetState(pVM) == VMSTATE_DESTROYING
                   || VMR3GetState(pVM) == VMSTATE_TERMINATED
                   || VMR3GetState(pVM) == VMSTATE_CREATING);
            /*rc = MMHyperFree(pVM, pCur);
            AssertRCReturn(rc, rc); - not safe, see the alloc call. */

            /* next */
            pCur = pNext;
        }
        else
        {
            pPrev = pCur;
            pCur = pCur->pNextR3;
        }
    }

    return !cFound && iRegion != UINT32_MAX ? VERR_NOT_FOUND : rc;
}


/**
 * Maps a MMIO2 region.
 *
 * This is done when a guest / the bios / state loading changes the
 * PCI config. The replacing of base memory has the same restrictions
 * as during registration, of course.
 *
 * @returns VBox status code.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pDevIns     The device instance owning the region.
 * @param   iRegion     The region number.
 * @param   GCPhys      The guest physical address to map the region at.
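 *
 * @par Example
 *      A sketch of mapping the region at a BAR address the guest just
 *      programmed; GCPhysBar is a hypothetical, page-aligned value:
 * @code
 *      rc = PGMR3PhysMMIO2Map(pVM, pDevIns, 0 /* iRegion */, GCPhysBar);
 *      AssertRCReturn(rc, rc);
 * @endcode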
 */
VMMR3DECL(int) PGMR3PhysMMIO2Map(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
{
    /*
     * Validate input
     */
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
    AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
    AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
    AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);

    PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
    AssertReturn(pCur, VERR_NOT_FOUND);
    AssertReturn(!pCur->fMapped, VERR_WRONG_ORDER);
    Assert(pCur->RamRange.GCPhys == NIL_RTGCPHYS);
    Assert(pCur->RamRange.GCPhysLast == NIL_RTGCPHYS);

    const RTGCPHYS GCPhysLast = GCPhys + pCur->RamRange.cb - 1;
    AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);

    /*
     * Find our location in the ram range list, checking for
     * restriction we don't bother implementing yet (partially overlapping).
     */
    bool fRamExists = false;
    PPGMRAMRANGE pRamPrev = NULL;
    PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
    while (pRam && GCPhysLast >= pRam->GCPhys)
    {
        if (    GCPhys     <= pRam->GCPhysLast
            &&  GCPhysLast >= pRam->GCPhys)
        {
            /* completely within? */
            AssertLogRelMsgReturn(   GCPhys     >= pRam->GCPhys
                                  && GCPhysLast <= pRam->GCPhysLast,
                                  ("%RGp-%RGp (MMIO2/%s) falls partly outside %RGp-%RGp (%s)\n",
                                   GCPhys, GCPhysLast, pCur->RamRange.pszDesc,
                                   pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
                                  VERR_PGM_RAM_CONFLICT);
            fRamExists = true;
            break;
        }

        /* next */
        pRamPrev = pRam;
        pRam = pRam->pNextR3;
    }
    if (fRamExists)
    {
        PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
        uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
        while (cPagesLeft-- > 0)
        {
            AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
                                  ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
                                   GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pCur->RamRange.pszDesc),
                                  VERR_PGM_RAM_CONFLICT);
            pPage++;
        }
    }
    Log(("PGMR3PhysMMIO2Map: %RGp-%RGp fRamExists=%RTbool %s\n",
         GCPhys, GCPhysLast, fRamExists, pCur->RamRange.pszDesc));

    /*
     * Make the changes.
     */
    pgmLock(pVM);

    pCur->RamRange.GCPhys = GCPhys;
    pCur->RamRange.GCPhysLast = GCPhysLast;
    pCur->fMapped = true;
    pCur->fOverlapping = fRamExists;

    if (fRamExists)
    {
        /* replace the pages, freeing all present RAM pages. */
        PPGMPAGE pPageSrc = &pCur->RamRange.aPages[0];
        PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
        uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
        while (cPagesLeft-- > 0)
        {
            pgmPhysFreePage(pVM, pPageDst, GCPhys);

            RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
            PGM_PAGE_SET_HCPHYS(pPageDst, HCPhys);
            PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_MMIO2);
            PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ALLOCATED);

            GCPhys += PAGE_SIZE;
            pPageSrc++;
            pPageDst++;
        }
    }
    else
    {
        /* link in the ram range */
        pgmR3PhysLinkRamRange(pVM, &pCur->RamRange, pRamPrev);
        REMR3NotifyPhysRamRegister(pVM, GCPhys, pCur->RamRange.cb, 0);
    }

    pgmUnlock(pVM);

    return VINF_SUCCESS;
}


/**
 * Unmaps a MMIO2 region.
 *
 * This is done when a guest / the bios / state loading changes the
 * PCI config. The replacing of base memory has the same restrictions
 * as during registration, of course.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pDevIns     The device instance owning the region.
 * @param   iRegion     The region number.
 * @param   GCPhys      The guest physical address the region is currently mapped at.
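 *
 * @par Example
 *      The inverse of the mapping sketch above (same hypothetical GCPhysBar):
 * @code
 *      rc = PGMR3PhysMMIO2Unmap(pVM, pDevIns, 0 /* iRegion */, GCPhysBar);
 *      AssertRC(rc);
 * @endcode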
 */
VMMR3DECL(int) PGMR3PhysMMIO2Unmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS GCPhys)
{
    /*
     * Validate input
     */
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
    AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
    AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
    AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);

    PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
    AssertReturn(pCur, VERR_NOT_FOUND);
    AssertReturn(pCur->fMapped, VERR_WRONG_ORDER);
    AssertReturn(pCur->RamRange.GCPhys == GCPhys, VERR_INVALID_PARAMETER);
    Assert(pCur->RamRange.GCPhysLast != NIL_RTGCPHYS);

    Log(("PGMR3PhysMMIO2Unmap: %RGp-%RGp %s\n",
         pCur->RamRange.GCPhys, pCur->RamRange.GCPhysLast, pCur->RamRange.pszDesc));

    /*
     * Unmap it.
     */
    pgmLock(pVM);

    if (pCur->fOverlapping)
    {
        /* Restore the RAM pages we've replaced. (Advance to the RAM range
           containing the start of our range; the list is sorted ascendingly.) */
        PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
        while (pRam->GCPhysLast < pCur->RamRange.GCPhys)
            pRam = pRam->pNextR3;

#ifdef RT_STRICT
        RTHCPHYS const HCPhysZeroPg = pVM->pgm.s.HCPhysZeroPg;
#endif
        Assert(HCPhysZeroPg != 0 && HCPhysZeroPg != NIL_RTHCPHYS);
        PPGMPAGE pPageDst = &pRam->aPages[(pCur->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
        uint32_t cPagesLeft = pCur->RamRange.cb >> PAGE_SHIFT;
        while (cPagesLeft-- > 0)
        {
            PGM_PAGE_SET_HCPHYS(pPageDst, pVM->pgm.s.HCPhysZeroPg);
            PGM_PAGE_SET_TYPE(pPageDst, PGMPAGETYPE_RAM);
            PGM_PAGE_SET_STATE(pPageDst, PGM_PAGE_STATE_ZERO);

            pPageDst++;
        }
    }
    else
    {
        REMR3NotifyPhysReserve(pVM, pCur->RamRange.GCPhys, pCur->RamRange.cb);
        pgmR3PhysUnlinkRamRange(pVM, &pCur->RamRange);
    }

    pCur->RamRange.GCPhys = NIL_RTGCPHYS;
    pCur->RamRange.GCPhysLast = NIL_RTGCPHYS;
    pCur->fOverlapping = false;
    pCur->fMapped = false;

    pgmUnlock(pVM);

    return VINF_SUCCESS;
}


/**
 * Checks if the given address is an MMIO2 base address or not.
 *
 * @returns true/false accordingly.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pDevIns     The owner of the memory, optional.
 * @param   GCPhys      The address to check.
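 *
 * @par Example
 *      A sketch of the intended kind of query (GCPhysBar as above):
 * @code
 *      if (PGMR3PhysMMIO2IsBase(pVM, pDevIns, GCPhysBar))
 *          Log(("region is mapped at %RGp\n", GCPhysBar));
 * @endcode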
 */
VMMR3DECL(bool) PGMR3PhysMMIO2IsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
{
    /*
     * Validate input
     */
    VM_ASSERT_EMT_RETURN(pVM, false);
    AssertPtrReturn(pDevIns, false);
    AssertReturn(GCPhys != NIL_RTGCPHYS, false);
    AssertReturn(GCPhys != 0, false);
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);

    /*
     * Search the list.
     */
    for (PPGMMMIO2RANGE pCur = pVM->pgm.s.pMmio2RangesR3; pCur; pCur = pCur->pNextR3)
        if (pCur->RamRange.GCPhys == GCPhys)
        {
            Assert(pCur->fMapped);
            return true;
        }
    return false;
}


/**
 * Gets the HC physical address of a page in the MMIO2 region.
 *
 * This API is intended for MMHyper and shouldn't be called
 * by anyone else...
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pDevIns     The owner of the memory, optional.
 * @param   iRegion     The region.
 * @param   off         The page expressed as an offset into the MMIO2 region.
 * @param   pHCPhys     Where to store the result.
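 *
 * @par Example
 *      A sketch of querying the host physical address of the first page
 *      (illustrative only, per the warning above):
 * @code
 *      RTHCPHYS HCPhys;
 *      rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, 0 /* iRegion */, 0 /* off */, &HCPhys);
 * @endcode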
 */
VMMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
{
    /*
     * Validate input
     */
    VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
    AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);

    PPGMMMIO2RANGE pCur = pgmR3PhysMMIO2Find(pVM, pDevIns, iRegion);
    AssertReturn(pCur, VERR_NOT_FOUND);
    AssertReturn(off < pCur->RamRange.cb, VERR_INVALID_PARAMETER);

    PCPGMPAGE pPage = &pCur->RamRange.aPages[off >> PAGE_SHIFT];
    *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
    return VINF_SUCCESS;
}


/**
 * Registers a ROM image.
 *
 * Shadowed ROM images require double the amount of backing memory, so,
 * don't use that unless you have to. Shadowing of ROM images is a process
 * where we can select where the reads go and where the writes go. On real
 * hardware the chipset provides means to configure this. We provide
 * PGMR3PhysProtectROM() for this purpose.
 *
 * A read-only copy of the ROM image will always be kept around while we
 * will allocate RAM pages for the changes on demand (unless all memory
 * is configured to be preallocated).
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pDevIns     The device instance owning the ROM.
 * @param   GCPhys      First physical address in the range.
 *                      Must be page aligned!
 * @param   cb          The size of the range (in bytes).
 *                      Must be page aligned!
 * @param   pvBinary    Pointer to the binary data backing the ROM image.
 *                      This must be exactly \a cb in size.
 * @param   fFlags      Mask of flags. PGMPHYS_ROM_FLAG_SHADOWED
 *                      and/or PGMPHYS_ROM_FLAG_PERMANENT_BINARY.
 * @param   pszDesc     Pointer to description string. This must not be freed.
 *
 * @remark  There is no way to remove the rom, automatically on device cleanup or
 *          manually from the device yet. This isn't difficult in any way, it's
 *          just not something we expect to be necessary for a while.
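 *
 * @par Example
 *      A sketch of registering a 64 KB system BIOS image at the traditional
 *      address; the image pointer and the flag choice are assumptions:
 * @code
 *      rc = PGMR3PhysRomRegister(pVM, pDevIns, UINT32_C(0xf0000), _64K,
 *                                pvBiosImage, PGMPHYS_ROM_FLAG_PERMANENT_BINARY,
 *                                "System BIOS");
 * @endcode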
 */
VMMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
                                    const void *pvBinary, uint32_t fFlags, const char *pszDesc)
{
    Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
         pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, fFlags, pszDesc));

    /*
     * Validate input.
     */
    AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
    AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAG_SHADOWED | PGMPHYS_ROM_FLAG_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
    VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);

    const uint32_t cPages = cb >> PAGE_SHIFT;

    /*
     * Find the ROM location in the ROM list first.
     */
    PPGMROMRANGE pRomPrev = NULL;
    PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
    while (pRom && GCPhysLast >= pRom->GCPhys)
    {
        if (    GCPhys     <= pRom->GCPhysLast
            &&  GCPhysLast >= pRom->GCPhys)
            AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
                                         GCPhys, GCPhysLast, pszDesc,
                                         pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
                                        VERR_PGM_RAM_CONFLICT);
        /* next */
        pRomPrev = pRom;
        pRom = pRom->pNextR3;
    }

    /*
     * Find the RAM location and check for conflicts.
     *
     * Conflict detection is a bit different than for RAM
     * registration since a ROM can be located within a RAM
     * range. So, what we have to check for is other memory
     * types (other than RAM that is) and that we don't span
     * more than one RAM range (lazy).
     */
    bool fRamExists = false;
    PPGMRAMRANGE pRamPrev = NULL;
    PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesR3;
    while (pRam && GCPhysLast >= pRam->GCPhys)
    {
        if (    GCPhys     <= pRam->GCPhysLast
            &&  GCPhysLast >= pRam->GCPhys)
        {
            /* completely within? */
            AssertLogRelMsgReturn(   GCPhys     >= pRam->GCPhys
                                  && GCPhysLast <= pRam->GCPhysLast,
                                  ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
                                   GCPhys, GCPhysLast, pszDesc,
                                   pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
                                  VERR_PGM_RAM_CONFLICT);
            fRamExists = true;
            break;
        }

        /* next */
        pRamPrev = pRam;
        pRam = pRam->pNextR3;
    }
    if (fRamExists)
    {
        PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
        uint32_t cPagesLeft = cPages;
        while (cPagesLeft-- > 0)
        {
            AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
                                  ("%RGp isn't a RAM page (%d) - registering %RGp-%RGp (%s).\n",
                                   GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pszDesc),
                                  VERR_PGM_RAM_CONFLICT);
            Assert(PGM_PAGE_IS_ZERO(pPage));
            pPage++;
        }
    }

    /*
     * Update the base memory reservation if necessary.
     */
    uint32_t cExtraBaseCost = fRamExists ? cPages : 0;
    if (fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
        cExtraBaseCost += cPages;
    if (cExtraBaseCost)
    {
        int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Allocate memory for the virgin copy of the RAM.
     */
    PGMMALLOCATEPAGESREQ pReq;
    int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
    AssertRCReturn(rc, rc);

    for (uint32_t iPage = 0; iPage < cPages; iPage++)
    {
        pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
        pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
        pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
    }

    pgmLock(pVM);
    rc = GMMR3AllocatePagesPerform(pVM, pReq);
    pgmUnlock(pVM);
    if (RT_FAILURE(rc))
    {
        GMMR3AllocatePagesCleanup(pReq);
        return rc;
    }

    /*
     * Allocate the new ROM range and RAM range (if necessary).
     */
    PPGMROMRANGE pRomNew;
    rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
    if (RT_SUCCESS(rc))
    {
        PPGMRAMRANGE pRamNew = NULL;
        if (!fRamExists)
            rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
        if (RT_SUCCESS(rc))
        {
            pgmLock(pVM);

            /*
             * Initialize and insert the RAM range (if required).
             */
            PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
            if (!fRamExists)
            {
                pRamNew->GCPhys        = GCPhys;
                pRamNew->GCPhysLast    = GCPhysLast;
                pRamNew->pszDesc       = pszDesc;
                pRamNew->cb            = cb;
                pRamNew->fFlags        = 0;
                pRamNew->pvR3          = NULL;

                PPGMPAGE pPage = &pRamNew->aPages[0];
                for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
                {
                    PGM_PAGE_INIT(pPage,
                                  pReq->aPages[iPage].HCPhysGCPhys,
                                  pReq->aPages[iPage].idPage,
                                  PGMPAGETYPE_ROM,
                                  PGM_PAGE_STATE_ALLOCATED);

                    pRomPage->Virgin = *pPage;
                }

                pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
            }
            else
            {
                PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
                for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
                {
                    PGM_PAGE_SET_TYPE(pPage,   PGMPAGETYPE_ROM);
                    PGM_PAGE_SET_HCPHYS(pPage, pReq->aPages[iPage].HCPhysGCPhys);
                    PGM_PAGE_SET_STATE(pPage,  PGM_PAGE_STATE_ALLOCATED);
                    PGM_PAGE_SET_PAGEID(pPage, pReq->aPages[iPage].idPage);

                    pRomPage->Virgin = *pPage;
                }

                pRamNew = pRam;
            }
            pgmUnlock(pVM);


            /*
             * Register the write access handler for the range (PGMROMPROT_READ_ROM_WRITE_IGNORE).
             */
            rc = PGMR3HandlerPhysicalRegister(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhys, GCPhysLast,
#if 0 /** @todo we actually need a ring-3 write handler here for shadowed ROMs, so hack REM! */
                                              pgmR3PhysRomWriteHandler, pRomNew,
#else
                                              NULL, NULL,
#endif
                                              NULL, "pgmPhysRomWriteHandler", MMHyperCCToR0(pVM, pRomNew),
                                              NULL, "pgmPhysRomWriteHandler", MMHyperCCToRC(pVM, pRomNew), pszDesc);
            if (RT_SUCCESS(rc))
            {
                pgmLock(pVM);

                /*
                 * Copy the image over to the virgin pages.
                 * This must be done after linking in the RAM range.
                 */
                PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
                for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
                {
                    void *pvDstPage;
                    PPGMPAGEMAP pMapIgnored;
                    rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pMapIgnored, &pvDstPage);
                    if (RT_FAILURE(rc))
                    {
                        VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
                        break;
                    }
                    memcpy(pvDstPage, (const uint8_t *)pvBinary + (iPage << PAGE_SHIFT), PAGE_SIZE);
                }
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Initialize the ROM range.
                     * Note that the Virgin member of the pages has already been initialized above.
                     */
                    pRomNew->GCPhys = GCPhys;
                    pRomNew->GCPhysLast = GCPhysLast;
                    pRomNew->cb = cb;
                    pRomNew->fFlags = fFlags;
                    pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAG_PERMANENT_BINARY ? pvBinary : NULL;
                    pRomNew->pszDesc = pszDesc;

                    for (unsigned iPage = 0; iPage < cPages; iPage++)
                    {
                        PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
                        pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
                        PGM_PAGE_INIT_ZERO_REAL(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
                    }

                    /*
                     * Insert the ROM range, tell REM and return successfully.
                     */
                    pRomNew->pNextR3 = pRom;
                    pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
                    pRomNew->pNextRC = pRom ? MMHyperCCToRC(pVM, pRom) : NIL_RTRCPTR;

                    if (pRomPrev)
                    {
                        pRomPrev->pNextR3 = pRomNew;
                        pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
                        pRomPrev->pNextRC = MMHyperCCToRC(pVM, pRomNew);
                    }
                    else
                    {
                        pVM->pgm.s.pRomRangesR3 = pRomNew;
                        pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
                        pVM->pgm.s.pRomRangesRC = MMHyperCCToRC(pVM, pRomNew);
                    }

                    REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false); /** @todo fix shadowing and REM. */

                    GMMR3AllocatePagesCleanup(pReq);
                    pgmUnlock(pVM);
                    return VINF_SUCCESS;
                }

                /* bail out */

                pgmUnlock(pVM);
                int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
                AssertRC(rc2);
                pgmLock(pVM);
            }

            /* Only unlink and free the RAM range if we created it above;
               never touch a pre-existing one. */
            if (!fRamExists)
            {
                pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
                MMHyperFree(pVM, pRamNew);
            }
        }
        MMHyperFree(pVM, pRomNew);
    }

    /** @todo Purge the mapping cache or something... */
    GMMR3FreeAllocatedPages(pVM, pReq);
    GMMR3AllocatePagesCleanup(pReq);
    pgmUnlock(pVM);
    return rc;
}

/**
 * \#PF Handler callback for ROM write accesses.
 *
 * @returns VINF_SUCCESS if the handler has carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPhys          The physical address the guest is writing to.
 * @param   pvPhys          The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   pvUser          User argument.
 */
/*static - shut up warning */
DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
    const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
    Assert(iPage < (pRom->cb >> PAGE_SHIFT));
    PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
    switch (pRomPage->enmProt)
    {
        /*
         * Ignore.
         */
        case PGMROMPROT_READ_ROM_WRITE_IGNORE:
        case PGMROMPROT_READ_RAM_WRITE_IGNORE:
            return VINF_SUCCESS;

        /*
         * Write to the ram page.
         */
        case PGMROMPROT_READ_ROM_WRITE_RAM:
        case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
        {
            /* This should be impossible now, pvPhys doesn't work cross page any longer. */
            Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);

            /*
             * Take the lock, do lazy allocation, map the page and copy the data.
             *
             * Note that we have to bypass the mapping TLB since it works on
             * guest physical addresses and entering the shadow page would
             * kind of screw things up...
             */
            int rc = pgmLock(pVM);
            AssertRC(rc);

            if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pRomPage->Shadow) != PGM_PAGE_STATE_ALLOCATED))
            {
                rc = pgmPhysPageMakeWritable(pVM, &pRomPage->Shadow, GCPhys);
                if (RT_FAILURE(rc))
                {
                    pgmUnlock(pVM);
                    return rc;
                }
            }

            void *pvDstPage;
            PPGMPAGEMAP pMapIgnored;
            rc = pgmPhysPageMap(pVM, &pRomPage->Shadow, GCPhys & X86_PTE_PG_MASK, &pMapIgnored, &pvDstPage);
            if (RT_SUCCESS(rc))
                memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);

            pgmUnlock(pVM);
            return rc;
        }

        default:
            AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
                                   pRom->aPages[iPage].enmProt, iPage, GCPhys),
                                  VERR_INTERNAL_ERROR);
    }
}



/**
 * Called by PGMR3Reset to reset the shadow, switch to the virgin,
 * and verify that the virgin part is untouched.
 *
 * This is done after the normal memory has been cleared.
 *
 * ASSUMES that the caller owns the PGM lock.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
int pgmR3PhysRomReset(PVM pVM)
{
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        const uint32_t cPages = pRom->cb >> PAGE_SHIFT;

        if (pRom->fFlags & PGMPHYS_ROM_FLAG_SHADOWED)
        {
            /*
             * Reset the physical handler.
             */
            int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
            AssertRCReturn(rc, rc);

            /*
             * What we do with the shadow pages depends on the memory
             * preallocation option. If not enabled, we'll just throw
             * out all the dirty pages and replace them by the zero page.
             */
            if (1) /** @todo !pVM->pgm.s.fRamPreAlloc */
            {
                /* Count dirty shadow pages. */
                uint32_t cDirty = 0;
                uint32_t iPage = cPages;
                while (iPage-- > 0)
                    if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
                        cDirty++;
                if (cDirty)
                {
                    /* Free the dirty pages. */
                    PGMMFREEPAGESREQ pReq;
                    rc = GMMR3FreePagesPrepare(pVM, &pReq, cDirty, GMMACCOUNT_BASE);
                    AssertRCReturn(rc, rc);

                    uint32_t iReqPage = 0;
                    for (iPage = 0; iPage < cPages; iPage++)
                        if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
                        {
                            pReq->aPages[iReqPage].idPage = PGM_PAGE_GET_PAGEID(&pRom->aPages[iPage].Shadow);
                            iReqPage++;
                        }

                    rc = GMMR3FreePagesPerform(pVM, pReq);
                    GMMR3FreePagesCleanup(pReq);
                    AssertRCReturn(rc, rc);

                    /* setup the zero page. */
                    for (iPage = 0; iPage < cPages; iPage++)
                        if (PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) != PGM_PAGE_STATE_ZERO)
                            PGM_PAGE_INIT_ZERO_REAL(&pRom->aPages[iPage].Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
                }
            }
            else
            {
                /* clear all the pages. */
                for (uint32_t iPage = 0; iPage < cPages; iPage++)
                {
                    const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
                    rc = pgmPhysPageMakeWritable(pVM, &pRom->aPages[iPage].Shadow, GCPhys);
                    if (RT_FAILURE(rc))
                        break;

                    void *pvDstPage;
                    PPGMPAGEMAP pMapIgnored;
                    rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pMapIgnored, &pvDstPage);
                    if (RT_FAILURE(rc))
                        break;
                    ASMMemZeroPage(pvDstPage);
                }
                AssertRCReturn(rc, rc);
            }
        }

#ifdef VBOX_STRICT
        /*
         * Verify that the virgin page is unchanged if possible.
         */
        if (pRom->pvOriginal)
        {
            uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
            for (uint32_t iPage = 0; iPage < cPages; iPage++, pbSrcPage += PAGE_SIZE)
            {
                const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
                PPGMPAGEMAP pMapIgnored;
                void *pvDstPage;
                int rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pMapIgnored, &pvDstPage);
                if (RT_FAILURE(rc))
                    break;
                if (memcmp(pvDstPage, pbSrcPage, PAGE_SIZE))
                    LogRel(("pgmR3PhysRomReset: %RGp rom page changed (%s) - loaded saved state?\n",
                            GCPhys, pRom->pszDesc));
            }
        }
#endif
    }

    return VINF_SUCCESS;
}


/**
 * Change the shadowing of a range of ROM pages.
 *
 * This is intended for implementing chipset specific memory registers
 * and will not be very strict about the input. It will silently ignore
 * any pages that are not the part of a shadowed ROM.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   GCPhys      Where to start. Page aligned.
 * @param   cb          How much to change. Page aligned.
 * @param   enmProt     The new ROM protection.
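 *
 * @par Example
 *      A sketch of a chipset emulation routing reads to RAM while still
 *      ignoring writes; the address and size are assumptions:
 * @code
 *      rc = PGMR3PhysRomProtect(pVM, UINT32_C(0xf0000), _64K,
 *                               PGMROMPROT_READ_RAM_WRITE_IGNORE);
 * @endcode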
 */
VMMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
{
    /*
     * Check input
     */
    if (!cb)
        return VINF_SUCCESS;
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);

    /*
     * Process the request.
     */
    bool fFlushedPool = false;
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
        if (    GCPhys     <= pRom->GCPhysLast
            &&  GCPhysLast >= pRom->GCPhys)
        {
            /*
             * Iterate the relevant pages and make the necessary changes.
             */
            bool fChanges = false;
            uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
                                  ? pRom->cb >> PAGE_SHIFT
                                  : (GCPhysLast - pRom->GCPhys + 1) >> PAGE_SHIFT;
            for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
                 iPage < cPages;
                 iPage++)
            {
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
                {
                    fChanges = true;

                    /* flush the page pool first so we don't leave any usage references dangling. */
                    if (!fFlushedPool)
                    {
                        pgmPoolFlushAll(pVM);
                        fFlushedPool = true;
                    }

                    PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
                    PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
                    PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));

                    *pOld = *pRamPage;
                    *pRamPage = *pNew;
                    /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
                }
                pRomPage->enmProt = enmProt;
            }

            /*
             * Reset the access handler if we made changes, no need
             * to optimize this.
             */
            if (fChanges)
            {
                int rc = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
                AssertRCReturn(rc, rc);
            }

            /* Advance - cb isn't updated. */
            GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
        }

    return VINF_SUCCESS;
}


/**
 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
 * registration APIs call to inform PGM about memory registrations.
 *
 * It registers the physical memory range with PGM. MM is responsible
 * for the toplevel things - allocation and locking - while PGM is taking
 * care of all the details and implements the physical address space virtualization.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pvRam       HC virtual address of the RAM range. (page aligned)
 * @param   GCPhys      GC physical address of the RAM range. (page aligned)
 * @param   cb          Size of the RAM range. (page aligned)
 * @param   fFlags      Flags, MM_RAM_*.
 * @param   paPages     Pointer to an array of physical page descriptors.
 * @param   pszDesc     Description string.
 */
1676VMMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
1677{
1678 /*
1679 * Validate input.
1680 * (Not so important because callers are only MMR3PhysRegister()
1681 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
1682 */
1683 Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
1684
1685 Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
1686 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
1687 Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
1688 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
1689 Assert(!(fFlags & ~0xfff));
1690 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1691 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
1692 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
1693 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1694 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1695 if (GCPhysLast < GCPhys)
1696 {
1697 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
1698 return VERR_INVALID_PARAMETER;
1699 }
1700
1701 /*
1702 * Find range location and check for conflicts.
1703 */
1704 PPGMRAMRANGE pPrev = NULL;
1705 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesR3;
1706 while (pCur)
1707 {
1708 if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
1709 {
1710 AssertMsgFailed(("Conflict! This cannot happen!\n"));
1711 return VERR_PGM_RAM_CONFLICT;
1712 }
1713 if (GCPhysLast < pCur->GCPhys)
1714 break;
1715
1716 /* next */
1717 pPrev = pCur;
1718 pCur = pCur->pNextR3;
1719 }
1720
1721 /*
1722 * Allocate RAM range.
1723 * Small ranges are allocated from the heap, big ones have separate mappings.
1724 */
1725 size_t cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
1726 PPGMRAMRANGE pNew;
1727 RTRCPTR RCPtrNew;
1728 int rc = VERR_NO_MEMORY;
1729 if (cbRam > PAGE_SIZE / 2)
1730 { /* large */
1731 cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
1732 rc = SUPPageAlloc(cbRam >> PAGE_SHIFT, (void **)&pNew);
1733 if (VBOX_SUCCESS(rc))
1734 {
1735 RTGCPTR GCPtrNew;
1736 rc = MMR3HyperMapHCRam(pVM, pNew, cbRam, true,
1737 MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "ram range (%s)", pszDesc),
1738 &GCPtrNew);
1739 if (VBOX_SUCCESS(rc))
1740 {
1741 RCPtrNew = GCPtrNew;
1742 Assert(MMHyperR3ToRC(pVM, pNew) == GCPtrNew && RCPtrNew == GCPtrNew);
1743 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
1744 }
1745 else
1746 {
1747 AssertMsgFailed(("MMR3HyperMapHCRam(,,%#x,,,) -> %Vrc\n", cbRam, rc));
1748 SUPPageFree(pNew, cbRam >> PAGE_SHIFT);
1749 }
1750 }
1751 else
1752 AssertMsgFailed(("SUPPageAlloc(%#x,,) -> %Vrc\n", cbRam >> PAGE_SHIFT, rc));
1753
1754 }
1755/** @todo Make VGA and VMMDev register their memory at init time before the HMA size is fixed. */
1756 if (RT_FAILURE(rc))
1757 { /* small + fallback (vga) */
1758 rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
1759 if (VBOX_SUCCESS(rc))
1760 RCPtrNew = MMHyperR3ToRC(pVM, pNew);
1761 else
1762 AssertMsgFailed(("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc));
1763 }
1764 if (VBOX_SUCCESS(rc))
1765 {
1766 /*
1767 * Initialize the range.
1768 */
1769 pNew->pvR3 = pvRam;
1770 pNew->GCPhys = GCPhys;
1771 pNew->GCPhysLast = GCPhysLast;
1772 pNew->cb = cb;
1773 pNew->fFlags = fFlags;
1774 pNew->paChunkR3Ptrs = NULL;
1775
1776 unsigned iPage = cb >> PAGE_SHIFT;
1777 if (paPages)
1778 {
1779 while (iPage-- > 0)
1780 {
1781 PGM_PAGE_INIT(&pNew->aPages[iPage], paPages[iPage].Phys & X86_PTE_PAE_PG_MASK, NIL_GMM_PAGEID,
1782 fFlags & MM_RAM_FLAGS_MMIO2 ? PGMPAGETYPE_MMIO2 : PGMPAGETYPE_RAM,
1783 PGM_PAGE_STATE_ALLOCATED);
1784 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
1785 }
1786 }
1787 else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1788 {
1789 /* Allocate memory for chunk to HC ptr lookup array. */
1790 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->paChunkR3Ptrs);
1791 AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc), rc);
1792
1793 /* Physical memory will be allocated on demand. */
1794 while (iPage-- > 0)
1795 {
1796 PGM_PAGE_INIT(&pNew->aPages[iPage], 0, NIL_GMM_PAGEID, PGMPAGETYPE_RAM, PGM_PAGE_STATE_ZERO);
1797 pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
1798 }
1799 }
1800 else
1801 {
1802 Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
1803 RTHCPHYS HCPhysDummyPage = MMR3PageDummyHCPhys(pVM);
1804 while (iPage-- > 0)
1805 {
1806 PGM_PAGE_INIT(&pNew->aPages[iPage], HCPhysDummyPage, NIL_GMM_PAGEID, PGMPAGETYPE_MMIO, PGM_PAGE_STATE_ZERO);
1807 pNew->aPages[iPage].HCPhys |= fFlags; /** @todo PAGE FLAGS*/
1808 }
1809 }
1810
1811 /*
1812 * Insert the new RAM range.
1813 */
1814 pgmLock(pVM);
1815 pNew->pNextR3 = pCur;
1816 pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : NIL_RTR0PTR;
1817 pNew->pNextRC = pCur ? MMHyperCCToRC(pVM, pCur) : NIL_RTRCPTR;
1818 if (pPrev)
1819 {
1820 pPrev->pNextR3 = pNew;
1821 pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
1822 pPrev->pNextRC = RCPtrNew;
1823 }
1824 else
1825 {
1826 pVM->pgm.s.pRamRangesR3 = pNew;
1827 pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
1828 pVM->pgm.s.pRamRangesRC = RCPtrNew;
1829 }
1830 pgmUnlock(pVM);
1831 }
1832 return rc;
1833}
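
/*
 * Usage sketch (illustrative only, not compiled; assumes a PVM pVM in scope
 * and uses a made-up guest physical address): registering one page of
 * reserved MMIO space. No backing memory and no page descriptors are
 * supplied, which is exactly the MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO
 * case asserted above.
 *
 *      int rc = PGMR3PhysRegister(pVM, NULL, UINT32_C(0xfee00000), PAGE_SIZE,
 *                                 MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO,
 *                                 NULL, "Example: reserved MMIO page");
 *      AssertRC(rc);
 */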
1834
1835#ifndef VBOX_WITH_NEW_PHYS_CODE
1836
1837/**
1838 * Register a chunk of the physical memory range with PGM. MM is responsible
1839 * for the top-level things - allocation and locking - while PGM takes
1840 * care of all the details and implements the physical address space virtualization.
1841 *
1842 *
1843 * @returns VBox status.
1844 * @param pVM The VM handle.
1845 * @param pvRam HC virtual address of the RAM range. (page aligned)
1846 * @param GCPhys GC physical address of the RAM range. (page aligned)
1847 * @param cb Size of the RAM range. (page aligned)
1848 * @param fFlags Flags, MM_RAM_*.
1849 * @param paPages Pointer to an array of physical page descriptors.
1850 * @param pszDesc Description string.
1851 */
1852VMMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
1853{
1854 NOREF(pszDesc);
1855
1856 /*
1857 * Validate input.
1858 * (Not so important because callers are only MMR3PhysRegister()
1859 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
1860 */
1861 Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
1862
1863 Assert(paPages);
1864 Assert(pvRam);
1865 Assert(!(fFlags & ~0xfff));
1866 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
1867 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
1868 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
1869 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
1870 Assert(VM_IS_EMT(pVM));
1871 Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1872 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
1873
1874 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1875 if (GCPhysLast < GCPhys)
1876 {
1877 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
1878 return VERR_INVALID_PARAMETER;
1879 }
1880
1881 /*
1882 * Find existing range location.
1883 */
1884 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1885 while (pRam)
1886 {
1887 RTGCPHYS off = GCPhys - pRam->GCPhys;
1888 if ( off < pRam->cb
1889 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1890 break;
1891
1892 pRam = pRam->CTX_SUFF(pNext);
1893 }
1894 AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
1895
1896 unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
1897 unsigned iPage = cb >> PAGE_SHIFT;
1898 if (paPages)
1899 {
1900 while (iPage-- > 0)
1901 pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
1902 }
1903 off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
1904 pRam->paChunkR3Ptrs[off] = (uintptr_t)pvRam;
1905
1906 /* Notify the recompiler. */
1907 REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);
1908
1909 return VINF_SUCCESS;
1910}
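
/*
 * For reference, a sketch of how the paChunkR3Ptrs array filled in above is
 * meant to be consumed when translating a guest physical address inside a
 * dynamically allocated range into a ring-3 address (assumes GCPhys has
 * already been verified to lie within pRam and the chunk to be registered):
 *
 *      unsigned idxChunk = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
 *      RTHCUINTPTR uHCPtr = pRam->paChunkR3Ptrs[idxChunk]
 *                         + (GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK);
 */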
1911
1912
1913/**
1914 * Allocate missing physical pages for an existing guest RAM range.
1915 *
1916 * @returns VBox status.
1917 * @param pVM The VM handle.
1918 * @param pGCPhys Pointer to the GC physical address inside the range to grow.
1919 */
1920VMMR3DECL(int) PGM3PhysGrowRange(PVM pVM, PCRTGCPHYS pGCPhys)
1921{
1922 RTGCPHYS GCPhys = *pGCPhys;
1923
1924 /*
1925 * Walk range list.
1926 */
1927 pgmLock(pVM);
1928
1929 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1930 while (pRam)
1931 {
1932 RTGCPHYS off = GCPhys - pRam->GCPhys;
1933 if ( off < pRam->cb
1934 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
1935 {
1936 bool fRangeExists = false;
1937 unsigned off = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
1938
1939 /* Note: A request made from another thread may end up in EMT after somebody else has already allocated the range. */
1940 if (pRam->paChunkR3Ptrs[off])
1941 fRangeExists = true;
1942
1943 pgmUnlock(pVM);
1944 if (fRangeExists)
1945 return VINF_SUCCESS;
1946 return pgmr3PhysGrowRange(pVM, GCPhys);
1947 }
1948
1949 pRam = pRam->CTX_SUFF(pNext);
1950 }
1951 pgmUnlock(pVM);
1952 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1953}
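
/*
 * Note that the address is passed by pointer rather than by value; this lets
 * pgmr3PhysGrowRange() below marshal the call to EMT through VMR3ReqCall(),
 * whose variadic arguments are pointer sized (RTGCPHYS may be wider than a
 * host pointer). A minimal call sketch:
 *
 *      RTGCPHYS GCPhysFault = GCPhys;
 *      int rc = PGM3PhysGrowRange(pVM, &GCPhysFault);
 */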
1954
1955
1956/**
1957 * Allocate missing physical pages for an existing guest RAM range.
1958 *
1959 * @returns VBox status.
1960 * @param pVM The VM handle.
1961 * @param GCPhys GC physical address inside the range to grow.
1962 * It is rounded down to a chunk boundary before allocating.
1963 */
1964int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
1965{
1966 void *pvRam;
1967 int rc;
1968
1969 /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
1970 if (!VM_IS_EMT(pVM))
1971 {
1972 PVMREQ pReq;
1973 const RTGCPHYS GCPhysParam = GCPhys;
1974
1975 AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));
1976
1977 rc = VMR3ReqCall(pVM, VMREQDEST_ALL, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, &GCPhysParam);
1978 if (VBOX_SUCCESS(rc))
1979 {
1980 rc = pReq->iStatus;
1981 VMR3ReqFree(pReq);
1982 }
1983 return rc;
1984 }
1985
1986 /* Round down to chunk boundary */
1987 GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;
1988
1989 STAM_COUNTER_INC(&pVM->pgm.s.StatR3DynRamGrow);
1990 STAM_COUNTER_ADD(&pVM->pgm.s.StatR3DynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));
1991
1992 Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %VGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));
1993
1994 unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
1995
1996 for (;;)
1997 {
1998 rc = SUPPageAlloc(cPages, &pvRam);
1999 if (VBOX_SUCCESS(rc))
2000 {
2001
2002 rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
2003 if (VBOX_SUCCESS(rc))
2004 return rc;
2005
2006 SUPPageFree(pvRam, cPages);
2007 }
2008
2009 VMSTATE enmVMState = VMR3GetState(pVM);
2010 if (enmVMState != VMSTATE_RUNNING)
2011 {
2012 AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %VGp!\n", GCPhys));
2013 LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %VGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
2014 return rc;
2015 }
2016
2017 LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
2018
2019 /* Pause first, then inform Main. */
2020 rc = VMR3SuspendNoSave(pVM);
2021 AssertRC(rc);
2022
2023 VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM");
2024
2025 /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
2026 rc = VMR3WaitForResume(pVM);
2027
2028 /* Retry */
2029 LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
2030 }
2031}
2032
2033#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2034
2035
2036/**
2037 * Interface that MMR3RomRegister() and MMR3PhysReserve() call to update the
2038 * flags of existing RAM ranges.
2039 *
2040 * @returns VBox status.
2041 * @param pVM The VM handle.
2042 * @param GCPhys GC physical address of the RAM range. (page aligned)
2043 * @param cb Size of the RAM range. (page aligned)
2044 * @param fFlags The OR flags, MM_RAM_* \#defines.
2045 * @param fMask The AND mask for the flags; MM_RAM_* bits not in the mask are cleared before ORing in fFlags.
2046 */
2047VMMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
2048{
2049 Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));
2050
2051 /*
2052 * Validate input.
2053 * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
2054 */
2055 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
2056 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
2057 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2058 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2059 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
2060
2061 /*
2062 * Lookup the range.
2063 */
2064 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2065 while (pRam && GCPhys > pRam->GCPhysLast)
2066 pRam = pRam->CTX_SUFF(pNext);
2067 if ( !pRam
2068 || GCPhys > pRam->GCPhysLast
2069 || GCPhysLast < pRam->GCPhys)
2070 {
2071 AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
2072 return VERR_INVALID_PARAMETER;
2073 }
2074
2075 /*
2076 * Update the requested flags.
2077 */
2078 RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
2079 | fMask;
2080 unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
2081 unsigned iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2082 for ( ; iPage < iPageEnd; iPage++)
2083 pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
2084
2085 return VINF_SUCCESS;
2086}
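
/*
 * Usage sketch (illustrative only, not compiled; GCPhysRom and cbRom are
 * placeholders): setting MM_RAM_FLAGS_ROM on a range. With fMask = 0 none of
 * the old RESERVED/ROM/MMIO/MMIO2 bits survive the update, so the pages end
 * up flagged as ROM only:
 *
 *      int rc = PGMR3PhysSetFlags(pVM, GCPhysRom, cbRom, MM_RAM_FLAGS_ROM, 0);
 */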
2087
2088
2089/**
2090 * Sets the Address Gate 20 state.
2091 *
2092 * @param pVM VM handle.
2093 * @param fEnable True if the gate should be enabled.
2094 * False if the gate should be disabled.
2095 */
2096VMMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable)
2097{
2098 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVM->pgm.s.fA20Enabled));
2099 if (pVM->pgm.s.fA20Enabled != (RTUINT)fEnable)
2100 {
2101 pVM->pgm.s.fA20Enabled = fEnable;
2102 pVM->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
2103 REMR3A20Set(pVM, fEnable);
2104 /** @todo we're not handling this correctly for VT-x / AMD-V. See #2911 */
2105 }
2106}
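
/*
 * Worked example of the mask arithmetic above:
 *
 *      fEnable = true:  GCPhysA20Mask = ~(RTGCPHYS)0 - every address bit passes.
 *      fEnable = false: GCPhysA20Mask = ~(RTGCPHYS)RT_BIT(20) - bit 20 is forced
 *                       to zero, so 0x00100000 wraps to 0x00000000 when masked.
 */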
2107
2108
2109/**
2110 * Tree enumeration callback for dealing with age rollover.
2111 * It will perform a simple compression of the current age.
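 *
 * The compression maps the 32-bit age counter onto the small values a new
 * epoch starts from (matching the ASSUMES iNow == 4 note below):
 *
 *      iAge == 0 (used since the last pass)      -> 4
 *      0 < iAge < 0xfffff000                     -> 1
 *      0xfffff000 <= iAge < 0xffffff00           -> 2
 *      iAge >= 0xffffff00                        -> 3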
2112 */
2113static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
2114{
2115 /* Age compression - ASSUMES iNow == 4. */
2116 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2117 if (pChunk->iAge >= UINT32_C(0xffffff00))
2118 pChunk->iAge = 3;
2119 else if (pChunk->iAge >= UINT32_C(0xfffff000))
2120 pChunk->iAge = 2;
2121 else if (pChunk->iAge)
2122 pChunk->iAge = 1;
2123 else /* iAge = 0 */
2124 pChunk->iAge = 4;
2125
2126 /* reinsert */
2127 PVM pVM = (PVM)pvUser;
2128 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2129 pChunk->AgeCore.Key = pChunk->iAge;
2130 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2131 return 0;
2132}
2133
2134
2135/**
2136 * Tree enumeration callback that updates the age of the chunks that have
2137 * been used since the last ageing pass.
2138 */
2139static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
2140{
2141 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
2142 if (!pChunk->iAge)
2143 {
2144 PVM pVM = (PVM)pvUser;
2145 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
2146 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
2147 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2148 }
2149
2150 return 0;
2151}
2152
2153
2154/**
2155 * Performs ageing of the ring-3 chunk mappings.
2156 *
2157 * @param pVM The VM handle.
2158 */
2159VMMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
2160{
2161 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
2162 pVM->pgm.s.ChunkR3Map.iNow++;
2163 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
2164 {
2165 pVM->pgm.s.ChunkR3Map.iNow = 4;
2166 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
2167 }
2168 else
2169 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
2170}
2171
2172
2173/**
2174 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
2175 */
2176typedef struct PGMR3PHYSCHUNKUNMAPCB
2177{
2178 PVM pVM; /**< The VM handle. */
2179 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
2180} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
2181
2182
2183/**
2184 * Callback used to find the mapping that's been unused for
2185 * the longest time.
2186 */
2187static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
2188{
2189 do
2190 {
2191 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
2192 if ( pChunk->iAge
2193 && !pChunk->cRefs)
2194 {
2195 /*
2196 * Check that it's not in any of the TLBs.
2197 */
2198 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
2199 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
2200 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
2201 {
2202 pChunk = NULL;
2203 break;
2204 }
2205 if (pChunk)
2206 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
2207 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
2208 {
2209 pChunk = NULL;
2210 break;
2211 }
2212 if (pChunk)
2213 {
2214 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
2215 return 1; /* done */
2216 }
2217 }
2218
2219 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
2220 pNode = pNode->pList;
2221 } while (pNode);
2222 return 0;
2223}
2224
2225
2226/**
2227 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
2228 *
2229 * The candidate will not be part of any TLBs, so no need to flush
2230 * anything afterwards.
2231 *
2232 * @returns Chunk id, or INT32_MAX if no suitable candidate was found.
2233 * @param pVM The VM handle.
2234 */
2235static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
2236{
2237 /*
2238 * Do tree ageing first?
2239 */
2240 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
2241 PGMR3PhysChunkAgeing(pVM);
2242
2243 /*
2244 * Enumerate the age tree starting with the left most node.
2245 */
2246 PGMR3PHYSCHUNKUNMAPCB Args;
2247 Args.pVM = pVM;
2248 Args.pChunk = NULL;
2249 if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
2250 return Args.pChunk->Core.Key;
2251 return INT32_MAX;
2252}
2253
2254
2255/**
2256 * Maps the given chunk into the ring-3 mapping cache.
2257 *
2258 * This will call ring-0.
2259 *
2260 * @returns VBox status code.
2261 * @param pVM The VM handle.
2262 * @param idChunk The chunk in question.
2263 * @param ppChunk Where to store the chunk tracking structure.
2264 *
2265 * @remarks Called from within the PGM critical section.
2266 */
2267int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
2268{
2269 int rc;
2270 /*
2271 * Allocate a new tracking structure first.
2272 */
2273#if 0 /* for later when we've got a separate mapping method for ring-0. */
2274 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
2275 AssertReturn(pChunk, VERR_NO_MEMORY);
2276#else
2277 PPGMCHUNKR3MAP pChunk;
2278 rc = MMHyperAlloc(pVM, sizeof(*pChunk), 0, MM_TAG_PGM_CHUNK_MAPPING, (void **)&pChunk);
2279 AssertRCReturn(rc, rc);
2280#endif
2281 pChunk->Core.Key = idChunk;
2282 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
2283 pChunk->iAge = 0;
2284 pChunk->cRefs = 0;
2285 pChunk->cPermRefs = 0;
2286 pChunk->pv = NULL;
2287
2288 /*
2289 * Request the ring-0 part to map the chunk in question and if
2290 * necessary unmap another one to make space in the mapping cache.
2291 */
2292 GMMMAPUNMAPCHUNKREQ Req;
2293 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
2294 Req.Hdr.cbReq = sizeof(Req);
2295 Req.pvR3 = NULL;
2296 Req.idChunkMap = idChunk;
2297 Req.idChunkUnmap = INT32_MAX;
2298 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
2299 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
2300 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
2301 if (VBOX_SUCCESS(rc))
2302 {
2303 /*
2304 * Update the tree.
2305 */
2306 /* insert the new one. */
2307 AssertPtr(Req.pvR3);
2308 pChunk->pv = Req.pvR3;
2309 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
2310 AssertRelease(fRc);
2311 pVM->pgm.s.ChunkR3Map.c++;
2312
2313 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
2314 AssertRelease(fRc);
2315
2316 /* remove the unmapped one. */
2317 if (Req.idChunkUnmap != INT32_MAX)
2318 {
2319 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
2320 AssertRelease(pUnmappedChunk);
2321 pUnmappedChunk->pv = NULL;
2322 pUnmappedChunk->Core.Key = UINT32_MAX;
2323#if 0 /* for later when we've got a separate mapping method for ring-0. */
2324 MMR3HeapFree(pUnmappedChunk);
2325#else
2326 MMHyperFree(pVM, pUnmappedChunk);
2327#endif
2328 pVM->pgm.s.ChunkR3Map.c--;
2329 }
2330 }
2331 else
2332 {
2333 AssertRC(rc);
2334#if 0 /* for later when we've got a separate mapping method for ring-0. */
2335 MMR3HeapFree(pChunk);
2336#else
2337 MMHyperFree(pVM, pChunk);
2338#endif
2339 pChunk = NULL;
2340 }
2341
2342 *ppChunk = pChunk;
2343 return rc;
2344}
2345
2346
2347/**
2348 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
2349 *
2350 * @returns see pgmR3PhysChunkMap.
2351 * @param pVM The VM handle.
2352 * @param idChunk The chunk to map.
2353 */
2354VMMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
2355{
2356 PPGMCHUNKR3MAP pChunk;
2357 return pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
2358}
2359
2360
2361/**
2362 * Invalidates the TLB for the ring-3 mapping cache.
2363 *
2364 * @param pVM The VM handle.
2365 */
2366VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
2367{
2368 pgmLock(pVM);
2369 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
2370 {
2371 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
2372 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
2373 }
2374 pgmUnlock(pVM);
2375}
2376
2377
2378/**
2379 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
2380 *
2381 * @returns The following VBox status codes.
2382 * @retval VINF_SUCCESS on success. FF cleared.
2383 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
2384 *
2385 * @param pVM The VM handle.
2386 */
2387VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
2388{
2389 pgmLock(pVM);
2390 int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
2391 if (rc == VERR_GMM_SEED_ME)
2392 {
2393 void *pvChunk;
2394 rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
2395 if (VBOX_SUCCESS(rc))
2396 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
2397 if (VBOX_FAILURE(rc))
2398 {
2399 LogRel(("PGM: GMM Seeding failed, rc=%Vrc\n", rc));
2400 rc = VINF_EM_NO_MEMORY;
2401 }
2402 }
2403 pgmUnlock(pVM);
2404 Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY);
2405 return rc;
2406}
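
/*
 * Usage sketch (illustrative only, not compiled): how an execution loop might
 * react to the force action flag, assuming the usual VM_FF_ISSET() check:
 *
 *      if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
 *      {
 *          rc = PGMR3PhysAllocateHandyPages(pVM);
 *          if (rc == VINF_EM_NO_MEMORY)
 *              return rc;
 *      }
 */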
2407