VirtualBox

source: vbox/trunk/src/VBox/VMM/MMPagePool.cpp@ 30037

Last change on this file since 30037 was 28800, checked in by vboxsync, 14 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 18.5 KB
/* $Id: MMPagePool.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
/** @file
 * MM - Memory Manager - Page Pool.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_MM_POOL
#include <VBox/mm.h>
#include <VBox/pgm.h>
#include <VBox/stam.h>
#include "MMInternal.h"
#include <VBox/vm.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#define USE_INLINE_ASM_BIT_OPS
#ifdef USE_INLINE_ASM_BIT_OPS
# include <iprt/asm.h>
#endif
#include <iprt/string.h>


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
#ifdef IN_RING3
static void *   mmR3PagePoolAlloc(PMMPAGEPOOL pPool);
static void     mmR3PagePoolFree(PMMPAGEPOOL pPool, void *pv);
#endif

/**
 * Initializes the page pool
 *
 * @return  VBox status.
 * @param   pVM     VM handle.
 * @thread  The Emulation Thread.
 */
int mmR3PagePoolInit(PVM pVM)
{
    AssertMsg(!pVM->mm.s.pPagePoolR3, ("Already initialized!\n"));

    /*
     * Allocate the pool structures.
     */
    /** @todo @bugref{1865},@bugref{3202}: mapping the page pool page into
     *        ring-0. Need to change the way we allocate it... */
    AssertReleaseReturn(sizeof(*pVM->mm.s.pPagePoolR3) + sizeof(*pVM->mm.s.pPagePoolLowR3) < PAGE_SIZE, VERR_INTERNAL_ERROR);
    int rc = SUPR3PageAllocEx(1, 0 /*fFlags*/, (void **)&pVM->mm.s.pPagePoolR3, NULL /*pR0Ptr*/, NULL /*paPages*/);
    if (RT_FAILURE(rc))
        return rc;
    memset(pVM->mm.s.pPagePoolR3, 0, PAGE_SIZE);
    pVM->mm.s.pPagePoolR3->pVM = pVM;
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cPages,       STAMTYPE_U32,     "/MM/Page/Def/cPages",       STAMUNIT_PAGES,  "Number of pages in the default pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cFreePages,   STAMTYPE_U32,     "/MM/Page/Def/cFreePages",   STAMUNIT_PAGES,  "Number of free pages in the default pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cSubPools,    STAMTYPE_U32,     "/MM/Page/Def/cSubPools",    STAMUNIT_COUNT,  "Number of sub pools in the default pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cAllocCalls,  STAMTYPE_COUNTER, "/MM/Page/Def/cAllocCalls",  STAMUNIT_CALLS,  "Number of MMR3PageAlloc() calls for the default pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cFreeCalls,   STAMTYPE_COUNTER, "/MM/Page/Def/cFreeCalls",   STAMUNIT_CALLS,  "Number of MMR3PageFree()+MMR3PageFreeByPhys() calls for the default pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cToPhysCalls, STAMTYPE_COUNTER, "/MM/Page/Def/cToPhysCalls", STAMUNIT_CALLS,  "Number of MMR3Page2Phys() calls for this pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cToVirtCalls, STAMTYPE_COUNTER, "/MM/Page/Def/cToVirtCalls", STAMUNIT_CALLS,  "Number of MMR3PagePhys2Page()+MMR3PageFreeByPhys() calls for the default pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolR3->cErrors,      STAMTYPE_COUNTER, "/MM/Page/Def/cErrors",      STAMUNIT_ERRORS, "Number of errors for the default pool.");

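    /*
     * The low (<4GB) pool descriptor lives in the same host page, directly
     * after the default pool descriptor; the AssertReleaseReturn above
     * guarantees that both structures fit within a single page.
     */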
    pVM->mm.s.pPagePoolLowR3 = pVM->mm.s.pPagePoolR3 + 1;
    pVM->mm.s.pPagePoolLowR3->pVM = pVM;
    pVM->mm.s.pPagePoolLowR3->fLow = true;
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cPages,       STAMTYPE_U32,     "/MM/Page/Low/cPages",       STAMUNIT_PAGES,  "Number of pages in the <4GB pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cFreePages,   STAMTYPE_U32,     "/MM/Page/Low/cFreePages",   STAMUNIT_PAGES,  "Number of free pages in the <4GB pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cSubPools,    STAMTYPE_U32,     "/MM/Page/Low/cSubPools",    STAMUNIT_COUNT,  "Number of sub pools in the <4GB pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cAllocCalls,  STAMTYPE_COUNTER, "/MM/Page/Low/cAllocCalls",  STAMUNIT_CALLS,  "Number of MMR3PageAllocLow() calls for the <4GB pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cFreeCalls,   STAMTYPE_COUNTER, "/MM/Page/Low/cFreeCalls",   STAMUNIT_CALLS,  "Number of MMR3PageFreeLow()+MMR3PageFreeByPhys() calls for the <4GB pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cToPhysCalls, STAMTYPE_COUNTER, "/MM/Page/Low/cToPhysCalls", STAMUNIT_CALLS,  "Number of MMR3Page2Phys() calls for the <4GB pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cToVirtCalls, STAMTYPE_COUNTER, "/MM/Page/Low/cToVirtCalls", STAMUNIT_CALLS,  "Number of MMR3PagePhys2Page()+MMR3PageFreeByPhys() calls for the <4GB pool.");
    STAM_REG(pVM, &pVM->mm.s.pPagePoolLowR3->cErrors,      STAMTYPE_COUNTER, "/MM/Page/Low/cErrors",      STAMUNIT_ERRORS, "Number of errors for the <4GB pool.");

#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVM->mm.s.pPagePoolR0    = (uintptr_t)pVM->mm.s.pPagePoolR3;
    pVM->mm.s.pPagePoolLowR0 = (uintptr_t)pVM->mm.s.pPagePoolLowR3;
#endif

    /** @todo init a mutex? */
    return VINF_SUCCESS;
}


/**
 * Release all locks and free the allocated memory.
 *
 * @param   pVM     VM handle.
 * @thread  The Emulation Thread.
 */
void mmR3PagePoolTerm(PVM pVM)
{
    if (pVM->mm.s.pPagePoolR3)
    {
        /*
         * Unlock all memory held by subpools and free the memory.
         * (The MM Heap will free the memory used for internal stuff.)
         */
        Assert(!pVM->mm.s.pPagePoolR3->fLow);
        PMMPAGESUBPOOL pSubPool = pVM->mm.s.pPagePoolR3->pHead;
        while (pSubPool)
        {
            int rc = SUPR3PageFreeEx(pSubPool->pvPages, pSubPool->cPages);
            AssertMsgRC(rc, ("SUPR3PageFreeEx(%p) failed with rc=%Rrc\n", pSubPool->pvPages, rc));
            pSubPool->pvPages = NULL;

            /* next */
            pSubPool = pSubPool->pNext;
        }
        pVM->mm.s.pPagePoolR3 = NULL;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
        pVM->mm.s.pPagePoolR0 = NIL_RTR0PTR;
#endif
    }

    if (pVM->mm.s.pPagePoolLowR3)
    {
        /*
         * Free the memory.
         */
        Assert(pVM->mm.s.pPagePoolLowR3->fLow);
        PMMPAGESUBPOOL pSubPool = pVM->mm.s.pPagePoolLowR3->pHead;
        while (pSubPool)
        {
            int rc = SUPR3LowFree(pSubPool->pvPages, pSubPool->cPages);
            AssertMsgRC(rc, ("SUPR3LowFree(%p) failed with rc=%d\n", pSubPool->pvPages, rc));
            pSubPool->pvPages = NULL;

            /* next */
            pSubPool = pSubPool->pNext;
        }
        pVM->mm.s.pPagePoolLowR3 = NULL;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
        pVM->mm.s.pPagePoolLowR0 = NIL_RTR0PTR;
#endif
    }
}


/**
 * Allocates a page from the page pool.
 *
 * @returns Pointer to allocated page(s).
 * @returns NULL on failure.
 * @param   pPool   Pointer to the page pool.
 * @thread  The Emulation Thread.
 */
DECLINLINE(void *) mmR3PagePoolAlloc(PMMPAGEPOOL pPool)
{
    VM_ASSERT_EMT(pPool->pVM);
    STAM_COUNTER_INC(&pPool->cAllocCalls);

    /*
     * Walk free list.
     */
    if (pPool->pHeadFree)
    {
        PMMPAGESUBPOOL pSub = pPool->pHeadFree;
        /* decrement free count and unlink if no more free entries. */
        if (!--pSub->cPagesFree)
            pPool->pHeadFree = pSub->pNextFree;
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages--;
#endif

        /* find free spot in bitmap. */
#ifdef USE_INLINE_ASM_BIT_OPS
        const int iPage = ASMBitFirstClear(pSub->auBitmap, pSub->cPages);
        if (iPage >= 0)
        {
            Assert(!ASMBitTest(pSub->auBitmap, iPage));
            ASMBitSet(pSub->auBitmap, iPage);
            return (uint8_t *)pSub->pvPages + PAGE_SIZE * iPage;
        }
#else
        unsigned *pu    = &pSub->auBitmap[0];
        unsigned *puEnd = &pSub->auBitmap[pSub->cPages / (sizeof(pSub->auBitmap) * 8)];
        while (pu < puEnd)
        {
            unsigned u;
            if ((u = *pu) != ~0U)
            {
                unsigned iBit = 0;
                unsigned uMask = 1;
                while (iBit < sizeof(pSub->auBitmap[0]) * 8)
                {
                    if (!(u & uMask))
                    {
                        *pu |= uMask;
                        return (uint8_t *)pSub->pvPages
                            + PAGE_SIZE * (iBit + ((uint8_t *)pu - (uint8_t *)&pSub->auBitmap[0]) * 8);
                    }
                    iBit++;
                    uMask <<= 1;
                }
                STAM_COUNTER_INC(&pPool->cErrors);
                AssertMsgFailed(("how odd, expected to find a free bit in %#x, but didn't\n", u));
            }
            /* next */
            pu++;
        }
#endif
        STAM_COUNTER_INC(&pPool->cErrors);
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages++;
#endif
        AssertMsgFailed(("how strange, expected to find a free bit in %p, but didn't (%d pages supposed to be free!)\n", pSub, pSub->cPagesFree + 1));
    }

    /*
     * Allocate new subpool.
     */
    unsigned        cPages = !pPool->fLow ? 128 : 32;
    PMMPAGESUBPOOL  pSub;
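    /*
     * The subpool and all of its bookkeeping live in one MMHyperAlloc block:
     * the MMPAGESUBPOOL header with its allocation bitmap, followed by the
     * SUPPAGE array (host-physical address of each page), the MMPPLOOKUPHCPHYS
     * AVL nodes for physical-to-pointer lookups, and finally a single
     * MMPPLOOKUPHCPTR node for the pointer-to-subpool lookup.  The pointer
     * arithmetic further down carves these regions out of that block.
     */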
    int rc = MMHyperAlloc(pPool->pVM,
                          RT_OFFSETOF(MMPAGESUBPOOL, auBitmap[cPages / (sizeof(pSub->auBitmap[0]) * 8)])
                          + (sizeof(SUPPAGE) + sizeof(MMPPLOOKUPHCPHYS)) * cPages
                          + sizeof(MMPPLOOKUPHCPTR),
                          0,
                          MM_TAG_MM_PAGE,
                          (void **)&pSub);
    if (RT_FAILURE(rc))
        return NULL;

    PSUPPAGE paPhysPages = (PSUPPAGE)&pSub->auBitmap[cPages / (sizeof(pSub->auBitmap[0]) * 8)];
    Assert((uintptr_t)paPhysPages >= (uintptr_t)&pSub->auBitmap[1]);
    if (!pPool->fLow)
    {
        rc = SUPR3PageAllocEx(cPages,
                              0 /* fFlags */,
                              &pSub->pvPages,
                              NULL,
                              paPhysPages);
        if (RT_FAILURE(rc))
            rc = VMSetError(pPool->pVM, rc, RT_SRC_POS,
                            N_("Failed to lock host %zd bytes of memory (out of memory)"), (size_t)cPages << PAGE_SHIFT);
    }
    else
        rc = SUPR3LowAlloc(cPages, &pSub->pvPages, NULL, paPhysPages);
    if (RT_SUCCESS(rc))
    {
        /*
         * Setup the sub structure and allocate the requested page.
         */
        pSub->cPages      = cPages;
        pSub->cPagesFree  = cPages - 1;
        pSub->paPhysPages = paPhysPages;
        memset(pSub->auBitmap, 0, cPages / 8);
        /* allocate first page. */
        pSub->auBitmap[0] |= 1;
        /* link into free chain. */
        pSub->pNextFree  = pPool->pHeadFree;
        pPool->pHeadFree = pSub;
        /* link into main chain. */
        pSub->pNext  = pPool->pHead;
        pPool->pHead = pSub;
        /* update pool statistics. */
        pPool->cSubPools++;
        pPool->cPages += cPages;
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages += cPages - 1;
#endif

        /*
         * Initialize the physical pages with backpointer to subpool.
         */
        unsigned i = cPages;
        while (i-- > 0)
        {
            AssertMsg(paPhysPages[i].Phys && !(paPhysPages[i].Phys & PAGE_OFFSET_MASK),
                      ("i=%d Phys=%d\n", i, paPhysPages[i].Phys));
            paPhysPages[i].uReserved = (RTHCUINTPTR)pSub;
        }

        /*
         * Initialize the physical lookup record with backpointers to the physical pages.
         */
        PMMPPLOOKUPHCPHYS paLookupPhys = (PMMPPLOOKUPHCPHYS)&paPhysPages[cPages];
        i = cPages;
        while (i-- > 0)
        {
            paLookupPhys[i].pPhysPage = &paPhysPages[i];
            paLookupPhys[i].Core.Key  = paPhysPages[i].Phys;
            RTAvlHCPhysInsert(&pPool->pLookupPhys, &paLookupPhys[i].Core);
        }

        /*
         * And the one record for virtual memory lookup.
         */
        PMMPPLOOKUPHCPTR pLookupVirt = (PMMPPLOOKUPHCPTR)&paLookupPhys[cPages];
        pLookupVirt->pSubPool = pSub;
        pLookupVirt->Core.Key = pSub->pvPages;
        RTAvlPVInsert(&pPool->pLookupVirt, &pLookupVirt->Core);

        /* return allocated page (first). */
        return pSub->pvPages;
    }

    MMHyperFree(pPool->pVM, pSub);
    STAM_COUNTER_INC(&pPool->cErrors);
    if (pPool->fLow)
        VMSetError(pPool->pVM, rc, RT_SRC_POS,
                   N_("Failed to expand page pool for memory below 4GB. Current size: %d pages"),
                   pPool->cPages);
    AssertMsgFailed(("Failed to expand pool%s. rc=%Rrc poolsize=%d\n",
                     pPool->fLow ? " (<4GB)" : "", rc, pPool->cPages));
    return NULL;
}


/**
 * Frees a page from the page pool.
 *
 * @param   pPool   Pointer to the page pool.
 * @param   pv      Pointer to the page to free.
 *                  I.e. pointer returned by mmR3PagePoolAlloc().
 * @thread  The Emulation Thread.
 */
DECLINLINE(void) mmR3PagePoolFree(PMMPAGEPOOL pPool, void *pv)
{
    VM_ASSERT_EMT(pPool->pVM);
    STAM_COUNTER_INC(&pPool->cFreeCalls);

    /*
     * Lookup the virtual address.
     */
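    /*
     * RTAvlPVGetBestFit with fAbove=false returns the node with the largest
     * key that is not above pv, i.e. the subpool whose page range could
     * contain pv; the range check below rejects pointers past its end.
     */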
    PMMPPLOOKUPHCPTR pLookup = (PMMPPLOOKUPHCPTR)RTAvlPVGetBestFit(&pPool->pLookupVirt, pv, false);
    if (    !pLookup
        ||  (uint8_t *)pv >= (uint8_t *)pLookup->pSubPool->pvPages + (pLookup->pSubPool->cPages << PAGE_SHIFT)
        )
    {
        STAM_COUNTER_INC(&pPool->cErrors);
        AssertMsgFailed(("invalid pointer %p\n", pv));
        return;
    }

    /*
     * Free the page.
     */
    PMMPAGESUBPOOL pSubPool = pLookup->pSubPool;
    /* clear bitmap bit */
    const unsigned iPage = ((uint8_t *)pv - (uint8_t *)pSubPool->pvPages) >> PAGE_SHIFT;
#ifdef USE_INLINE_ASM_BIT_OPS
    Assert(ASMBitTest(pSubPool->auBitmap, iPage));
    ASMBitClear(pSubPool->auBitmap, iPage);
#else
    unsigned iBit   = iPage % (sizeof(pSubPool->auBitmap[0]) * 8);
    unsigned iIndex = iPage / (sizeof(pSubPool->auBitmap[0]) * 8);
    pSubPool->auBitmap[iIndex] &= ~(1 << iBit);
#endif
    /* update stats. */
    pSubPool->cPagesFree++;
#ifdef VBOX_WITH_STATISTICS
    pPool->cFreePages++;
#endif
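    /* Relink into the free chain only on the 0 -> 1 transition; a subpool that
       already had free pages is already on the chain (or is its head). */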
    if (pSubPool->cPagesFree == 1)
    {
        pSubPool->pNextFree = pPool->pHeadFree;
        pPool->pHeadFree    = pSubPool;
    }
}


/**
 * Allocates a page from the page pool.
 *
 * This function may return pages whose physical addresses can be anywhere.
 * If you require a page to be within the first 4GB of physical memory, use
 * MMR3PageAllocLow().
 *
 * @returns Pointer to the allocated page.
 * @returns NULL on failure.
 * @param   pVM     VM handle.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(void *) MMR3PageAlloc(PVM pVM)
{
    /* Note: unprotected by locks; currently fine as it's used during init or under the PGM lock */
    return mmR3PagePoolAlloc(pVM->mm.s.pPagePoolR3);
}


/**
 * Allocates a page from the page pool and returns its physical address.
 *
 * This function may return pages whose physical addresses can be anywhere.
 * If you require a page to be within the first 4GB of physical memory, use
 * MMR3PageAllocLow().
 *
 * @returns Physical address of the allocated page.
 * @returns NIL_RTHCPHYS on failure.
 * @param   pVM     VM handle.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(RTHCPHYS) MMR3PageAllocPhys(PVM pVM)
{
    /* Note: unprotected by locks; currently fine as it's used during init or under the PGM lock */
    /** @todo optimize this, it's the most common case now. */
    void *pv = mmR3PagePoolAlloc(pVM->mm.s.pPagePoolR3);
    if (pv)
        return mmPagePoolPtr2Phys(pVM->mm.s.pPagePoolR3, pv);
    return NIL_RTHCPHYS;
}
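
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * an EMT-side component that needs a scratch page it will reference by
 * host-physical address might pair MMR3PageAllocPhys() with
 * MMR3PageFreeByPhys():
 *
 *     RTHCPHYS HCPhysScratch = MMR3PageAllocPhys(pVM);
 *     if (HCPhysScratch == NIL_RTHCPHYS)
 *         return VERR_NO_MEMORY;
 *     // ... hand HCPhysScratch to device emulation / paging code ...
 *     MMR3PageFreeByPhys(pVM, HCPhysScratch);
 */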


/**
 * Frees a page allocated from the page pool by MMR3PageAlloc() or
 * MMR3PageAllocPhys().
 *
 * @param   pVM     VM handle.
 * @param   pvPage  Pointer to the page.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(void) MMR3PageFree(PVM pVM, void *pvPage)
{
    mmR3PagePoolFree(pVM->mm.s.pPagePoolR3, pvPage);
}


/**
 * Allocates a page from the low page pool.
 *
 * @returns Pointer to the allocated page.
 * @returns NULL on failure.
 * @param   pVM     VM handle.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(void *) MMR3PageAllocLow(PVM pVM)
{
    return mmR3PagePoolAlloc(pVM->mm.s.pPagePoolLowR3);
}


/**
 * Frees a page allocated from the page pool by MMR3PageAllocLow().
 *
 * @param   pVM     VM handle.
 * @param   pvPage  Pointer to the page.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(void) MMR3PageFreeLow(PVM pVM, void *pvPage)
{
    mmR3PagePoolFree(pVM->mm.s.pPagePoolLowR3, pvPage);
}


/**
 * Frees a page allocated from the page pool by physical address.
 * This works for pages allocated by MMR3PageAlloc(), MMR3PageAllocPhys()
 * and MMR3PageAllocLow().
 *
 * @param   pVM         VM handle.
 * @param   HCPhysPage  The physical address of the page to be freed.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(void) MMR3PageFreeByPhys(PVM pVM, RTHCPHYS HCPhysPage)
{
    void *pvPage = mmPagePoolPhys2Ptr(pVM->mm.s.pPagePoolR3, HCPhysPage);
    if (pvPage)
        mmR3PagePoolFree(pVM->mm.s.pPagePoolR3, pvPage);
    else if ((pvPage = mmPagePoolPhys2Ptr(pVM->mm.s.pPagePoolLowR3, HCPhysPage)) != NULL)
        /* free the page back to the pool it actually came from. */
        mmR3PagePoolFree(pVM->mm.s.pPagePoolLowR3, pvPage);
    else
        AssertMsgFailed(("Invalid address HCPhysPage=%RHp\n", HCPhysPage));
}


/**
 * Gets the HC pointer to the dummy page.
 *
 * The dummy page is used as a placeholder to prevent potential bugs
 * from doing really bad things to the system.
 *
 * @returns Pointer to the dummy page.
 * @param   pVM     VM handle.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(void *) MMR3PageDummyHCPtr(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    if (!pVM->mm.s.pvDummyPage)
    {
        pVM->mm.s.pvDummyPage = mmR3PagePoolAlloc(pVM->mm.s.pPagePoolR3);
        AssertRelease(pVM->mm.s.pvDummyPage);
        pVM->mm.s.HCPhysDummyPage = mmPagePoolPtr2Phys(pVM->mm.s.pPagePoolR3, pVM->mm.s.pvDummyPage);
        AssertRelease(!(pVM->mm.s.HCPhysDummyPage & ~X86_PTE_PAE_PG_MASK));
    }
    return pVM->mm.s.pvDummyPage;
}


/**
 * Gets the HC physical address of the dummy page.
 *
 * The dummy page is used as a placeholder to prevent potential bugs
 * from doing really bad things to the system.
 *
 * @returns Physical address of the dummy page.
 * @param   pVM     VM handle.
 * @thread  The Emulation Thread.
 */
VMMR3DECL(RTHCPHYS) MMR3PageDummyHCPhys(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    if (!pVM->mm.s.pvDummyPage)
        MMR3PageDummyHCPtr(pVM);
    return pVM->mm.s.HCPhysDummyPage;
}

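/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * code that must fill a table slot with some harmless backing before the
 * real page exists can point it at the dummy page instead of leaving it
 * dangling:
 *
 *     RTHCPHYS HCPhysDummy = MMR3PageDummyHCPhys(pVM);
 *     // point the not-yet-backed entry at HCPhysDummy so stray accesses
 *     // hit a harmless page rather than arbitrary memory.
 */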