VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IOMR3Mmio.cpp

Last change on this file was 107194, checked in by vboxsync, 7 weeks ago

VMM: More adjustments for VBOX_WITH_ONLY_PGM_NEM_MODE, VBOX_WITH_MINIMAL_R0, VBOX_WITH_HWVIRT and such. jiraref:VBP-1466

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 28.8 KB
Line 
1/* $Id: IOMR3Mmio.cpp 107194 2024-11-29 14:47:06Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor, MMIO related APIs.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IOM_MMIO
33#include <VBox/vmm/iom.h>
34#include <VBox/sup.h>
35#include <VBox/vmm/mm.h>
36#include <VBox/vmm/stam.h>
37#include <VBox/vmm/dbgf.h>
38#include <VBox/vmm/pdmapi.h>
39#include <VBox/vmm/pdmdev.h>
40#include "IOMInternal.h"
41#include <VBox/vmm/vm.h>
42
43#include <VBox/param.h>
44#include <iprt/assert.h>
45#include <iprt/mem.h>
46#include <iprt/string.h>
47#include <VBox/log.h>
48#include <VBox/err.h>
49
50#include "IOMInline.h"
51
52
53#ifdef VBOX_WITH_STATISTICS
54
/**
 * Register statistics for a MMIO entry.
 *
 * Creates the per-region STAM nodes under "/IOM/MmioRegions/<first>-<last>".
 * The entry is assumed to be currently mapped, as the statistics name prefix
 * is derived from GCPhysMapping (callers invoke this at map time or after the
 * VM is created).
 *
 * @param   pVM         The cross context VM structure.
 * @param   pRegEntry   The MMIO registration entry to register statistics for.
 */
void iomR3MmioRegStats(PVM pVM, PIOMMMIOENTRYR3 pRegEntry)
{
    /* Ring-0/raw-mode counters are only interesting when the region has
       callbacks in those contexts. */
    bool const fDoRZ = pRegEntry->fRing0 || pRegEntry->fRawMode;
    PIOMMMIOSTATSENTRY pStats = &pVM->iom.s.paMmioStats[pRegEntry->idxStats];

    /* Format the prefix: */
    char szName[80];
    size_t cchPrefix = RTStrPrintf(szName, sizeof(szName), "/IOM/MmioRegions/%RGp-%RGp",
                                   pRegEntry->GCPhysMapping, pRegEntry->GCPhysMapping + pRegEntry->cbRegion - 1);

    /* Mangle the description if this isn't the first device instance: */
    const char *pszDesc     = pRegEntry->pszDesc;
    char       *pszFreeDesc = NULL;
    if (pRegEntry->pDevIns && pRegEntry->pDevIns->iInstance > 0 && pszDesc)
        pszDesc = pszFreeDesc = RTStrAPrintf2("%u / %s", pRegEntry->pDevIns->iInstance, pszDesc);

    /* Register statistics: */
    int rc = STAMR3Register(pVM, &pRegEntry->idxSelf, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_NONE, pszDesc); AssertRC(rc);
    RTStrFree(pszFreeDesc);

    /* Appends a suffix (incl. terminator) directly after the prefix in szName. */
# define SET_NM_SUFFIX(a_sz) memcpy(&szName[cchPrefix], a_sz, sizeof(a_sz))
    SET_NM_SUFFIX("/Read-Complicated");
    rc = STAMR3Register(pVM, &pStats->ComplicatedReads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
    SET_NM_SUFFIX("/Read-FFor00");
    rc = STAMR3Register(pVM, &pStats->FFor00Reads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
    SET_NM_SUFFIX("/Read-R3");
    rc = STAMR3Register(pVM, &pStats->ProfReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
    if (fDoRZ)
    {
        SET_NM_SUFFIX("/Read-RZ");
        rc = STAMR3Register(pVM, &pStats->ProfReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
        SET_NM_SUFFIX("/Read-RZtoR3");
        rc = STAMR3Register(pVM, &pStats->ReadRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
    }
    SET_NM_SUFFIX("/Read-Total");
    rc = STAMR3Register(pVM, &pStats->Reads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);

    SET_NM_SUFFIX("/Write-Complicated");
    rc = STAMR3Register(pVM, &pStats->ComplicatedWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
    SET_NM_SUFFIX("/Write-R3");
    rc = STAMR3Register(pVM, &pStats->ProfWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
    if (fDoRZ)
    {
        SET_NM_SUFFIX("/Write-RZ");
        rc = STAMR3Register(pVM, &pStats->ProfWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
        SET_NM_SUFFIX("/Write-RZtoR3");
        rc = STAMR3Register(pVM, &pStats->WriteRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
        SET_NM_SUFFIX("/Write-RZtoR3-Commit");
        rc = STAMR3Register(pVM, &pStats->CommitRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
    }
    SET_NM_SUFFIX("/Write-Total");
    rc = STAMR3Register(pVM, &pStats->Writes, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
}
111
112
113/**
114 * Deregister statistics for a MMIO entry.
115 */
116static void iomR3MmioDeregStats(PVM pVM, PIOMMMIOENTRYR3 pRegEntry, RTGCPHYS GCPhys)
117{
118 char szPrefix[80];
119 RTStrPrintf(szPrefix, sizeof(szPrefix), "/IOM/MmioRegions/%RGp-%RGp", GCPhys, GCPhys + pRegEntry->cbRegion - 1);
120 STAMR3DeregisterByPrefix(pVM->pUVM, szPrefix);
121}
122
123
/**
 * Grows the statistics table.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   cNewEntries     The minimum number of new entries.
 * @see     IOMR0IoPortGrowStatisticsTable
 */
static int iomR3MmioGrowStatisticsTable(PVM pVM, uint32_t cNewEntries)
{
    AssertReturn(cNewEntries <= _64K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);

    int rc;
# if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    if (!SUPR3IsDriverless())
    {
        /* With a ring-0 driver the table is owned by ring-0; ask it to grow
           the allocation on EMT(0) and double-check the result. */
        rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_GROW_MMIO_STATS, cNewEntries, NULL);
        AssertLogRelRCReturn(rc, rc);
        AssertReturn(cNewEntries <= pVM->iom.s.cMmioStatsAllocation, VERR_IOM_MMIO_IPE_2);
    }
    else
# endif
    {
        /*
         * Validate input and state.
         */
        uint32_t const cOldEntries = pVM->iom.s.cMmioStatsAllocation;
        AssertReturn(cNewEntries > cOldEntries, VERR_IOM_MMIO_IPE_1);
        AssertReturn(pVM->iom.s.cMmioStats <= cOldEntries, VERR_IOM_MMIO_IPE_2);

        /*
         * Calc size and allocate a new table.  The allocation is rounded up
         * to whole host pages, so use the extra space for entries too.
         */
        uint32_t const cbNew = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOSTATSENTRY), HOST_PAGE_SIZE);
        cNewEntries = cbNew / sizeof(IOMMMIOSTATSENTRY);

        PIOMMMIOSTATSENTRY const paMmioStats = (PIOMMMIOSTATSENTRY)RTMemPageAllocZ(cbNew);
        if (paMmioStats)
        {
            /*
             * Anything to copy over, update and free the old one.
             */
            PIOMMMIOSTATSENTRY const pOldMmioStats = pVM->iom.s.paMmioStats;
            if (pOldMmioStats)
                memcpy(paMmioStats, pOldMmioStats, cOldEntries * sizeof(IOMMMIOSTATSENTRY));

            pVM->iom.s.paMmioStats          = paMmioStats;
            pVM->iom.s.cMmioStatsAllocation = cNewEntries;

            /* RTMemPageFree tolerates NULL, so no need to check pOldMmioStats here. */
            RTMemPageFree(pOldMmioStats, RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOSTATSENTRY), HOST_PAGE_SIZE));

            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_NO_PAGE_MEMORY;
    }

    return rc;
}
183
184#endif /* VBOX_WITH_STATISTICS */
185
/**
 * Grows the MMIO registration (and lookup) tables.
 *
 * (Note: the original header comment said "I/O port registration statistics
 * table", which was a copy/paste leftover — this grows paMmioRegs/paMmioLookup.)
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   cNewEntries     The minimum number of new entries.
 * @see     IOMR0MmioGrowRegistrationTables
 */
static int iomR3MmioGrowTable(PVM pVM, uint32_t cNewEntries)
{
    AssertReturn(cNewEntries <= _4K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);

    int rc;
#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    if (!SUPR3IsDriverless())
    {
        /* With a ring-0 driver the tables are owned by ring-0; ask it to grow
           them on EMT(0) and double-check the result. */
        rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_GROW_MMIO_REGS, cNewEntries, NULL);
        AssertLogRelRCReturn(rc, rc);
        AssertReturn(cNewEntries <= pVM->iom.s.cMmioAlloc, VERR_IOM_MMIO_IPE_2);
    }
    else
#endif
    {
        /*
         * Validate input and state.
         */
        uint32_t const cOldEntries = pVM->iom.s.cMmioAlloc;
        AssertReturn(cNewEntries >= cOldEntries, VERR_IOM_MMIO_IPE_1);

        /*
         * Allocate the new tables.  We use a single page allocation covering
         * both the registration table and the lookup table (the lookup table
         * starts at offset cbRing3).
         */
        uint32_t const cbRing3  = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOENTRYR3),      HOST_PAGE_SIZE);
        uint32_t const cbShared = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOLOOKUPENTRY),  HOST_PAGE_SIZE);
        uint32_t const cbNew    = cbRing3 + cbShared;

        /* Use the rounded up space as best we can. */
        cNewEntries = RT_MIN(cbRing3 / sizeof(IOMMMIOENTRYR3), cbShared / sizeof(IOMMMIOLOOKUPENTRY));

        PIOMMMIOENTRYR3 const paRing3 = (PIOMMMIOENTRYR3)RTMemPageAllocZ(cbNew);
        if (paRing3)
        {
            PIOMMMIOLOOKUPENTRY const paLookup = (PIOMMMIOLOOKUPENTRY)((uintptr_t)paRing3 + cbRing3);

            /*
             * Copy over the old info and initialize the idxSelf and idxStats members.
             */
            if (pVM->iom.s.paMmioRegs != NULL)
            {
                memcpy(paRing3,  pVM->iom.s.paMmioRegs,   sizeof(paRing3[0])  * cOldEntries);
                memcpy(paLookup, pVM->iom.s.paMmioLookup, sizeof(paLookup[0]) * cOldEntries);
            }

            /* Pre-initialize every fresh entry (incl. the rounding slack past
               cNewEntries) so idxSelf/idxStats are always sane. */
            size_t i = cbRing3 / sizeof(*paRing3);
            while (i-- > cOldEntries)
            {
                paRing3[i].idxSelf  = (uint16_t)i;
                paRing3[i].idxStats = UINT16_MAX;
            }

            /*
             * Update the variables and free the old memory.
             */
            void * const pvFree = pVM->iom.s.paMmioRegs;

            pVM->iom.s.paMmioRegs   = paRing3;
            pVM->iom.s.paMmioLookup = paLookup;
            pVM->iom.s.cMmioAlloc   = cNewEntries;

            RTMemPageFree(pvFree,
                          RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOENTRYR3), HOST_PAGE_SIZE)
                          + RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOLOOKUPENTRY), HOST_PAGE_SIZE));

            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_NO_PAGE_MEMORY;
    }
    return rc;
}
267
268
/**
 * Worker for PDMDEVHLPR3::pfnMmioCreateEx.
 *
 * Registers a new MMIO region (without mapping it) and creates the matching
 * ad-hoc PGM RAM range.  Only callable on EMT(0) while the VM is being
 * created and before the registration tables are frozen.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The owning device instance.
 * @param   cbRegion    Size of the region; guest-page aligned, 0 < size
 *                      <= MM_MMIO_64_MAX.
 * @param   fFlags      IOMMMIO_FLAGS_XXX.
 * @param   pPciDev     Associated PCI device, optional.
 * @param   iPciRegion  PCI region number (only meaningful with pPciDev).
 * @param   pfnWrite    Write callback, optional if pfnRead is given.
 * @param   pfnRead     Read callback, optional if pfnWrite is given.
 * @param   pfnFill     Fill callback, optional.
 * @param   pvUser      User argument for the callbacks.
 * @param   pszDesc     Description, non-empty, less than 128 chars.  The
 *                      pointer is retained, so it must remain valid.
 * @param   phRegion    Where to return the region handle.
 */
VMMR3_INT_DECL(int) IOMR3MmioCreate(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS cbRegion, uint32_t fFlags, PPDMPCIDEV pPciDev,
                                    uint32_t iPciRegion, PFNIOMMMIONEWWRITE pfnWrite, PFNIOMMMIONEWREAD pfnRead,
                                    PFNIOMMMIONEWFILL pfnFill, void *pvUser, const char *pszDesc, PIOMMMIOHANDLE phRegion)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(phRegion, VERR_INVALID_POINTER);
    *phRegion = UINT32_MAX;
    PVMCPU const pVCpu = VMMGetCpu(pVM);
    AssertReturn(pVCpu && pVCpu->idCpu == 0, VERR_VM_THREAD_NOT_EMT);
    VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
    AssertReturn(!pVM->iom.s.fMmioFrozen, VERR_WRONG_ORDER);

    AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);

    AssertMsgReturn(cbRegion > 0 && cbRegion <= MM_MMIO_64_MAX, ("cbRegion=%#RGp (max %#RGp)\n", cbRegion, MM_MMIO_64_MAX),
                    VERR_OUT_OF_RANGE);
    AssertMsgReturn(!(cbRegion & GUEST_PAGE_OFFSET_MASK), ("cbRegion=%#RGp\n", cbRegion), VERR_UNSUPPORTED_ALIGNMENT);

    AssertMsgReturn(   !(fFlags & ~IOMMMIO_FLAGS_VALID_MASK)
                    && (fFlags & IOMMMIO_FLAGS_READ_MODE)  <= IOMMMIO_FLAGS_READ_DWORD_QWORD
                    && (fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD,
                    ("%#x\n", fFlags),
                    VERR_INVALID_FLAGS);

    AssertReturn(pfnWrite || pfnRead, VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(pfnWrite, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnRead, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnFill, VERR_INVALID_POINTER);

    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    AssertReturn(*pszDesc != '\0', VERR_INVALID_POINTER);
    AssertReturn(strlen(pszDesc) < 128, VERR_INVALID_POINTER);

    /*
     * Ensure that we've got table space for it.
     */
#ifndef VBOX_WITH_STATISTICS
    uint16_t const idxStats        = UINT16_MAX;
#else
    uint32_t const idxStats        = pVM->iom.s.cMmioStats;
    uint32_t const cNewMmioStats   = idxStats + 1;
    AssertReturn(cNewMmioStats <= _64K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);
    if (cNewMmioStats > pVM->iom.s.cMmioStatsAllocation)
    {
        int rc = iomR3MmioGrowStatisticsTable(pVM, cNewMmioStats);
        AssertRCReturn(rc, rc);
        /* Growing must not have consumed entries behind our back. */
        AssertReturn(idxStats == pVM->iom.s.cMmioStats, VERR_IOM_MMIO_IPE_1);
    }
#endif

    uint32_t idx = pVM->iom.s.cMmioRegs;
    if (idx >= pVM->iom.s.cMmioAlloc)
    {
        int rc = iomR3MmioGrowTable(pVM, pVM->iom.s.cMmioAlloc + 1);
        AssertRCReturn(rc, rc);
        AssertReturn(idx == pVM->iom.s.cMmioRegs, VERR_IOM_MMIO_IPE_1);
    }

    /*
     * Create a matching ad-hoc RAM range for this MMIO region.
     */
    uint16_t idRamRange = 0;
    int rc = PGMR3PhysMmioRegister(pVM, pVCpu, cbRegion, pszDesc, &idRamRange);
    AssertRCReturn(rc, rc);

    /*
     * Enter it.
     */
    pVM->iom.s.paMmioRegs[idx].cbRegion         = cbRegion;
    pVM->iom.s.paMmioRegs[idx].GCPhysMapping    = NIL_RTGCPHYS;  /* not mapped yet */
    pVM->iom.s.paMmioRegs[idx].pvUser           = pvUser;
    pVM->iom.s.paMmioRegs[idx].pDevIns          = pDevIns;
    pVM->iom.s.paMmioRegs[idx].pfnWriteCallback = pfnWrite;
    pVM->iom.s.paMmioRegs[idx].pfnReadCallback  = pfnRead;
    pVM->iom.s.paMmioRegs[idx].pfnFillCallback  = pfnFill;
    pVM->iom.s.paMmioRegs[idx].pszDesc          = pszDesc;
    pVM->iom.s.paMmioRegs[idx].pPciDev          = pPciDev;
    pVM->iom.s.paMmioRegs[idx].iPciRegion       = iPciRegion;
    pVM->iom.s.paMmioRegs[idx].idxStats         = (uint16_t)idxStats;
    pVM->iom.s.paMmioRegs[idx].fMapped          = false;
    pVM->iom.s.paMmioRegs[idx].fFlags           = fFlags;
    pVM->iom.s.paMmioRegs[idx].idRamRange       = idRamRange;
    pVM->iom.s.paMmioRegs[idx].idxSelf          = idx;

    /* Commit the counters only after the entry is fully initialized. */
    pVM->iom.s.cMmioRegs  = idx + 1;
#ifdef VBOX_WITH_STATISTICS
    pVM->iom.s.cMmioStats = cNewMmioStats;
#endif
    *phRegion = idx;
    return VINF_SUCCESS;
}
365
366
/**
 * Worker for PDMDEVHLPR3::pfnMmioMap.
 *
 * Maps region @a hRegion at @a GCPhys: registers the range with PGM and
 * inserts it into the sorted (by address) lookup table via binary search.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The calling EMT.
 * @param   pDevIns     The device instance that owns @a hRegion.
 * @param   hRegion     The region handle returned by IOMR3MmioCreate.
 * @param   GCPhys      Guest-page aligned mapping address.
 */
VMMR3_INT_DECL(int) IOMR3MmioMap(PVM pVM, PVMCPU pVCpu, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS GCPhys)
{
    /*
     * Validate input and state.
     */
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
    AssertReturn(hRegion < pVM->iom.s.cMmioRegs, VERR_IOM_INVALID_MMIO_HANDLE);
    PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
    AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);

    RTGCPHYS const cbRegion = pRegEntry->cbRegion;
    AssertMsgReturn(cbRegion > 0 && cbRegion <= MM_MMIO_64_MAX, ("cbRegion=%RGp\n", cbRegion), VERR_IOM_MMIO_IPE_1);
    RTGCPHYS const GCPhysLast = GCPhys + cbRegion - 1;

    AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK),
                          ("Misaligned! GCPhys=%RGp LB %RGp %s (%s[#%u])\n",
                           GCPhys, cbRegion, pRegEntry->pszDesc, pDevIns->pReg->szName, pDevIns->iInstance),
                          VERR_IOM_INVALID_MMIO_RANGE);
    AssertLogRelMsgReturn(GCPhysLast > GCPhys,
                          ("Wrapped! GCPhys=%RGp LB %RGp %s (%s[#%u])\n",
                           GCPhys, cbRegion, pRegEntry->pszDesc, pDevIns->pReg->szName, pDevIns->iInstance),
                          VERR_IOM_INVALID_MMIO_RANGE);

    /*
     * Do the mapping.
     */
    int rc = VINF_SUCCESS;
    IOM_LOCK_EXCL(pVM);

    if (!pRegEntry->fMapped)
    {
        uint32_t const cEntries = RT_MIN(pVM->iom.s.cMmioLookupEntries, pVM->iom.s.cMmioRegs);
        Assert(pVM->iom.s.cMmioLookupEntries == cEntries);

        PIOMMMIOLOOKUPENTRY paEntries = pVM->iom.s.paMmioLookup;
        PIOMMMIOLOOKUPENTRY pEntry;
        if (cEntries > 0)
        {
            /* Binary search for the insertion point; the insert (or conflict)
               is detected when the interval can no longer be narrowed. */
            uint32_t iFirst = 0;
            uint32_t iEnd   = cEntries;
            uint32_t i      = cEntries / 2;
            for (;;)
            {
                pEntry = &paEntries[i];
                if (pEntry->GCPhysLast < GCPhys)
                {
                    /* New range is entirely above entry i. */
                    i += 1;
                    if (i < iEnd)
                        iFirst = i;
                    else
                    {
                        /* Register with PGM before we shuffle the array: */
                        ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
                        rc = PGMR3PhysMmioMap(pVM, pVCpu, GCPhys, cbRegion, pRegEntry->idRamRange,
                                              pVM->iom.s.hNewMmioHandlerType, hRegion);
                        AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);

                        /* Insert after the entry we just considered: */
                        pEntry += 1;
                        if (i < cEntries)
                            memmove(pEntry + 1, pEntry, sizeof(*pEntry) * (cEntries - i));
                        break;
                    }
                }
                else if (pEntry->GCPhysFirst > GCPhysLast)
                {
                    /* New range is entirely below entry i. */
                    if (i > iFirst)
                        iEnd = i;
                    else
                    {
                        /* Register with PGM before we shuffle the array: */
                        ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
                        rc = PGMR3PhysMmioMap(pVM, pVCpu, GCPhys, cbRegion, pRegEntry->idRamRange,
                                              pVM->iom.s.hNewMmioHandlerType, hRegion);
                        AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);

                        /* Insert at the entry we just considered: */
                        if (i < cEntries)
                            memmove(pEntry + 1, pEntry, sizeof(*pEntry) * (cEntries - i));
                        break;
                    }
                }
                else
                {
                    /* Oops! We've got a conflict. */
                    AssertLogRelMsgFailed(("%RGp..%RGp (%s) conflicts with existing mapping %RGp..%RGp (%s)\n",
                                           GCPhys, GCPhysLast, pRegEntry->pszDesc,
                                           pEntry->GCPhysFirst, pEntry->GCPhysLast, pVM->iom.s.paMmioRegs[pEntry->idx].pszDesc));
                    IOM_UNLOCK_EXCL(pVM);
                    return VERR_IOM_MMIO_RANGE_CONFLICT;
                }

                i = iFirst + (iEnd - iFirst) / 2;
            }
        }
        else
        {
            /* First entry in the lookup table: */
            ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
            rc = PGMR3PhysMmioMap(pVM, pVCpu, GCPhys, cbRegion, pRegEntry->idRamRange,
                                  pVM->iom.s.hNewMmioHandlerType, hRegion);
            AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);

            pEntry = paEntries;
        }

        /*
         * Fill in the entry and bump the table size.
         */
        pRegEntry->fMapped  = true;
        pEntry->idx         = hRegion;
        pEntry->GCPhysFirst = GCPhys;
        pEntry->GCPhysLast  = GCPhysLast;
        pVM->iom.s.cMmioLookupEntries = cEntries + 1;

#ifdef VBOX_WITH_STATISTICS
        /* Don't register stats here when we're creating the VM as the
           statistics table may still be reallocated. */
        if (pVM->enmVMState >= VMSTATE_CREATED)
            iomR3MmioRegStats(pVM, pRegEntry);
#endif

#ifdef VBOX_STRICT
        /*
         * Assert table sanity (sorted, non-overlapping, valid indexes).
         */
        AssertMsg(paEntries[0].GCPhysLast >= paEntries[0].GCPhysFirst, ("%RGp %RGp\n", paEntries[0].GCPhysLast, paEntries[0].GCPhysFirst));
        AssertMsg(paEntries[0].idx < pVM->iom.s.cMmioRegs, ("%#x %#x\n", paEntries[0].idx, pVM->iom.s.cMmioRegs));

        RTGCPHYS GCPhysPrev = paEntries[0].GCPhysLast;
        for (size_t i = 1; i <= cEntries; i++)
        {
            AssertMsg(paEntries[i].GCPhysLast >= paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, paEntries[i].GCPhysLast, paEntries[i].GCPhysFirst));
            AssertMsg(paEntries[i].idx < pVM->iom.s.cMmioRegs, ("%u: %#x %#x\n", i, paEntries[i].idx, pVM->iom.s.cMmioRegs));
            AssertMsg(GCPhysPrev < paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, GCPhysPrev, paEntries[i].GCPhysFirst));
            GCPhysPrev = paEntries[i].GCPhysLast;
        }
#endif
    }
    else
    {
        AssertFailed();
        rc = VERR_IOM_MMIO_REGION_ALREADY_MAPPED;
    }

    IOM_UNLOCK_EXCL(pVM);
    return rc;
}
519
520
/**
 * Worker for PDMDEVHLPR3::pfnMmioUnmap.
 *
 * Removes region @a hRegion from the sorted lookup table (binary search) and
 * unregisters the range with PGM.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The calling EMT.
 * @param   pDevIns     The device instance that owns @a hRegion.
 * @param   hRegion     The region handle returned by IOMR3MmioCreate.
 */
VMMR3_INT_DECL(int) IOMR3MmioUnmap(PVM pVM, PVMCPU pVCpu, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
{
    /*
     * Validate input and state.
     */
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
    AssertReturn(hRegion < pVM->iom.s.cMmioRegs, VERR_IOM_INVALID_MMIO_HANDLE);
    PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
    AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);

    /*
     * Do the unmapping.
     */
    int rc;
    IOM_LOCK_EXCL(pVM);

    if (pRegEntry->fMapped)
    {
        RTGCPHYS const GCPhys     = pRegEntry->GCPhysMapping;
        RTGCPHYS const GCPhysLast = GCPhys + pRegEntry->cbRegion - 1;
        uint32_t const cEntries   = RT_MIN(pVM->iom.s.cMmioLookupEntries, pVM->iom.s.cMmioRegs);
        Assert(pVM->iom.s.cMmioLookupEntries == cEntries);
        Assert(cEntries > 0);

        /* Binary search the lookup table for our mapping address. */
        PIOMMMIOLOOKUPENTRY paEntries = pVM->iom.s.paMmioLookup;
        uint32_t iFirst = 0;
        uint32_t iEnd   = cEntries;
        uint32_t i      = cEntries / 2;
        for (;;)
        {
            PIOMMMIOLOOKUPENTRY pEntry = &paEntries[i];
            if (pEntry->GCPhysLast < GCPhys)
            {
                i += 1;
                if (i < iEnd)
                    iFirst = i;
                else
                {
                    /* Search exhausted without a hit - table is inconsistent. */
                    rc = VERR_IOM_MMIO_IPE_1;
                    AssertLogRelMsgFailedBreak(("%RGp..%RGp (%s) not found!\n", GCPhys, GCPhysLast, pRegEntry->pszDesc));
                }
            }
            else if (pEntry->GCPhysFirst > GCPhysLast)
            {
                if (i > iFirst)
                    iEnd = i;
                else
                {
                    /* Search exhausted without a hit - table is inconsistent. */
                    rc = VERR_IOM_MMIO_IPE_1;
                    AssertLogRelMsgFailedBreak(("%RGp..%RGp (%s) not found!\n", GCPhys, GCPhysLast, pRegEntry->pszDesc));
                }
            }
            else if (pEntry->idx == hRegion)
            {
                /* Found it: drop stats, close the gap in the table, then
                   unregister with PGM and mark the entry unmapped. */
                Assert(pEntry->GCPhysFirst == GCPhys);
                Assert(pEntry->GCPhysLast == GCPhysLast);
#ifdef VBOX_WITH_STATISTICS
                iomR3MmioDeregStats(pVM, pRegEntry, GCPhys);
#endif
                if (i + 1 < cEntries)
                    memmove(pEntry, pEntry + 1, sizeof(*pEntry) * (cEntries - i - 1));
                pVM->iom.s.cMmioLookupEntries = cEntries - 1;

                rc = PGMR3PhysMmioUnmap(pVM, pVCpu, GCPhys, pRegEntry->cbRegion, pRegEntry->idRamRange);
                AssertRC(rc);

                pRegEntry->fMapped = false;
                ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS);
                break;
            }
            else
            {
                /* The address range matched but belongs to someone else. */
                AssertLogRelMsgFailed(("Looking for %RGp..%RGp (%s), found %RGp..%RGp (%s) instead!\n",
                                       GCPhys, GCPhysLast, pRegEntry->pszDesc,
                                       pEntry->GCPhysFirst, pEntry->GCPhysLast, pVM->iom.s.paMmioRegs[pEntry->idx].pszDesc));
                rc = VERR_IOM_MMIO_IPE_1;
                break;
            }

            i = iFirst + (iEnd - iFirst) / 2;
        }

#ifdef VBOX_STRICT
        /*
         * Assert table sanity (sorted, non-overlapping, valid indexes).
         */
        AssertMsg(paEntries[0].GCPhysLast >= paEntries[0].GCPhysFirst, ("%RGp %RGp\n", paEntries[0].GCPhysLast, paEntries[0].GCPhysFirst));
        AssertMsg(paEntries[0].idx < pVM->iom.s.cMmioRegs, ("%#x %#x\n", paEntries[0].idx, pVM->iom.s.cMmioRegs));

        RTGCPHYS GCPhysPrev = paEntries[0].GCPhysLast;
        for (i = 1; i < cEntries - 1; i++)
        {
            AssertMsg(paEntries[i].GCPhysLast >= paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, paEntries[i].GCPhysLast, paEntries[i].GCPhysFirst));
            AssertMsg(paEntries[i].idx < pVM->iom.s.cMmioRegs, ("%u: %#x %#x\n", i, paEntries[i].idx, pVM->iom.s.cMmioRegs));
            AssertMsg(GCPhysPrev < paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, GCPhysPrev, paEntries[i].GCPhysFirst));
            GCPhysPrev = paEntries[i].GCPhysLast;
        }
#endif
    }
    else
    {
        AssertFailed();
        rc = VERR_IOM_MMIO_REGION_NOT_MAPPED;
    }

    IOM_UNLOCK_EXCL(pVM);
    return rc;
}
633
634
/**
 * Worker for PDMDEVHLPR3::pfnMmioReduce - not implemented.
 *
 * @returns VERR_NOT_IMPLEMENTED (always; asserts in strict builds).
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device instance that allegedly owns @a hRegion.
 * @param   hRegion     The region handle.
 * @param   cbRegion    The new (smaller) region size.
 */
VMMR3_INT_DECL(int) IOMR3MmioReduce(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS cbRegion)
{
    RT_NOREF(pVM, pDevIns, hRegion, cbRegion);
    AssertFailed();
    return VERR_NOT_IMPLEMENTED;
}
641
642
643/**
644 * Validates @a hRegion, making sure it belongs to @a pDevIns.
645 *
646 * @returns VBox status code.
647 * @param pVM The cross context VM structure.
648 * @param pDevIns The device which allegedly owns @a hRegion.
649 * @param hRegion The handle to validate.
650 */
651VMMR3_INT_DECL(int) IOMR3MmioValidateHandle(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
652{
653 AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
654 AssertReturn(hRegion < RT_MIN(pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc), VERR_IOM_INVALID_MMIO_HANDLE);
655 PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
656 AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);
657 return VINF_SUCCESS;
658}
659
660
661/**
662 * Gets the mapping address of MMIO region @a hRegion.
663 *
664 * @returns Mapping address if mapped, NIL_RTGCPHYS if not mapped or invalid
665 * input.
666 * @param pVM The cross context VM structure.
667 * @param pDevIns The device which allegedly owns @a hRegion.
668 * @param hRegion The handle to validate.
669 */
670VMMR3_INT_DECL(RTGCPHYS) IOMR3MmioGetMappingAddress(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
671{
672 AssertPtrReturn(pDevIns, NIL_RTGCPHYS);
673 AssertReturn(hRegion < RT_MIN(pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc), NIL_RTGCPHYS);
674 PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
675 AssertReturn(pRegEntry->pDevIns == pDevIns, NIL_RTGCPHYS);
676 return pRegEntry->GCPhysMapping;
677}
678
679
/**
 * Display all registered MMIO ranges.
 *
 * DBGF info handler ('mmio' style listing): one line per registration showing
 * handle, context suffix, size, mapping (or "unmapped"), PCI association and
 * description.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pHlp        The info helpers.
 * @param   pszArgs     Arguments, ignored.
 */
DECLCALLBACK(void) iomR3MmioInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
{
    RT_NOREF(pszArgs);

    /* No locking needed here as registrations are only happening during VMSTATE_CREATING. */
    pHlp->pfnPrintf(pHlp,
                    "MMIO registrations: %u (%u allocated)\n"
                    " ## Ctx %.*s %.*s PCI Description\n",
                    pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc,
                    sizeof(RTGCPHYS) * 2, "Size",
                    sizeof(RTGCPHYS) * 2 * 2 + 1, "Mapping");
    PIOMMMIOENTRYR3 paRegs = pVM->iom.s.paMmioRegs;
    for (uint32_t i = 0; i < pVM->iom.s.cMmioRegs; i++)
    {
        /* Context suffix: "+0" when ring-0 callbacks exist, "+C" for raw-mode. */
        const char * const pszRing = paRegs[i].fRing0 ? paRegs[i].fRawMode ? "+0+C" : "+0 "
                                   : paRegs[i].fRawMode ? "+C " : " ";
        /* Four variants: mapped/unmapped crossed with with/without PCI device. */
        if (paRegs[i].fMapped && paRegs[i].pPciDev)
            pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %RGp-%RGp pci%u/%u %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
                            paRegs[i].GCPhysMapping, paRegs[i].GCPhysMapping + paRegs[i].cbRegion - 1,
                            paRegs[i].pPciDev->idxSubDev, paRegs[i].iPciRegion, paRegs[i].pszDesc);
        else if (paRegs[i].fMapped && !paRegs[i].pPciDev)
            pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %RGp-%RGp %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
                            paRegs[i].GCPhysMapping, paRegs[i].GCPhysMapping + paRegs[i].cbRegion - 1, paRegs[i].pszDesc);
        else if (paRegs[i].pPciDev)
            pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %.*s pci%u/%u %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
                            sizeof(RTGCPHYS) * 2, "unmapped", paRegs[i].pPciDev->idxSubDev, paRegs[i].iPciRegion, paRegs[i].pszDesc);
        else
            pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %.*s %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
                            sizeof(RTGCPHYS) * 2, "unmapped", paRegs[i].pszDesc);
    }
}
718
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette