VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IOMR3Mmio.cpp

Last change on this file was 98103, checked in by vboxsync, 17 months ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 28.1 KB
RevLine 
[23]1/* $Id: IOMR3Mmio.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
[1]2/** @file
[81162]3 * IOM - Input / Output Monitor, MMIO related APIs.
[1]4 */
5
6/*
[98103]7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
[1]8 *
[96407]9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
[1]26 */
27
28
[57358]29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
[81383]32#define LOG_GROUP LOG_GROUP_IOM_MMIO
[35346]33#include <VBox/vmm/iom.h>
[1]34#include <VBox/sup.h>
[35346]35#include <VBox/vmm/mm.h>
36#include <VBox/vmm/stam.h>
37#include <VBox/vmm/dbgf.h>
38#include <VBox/vmm/pdmapi.h>
39#include <VBox/vmm/pdmdev.h>
[1]40#include "IOMInternal.h"
[35346]41#include <VBox/vmm/vm.h>
[1]42
43#include <VBox/param.h>
44#include <iprt/assert.h>
[92716]45#include <iprt/mem.h>
[1]46#include <iprt/string.h>
47#include <VBox/log.h>
48#include <VBox/err.h>
49
[37424]50#include "IOMInline.h"
[1]51
[37424]52
[80649]53#ifdef VBOX_WITH_STATISTICS
[1]54
55/**
[81162]56 * Register statistics for a MMIO entry.
[1]57 */
[81162]58void iomR3MmioRegStats(PVM pVM, PIOMMMIOENTRYR3 pRegEntry)
[1]59{
[81798]60 bool const fDoRZ = pRegEntry->fRing0 || pRegEntry->fRawMode;
61 PIOMMMIOSTATSENTRY pStats = &pVM->iom.s.paMmioStats[pRegEntry->idxStats];
[1]62
[81162]63 /* Format the prefix: */
[81156]64 char szName[80];
[82380]65 size_t cchPrefix = RTStrPrintf(szName, sizeof(szName), "/IOM/MmioRegions/%RGp-%RGp",
[81162]66 pRegEntry->GCPhysMapping, pRegEntry->GCPhysMapping + pRegEntry->cbRegion - 1);
67
68 /* Mangle the description if this isn't the first device instance: */
69 const char *pszDesc = pRegEntry->pszDesc;
70 char *pszFreeDesc = NULL;
[81156]71 if (pRegEntry->pDevIns && pRegEntry->pDevIns->iInstance > 0 && pszDesc)
72 pszDesc = pszFreeDesc = RTStrAPrintf2("%u / %s", pRegEntry->pDevIns->iInstance, pszDesc);
[81162]73
74 /* Register statistics: */
[81333]75 int rc = STAMR3Register(pVM, &pRegEntry->idxSelf, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_NONE, pszDesc); AssertRC(rc);
[81156]76 RTStrFree(pszFreeDesc);
[1]77
[81162]78# define SET_NM_SUFFIX(a_sz) memcpy(&szName[cchPrefix], a_sz, sizeof(a_sz))
[81333]79 SET_NM_SUFFIX("/Read-Complicated");
80 rc = STAMR3Register(pVM, &pStats->ComplicatedReads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
81 SET_NM_SUFFIX("/Read-FFor00");
82 rc = STAMR3Register(pVM, &pStats->FFor00Reads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
[81162]83 SET_NM_SUFFIX("/Read-R3");
84 rc = STAMR3Register(pVM, &pStats->ProfReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
[81798]85 if (fDoRZ)
86 {
87 SET_NM_SUFFIX("/Read-RZ");
88 rc = STAMR3Register(pVM, &pStats->ProfReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
89 SET_NM_SUFFIX("/Read-RZtoR3");
90 rc = STAMR3Register(pVM, &pStats->ReadRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
91 }
[81333]92 SET_NM_SUFFIX("/Read-Total");
93 rc = STAMR3Register(pVM, &pStats->Reads, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
94
95 SET_NM_SUFFIX("/Write-Complicated");
96 rc = STAMR3Register(pVM, &pStats->ComplicatedWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
97 SET_NM_SUFFIX("/Write-R3");
98 rc = STAMR3Register(pVM, &pStats->ProfWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
[81798]99 if (fDoRZ)
100 {
101 SET_NM_SUFFIX("/Write-RZ");
102 rc = STAMR3Register(pVM, &pStats->ProfWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_USED, szName, STAMUNIT_TICKS_PER_CALL, NULL); AssertRC(rc);
103 SET_NM_SUFFIX("/Write-RZtoR3");
104 rc = STAMR3Register(pVM, &pStats->WriteRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
105 SET_NM_SUFFIX("/Write-RZtoR3-Commit");
106 rc = STAMR3Register(pVM, &pStats->CommitRZToR3, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
107 }
[81333]108 SET_NM_SUFFIX("/Write-Total");
109 rc = STAMR3Register(pVM, &pStats->Writes, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, NULL); AssertRC(rc);
[80649]110}
111
112
113/**
[81162]114 * Deregister statistics for a MMIO entry.
[7726]115 */
[81162]116static void iomR3MmioDeregStats(PVM pVM, PIOMMMIOENTRYR3 pRegEntry, RTGCPHYS GCPhys)
[7726]117{
[81375]118 char szPrefix[80];
[82380]119 RTStrPrintf(szPrefix, sizeof(szPrefix), "/IOM/MmioRegions/%RGp-%RGp", GCPhys, GCPhys + pRegEntry->cbRegion - 1);
[81156]120 STAMR3DeregisterByPrefix(pVM->pUVM, szPrefix);
[7726]121}
122
[92716]123
124/**
125 * Grows the statistics table.
126 *
127 * @returns VBox status code.
128 * @param pVM The cross context VM structure.
129 * @param cNewEntries The minimum number of new entrie.
130 * @see IOMR0IoPortGrowStatisticsTable
131 */
132static int iomR3MmioGrowStatisticsTable(PVM pVM, uint32_t cNewEntries)
133{
134 AssertReturn(cNewEntries <= _64K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);
135
136 int rc;
137 if (!SUPR3IsDriverless())
138 {
139 rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_GROW_MMIO_STATS, cNewEntries, NULL);
140 AssertLogRelRCReturn(rc, rc);
141 AssertReturn(cNewEntries <= pVM->iom.s.cMmioStatsAllocation, VERR_IOM_MMIO_IPE_2);
142 }
143 else
144 {
145 /*
146 * Validate input and state.
147 */
148 uint32_t const cOldEntries = pVM->iom.s.cMmioStatsAllocation;
149 AssertReturn(cNewEntries > cOldEntries, VERR_IOM_MMIO_IPE_1);
150 AssertReturn(pVM->iom.s.cMmioStats <= cOldEntries, VERR_IOM_MMIO_IPE_2);
151
152 /*
153 * Calc size and allocate a new table.
154 */
[93554]155 uint32_t const cbNew = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOSTATSENTRY), HOST_PAGE_SIZE);
[92716]156 cNewEntries = cbNew / sizeof(IOMMMIOSTATSENTRY);
157
158 PIOMMMIOSTATSENTRY const paMmioStats = (PIOMMMIOSTATSENTRY)RTMemPageAllocZ(cbNew);
159 if (paMmioStats)
160 {
161 /*
162 * Anything to copy over, update and free the old one.
163 */
164 PIOMMMIOSTATSENTRY const pOldMmioStats = pVM->iom.s.paMmioStats;
165 if (pOldMmioStats)
166 memcpy(paMmioStats, pOldMmioStats, cOldEntries * sizeof(IOMMMIOSTATSENTRY));
167
168 pVM->iom.s.paMmioStats = paMmioStats;
169 pVM->iom.s.cMmioStatsAllocation = cNewEntries;
170
[93554]171 RTMemPageFree(pOldMmioStats, RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOSTATSENTRY), HOST_PAGE_SIZE));
[92716]172
173 rc = VINF_SUCCESS;
174 }
175 else
176 rc = VERR_NO_PAGE_MEMORY;
177 }
178
179 return rc;
180}
181
[81156]182#endif /* VBOX_WITH_STATISTICS */
[7726]183
[92716]184/**
185 * Grows the I/O port registration statistics table.
186 *
187 * @returns VBox status code.
188 * @param pVM The cross context VM structure.
189 * @param cNewEntries The minimum number of new entrie.
190 * @see IOMR0MmioGrowRegistrationTables
191 */
192static int iomR3MmioGrowTable(PVM pVM, uint32_t cNewEntries)
193{
194 AssertReturn(cNewEntries <= _4K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);
[1]195
[92716]196 int rc;
197 if (!SUPR3IsDriverless())
198 {
199 rc = VMMR3CallR0Emt(pVM, pVM->apCpusR3[0], VMMR0_DO_IOM_GROW_MMIO_REGS, cNewEntries, NULL);
200 AssertLogRelRCReturn(rc, rc);
201 AssertReturn(cNewEntries <= pVM->iom.s.cMmioAlloc, VERR_IOM_MMIO_IPE_2);
202 }
203 else
204 {
205 /*
206 * Validate input and state.
207 */
208 uint32_t const cOldEntries = pVM->iom.s.cMmioAlloc;
209 AssertReturn(cNewEntries >= cOldEntries, VERR_IOM_MMIO_IPE_1);
210
211 /*
212 * Allocate the new tables. We use a single allocation for the three tables (ring-0,
213 * ring-3, lookup) and does a partial mapping of the result to ring-3.
214 */
[93554]215 uint32_t const cbRing3 = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOENTRYR3), HOST_PAGE_SIZE);
216 uint32_t const cbShared = RT_ALIGN_32(cNewEntries * sizeof(IOMMMIOLOOKUPENTRY), HOST_PAGE_SIZE);
[92716]217 uint32_t const cbNew = cbRing3 + cbShared;
218
219 /* Use the rounded up space as best we can. */
220 cNewEntries = RT_MIN(cbRing3 / sizeof(IOMMMIOENTRYR3), cbShared / sizeof(IOMMMIOLOOKUPENTRY));
221
222 PIOMMMIOENTRYR3 const paRing3 = (PIOMMMIOENTRYR3)RTMemPageAllocZ(cbNew);
223 if (paRing3)
224 {
225 PIOMMMIOLOOKUPENTRY const paLookup = (PIOMMMIOLOOKUPENTRY)((uintptr_t)paRing3 + cbRing3);
226
227 /*
228 * Copy over the old info and initialize the idxSelf and idxStats members.
229 */
230 if (pVM->iom.s.paMmioRegs != NULL)
231 {
232 memcpy(paRing3, pVM->iom.s.paMmioRegs, sizeof(paRing3[0]) * cOldEntries);
233 memcpy(paLookup, pVM->iom.s.paMmioLookup, sizeof(paLookup[0]) * cOldEntries);
234 }
235
236 size_t i = cbRing3 / sizeof(*paRing3);
237 while (i-- > cOldEntries)
238 {
239 paRing3[i].idxSelf = (uint16_t)i;
240 paRing3[i].idxStats = UINT16_MAX;
241 }
242
243 /*
244 * Update the variables and free the old memory.
245 */
246 void * const pvFree = pVM->iom.s.paMmioRegs;
247
248 pVM->iom.s.paMmioRegs = paRing3;
249 pVM->iom.s.paMmioLookup = paLookup;
250 pVM->iom.s.cMmioAlloc = cNewEntries;
251
252 RTMemPageFree(pvFree,
[93554]253 RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOENTRYR3), HOST_PAGE_SIZE)
254 + RT_ALIGN_32(cOldEntries * sizeof(IOMMMIOLOOKUPENTRY), HOST_PAGE_SIZE));
[92716]255
256 rc = VINF_SUCCESS;
257 }
258 else
259 rc = VERR_NO_PAGE_MEMORY;
260 }
261 return rc;
262}
263
264
/**
 * Worker for PDMDEVHLPR3::pfnMmioCreateEx.
 *
 * Registers a new MMIO region with IOM without mapping it: the new entry gets
 * GCPhysMapping = NIL_RTGCPHYS and fMapped = false, the mapping being done
 * separately (see IOMR3MmioMap).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device instance that will own the region.
 * @param   cbRegion    Size of the region; non-zero, at most MM_MMIO_64_MAX,
 *                      and guest-page aligned.
 * @param   fFlags      IOMMMIO_FLAGS_XXX read/write mode flags.
 * @param   pPciDev     Associated PCI device, optional (NULL).
 * @param   iPciRegion  PCI region number (only used together with pPciDev).
 * @param   pfnWrite    Write callback, optional if pfnRead is given.
 * @param   pfnRead     Read callback, optional if pfnWrite is given.
 * @param   pfnFill     Fill callback, optional.
 * @param   pvUser      User argument stored for the callbacks.
 * @param   pszDesc     Description; non-empty, fewer than 128 characters.
 *                      Only the pointer is stored - must stay valid.
 * @param   phRegion    Where to return the region handle (index into the
 *                      registration table); set to UINT32_MAX on failure.
 *
 * @note    Must be called on EMT(0) while the VM is in the 'creating' state
 *          and before MMIO registrations are frozen.
 */
VMMR3_INT_DECL(int) IOMR3MmioCreate(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS cbRegion, uint32_t fFlags, PPDMPCIDEV pPciDev,
                                    uint32_t iPciRegion, PFNIOMMMIONEWWRITE pfnWrite, PFNIOMMMIONEWREAD pfnRead,
                                    PFNIOMMMIONEWFILL pfnFill, void *pvUser, const char *pszDesc, PIOMMMIOHANDLE phRegion)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(phRegion, VERR_INVALID_POINTER);
    *phRegion = UINT32_MAX;  /* Make sure the output is invalid until we succeed. */
    VM_ASSERT_EMT0_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
    VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
    AssertReturn(!pVM->iom.s.fMmioFrozen, VERR_WRONG_ORDER);

    AssertPtrReturn(pDevIns, VERR_INVALID_POINTER);

    AssertMsgReturn(cbRegion > 0 && cbRegion <= MM_MMIO_64_MAX, ("cbRegion=%#RGp (max %#RGp)\n", cbRegion, MM_MMIO_64_MAX),
                    VERR_OUT_OF_RANGE);
    AssertMsgReturn(!(cbRegion & GUEST_PAGE_OFFSET_MASK), ("cbRegion=%#RGp\n", cbRegion), VERR_UNSUPPORTED_ALIGNMENT);

    AssertMsgReturn(   !(fFlags & ~IOMMMIO_FLAGS_VALID_MASK)
                    && (fFlags & IOMMMIO_FLAGS_READ_MODE)  <= IOMMMIO_FLAGS_READ_DWORD_QWORD
                    && (fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD,
                    ("%#x\n", fFlags),
                    VERR_INVALID_FLAGS);

    /* At least one of the read/write callbacks must be supplied: */
    AssertReturn(pfnWrite || pfnRead, VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(pfnWrite, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnRead, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnFill, VERR_INVALID_POINTER);

    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    AssertReturn(*pszDesc != '\0', VERR_INVALID_POINTER);
    AssertReturn(strlen(pszDesc) < 128, VERR_INVALID_POINTER);

    /*
     * Ensure that we've got table space for it.
     */
#ifndef VBOX_WITH_STATISTICS
    uint16_t const idxStats = UINT16_MAX;  /* No stats: marker value. */
#else
    uint32_t const idxStats = pVM->iom.s.cMmioStats;
    uint32_t const cNewMmioStats = idxStats + 1;
    AssertReturn(cNewMmioStats <= _64K, VERR_IOM_TOO_MANY_MMIO_REGISTRATIONS);
    if (cNewMmioStats > pVM->iom.s.cMmioStatsAllocation)
    {
        int rc = iomR3MmioGrowStatisticsTable(pVM, cNewMmioStats);
        AssertRCReturn(rc, rc);
        /* Growing must not have changed the entry count behind our back: */
        AssertReturn(idxStats == pVM->iom.s.cMmioStats, VERR_IOM_MMIO_IPE_1);
    }
#endif

    uint32_t idx = pVM->iom.s.cMmioRegs;
    if (idx >= pVM->iom.s.cMmioAlloc)
    {
        int rc = iomR3MmioGrowTable(pVM, pVM->iom.s.cMmioAlloc + 1);
        AssertRCReturn(rc, rc);
        AssertReturn(idx == pVM->iom.s.cMmioRegs, VERR_IOM_MMIO_IPE_1);
    }

    /*
     * Enter it.
     */
    pVM->iom.s.paMmioRegs[idx].cbRegion         = cbRegion;
    pVM->iom.s.paMmioRegs[idx].GCPhysMapping    = NIL_RTGCPHYS;  /* not mapped yet */
    pVM->iom.s.paMmioRegs[idx].pvUser           = pvUser;
    pVM->iom.s.paMmioRegs[idx].pDevIns          = pDevIns;
    pVM->iom.s.paMmioRegs[idx].pfnWriteCallback = pfnWrite;
    pVM->iom.s.paMmioRegs[idx].pfnReadCallback  = pfnRead;
    pVM->iom.s.paMmioRegs[idx].pfnFillCallback  = pfnFill;
    pVM->iom.s.paMmioRegs[idx].pszDesc          = pszDesc;
    pVM->iom.s.paMmioRegs[idx].pPciDev          = pPciDev;
    pVM->iom.s.paMmioRegs[idx].iPciRegion       = iPciRegion;
    pVM->iom.s.paMmioRegs[idx].idxStats         = (uint16_t)idxStats;
    pVM->iom.s.paMmioRegs[idx].fMapped          = false;
    pVM->iom.s.paMmioRegs[idx].fFlags           = fFlags;
    pVM->iom.s.paMmioRegs[idx].idxSelf          = idx;

    /* Only commit the counters once the entry is fully initialized: */
    pVM->iom.s.cMmioRegs = idx + 1;
#ifdef VBOX_WITH_STATISTICS
    pVM->iom.s.cMmioStats = cNewMmioStats;
#endif
    *phRegion = idx;
    return VINF_SUCCESS;
}
352
353
/**
 * Worker for PDMDEVHLPR3::pfnMmioMap.
 *
 * Maps a previously created MMIO region (see IOMR3MmioCreate) at @a GCPhys:
 * inserts an entry into the address-sorted lookup table (binary search for
 * the insertion point) and registers the range with PGM.
 *
 * @returns VBox status code.
 * @retval  VERR_IOM_MMIO_RANGE_CONFLICT if the range overlaps an existing
 *          mapping.
 * @retval  VERR_IOM_MMIO_REGION_ALREADY_MAPPED if @a hRegion is mapped.
 * @param   pVM         The cross context VM structure.
 * @param   pDevIns     The device instance that owns @a hRegion.
 * @param   hRegion     The handle of the region to map.
 * @param   GCPhys      Guest physical address to map it at; guest-page
 *                      aligned, and the range must not wrap.
 */
VMMR3_INT_DECL(int) IOMR3MmioMap(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS GCPhys)
{
    /*
     * Validate input and state.
     */
    AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
    AssertReturn(hRegion < pVM->iom.s.cMmioRegs, VERR_IOM_INVALID_MMIO_HANDLE);
    PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
    AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);

    RTGCPHYS const cbRegion = pRegEntry->cbRegion;
    AssertMsgReturn(cbRegion > 0 && cbRegion <= MM_MMIO_64_MAX, ("cbRegion=%RGp\n", cbRegion), VERR_IOM_MMIO_IPE_1);
    RTGCPHYS const GCPhysLast = GCPhys + cbRegion - 1;

    AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK),
                          ("Misaligned! GCPhys=%RGp LB %RGp %s (%s[#%u])\n",
                           GCPhys, cbRegion, pRegEntry->pszDesc, pDevIns->pReg->szName, pDevIns->iInstance),
                          VERR_IOM_INVALID_MMIO_RANGE);
    AssertLogRelMsgReturn(GCPhysLast > GCPhys,
                          ("Wrapped! GCPhys=%RGp LB %RGp %s (%s[#%u])\n",
                           GCPhys, cbRegion, pRegEntry->pszDesc, pDevIns->pReg->szName, pDevIns->iInstance),
                          VERR_IOM_INVALID_MMIO_RANGE);

    /*
     * Do the mapping.
     */
    int rc = VINF_SUCCESS;
    IOM_LOCK_EXCL(pVM);

    if (!pRegEntry->fMapped)
    {
        uint32_t const cEntries = RT_MIN(pVM->iom.s.cMmioLookupEntries, pVM->iom.s.cMmioRegs);
        Assert(pVM->iom.s.cMmioLookupEntries == cEntries);

        /* Binary search for the insertion point in the address-sorted lookup table: */
        PIOMMMIOLOOKUPENTRY paEntries = pVM->iom.s.paMmioLookup;
        PIOMMMIOLOOKUPENTRY pEntry;
        if (cEntries > 0)
        {
            uint32_t iFirst = 0;
            uint32_t iEnd   = cEntries;
            uint32_t i      = cEntries / 2;
            for (;;)
            {
                pEntry = &paEntries[i];
                if (pEntry->GCPhysLast < GCPhys)
                {
                    /* New range lies entirely above entry i: */
                    i += 1;
                    if (i < iEnd)
                        iFirst = i;
                    else
                    {
                        /* Register with PGM before we shuffle the array: */
                        ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
                        rc = PGMR3PhysMMIORegister(pVM, GCPhys, cbRegion, pVM->iom.s.hNewMmioHandlerType,
                                                   hRegion, pRegEntry->pszDesc);
                        AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);

                        /* Insert after the entry we just considered: */
                        pEntry += 1;
                        if (i < cEntries)
                            memmove(pEntry + 1, pEntry, sizeof(*pEntry) * (cEntries - i));
                        break;
                    }
                }
                else if (pEntry->GCPhysFirst > GCPhysLast)
                {
                    /* New range lies entirely below entry i: */
                    if (i > iFirst)
                        iEnd = i;
                    else
                    {
                        /* Register with PGM before we shuffle the array: */
                        ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
                        rc = PGMR3PhysMMIORegister(pVM, GCPhys, cbRegion, pVM->iom.s.hNewMmioHandlerType,
                                                   hRegion, pRegEntry->pszDesc);
                        AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);

                        /* Insert at the entry we just considered: */
                        if (i < cEntries)
                            memmove(pEntry + 1, pEntry, sizeof(*pEntry) * (cEntries - i));
                        break;
                    }
                }
                else
                {
                    /* Oops! We've got a conflict. */
                    AssertLogRelMsgFailed(("%RGp..%RGp (%s) conflicts with existing mapping %RGp..%RGp (%s)\n",
                                           GCPhys, GCPhysLast, pRegEntry->pszDesc,
                                           pEntry->GCPhysFirst, pEntry->GCPhysLast, pVM->iom.s.paMmioRegs[pEntry->idx].pszDesc));
                    IOM_UNLOCK_EXCL(pVM);
                    return VERR_IOM_MMIO_RANGE_CONFLICT;
                }

                i = iFirst + (iEnd - iFirst) / 2;
            }
        }
        else
        {
            /* First entry in the lookup table: */
            ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, GCPhys);
            rc = PGMR3PhysMMIORegister(pVM, GCPhys, cbRegion, pVM->iom.s.hNewMmioHandlerType, hRegion, pRegEntry->pszDesc);
            AssertRCReturnStmt(rc, ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS); IOM_UNLOCK_EXCL(pVM), rc);

            pEntry = paEntries;
        }

        /*
         * Fill in the entry and bump the table size.
         */
        pRegEntry->fMapped  = true;
        pEntry->idx         = hRegion;
        pEntry->GCPhysFirst = GCPhys;
        pEntry->GCPhysLast  = GCPhysLast;
        pVM->iom.s.cMmioLookupEntries = cEntries + 1;

#ifdef VBOX_WITH_STATISTICS
        /* Don't register stats here when we're creating the VM as the
           statistics table may still be reallocated. */
        if (pVM->enmVMState >= VMSTATE_CREATED)
            iomR3MmioRegStats(pVM, pRegEntry);
#endif

#ifdef VBOX_STRICT
        /*
         * Assert table sanity.
         */
        AssertMsg(paEntries[0].GCPhysLast >= paEntries[0].GCPhysFirst, ("%RGp %RGp\n", paEntries[0].GCPhysLast, paEntries[0].GCPhysFirst));
        AssertMsg(paEntries[0].idx < pVM->iom.s.cMmioRegs, ("%#x %#x\n", paEntries[0].idx, pVM->iom.s.cMmioRegs));

        RTGCPHYS GCPhysPrev = paEntries[0].GCPhysLast;
        for (size_t i = 1; i <= cEntries; i++)
        {
            AssertMsg(paEntries[i].GCPhysLast >= paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, paEntries[i].GCPhysLast, paEntries[i].GCPhysFirst));
            AssertMsg(paEntries[i].idx < pVM->iom.s.cMmioRegs, ("%u: %#x %#x\n", i, paEntries[i].idx, pVM->iom.s.cMmioRegs));
            AssertMsg(GCPhysPrev < paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, GCPhysPrev, paEntries[i].GCPhysFirst));
            GCPhysPrev = paEntries[i].GCPhysLast;
        }
#endif
    }
    else
    {
        AssertFailed();
        rc = VERR_IOM_MMIO_REGION_ALREADY_MAPPED;
    }

    IOM_UNLOCK_EXCL(pVM);
    return rc;
}
504
505
506/**
[81162]507 * Worker for PDMDEVHLPR3::pfnMmioUnmap.
[80641]508 */
[81162]509VMMR3_INT_DECL(int) IOMR3MmioUnmap(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
[80641]510{
511 /*
512 * Validate input and state.
513 */
514 AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
[81162]515 AssertReturn(hRegion < pVM->iom.s.cMmioRegs, VERR_IOM_INVALID_MMIO_HANDLE);
516 PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
517 AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);
[80641]518
519 /*
520 * Do the mapping.
521 */
522 int rc;
523 IOM_LOCK_EXCL(pVM);
524
525 if (pRegEntry->fMapped)
526 {
[81162]527 RTGCPHYS const GCPhys = pRegEntry->GCPhysMapping;
528 RTGCPHYS const GCPhysLast = GCPhys + pRegEntry->cbRegion - 1;
529 uint32_t const cEntries = RT_MIN(pVM->iom.s.cMmioLookupEntries, pVM->iom.s.cMmioRegs);
530 Assert(pVM->iom.s.cMmioLookupEntries == cEntries);
[80641]531 Assert(cEntries > 0);
532
[81162]533 PIOMMMIOLOOKUPENTRY paEntries = pVM->iom.s.paMmioLookup;
[80641]534 uint32_t iFirst = 0;
535 uint32_t iEnd = cEntries;
536 uint32_t i = cEntries / 2;
537 for (;;)
538 {
[81162]539 PIOMMMIOLOOKUPENTRY pEntry = &paEntries[i];
540 if (pEntry->GCPhysLast < GCPhys)
[80641]541 {
542 i += 1;
543 if (i < iEnd)
544 iFirst = i;
545 else
546 {
[81162]547 rc = VERR_IOM_MMIO_IPE_1;
548 AssertLogRelMsgFailedBreak(("%RGp..%RGp (%s) not found!\n", GCPhys, GCPhysLast, pRegEntry->pszDesc));
[80641]549 }
550 }
[81162]551 else if (pEntry->GCPhysFirst > GCPhysLast)
[80641]552 {
553 if (i > iFirst)
554 iEnd = i;
555 else
556 {
[81162]557 rc = VERR_IOM_MMIO_IPE_1;
558 AssertLogRelMsgFailedBreak(("%RGp..%RGp (%s) not found!\n", GCPhys, GCPhysLast, pRegEntry->pszDesc));
[80641]559 }
560 }
[81162]561 else if (pEntry->idx == hRegion)
[80641]562 {
[81162]563 Assert(pEntry->GCPhysFirst == GCPhys);
564 Assert(pEntry->GCPhysLast == GCPhysLast);
[80649]565#ifdef VBOX_WITH_STATISTICS
[81162]566 iomR3MmioDeregStats(pVM, pRegEntry, GCPhys);
[80649]567#endif
[80641]568 if (i + 1 < cEntries)
569 memmove(pEntry, pEntry + 1, sizeof(*pEntry) * (cEntries - i - 1));
[81162]570 pVM->iom.s.cMmioLookupEntries = cEntries - 1;
[81336]571
[81333]572 rc = PGMR3PhysMMIODeregister(pVM, GCPhys, pRegEntry->cbRegion);
573 AssertRC(rc);
[81336]574
575 pRegEntry->fMapped = false;
576 ASMAtomicWriteU64(&pRegEntry->GCPhysMapping, NIL_RTGCPHYS);
[80641]577 break;
578 }
579 else
580 {
[81162]581 AssertLogRelMsgFailed(("Lookig for %RGp..%RGp (%s), found %RGp..%RGp (%s) instead!\n",
582 GCPhys, GCPhysLast, pRegEntry->pszDesc,
583 pEntry->GCPhysFirst, pEntry->GCPhysLast, pVM->iom.s.paMmioRegs[pEntry->idx].pszDesc));
584 rc = VERR_IOM_MMIO_IPE_1;
[80641]585 break;
586 }
587
588 i = iFirst + (iEnd - iFirst) / 2;
589 }
590
591#ifdef VBOX_STRICT
592 /*
593 * Assert table sanity.
594 */
[81162]595 AssertMsg(paEntries[0].GCPhysLast >= paEntries[0].GCPhysFirst, ("%RGp %RGp\n", paEntries[0].GCPhysLast, paEntries[0].GCPhysFirst));
596 AssertMsg(paEntries[0].idx < pVM->iom.s.cMmioRegs, ("%#x %#x\n", paEntries[0].idx, pVM->iom.s.cMmioRegs));
[80641]597
[81162]598 RTGCPHYS GCPhysPrev = paEntries[0].GCPhysLast;
[80960]599 for (i = 1; i < cEntries - 1; i++)
[80641]600 {
[81162]601 AssertMsg(paEntries[i].GCPhysLast >= paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, paEntries[i].GCPhysLast, paEntries[i].GCPhysFirst));
602 AssertMsg(paEntries[i].idx < pVM->iom.s.cMmioRegs, ("%u: %#x %#x\n", i, paEntries[i].idx, pVM->iom.s.cMmioRegs));
603 AssertMsg(GCPhysPrev < paEntries[i].GCPhysFirst, ("%u: %RGp %RGp\n", i, GCPhysPrev, paEntries[i].GCPhysFirst));
604 GCPhysPrev = paEntries[i].GCPhysLast;
[80641]605 }
606#endif
607 }
608 else
609 {
610 AssertFailed();
[81167]611 rc = VERR_IOM_MMIO_REGION_NOT_MAPPED;
[80641]612 }
613
614 IOM_UNLOCK_EXCL(pVM);
615 return rc;
616}
617
618
/**
 * Reduces the size of a registered MMIO region.
 *
 * @returns VERR_NOT_IMPLEMENTED - this operation is currently a stub.
 * @param   pVM         The cross context VM structure (unused).
 * @param   pDevIns     The device instance that owns @a hRegion (unused).
 * @param   hRegion     The handle of the region to shrink (unused).
 * @param   cbRegion    The new, smaller region size (unused).
 */
VMMR3_INT_DECL(int) IOMR3MmioReduce(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion, RTGCPHYS cbRegion)
{
    RT_NOREF(pVM, pDevIns, hRegion, cbRegion);
    return VERR_NOT_IMPLEMENTED;
}
624
625
[80649]626/**
[81375]627 * Validates @a hRegion, making sure it belongs to @a pDevIns.
628 *
629 * @returns VBox status code.
630 * @param pVM The cross context VM structure.
631 * @param pDevIns The device which allegedly owns @a hRegion.
632 * @param hRegion The handle to validate.
633 */
634VMMR3_INT_DECL(int) IOMR3MmioValidateHandle(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
635{
636 AssertPtrReturn(pDevIns, VERR_INVALID_HANDLE);
637 AssertReturn(hRegion < RT_MIN(pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc), VERR_IOM_INVALID_MMIO_HANDLE);
638 PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
639 AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_IOM_INVALID_MMIO_HANDLE);
640 return VINF_SUCCESS;
641}
642
643
644/**
[81564]645 * Gets the mapping address of MMIO region @a hRegion.
646 *
647 * @returns Mapping address if mapped, NIL_RTGCPHYS if not mapped or invalid
648 * input.
649 * @param pVM The cross context VM structure.
650 * @param pDevIns The device which allegedly owns @a hRegion.
651 * @param hRegion The handle to validate.
652 */
653VMMR3_INT_DECL(RTGCPHYS) IOMR3MmioGetMappingAddress(PVM pVM, PPDMDEVINS pDevIns, IOMMMIOHANDLE hRegion)
654{
655 AssertPtrReturn(pDevIns, NIL_RTGCPHYS);
656 AssertReturn(hRegion < RT_MIN(pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc), NIL_RTGCPHYS);
657 PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
658 AssertReturn(pRegEntry->pDevIns == pDevIns, NIL_RTGCPHYS);
659 return pRegEntry->GCPhysMapping;
660}
661
662
663/**
[81162]664 * Display all registered MMIO ranges.
[1]665 *
[58122]666 * @param pVM The cross context VM structure.
[1]667 * @param pHlp The info helpers.
668 * @param pszArgs Arguments, ignored.
669 */
[81162]670DECLCALLBACK(void) iomR3MmioInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
[1]671{
[82313]672 RT_NOREF(pszArgs);
673
[81056]674 /* No locking needed here as registerations are only happening during VMSTATE_CREATING. */
675 pHlp->pfnPrintf(pHlp,
[81162]676 "MMIO registrations: %u (%u allocated)\n"
677 " ## Ctx %.*s %.*s PCI Description\n",
678 pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc,
679 sizeof(RTGCPHYS) * 2, "Size",
680 sizeof(RTGCPHYS) * 2 * 2 + 1, "Mapping");
681 PIOMMMIOENTRYR3 paRegs = pVM->iom.s.paMmioRegs;
682 for (uint32_t i = 0; i < pVM->iom.s.cMmioRegs; i++)
[81056]683 {
684 const char * const pszRing = paRegs[i].fRing0 ? paRegs[i].fRawMode ? "+0+C" : "+0 "
685 : paRegs[i].fRawMode ? "+C " : " ";
686 if (paRegs[i].fMapped && paRegs[i].pPciDev)
[81162]687 pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %RGp-%RGp pci%u/%u %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
688 paRegs[i].GCPhysMapping, paRegs[i].GCPhysMapping + paRegs[i].cbRegion - 1,
[81056]689 paRegs[i].pPciDev->idxSubDev, paRegs[i].iPciRegion, paRegs[i].pszDesc);
690 else if (paRegs[i].fMapped && !paRegs[i].pPciDev)
[81162]691 pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %RGp-%RGp %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
692 paRegs[i].GCPhysMapping, paRegs[i].GCPhysMapping + paRegs[i].cbRegion - 1, paRegs[i].pszDesc);
[81056]693 else if (paRegs[i].pPciDev)
[81162]694 pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %.*s pci%u/%u %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
695 sizeof(RTGCPHYS) * 2, "unmapped", paRegs[i].pPciDev->idxSubDev, paRegs[i].iPciRegion, paRegs[i].pszDesc);
[81056]696 else
[81162]697 pHlp->pfnPrintf(pHlp, "%3u R3%s %RGp %.*s %s\n", paRegs[i].idxSelf, pszRing, paRegs[i].cbRegion,
698 sizeof(RTGCPHYS) * 2, "unmapped", paRegs[i].pszDesc);
[81056]699 }
[1]700}
701
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use