Changeset 82313 in vbox
- Timestamp: Dec 1, 2019 3:38:40 AM
- Location: trunk
- Files: 1 deleted, 12 edited
- include/VBox/vmm/iom.h (modified) (2 diffs)
- include/VBox/vmm/pdmdev.h (modified) (3 diffs)
- src/VBox/Devices/Bus/DevPciIch9.cpp (modified) (8 diffs)
- src/VBox/VMM/Makefile.kmk (modified) (2 diffs)
- src/VBox/VMM/VMMAll/IOMAll.cpp (modified) (1 diff)
- src/VBox/VMM/VMMAll/IOMAllMMIO.cpp (deleted)
- src/VBox/VMM/VMMAll/IOMAllMmioNew.cpp (modified) (1 diff)
- src/VBox/VMM/VMMR3/IOM.cpp (modified) (5 diffs)
- src/VBox/VMM/VMMR3/IOMR3Mmio.cpp (modified) (3 diffs)
- src/VBox/VMM/VMMR3/PDMDevHlp.cpp (modified) (4 diffs)
- src/VBox/VMM/VMMR3/PGMPhys.cpp (modified) (5 diffs)
- src/VBox/VMM/include/IOMInline.h (modified) (1 diff)
- src/VBox/VMM/include/IOMInternal.h (modified) (7 diffs)
trunk/include/VBox/vmm/iom.h
Diff against r82311:
- Removed the commented-out declarations of the retired IOMMMIOMapMMIO2Page, IOMMMIOMapMMIOHCPage and IOMMMIOResetRegion APIs that sat next to IOMMMIORead/IOMMMIOWrite; the handle-based IOMMmioMapMmio2Page and IOMR0MmioPhysHandler declarations remain.
- Dropped the whole "obsolete/deprecated" ring-3 group: IOMR3MmioRegisterR3, IOMR3MmioRegisterR0, the #if 0 IOMR3MmioRegisterRC variant, IOMR3MmioDeregister, IOMR3MmioExNotifyMapped, IOMR3MmioExNotifyUnmapped and IOMR3MmioExNotifyDeregistered. IOMR3MmioGetMappingAddress, IOMR3ProcessForceFlag and the breakpoint/debug-event notification declarations are unchanged.
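Callers that still used the declarations deleted here are expected to move to the handle-based API kept just above the removed block (IOMMmioMapMmio2Page takes a device instance, an IOMMMIOHANDLE and a region offset rather than raw physical addresses). For orientation, a minimal sketch of what a new-style MMIO accessor pair looks like follows; the callback shape (region-relative offset, VBOXSTRICTRC return) is an assumption based on the IOMAllMmioNew.cpp code paths, so check the current iom.h typedefs before copying it.

```c
/* Sketch only: new-style MMIO accessors take a region-relative offset and
 * return VBOXSTRICTRC.  Names and exact parameter types are assumptions. */
static DECLCALLBACK(VBOXSTRICTRC) myDevMmioRead(PPDMDEVINS pDevIns, void *pvUser,
                                                RTGCPHYS off, void *pv, unsigned cb)
{
    RT_NOREF(pDevIns, pvUser, off);
    Assert(cb == 4 || cb == 8);        /* sizes permitted by IOMMMIO_FLAGS_READ_DWORD_QWORD */
    if (cb == 4)
        *(uint32_t *)pv = UINT32_MAX;  /* value of the register at 'off' */
    else
        *(uint64_t *)pv = UINT64_MAX;
    return VINF_SUCCESS;
}

static DECLCALLBACK(VBOXSTRICTRC) myDevMmioWrite(PPDMDEVINS pDevIns, void *pvUser,
                                                 RTGCPHYS off, void const *pv, unsigned cb)
{
    RT_NOREF(pDevIns, pvUser, off, pv, cb);   /* update the register at 'off' */
    return VINF_SUCCESS;
}
```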
trunk/include/VBox/vmm/pdmdev.h
Diff against r82311:
- Bumped PDM_DEVHLPR3_VERSION from PDM_VERSION_MAKE_PP(0xffe7, 39, 0) to PDM_VERSION_MAKE_PP(0xffe7, 40, 0).
- Removed the deprecated MMIO callback members from the PDMDEVHLPR3 helper table, together with their doxygen comments: pfnMMIORegister, pfnMMIORegisterRC, pfnMMIORegisterR0 and pfnMMIODeregister. The table now goes straight from the pfnMmioGetMappingAddress entry to the MMIO2 group.
- Removed the matching deprecated inline wrappers: PDMDevHlpMMIORegister, PDMDevHlpMMIORegisterRC, PDMDevHlpMMIORegisterR0, PDMDevHlpMMIORegisterEx, PDMDevHlpMMIORegisterRCEx, PDMDevHlpMMIORegisterR0Ex and PDMDevHlpMMIODeregister. PDMDevHlpMmio2Create and the rest of the MMIO2 wrappers are unchanged.
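For device code being converted away from the helpers removed above, registration now goes through the handle-based MMIO helpers. The sketch below contrasts the removed PDMDevHlpMMIORegister() call with a new-style registration; PDMDevHlpMmioCreateAndMap() and its exact parameter order are an assumption inferred from the surviving handle-based helpers (pfnMmioGetMappingAddress, PDMDevHlpMmioUnmap), so verify against the current pdmdev.h. The accessor callbacks are the new-style pair sketched under iom.h above.

```c
/* Sketch of converting a device from the removed registration helper to the
 * handle-based one.  PDMDevHlpMmioCreateAndMap and NIL_IOMMMIOHANDLE are
 * assumed names; myDevMmioWrite/myDevMmioRead are the accessors above. */
static int myDevR3RegisterMmio(PPDMDEVINS pDevIns, RTGCPHYS GCPhysStart, RTGCPHYS cbRegion)
{
#if 0 /* Old style, removed by this changeset: raw GCPhys, no handle. */
    int rc = PDMDevHlpMMIORegister(pDevIns, GCPhysStart, cbRegion, NULL /*pvUser*/,
                                   IOMMMIO_FLAGS_READ_DWORD_QWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD,
                                   myDevMmioWriteOld, myDevMmioReadOld, "MyDev MMIO");
#else /* New style: create + map in one go, keep the handle for later unmapping. */
    IOMMMIOHANDLE hMmio = NIL_IOMMMIOHANDLE;
    int rc = PDMDevHlpMmioCreateAndMap(pDevIns, GCPhysStart, cbRegion,
                                       myDevMmioWrite, myDevMmioRead,
                                       IOMMMIO_FLAGS_READ_DWORD_QWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD,
                                       "MyDev MMIO", &hMmio);
    /* Tear-down later uses PDMDevHlpMmioUnmap(pDevIns, hMmio) instead of the
     * removed PDMDevHlpMMIODeregister(). */
#endif
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
```

The handle also replaces GCPhys-based bookkeeping: the current mapping address of a region can be queried through the pfnMmioGetMappingAddress helper kept in the table above.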
trunk/src/VBox/Devices/Bus/DevPciIch9.cpp
Diff against r82299:
- devpciR3UnmapRegion() lost its PPDMDEVINS parameter. The old-style fallback path (no IOM handle: the VBOX_STRICT pfnIsMMIOExBase check plus PDMDevHlpMMIODeregister) is gone; the function now always performs the optional pfnMap(NIL_RTGCPHYS) callout and then unmaps via the region handle, switching on PDMPCIDEV_IORGN_F_HANDLE_MASK (PDMDevHlpIoPortUnmap, PDMDevHlpMmioUnmap, PDMDevHlpMmio2Unmap, or nothing for PDMPCIDEV_IORGN_F_NO_HANDLE).
- devpciR3UpdateMappings() likewise dropped the PPDMDEVINS parameter and its @param doc; the comments no longer distinguish old-style devices ("The callout is optional (typically not used)", "We do the mapping for most devices").
- The callers of both functions were updated for the new signatures.
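Since the bus now performs the actual (un)mapping through the region handle, the per-region pfnMap callout shown above is reduced to a notification; it is invoked with NIL_RTGCPHYS when the region is being unmapped. A device-side callback can therefore be as small as the following sketch. The state structure and callback name are hypothetical, and the exact callback typedef (return type, iRegion width) should be taken from the PCI device headers rather than from this sketch.

```c
/* Hypothetical device state; only the current mapping address is tracked. */
typedef struct MYDEVSTATE
{
    RTGCPHYS GCPhysMmioBase;   /* NIL_RTGCPHYS while unmapped */
} MYDEVSTATE;
typedef MYDEVSTATE *PMYDEVSTATE;

/* Matches the call shape used in devpciR3UnmapRegion()/devpciR3UpdateMappings():
 * (pDevIns, pPciDev, iRegion, GCPhysAddress, cb, enmType), with GCPhysAddress
 * set to NIL_RTGCPHYS on unmap.  The bus does the real map/unmap via the handle,
 * so the device only records the address. */
static DECLCALLBACK(int) myDevR3PciRegionMap(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
                                             RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
{
    RT_NOREF(pPciDev, iRegion, cb, enmType);
    PMYDEVSTATE pThis = PDMINS_2_DATA(pDevIns, PMYDEVSTATE);
    pThis->GCPhysMmioBase = GCPhysAddress;
    return VINF_SUCCESS;
}
```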
trunk/src/VBox/VMM/Makefile.kmk
Diff against r81406:
- Dropped VMMAll/IOMAllMMIO.cpp from both source lists (the entries between VMMAll/IOMAll.cpp and VMMAll/IOMAllMmioNew.cpp); the deleted file is no longer built.
trunk/src/VBox/VMM/VMMAll/IOMAll.cpp
Diff against r82311:
- Removed iomMmioFreeRange(), which freed an IOMMMIORANGE via MMHyperFree() once its reference counter reached zero. Nothing references the old range structure any more.
trunk/src/VBox/VMM/VMMAll/IOMAllMmioNew.cpp
Diff against r82094:
- In the #PF path, IOM_UNLOCK_SHARED() is now issued right after iomMmioGetEntry() regardless of the result. A hit is still dispatched to iomMmioCommonPfHandlerNew(); a miss now simply returns VERR_IOM_MMIO_RANGE_NOT_FOUND instead of falling back to the old-style range lookup (iomMmioGetRange, iomMmioRetainRange, iomMmioCommonPfHandlerOld, iomMmioReleaseRange).
trunk/src/VBox/VMM/VMMR3/IOM.cpp
Diff against r82312:
- Dropped the #include of IOMInline.h and the internal iomR3FlushCache() helper (both the prototype and the implementation).
- IOMR3Init() no longer allocates the IOM trees (pTreesR3/pTreesR0 via MMHyperAlloc) and no longer registers the old MMIO physical access handler type (iomMmioHandler/iomMmioPfHandler); only the new handler type (iomMmioHandlerNew) is registered. The "redundant, but just in case" iomR3FlushCache() call after the statistics registration is gone as well.
- IOMR3Reset() no longer flushes the per-VCPU MMIO range/statistics lookup caches; it is now a no-op (RT_NOREF(pVM)).
- Removed iomR3MMIOStatsCreate(), which lazily created per-address MMIO statistics nodes in the MmioStatTree and registered the STAM counters/profiles under /IOM/MMIO/<GCPhys>.
- Removed the legacy registration API bodies: IOMR3MmioRegisterR3, the #if 0 IOMR3MmioRegisterRC, IOMR3MmioRegisterR0 and IOMR3MmioDeregister, together with the PGM notification callbacks IOMR3MmioExNotifyMapped, IOMR3MmioExNotifyUnmapped and IOMR3MmioExNotifyDeregistered.
trunk/src/VBox/VMM/VMMR3/IOMR3Mmio.cpp
Diff against r81798:
- Removed iomR3MmioInfoOne(), the AVL enumeration callback that dumped a single legacy IOMMMIORANGE (device instance, R3/R0 read/write/fill callbacks, user argument, description).
- The 'mmio' info handler iomR3MmioInfo() now only prints the table of new-style registrations; the second, legacy table that walked pVM->iom.s.pTreesR3->MMIOTree under the IOM lock is gone, and the unused pszArgs argument is marked with RT_NOREF() up front.
trunk/src/VBox/VMM/VMMR3/PDMDevHlp.cpp
Diff against r82311:
- Removed the pdmR3DevGetSymbolR0Lazy() wrapper around PDMR3LdrGetSymbolR0Lazy(); it was only needed to resolve ring-0 MMIO callback names.
- Removed the device helper implementations pdmR3DevHlp_MMIORegister, pdmR3DevHlp_MMIORegisterRC (already reduced to a stub with the raw-mode symbol resolution under #if 0), pdmR3DevHlp_MMIORegisterR0 and pdmR3DevHlp_MMIODeregister.
- Dropped the four corresponding entries from both device helper tables, so pdmR3DevHlp_MmioGetMappingAddress is now followed directly by pdmR3DevHlp_Mmio2Create.
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
Diff against r82091:
- The MMIO-Ex deregistration path no longer calls IOMR3MmioExNotifyDeregistered() for the first chunk of a non-MMIO2 range before unlinking it.
- In the map path, the non-MMIO2 branch no longer calls IOMR3MmioExNotifyMapped() after registering the physical handlers and now starts with AssertFailed(), since pre-registered (non-MMIO2) ranges are no longer expected.
- In the unmap path, the non-MMIO2 branch likewise gained an AssertFailed() and lost the IOMR3MmioExNotifyUnmapped() call.
trunk/src/VBox/VMM/include/IOMInline.h
Diff against r82311:
- Removed the legacy inline lookup helpers built around IOMMMIORANGE: iomMmioGetRange(), iomMmioRetainRange(), iomMmioGetRangeWithRef(), iomMmioReleaseRange(), the VBOX_STRICT-only iomMMIOGetRangeUnsafe(), and the VBOX_WITH_STATISTICS-only iomMmioGetStats(). Only the new-style inlines remain in the header.
trunk/src/VBox/VMM/include/IOMInternal.h
Diff against r82311:
- Removed the legacy IOMMMIORANGE descriptor (AVL node keyed on GCPhys, reference counter, IOMMMIO_FLAGS_XXX, separate R3/R0 device/callback/user pointers, description) and the IOMMMIOSTATS per-address statistics record, together with their pointer typedefs.
- Removed the IOMTREES structure holding the MMIOTree and MmioStatTree AVL trees, and its PIOMTREES typedef.
- IOMCPU: the pMMIORangeLastR3/pMMIOStatsLastR3 and pMMIORangeLastR0/pMMIOStatsLastR0 lookup cache members are gone; the trailing u16Padding became au16Padding[3].
- IOM: dropped pTreesR3/pTreesR0 and the old hMmioHandlerType; hNewMmioHandlerType now lives with the other MMIO registration fields.
- Removed the prototypes for iomMmioFreeRange(), iomR3MMIOStatsCreate() and iomMmioCommonPfHandlerOld(), as well as the old iomMmioPfHandler #PF handler export and the iomMmioHandler access handler declaration; iomMmioPfHandlerNew and iomMmioHandlerNew remain.