Changeset 87786 in vbox

Timestamp: Feb 18, 2021, 11:31:38 AM
Location:  trunk
Files:     2 edited
  - include/VBox/iommu-amd.h (modified) (3 diffs)
  - src/VBox/Devices/Bus/DevIommuAmd.cpp (modified) (54 diffs)
trunk/include/VBox/iommu-amd.h (r87732 → r87786)

 /** Gets the interrupt table length (in bytes) given the DTE pointer. */
 #define IOMMU_GET_INTR_TAB_LEN(a_pDte)      (IOMMU_GET_INTR_TAB_ENTRIES(a_pDte) * sizeof(IRTE_T))
+/** Mask of interrupt control bits. */
+#define IOMMU_DTE_INTR_CTRL_MASK            0x3
+/** Gets the interrupt control bits given the DTE pointer. */
+#define IOMMU_GET_INTR_CTRL(a_pDte)         (((a_pDte)->au64[2] >> 60) & IOMMU_DTE_INTR_CTRL_MASK)
…
  * interrupt message. See AMD IOMMU spec. 2.2.5 "Interrupt Remapping Tables". */
 #define IOMMU_MSI_DATA_IRTE_OFFSET_MASK     UINT32_C(0x000007ff)
+/** Gets the IRTE offset from the originating MSI interrupt message. */
+#define IOMMU_GET_IRTE_OFF(a_u32MsiData)    (((a_u32MsiData) & IOMMU_MSI_DATA_IRTE_OFFSET_MASK) * sizeof(IRTE_T))
…
 } CMD_INV_INTR_TABLE_T;
 AssertCompileSize(CMD_INV_INTR_TABLE_T, 16);
+/** Pointer to an invalidate interrupt table command. */
+typedef CMD_INV_INTR_TABLE_T *PCMD_INV_INTR_TABLE_T;
+/** Pointer to a const invalidate interrupt table command. */
+typedef CMD_INV_INTR_TABLE_T const *PCCMD_INV_INTR_TABLE_T;
+#define IOMMU_CMD_INV_INTR_TABLE_QWORD_0_VALID_MASK     UINT64_C(0xf00000000000ffff)
+#define IOMMU_CMD_INV_INTR_TABLE_QWORD_1_VALID_MASK     UINT64_C(0x0000000000000000)
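Taken together, IOMMU_DTE_INTR_CTRL_MASK and IOMMU_GET_INTR_CTRL read the DTE's 2-bit IntCtl field, i.e. bits 61:60 of the DTE's third qword (DTE bits 189:188). A minimal standalone sketch of the same extraction — plain C with hypothetical names, encodings per the AMD IOMMU spec:

    #include <stdint.h>

    /* IntCtl encodings (AMD IOMMU spec): how fixed/arbitrated interrupts are handled. */
    enum { INTR_CTRL_TARGET_ABORT = 0, INTR_CTRL_FWD_UNMAPPED = 1,
           INTR_CTRL_REMAP        = 2, INTR_CTRL_RSVD         = 3 };

    /* Equivalent of IOMMU_GET_INTR_CTRL() given a raw copy of DTE qword 2 (au64[2]). */
    static inline unsigned dteGetIntrCtrl(uint64_t uDteQword2)
    {
        return (unsigned)((uDteQword2 >> 60) & 0x3 /* IOMMU_DTE_INTR_CTRL_MASK */);
    }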
trunk/src/VBox/Devices/Bus/DevIommuAmd.cpp (r87735 → r87786)

 /** The IOMMU device instance magic. */
 #define IOMMU_MAGIC                         0x10acce55

+/** Enable the IOTLBE cache. */
 #define IOMMU_WITH_IOTLBE_CACHE
+/** Enable the interrupt cache. */
+#define IOMMU_WITH_IRTE_CACHE
+
+/* The DTE cache is mandatory for the IOTLB or interrupt cache to work. */
+#if defined(IOMMU_WITH_IOTLBE_CACHE) || defined(IOMMU_WITH_IRTE_CACHE)
+# define IOMMU_WITH_DTE_CACHE
+#endif
+
+#ifdef IOMMU_WITH_IRTE_CACHE
+/** The maximum number of interrupt cache entries configurable through CFGM. */
+# define IOMMU_IRTE_CACHE_MAX                       32
+/** The default number of interrupt cache entries. */
+# define IOMMU_IRTE_CACHE_DEFAULT                   16
+/** The minimum number of interrupt cache entries configurable through CFGM. */
+# define IOMMU_IRTE_CACHE_MIN                       8
+
+/** A NIL IRTE cache entry key. */
+# define IOMMU_IRTE_CACHE_KEY_NIL                   (~(uint32_t)0U)
+/** Gets the device ID from an IRTE cache entry key. */
+# define IOMMU_IRTE_CACHE_KEY_GET_DEVICE_ID(a_Key)  RT_HIWORD(a_Key)
+/** Gets the IRTE offset from an IRTE cache entry key. */
+# define IOMMU_IRTE_CACHE_KEY_GET_OFF(a_Key)        RT_LOWORD(a_Key)
+/** Makes an IRTE cache entry key.
+ *
+ * Bits 31:16 is the device ID (Bus, Device, Function).
+ * Bits 15:0 is the offset into the IRTE table.
+ */
+# define IOMMU_IRTE_CACHE_KEY_MAKE(a_DevId, a_off)  RT_MAKE_U32(a_off, a_DevId)
+#endif /* IOMMU_WITH_IRTE_CACHE */
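RT_MAKE_U32(a_off, a_DevId) places the 16-bit table offset in the low word and the device ID in the high word, which is what the GET_DEVICE_ID/GET_OFF accessors undo. A self-contained round-trip sketch of the same packing, using plain-C stand-ins for the IPRT macros:

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in for IOMMU_IRTE_CACHE_KEY_MAKE: RT_MAKE_U32(lo, hi). */
    static uint32_t irteCacheKeyMake(uint16_t uDevId, uint16_t offIrte)
    {
        return ((uint32_t)uDevId << 16) | offIrte;
    }

    int main(void)
    {
        /* Device 00:02.0 (BDF packed as bus<<8 | dev<<3 | fn = 0x0010), 3rd IRTE (offset 12). */
        uint32_t const uKey = irteCacheKeyMake(0x0010, 3 * 4);
        assert((uint16_t)(uKey >> 16) == 0x0010);   /* RT_HIWORD -> device ID   */
        assert((uint16_t)uKey         == 12);       /* RT_LOWORD -> IRTE offset */
        return 0;
    }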
 #ifdef IOMMU_WITH_IOTLBE_CACHE
-/** The maximum number of DTE entries. */
-# define IOMMU_DTE_CACHE_MAX                UINT16_MAX
 /** The maximum number of IOTLB entries. */
 # define IOMMU_IOTLBE_MAX                   96
…
 /** The number of bits to shift for the domain ID of the IOTLBE key. */
 # define IOMMU_IOTLB_DOMAIN_ID_SHIFT        40
-/** The mask of bits for the domain ID of the IOTLBE key. */
+/** A NIL IOTLB key. */
 # define IOMMU_IOTLB_KEY_NIL                UINT64_C(0)
 /** Gets the domain ID from an IOTLB entry key. */
…
 # define IOMMU_IOTLB_KEY_MAKE(a_DomainId, a_uIova)  (  ((uint64_t)(a_DomainId) << IOMMU_IOTLB_DOMAIN_ID_SHIFT) \
                                                      | (((a_uIova) >> X86_PAGE_4K_SHIFT) & IOMMU_IOTLB_IOVA_MASK))
+#endif /* IOMMU_WITH_IOTLBE_CACHE */
+
+#ifdef IOMMU_WITH_DTE_CACHE
+/** The maximum number of DTE entries. */
+# define IOMMU_DTE_CACHE_MAX                UINT16_MAX
+
+/** @name IOMMU_DTECACHE_F_XXX: DTE cache flags.
+ *
+ * Some of these flags are "basic" i.e. they correspond directly to their bits in
+ * the DTE. The rest of the flags are based on checks or operations on several DTE
+ * bits.
+ *
+ * The basic flags are:
+ *   - VALID                  (DTE.V)
+ *   - IO_PERM_READ           (DTE.IR)
+ *   - IO_PERM_WRITE          (DTE.IW)
+ *   - IO_PERM_RSVD           (bit following DTE.IW reserved for future & to keep
+ *                             masking consistent)
+ *   - SUPPRESS_ALL_IOPF      (DTE.SA)
+ *   - SUPPRESS_IOPF          (DTE.SE)
+ *   - INTR_MAP_VALID         (DTE.IV)
+ *   - IGNORE_UNMAPPED_INTR   (DTE.IG)
+ *
+ * @see iommuAmdGetBasicDevFlags()
+ * @{ */
+/** The DTE is present. */
+# define IOMMU_DTECACHE_F_PRESENT                   RT_BIT(0)
+/** The DTE is valid. */
+# define IOMMU_DTECACHE_F_VALID                     RT_BIT(1)
+/** The DTE permissions apply for address translations. */
+# define IOMMU_DTECACHE_F_IO_PERM                   RT_BIT(2)
+/** DTE permission - I/O read allowed. */
+# define IOMMU_DTECACHE_F_IO_PERM_READ              RT_BIT(3)
+/** DTE permission - I/O write allowed. */
+# define IOMMU_DTECACHE_F_IO_PERM_WRITE             RT_BIT(4)
+/** DTE permission - reserved. */
+# define IOMMU_DTECACHE_F_IO_PERM_RSVD              RT_BIT(5)
+/** Address translation required. */
+# define IOMMU_DTECACHE_F_ADDR_TRANSLATE            RT_BIT(6)
+/** Suppress all I/O page faults. */
+# define IOMMU_DTECACHE_F_SUPPRESS_ALL_IOPF         RT_BIT(7)
+/** Suppress I/O page faults. */
+# define IOMMU_DTECACHE_F_SUPPRESS_IOPF             RT_BIT(8)
+/** Interrupt map valid. */
+# define IOMMU_DTECACHE_F_INTR_MAP_VALID            RT_BIT(9)
+/** Ignore unmapped interrupts. */
+# define IOMMU_DTECACHE_F_IGNORE_UNMAPPED_INTR      RT_BIT(10)
+/** An I/O page fault has been raised for this device. */
+# define IOMMU_DTECACHE_F_IO_PAGE_FAULT_RAISED      RT_BIT(11)
+/** Fixed and arbitrary interrupt control: Target Abort. */
+# define IOMMU_DTECACHE_F_INTR_CTRL_TARGET_ABORT    RT_BIT(12)
+/** Fixed and arbitrary interrupt control: Forward unmapped. */
+# define IOMMU_DTECACHE_F_INTR_CTRL_FWD_UNMAPPED    RT_BIT(13)
+/** Fixed and arbitrary interrupt control: Remapped. */
+# define IOMMU_DTECACHE_F_INTR_CTRL_REMAPPED        RT_BIT(14)
+/** Fixed and arbitrary interrupt control: Reserved. */
+# define IOMMU_DTECACHE_F_INTR_CTRL_RSVD            RT_BIT(15)
+/** @} */
+
+/** The number of bits to shift I/O device flags for DTE permissions. */
+# define IOMMU_DTECACHE_F_IO_PERM_SHIFT             3
+/** The mask of DTE permissions in I/O device flags. */
+# define IOMMU_DTECACHE_F_IO_PERM_MASK              0x3
+/** The number of bits to shift I/O device flags for interrupt control bits. */
+# define IOMMU_DTECACHE_F_INTR_CTRL_SHIFT           12
+/** The mask of interrupt control bits in I/O device flags. */
+# define IOMMU_DTECACHE_F_INTR_CTRL_MASK            0x3
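The INTR_CTRL_SHIFT/MASK pair lets callers recover the 2-bit IntCtl encoding that iommuAmdGetBasicDevFlags() stashes into the cached flags (the IRTE cache lookup further down does exactly this). A minimal sketch:

    #include <stdint.h>

    #define IOMMU_DTECACHE_F_INTR_CTRL_SHIFT    12
    #define IOMMU_DTECACHE_F_INTR_CTRL_MASK     0x3

    /* Returns the DTE IntCtl encoding (0..3) stored in the cached device flags. */
    static inline uint8_t dteCacheGetIntrCtrl(uint16_t fFlags)
    {
        return (fFlags >> IOMMU_DTECACHE_F_INTR_CTRL_SHIFT) & IOMMU_DTECACHE_F_INTR_CTRL_MASK;
    }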

 /** Acquires the cache lock. */
…
 /** Releases the cache lock. */
 # define IOMMU_UNLOCK_CACHE(a_pDevIns, a_pThis)     PDMDevHlpCritSectLeave((a_pDevIns), &(a_pThis)->CritSectCache)
-#endif
-
-/** @name IOMMU_DEV_F_XXX: I/O device flags.
- *
- * Some of these flags are "basic" i.e. they correspond directly to their bits in
- * the DTE. The rest of the flags are based on checks or operations on several DTE
- * bits.
- *
- * The basic flags are:
- *   - VALID                  (DTE.V)
- *   - IO_PERM_READ           (DTE.IR)
- *   - IO_PERM_WRITE          (DTE.IW)
- *   - IO_PERM_RSVD           (bit following DTE.IW reserved for future & to keep
- *                             masking consistent)
- *   - SUPPRESS_ALL_IOPF      (DTE.SA)
- *   - SUPPRESS_IOPF          (DTE.SE)
- *   - INTR_MAP_VALID         (DTE.IV)
- *   - IGNORE_UNMAPPED_INTR   (DTE.IG)
- *
- * @sa iommuAmdGetBasicDevFlags()
- * @{ */
-/** The DTE is present. */
-#define IOMMU_DEV_F_PRESENT                 RT_BIT(0)
-/** The DTE is valid. */
-#define IOMMU_DEV_F_VALID                   RT_BIT(1)
-/** DTE permissions apply for address translations. */
-#define IOMMU_DEV_F_IO_PERM                 RT_BIT(2)
-/** DTE permission - I/O read allowed. */
-#define IOMMU_DEV_F_IO_PERM_READ            RT_BIT(3)
-/** DTE permission - I/O write allowed. */
-#define IOMMU_DEV_F_IO_PERM_WRITE           RT_BIT(4)
-/** DTE permission - reserved. */
-#define IOMMU_DEV_F_IO_PERM_RSVD            RT_BIT(5)
-/** Address translation required. */
-#define IOMMU_DEV_F_ADDR_TRANSLATE          RT_BIT(6)
-/** Suppress all I/O page faults. */
-#define IOMMU_DEV_F_SUPPRESS_ALL_IOPF       RT_BIT(7)
-/** Suppress I/O page faults. */
-#define IOMMU_DEV_F_SUPPRESS_IOPF           RT_BIT(8)
-/** Interrupt map valid. */
-#define IOMMU_DEV_F_INTR_MAP_VALID          RT_BIT(9)
-/** Ignore unmapped interrupts. */
-#define IOMMU_DEV_F_IGNORE_UNMAPPED_INTR    RT_BIT(10)
-/** An I/O page fault has been raised for this device. */
-#define IOMMU_DEV_F_IO_PAGE_FAULT_RAISED    RT_BIT(11)
-/** @} */
-/** The number of bits to shift I/O device flags for DTE permissions. */
-#define IOMMU_DEV_F_IO_PERM_SHIFT           3
-/** The mask of DTE permissions in I/O device flags. */
-#define IOMMU_DEV_F_IO_PERM_MASK            0x3
+#endif /* IOMMU_WITH_DTE_CACHE */

 /** Gets the page offset mask given the number of bits to shift. */
…
 typedef IOADDRRANGE const *PCIOADDRRANGE;

+#ifdef IOMMU_WITH_DTE_CACHE
 /**
- * IOMMU I/O Device.
- * Used for caching as well as passing flags to events.
+ * Device Table Entry Cache.
  */
-typedef struct IODEVICE
+typedef struct DTECACHE
 {
-    /** This device's flags, see IOMMU_DEV_F_XXX. */
+    /** This device's flags, see IOMMU_DTECACHE_F_XXX. */
     uint16_t        fFlags;
     /** The domain ID assigned for this device by software. */
     uint16_t        uDomainId;
-} IODEVICE;
+} DTECACHE;
 /** Pointer to an I/O device struct. */
-typedef IODEVICE *PIODEVICE;
+typedef DTECACHE *PDTECACHE;
 /** Pointer to a const I/O device struct. */
-typedef IODEVICE *PCIODEVICE;
-AssertCompileSize(IODEVICE, 4);
+typedef DTECACHE *PCDTECACHE;
+AssertCompileSize(DTECACHE, 4);
+#endif /* IOMMU_WITH_DTE_CACHE */

 #ifdef IOMMU_WITH_IOTLBE_CACHE
 /**
- * IOMMU I/O TLB Entry.
+ * I/O TLB Entry.
  * Keep this as small and aligned as possible.
  */
…
 #endif /* IOMMU_WITH_IOTLBE_CACHE */

+#ifdef IOMMU_WITH_IRTE_CACHE
+/**
+ * Interrupt Remap Table Entry Cache.
+ */
+typedef struct IRTECACHE
+{
+    /** The IRTE. */
+    IRTE_T          Irte;
+    /** The key, see IOMMU_IRTE_CACHE_KEY_MAKE. */
+    uint32_t        uKey;
+} IRTECACHE;
+/** Pointer to an IRTE cache struct. */
+typedef IRTECACHE *PIRTECACHE;
+/** Pointer to a const IRTE cache struct. */
+typedef IRTECACHE const *PCIRTECACHE;
+AssertCompileSizeAlignment(IRTECACHE, 4);
+#endif /* IOMMU_WITH_IRTE_CACHE */

 /**
  * The shared IOMMU device state.
…
     IOMMMIOHANDLE               hMmio;

-#ifdef IOMMU_WITH_IOTLBE_CACHE
+#ifdef IOMMU_WITH_DTE_CACHE
     /** The critsect that protects the cache from concurrent access. */
     PDMCRITSECT                 CritSectCache;
-    /** L1 Cache - Maps [DeviceId] to [DomainId]. */
-    PIODEVICE                   paDevices;
+    /** Maps [DeviceId] to [DomainId]. */
+    PDTECACHE                   paDteCache;
+#endif
+#ifdef IOMMU_WITH_IOTLBE_CACHE
     /** Pointer to array of pre-allocated IOTLBEs. */
     PIOTLBE                     paIotlbes;
-    /** L2 Cache - Maps [DomainId,Iova] to [IOTLBE]. */
+    /** Maps [DomainId,Iova] to [IOTLBE]. */
     AVLU64TREE                  TreeIotlbe;
     /** LRU list anchor for IOTLB entries. */
…
     /** Number of cached IOTLB entries in the tree. */
     uint32_t                    cCachedIotlbes;
+#endif
+#ifdef IOMMU_WITH_IRTE_CACHE
+    /** Maps [DeviceId] to [IRTE]. */
+    PIRTECACHE                  paIrteCache;
+    /** Maximum number of entries in the IRTE cache. */
+    uint16_t                    cIrteCache;
+    /** Padding. */
+    uint16_t                    auPadding[3];
 #endif
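Worked sizing, derived from the structures above: the DTE cache is a flat array indexed by device ID, so it costs sizeof(DTECACHE) × IOMMU_DTE_CACHE_MAX = 4 × 65535 bytes ≈ 256 KiB up front regardless of how many devices actually exist, while each IRTECACHE entry is 8 bytes (a 4-byte IRTE plus a 4-byte key), i.e. only 16 × 8 = 128 bytes at the default capacity.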
…
     STAMCOUNTER     StatIotlbeLazyEvictReuse;   /**< Number of IOTLB entries re-used after lazy eviction. */

+    STAMPROFILEADV  StatProfDteLookup;          /**< Profiling of I/O page walk (from memory). */
     STAMPROFILEADV  StatProfIotlbeLookup;       /**< Profiling of IOTLB entry lookup (from cache). */
-    STAMPROFILEADV  StatProfDteLookup;          /**< Profiling of I/O page walk (from memory). */
+
+    STAMPROFILEADV  StatProfIrteLookup;         /**< Profiling of IRTE entry lookup (from memory). */
+    STAMPROFILEADV  StatProfIrteCacheLookup;    /**< Profiling of IRTE entry lookup (from cache). */

     STAMCOUNTER     StatAccessCacheHit;         /**< Number of IOTLB cache hits. */
…
     STAMCOUNTER     StatAccessDteNonContig;     /**< Number of DTE accesses resulting in non-contiguous access. */
     STAMCOUNTER     StatAccessDtePermDenied;    /**< Number of DTE accesses resulting in insufficient permissions. */
+
+    STAMCOUNTER     StatIntrCacheHit;           /**< Number of interrupt cache hits. */
+    STAMCOUNTER     StatIntrCacheMiss;          /**< Number of interrupt cache misses. */
     /** @} */
 #endif
…
 AssertCompileMemberAlignment(IOMMU, hEvtCmdThread, 8);
 AssertCompileMemberAlignment(IOMMU, hMmio, 8);
+#ifdef IOMMU_WITH_DTE_CACHE
+AssertCompileMemberAlignment(IOMMU, paDteCache, 8);
+#endif
 #ifdef IOMMU_WITH_IOTLBE_CACHE
-AssertCompileMemberAlignment(IOMMU, paDevices, 8);
 AssertCompileMemberAlignment(IOMMU, paIotlbes, 8);
 AssertCompileMemberAlignment(IOMMU, TreeIotlbe, 8);
 AssertCompileMemberAlignment(IOMMU, LstLruIotlbe, 8);
+#endif
+#ifdef IOMMU_WITH_IRTE_CACHE
+AssertCompileMemberAlignment(IOMMU, paIrteCache, 8);
 #endif
 AssertCompileMemberAlignment(IOMMU, IommuBar, 8);
…
-#if 0
-/**
- * Gets the number of (unconsumed) commands in the command buffer.
- *
- * @returns The number of commands in the command buffer.
- * @param   pThis   The IOMMU device state.
- */
-static uint32_t iommuAmdGetCmdBufEntryCount(PIOMMU pThis)
-{
-    uint32_t const idxTail = pThis->CmdBufTailPtr.n.off >> IOMMU_CMD_GENERIC_SHIFT;
-    uint32_t const idxHead = pThis->CmdBufHeadPtr.n.off >> IOMMU_CMD_GENERIC_SHIFT;
-    if (idxTail >= idxHead)
-        return idxTail - idxHead;
-
-    uint32_t const cMaxCmds = iommuAmdGetBufMaxEntries(pThis->CmdBufBaseAddr.n.u4Len);
-    return cMaxCmds - idxHead + idxTail;
-}
-#endif
-
 /**
  * Checks whether two consecutive I/O page lookup results translate to a physically
…
     if (pDte->n.u1Valid)
     {
-        fFlags |= IOMMU_DEV_F_VALID;
+        fFlags |= IOMMU_DTECACHE_F_VALID;

         if (pDte->n.u1SuppressAllPfEvents)
-            fFlags |= IOMMU_DEV_F_SUPPRESS_ALL_IOPF;
+            fFlags |= IOMMU_DTECACHE_F_SUPPRESS_ALL_IOPF;
         if (pDte->n.u1SuppressPfEvents)
-            fFlags |= IOMMU_DEV_F_SUPPRESS_IOPF;
+            fFlags |= IOMMU_DTECACHE_F_SUPPRESS_IOPF;

         uint16_t const fDtePerm = (pDte->au64[0] >> IOMMU_IO_PERM_SHIFT) & IOMMU_IO_PERM_MASK;
-        AssertCompile(IOMMU_DEV_F_IO_PERM_MASK == IOMMU_IO_PERM_MASK);
-        fFlags |= fDtePerm << IOMMU_DEV_F_IO_PERM_SHIFT;
+        AssertCompile(IOMMU_DTECACHE_F_IO_PERM_MASK == IOMMU_IO_PERM_MASK);
+        fFlags |= fDtePerm << IOMMU_DTECACHE_F_IO_PERM_SHIFT;
     }
…
     if (pDte->n.u1IntrMapValid)
     {
-        fFlags |= IOMMU_DEV_F_INTR_MAP_VALID;
+        fFlags |= IOMMU_DTECACHE_F_INTR_MAP_VALID;
         if (pDte->n.u1IgnoreUnmappedIntrs)
-            fFlags |= IOMMU_DEV_F_IGNORE_UNMAPPED_INTR;
+            fFlags |= IOMMU_DTECACHE_F_IGNORE_UNMAPPED_INTR;
+
+        uint16_t const fIntrCtrl = IOMMU_GET_INTR_CTRL(pDte);
+        AssertCompile(IOMMU_DTECACHE_F_INTR_CTRL_MASK == IOMMU_DTE_INTR_CTRL_MASK);
+        fFlags |= fIntrCtrl << IOMMU_DTECACHE_F_INTR_CTRL_SHIFT;
     }
     return fFlags;
 }
+
+
+/**
+ * Remaps the source MSI to the destination MSI given the IRTE.
+ *
+ * @param   pMsiIn      The source MSI.
+ * @param   pMsiOut     Where to store the remapped MSI.
+ * @param   pIrte       The IRTE used for the remapping.
+ */
+static void iommuAmdIrteRemapMsi(PCMSIMSG pMsiIn, PMSIMSG pMsiOut, PCIRTE_T pIrte)
+{
+    /* Preserve all bits from the source MSI address and data that don't map 1:1 from the IRTE. */
+    *pMsiOut = *pMsiIn;
+
+    pMsiOut->Addr.n.u1DestMode = pIrte->n.u1DestMode;
+    pMsiOut->Addr.n.u8DestId   = pIrte->n.u8Dest;
+
+    pMsiOut->Data.n.u8Vector       = pIrte->n.u8Vector;
+    pMsiOut->Data.n.u3DeliveryMode = pIrte->n.u3IntrType;
+}
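For illustration, a hedged usage sketch of the new helper — the types and field names are the ones visible in this diff, while the concrete values are invented:

    /* Hypothetical values for illustration only. */
    MSIMSG MsiIn, MsiOut;
    MsiIn.Addr.u64 = VBOX_MSI_ADDR_BASE;    /* 0xfee00000 MSI address region. */
    MsiIn.Data.u32 = 0x00000042;            /* Vector 0x42, fixed delivery.   */

    IRTE_T Irte;
    Irte.u32             = 0;
    Irte.n.u1RemapEnable = 1;
    Irte.n.u8Vector      = 0x60;            /* Remapped vector.      */
    Irte.n.u8Dest        = 1;               /* Target APIC ID 1.     */

    iommuAmdIrteRemapMsi(&MsiIn, &MsiOut, &Irte);
    /* MsiOut now carries vector 0x60 destined for APIC ID 1; every other
       address/data bit is copied through unchanged from MsiIn. */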
…
 # ifdef IN_RING3
 /**
  * Dumps the IOTLB entry via the debug info helper.
…
     return VINF_SUCCESS;
 }
 # endif /* IN_RING3 */
…
     IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
-    /** @todo Check level 1 cache? */
+    /** @todo Re-check DTE cache? */
     do
     {
…
     IOMMU_UNLOCK_CACHE(pDevIns, pThis);
 }
-
 #endif /* IOMMU_WITH_IOTLBE_CACHE */
+
+
+#ifdef IOMMU_WITH_IRTE_CACHE
+/**
+ * Looks up an IRTE cache entry.
+ *
+ * @returns Index of the found entry, or cache capacity if not found.
+ * @param   pThis       The IOMMU device state.
+ * @param   uDevId      The device ID (bus, device, function).
+ * @param   offIrte     The offset into the interrupt remap table.
+ */
+static uint16_t iommuAmdIrteCacheEntryLookup(PCIOMMU pThis, uint16_t uDevId, uint16_t offIrte)
+{
+    /** @todo Consider sorting and binary search when the cache capacity grows.
+     *        For the IRTE cache this should be okay since typically guests do not alter the
+     *        interrupt remapping once programmed, so hopefully sorting shouldn't happen
+     *        often. */
+    uint32_t const uKey       = IOMMU_IRTE_CACHE_KEY_MAKE(uDevId, offIrte);
+    uint16_t const cIrteCache = pThis->cIrteCache;
+    for (uint16_t i = 0; i < cIrteCache; i++)
+    {
+        PCIRTECACHE pIrteCache = &pThis->paIrteCache[i];
+        if (pIrteCache->uKey == uKey)
+            return i;
+    }
+    return cIrteCache;
+}
+
+
+/**
+ * Gets a free/unused IRTE cache entry.
+ *
+ * @returns The index of an unused entry, or cache capacity if the cache is full.
+ * @param   pThis       The IOMMU device state.
+ */
+static uint16_t iommuAmdIrteCacheEntryGetUnused(PCIOMMU pThis)
+{
+    uint16_t const cIrteCache = pThis->cIrteCache;
+    for (uint16_t i = 0; i < cIrteCache; i++)
+    {
+        PCIRTECACHE pIrteCache = &pThis->paIrteCache[i];
+        if (pIrteCache->uKey == IOMMU_IRTE_CACHE_KEY_NIL)
+        {
+            Assert(!pIrteCache->Irte.u32);
+            return i;
+        }
+    }
+    return cIrteCache;
+}
+
+
+/**
+ * Looks up the IRTE cache for the given MSI.
+ *
+ * @returns VBox status code.
+ * @param   pDevIns     The IOMMU instance data.
+ * @param   uDevId      The device ID (bus, device, function).
+ * @param   enmOp       The IOMMU operation being performed.
+ * @param   pMsiIn      The source MSI.
+ * @param   pMsiOut     Where to store the remapped MSI.
+ */
+static int iommuAmdIrteCacheLookup(PPDMDEVINS pDevIns, uint16_t uDevId, IOMMUOP enmOp, PCMSIMSG pMsiIn, PMSIMSG pMsiOut)
+{
+    RT_NOREF(enmOp); /* May need it if we have to report errors (currently we fall back to the slower path to do that). */
+
+    int rc = VERR_NOT_FOUND;
+    /* Deal with such cases in the slower/fallback path. */
+    if ((pMsiIn->Addr.u64 & VBOX_MSI_ADDR_ADDR_MASK) == VBOX_MSI_ADDR_BASE)
+    { /* likely */ }
+    else
+        return rc;
+
+    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
+
+    PCDTECACHE pDteCache = &pThis->paDteCache[uDevId];
+    if ((pDteCache->fFlags & (IOMMU_DTECACHE_F_PRESENT | IOMMU_DTECACHE_F_INTR_MAP_VALID))
+                          == (IOMMU_DTECACHE_F_PRESENT | IOMMU_DTECACHE_F_INTR_MAP_VALID))
+    {
+        Assert((pMsiIn->Addr.u64 & VBOX_MSI_ADDR_ADDR_MASK) == VBOX_MSI_ADDR_BASE); /* Paranoia. */
+
+        /* Currently, we only cache remapping of fixed and arbitrated interrupts. */
+        uint8_t const u8DeliveryMode = pMsiIn->Data.n.u3DeliveryMode;
+        if (u8DeliveryMode <= VBOX_MSI_DELIVERY_MODE_LOWEST_PRIO)
+        {
+            uint8_t const uIntrCtrl = (pDteCache->fFlags >> IOMMU_DTECACHE_F_INTR_CTRL_SHIFT)
+                                    & IOMMU_DTECACHE_F_INTR_CTRL_MASK;
+            if (uIntrCtrl == IOMMU_INTR_CTRL_REMAP)
+            {
+                /* Interrupt table length has been verified prior to adding entries to the cache. */
+                uint16_t const offIrte      = IOMMU_GET_IRTE_OFF(pMsiIn->Data.u32);
+                uint16_t const idxIrteCache = iommuAmdIrteCacheEntryLookup(pThis, uDevId, offIrte);
+                if (idxIrteCache < pThis->cIrteCache)
+                {
+                    PCIRTE_T pIrte = &pThis->paIrteCache[idxIrteCache].Irte;
+                    iommuAmdIrteRemapMsi(pMsiIn, pMsiOut, pIrte);
+                    rc = VINF_SUCCESS;
+                }
+            }
+            else if (uIntrCtrl == IOMMU_INTR_CTRL_FWD_UNMAPPED)
+            {
+                *pMsiOut = *pMsiIn;
+                rc = VINF_SUCCESS;
+            }
+        }
+    }
+    else if (pDteCache->fFlags & IOMMU_DTECACHE_F_PRESENT)
+    {
+        *pMsiOut = *pMsiIn;
+        rc = VINF_SUCCESS;
+    }
+
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
+    return rc;
+}
+
+
+/**
+ * Adds or updates the IRTE cache for the given IRTE.
+ *
+ * @returns VBox status code.
+ * @retval  VERR_OUT_OF_RESOURCES if the cache is full.
+ *
+ * @param   pDevIns     The IOMMU instance data.
+ * @param   uDevId      The device ID (bus, device, function).
+ * @param   offIrte     The offset into the interrupt remap table.
+ * @param   pIrte       The IRTE to cache.
+ */
+static int iommuAmdIrteCacheAdd(PPDMDEVINS pDevIns, uint16_t uDevId, uint16_t offIrte, PCIRTE_T pIrte)
+{
+    Assert(offIrte != 0xffff);  /* Shouldn't be a valid IRTE table offset since sizeof(IRTE) is a multiple of 4. */
+
+    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
+
+    /* Find an existing entry or get an unused slot. */
+    uint16_t const cIrteCache   = pThis->cIrteCache;
+    uint16_t       idxIrteCache = iommuAmdIrteCacheEntryLookup(pThis, uDevId, offIrte);
+    if (idxIrteCache == pThis->cIrteCache)
+        idxIrteCache = iommuAmdIrteCacheEntryGetUnused(pThis);
+
+    /* Update the cache entry. */
+    int rc;
+    if (idxIrteCache < cIrteCache)
+    {
+        PIRTECACHE pIrteCache = &pThis->paIrteCache[idxIrteCache];
+        pIrteCache->uKey     = IOMMU_IRTE_CACHE_KEY_MAKE(uDevId, offIrte);
+        pIrteCache->Irte.u32 = pIrte->u32;
+        rc = VINF_SUCCESS;
+    }
+    else
+        rc = VERR_OUT_OF_RESOURCES;
+
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
+    return rc;
+}
+
+
+/**
+ * Removes IRTE cache entries for the given device ID.
+ *
+ * @param   pDevIns     The IOMMU instance data.
+ * @param   uDevId      The device ID (bus, device, function).
+ */
+static void iommuAmdIrteCacheRemove(PPDMDEVINS pDevIns, uint16_t uDevId)
+{
+    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
+    uint16_t const cIrteCache = pThis->cIrteCache;
+    for (uint16_t i = 0; i < cIrteCache; i++)
+    {
+        PIRTECACHE pIrteCache = &pThis->paIrteCache[i];
+        if (uDevId == IOMMU_IRTE_CACHE_KEY_GET_DEVICE_ID(pIrteCache->uKey))
+        {
+            pIrteCache->uKey     = IOMMU_IRTE_CACHE_KEY_NIL;
+            pIrteCache->Irte.u32 = 0;
+        }
+    }
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
+}
+
+
+/**
+ * Removes all IRTE cache entries.
+ *
+ * @param   pDevIns     The IOMMU instance data.
+ */
+static void iommuAmdIrteCacheRemoveAll(PPDMDEVINS pDevIns)
+{
+    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
+    uint16_t const cIrteCache = pThis->cIrteCache;
+    for (uint16_t i = 0; i < cIrteCache; i++)
+    {
+        PIRTECACHE pIrteCache = &pThis->paIrteCache[i];
+        pIrteCache->uKey     = IOMMU_IRTE_CACHE_KEY_NIL;
+        pIrteCache->Irte.u32 = 0;
+    }
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
+}
+#endif /* IOMMU_WITH_IRTE_CACHE */
+
+
+#ifdef IOMMU_WITH_DTE_CACHE
 /**
  * Updates the I/O device flags for the given device ID.
…
  *                      0.
  * @param   fOrMask     The device flags (usually compound flags) to OR in with the
- *                      basic flags, see IOMMU_DEV_F_XXX. Pass 0 to flush the DTE
+ *                      basic flags, see IOMMU_DTECACHE_F_XXX. Pass 0 to flush the DTE
  *                      from the cache.
  */
…
     IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);

-    if (fOrMask & IOMMU_DEV_F_PRESENT)
+    if (fOrMask & IOMMU_DTECACHE_F_PRESENT)
     {
         Assert(pDte);
-        pThis->paDevices[uDevId].fFlags    = iommuAmdGetBasicDevFlags(pDte) | fOrMask;
-        pThis->paDevices[uDevId].uDomainId = pDte->n.u16DomainId;
+        pThis->paDteCache[uDevId].fFlags    = iommuAmdGetBasicDevFlags(pDte) | fOrMask;
+        pThis->paDteCache[uDevId].uDomainId = pDte->n.u16DomainId;
     }
     else
     {
-        pThis->paDevices[uDevId].fFlags    = 0;
-        pThis->paDevices[uDevId].uDomainId = 0;
+        pThis->paDteCache[uDevId].fFlags    = 0;
+        pThis->paDteCache[uDevId].uDomainId = 0;
     }
…
     IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);

-    if (fDevIoFlags & IOMMU_DEV_F_PRESENT)
-        pThis->paDevices[uDevId].fFlags |= fDevIoFlags;
+    if (fDevIoFlags & IOMMU_DTECACHE_F_PRESENT)
+        pThis->paDteCache[uDevId].fFlags |= fDevIoFlags;

     IOMMU_UNLOCK_CACHE(pDevIns, pThis);
…
     IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);

-    size_t const cbDevices = sizeof(IODEVICE) * IOMMU_DTE_CACHE_MAX;
-    RT_BZERO(pThis->paDevices, cbDevices);
+    size_t const cbDteCache = sizeof(DTECACHE) * IOMMU_DTE_CACHE_MAX;
+    RT_BZERO(pThis->paDteCache, cbDteCache);

     IOMMU_UNLOCK_CACHE(pDevIns, pThis);
 }
-#endif /* IOMMU_WITH_IOTLBE_CACHE */
-
-
+#endif /* IOMMU_WITH_DTE_CACHE */
+
+
+/**
+ * Atomically reads the control register without locking the IOMMU device.
+ *
+ * @returns The control register.
+ * @param   pThis   The IOMMU device state.
+ */
 DECL_FORCE_INLINE(IOMMU_CTRL_T) iommuAmdGetCtrlUnlocked(PCIOMMU pThis)
 {
…
  *
  * @param   pDevIns         The IOMMU instance data.
- * @param   fIoDevFlags     The I/O device flags, see IOMMU_DEV_F_XXX.
+ * @param   fIoDevFlags     The I/O device flags, see IOMMU_DTECACHE_F_XXX.
  * @param   pIrte           The interrupt remapping table entry, can be NULL.
  * @param   enmOp           The IOMMU operation being performed.
…
 #ifdef IOMMU_WITH_IOTLBE_CACHE
 # define IOMMU_DTE_CACHE_SET_PF_RAISED(a_pDevIns, a_DevId)  iommuAmdDteCacheSetFlags((a_pDevIns), (a_DevId), \
-                                                                                     IOMMU_DEV_F_IO_PAGE_FAULT_RAISED)
+                                                                                     IOMMU_DTECACHE_F_IO_PAGE_FAULT_RAISED)
 #else
 # define IOMMU_DTE_CACHE_SET_PF_RAISED(a_pDevIns, a_DevId)  do { } while (0)
…
         || enmOp == IOMMUOP_MEM_WRITE)
     {
-        uint16_t const fSuppressIopf    = IOMMU_DEV_F_VALID | IOMMU_DEV_F_SUPPRESS_IOPF | IOMMU_DEV_F_IO_PAGE_FAULT_RAISED;
-        uint16_t const fSuppressAllIopf = IOMMU_DEV_F_VALID | IOMMU_DEV_F_SUPPRESS_ALL_IOPF;
+        uint16_t const fSuppressIopf    = IOMMU_DTECACHE_F_VALID
+                                        | IOMMU_DTECACHE_F_SUPPRESS_IOPF
+                                        | IOMMU_DTECACHE_F_IO_PAGE_FAULT_RAISED;
+        uint16_t const fSuppressAllIopf = IOMMU_DTECACHE_F_VALID
+                                        | IOMMU_DTECACHE_F_SUPPRESS_ALL_IOPF;
         if (   (fIoDevFlags & fSuppressAllIopf) == fSuppressAllIopf
             || (fIoDevFlags & fSuppressIopf)    == fSuppressIopf)
…
     else if (enmOp == IOMMUOP_INTR_REQ)
     {
-        uint16_t const fSuppressIopf = IOMMU_DEV_F_VALID | IOMMU_DEV_F_INTR_MAP_VALID | IOMMU_DEV_F_IGNORE_UNMAPPED_INTR;
+        uint16_t const fSuppressIopf = IOMMU_DTECACHE_F_VALID
+                                     | IOMMU_DTECACHE_F_INTR_MAP_VALID
+                                     | IOMMU_DTECACHE_F_IGNORE_UNMAPPED_INTR;
         if ((fIoDevFlags & fSuppressIopf) == fSuppressIopf)
             fSuppressEvtLogging = true;
…
     {
         /* Update that addresses requires translation (cumulative permissions of DTE and I/O page tables). */
-        iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DEV_F_PRESENT | IOMMU_DEV_F_ADDR_TRANSLATE);
+        iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DTECACHE_F_PRESENT | IOMMU_DTECACHE_F_ADDR_TRANSLATE);
         /* Update IOTLB for the contiguous range of I/O virtual addresses. */
         iommuAmdIotlbAddRange(pDevIns, Dte.n.u16DomainId, uIova & X86_PAGE_4K_BASE_MASK, cbPages,
…
 #if defined(IN_RING3) && defined(IOMMU_WITH_IOTLBE_CACHE)
         /* Update that addresses permissions of DTE apply (but omit address translation). */
-        iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DEV_F_PRESENT | IOMMU_DEV_F_IO_PERM);
+        iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DTECACHE_F_PRESENT | IOMMU_DTECACHE_F_IO_PERM);
 #endif
     }
…
 #if defined(IN_RING3) && defined(IOMMU_WITH_IOTLBE_CACHE)
         /* Update that addresses don't require translation (nor permission checks) but a DTE is present. */
-        iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DEV_F_PRESENT);
+        iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DTECACHE_F_PRESENT);
 #endif
     }
…
     /*
-     * We hold the cache lock across both the device and the IOTLB lookups (if any) because
-     * we don't want the device cache to be invalidated while we perform IOTLB lookups.
+     * We hold the cache lock across both the DTE and the IOTLB lookups (if any) because
+     * we don't want the DTE cache to be invalidated while we perform IOTLB lookups.
      */
     IOMMU_LOCK_CACHE(pDevIns, pThis);

-    /* Lookup the device from the level 1 cache. */
-    PCIODEVICE pDevice = &pThis->paDevices[uDevId];
-    if ((pDevice->fFlags & (IOMMU_DEV_F_PRESENT | IOMMU_DEV_F_VALID | IOMMU_DEV_F_ADDR_TRANSLATE))
-                        == (IOMMU_DEV_F_PRESENT | IOMMU_DEV_F_VALID | IOMMU_DEV_F_ADDR_TRANSLATE))
+    /* Lookup the DTE cache entry. */
+    PCDTECACHE pDteCache = &pThis->paDteCache[uDevId];
+    if ((pDteCache->fFlags & (IOMMU_DTECACHE_F_PRESENT | IOMMU_DTECACHE_F_VALID | IOMMU_DTECACHE_F_ADDR_TRANSLATE))
+                          == (IOMMU_DTECACHE_F_PRESENT | IOMMU_DTECACHE_F_VALID | IOMMU_DTECACHE_F_ADDR_TRANSLATE))
     {
-        /* Lookup the IOTLB entries from the level 2 cache. */
+        /* Lookup IOTLB entries. */
         IOADDRRANGE AddrIn;
         AddrIn.uAddr = uIova;
…
         Aux.pDte      = NULL;
         Aux.uDeviceId = uDevId;
-        Aux.uDomainId = pDevice->uDomainId;
+        Aux.uDomainId = pDteCache->uDomainId;

         IOADDRRANGE AddrOut;
…
         *pcbContiguous = AddrOut.cb;
     }
-    else if ((pDevice->fFlags & (IOMMU_DEV_F_PRESENT | IOMMU_DEV_F_VALID | IOMMU_DEV_F_IO_PERM))
-                             == (IOMMU_DEV_F_PRESENT | IOMMU_DEV_F_VALID | IOMMU_DEV_F_IO_PERM))
+    else if ((pDteCache->fFlags & (IOMMU_DTECACHE_F_PRESENT | IOMMU_DTECACHE_F_VALID | IOMMU_DTECACHE_F_IO_PERM))
+                               == (IOMMU_DTECACHE_F_PRESENT | IOMMU_DTECACHE_F_VALID | IOMMU_DTECACHE_F_IO_PERM))
     {
         /* Address translation is disabled, but DTE permissions apply. */
-        Assert(!(pDevice->fFlags & IOMMU_DEV_F_ADDR_TRANSLATE));
-        uint8_t const fDtePerm = (pDevice->fFlags >> IOMMU_DEV_F_IO_PERM_SHIFT) & IOMMU_DEV_F_IO_PERM_MASK;
+        Assert(!(pDteCache->fFlags & IOMMU_DTECACHE_F_ADDR_TRANSLATE));
+        uint8_t const fDtePerm = (pDteCache->fFlags >> IOMMU_DTECACHE_F_IO_PERM_SHIFT) & IOMMU_DTECACHE_F_IO_PERM_MASK;
         if ((fDtePerm & fPerm) == fPerm)
         {
…
         }
     }
-    else if (pDevice->fFlags & IOMMU_DEV_F_PRESENT)
+    else if (pDteCache->fFlags & IOMMU_DTECACHE_F_PRESENT)
     {
         /* Forward addresses untranslated, without checking permissions. */
…
     {
         EVT_IO_PAGE_FAULT_T EvtIoPageFault;
-        iommuAmdIoPageFaultEventInit(uDevId, pDevice->uDomainId, uIova, true /* fPresent */,
+        iommuAmdIoPageFaultEventInit(uDevId, pDteCache->uDomainId, uIova, true /* fPresent */,
                                      false /* fRsvdNotZero */, true /* fPermDenied */, enmOp, &EvtIoPageFault);
-        iommuAmdIoPageFaultEventRaise(pDevIns, pDevice->fFlags, NULL /* pIrte */, enmOp, &EvtIoPageFault,
+        iommuAmdIoPageFaultEventRaise(pDevIns, pDteCache->fFlags, NULL /* pIrte */, enmOp, &EvtIoPageFault,
                                       kIoPageFaultType_PermDenied);
     }
…
     return rc;
 }
-#endif /* IOMMU_WITH_IOTLBE_CACHE */
+#endif /* IN_RING3 && IOMMU_WITH_IOTLBE_CACHE */
…
     RTGCPHYS const GCPhysIntrTable = pDte->au64[2] & IOMMU_DTE_IRTE_ROOT_PTR_MASK;
     uint16_t const cbIntrTable     = IOMMU_GET_INTR_TAB_LEN(pDte);
-    uint16_t const offIrte         = (uDataIn & IOMMU_MSI_DATA_IRTE_OFFSET_MASK) * sizeof(IRTE_T);
+    uint16_t const offIrte         = IOMMU_GET_IRTE_OFF(uDataIn);
     RTGCPHYS const GCPhysIrte      = GCPhysIntrTable + offIrte;
…
     IRTE_T Irte;
-    int rc = iommuAmdIrteRead(pDevIns, uDevId, pDte, pMsiIn->Addr.u64, pMsiIn->Data.u32, enmOp, &Irte);
+    uint32_t const uMsiInData = pMsiIn->Data.u32;
+    int rc = iommuAmdIrteRead(pDevIns, uDevId, pDte, pMsiIn->Addr.u64, uMsiInData, enmOp, &Irte);
     if (RT_SUCCESS(rc))
     {
…
         if (Irte.n.u3IntrType <= VBOX_MSI_DELIVERY_MODE_LOWEST_PRIO)
         {
-            /* Preserve all bits from the source MSI address and data that don't map 1:1 from the IRTE. */
-            *pMsiOut = *pMsiIn;
-
-            pMsiOut->Addr.n.u1DestMode = Irte.n.u1DestMode;
-            pMsiOut->Addr.n.u8DestId   = Irte.n.u8Dest;
-
-            pMsiOut->Data.n.u8Vector       = Irte.n.u8Vector;
-            pMsiOut->Data.n.u3DeliveryMode = Irte.n.u3IntrType;
+            iommuAmdIrteRemapMsi(pMsiIn, pMsiOut, &Irte);
+#ifdef IOMMU_WITH_IRTE_CACHE
+            /* Add/Update the interrupt cache with the remapped results. */
+            uint16_t const offIrte  = IOMMU_GET_IRTE_OFF(uMsiInData);
+            int const      rcUpdate = iommuAmdIrteCacheAdd(pDevIns, uDevId, offIrte, &Irte);
+            if (RT_FAILURE(rcUpdate))
+                LogRelMax(1, ("%s: Warning! Interrupt cache full. Consider increasing cache capacity.\n", IOMMU_LOG_PFX));
+#endif
             return VINF_SUCCESS;
         }
…
         return VERR_IOMMU_INTR_REMAP_FAILED;
     }
+
+#ifdef IOMMU_WITH_IRTE_CACHE
+    /* Update the DTE cache -after- we've checked reserved bits (above) when the interrupt map is valid. */
+    iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DTECACHE_F_PRESENT);
+#endif

     /*
…
         else
         {
+            /** @todo Should this cause a PCI target abort here? */
             LogFunc(("MSI address region invalid %#RX64\n", pMsiIn->Addr.u64));
             return VERR_IOMMU_INTR_REMAP_FAILED;
         }
     }
     else
     {
-        /** @todo IOMMU: Add to interrupt remapping cache. */
+#ifdef IOMMU_WITH_IRTE_CACHE
+        /* Update the DTE cache that the interrupt map isn't valid. */
+        iommuAmdDteCacheUpdate(pDevIns, uDevId, &Dte, IOMMU_DTECACHE_F_PRESENT);
+#endif
         LogFlowFunc(("DTE interrupt map not valid\n"));
         *pMsiOut = *pMsiIn;
…
     {
         STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMsiRemap));
-        /** @todo Cache? */
-
-        return iommuAmdIntrTableLookup(pDevIns, uDevId, IOMMUOP_INTR_REQ, pMsiIn, pMsiOut);
+
+        int rc;
+#ifdef IOMMU_WITH_IRTE_CACHE
+        STAM_PROFILE_ADV_START(&pThis->StatProfIrteCacheLookup, a);
+        rc = iommuAmdIrteCacheLookup(pDevIns, uDevId, IOMMUOP_INTR_REQ, pMsiIn, pMsiOut);
+        STAM_PROFILE_ADV_STOP(&pThis->StatProfIrteCacheLookup, a);
+        if (RT_SUCCESS(rc))
+        {
+            STAM_COUNTER_INC(&pThis->StatIntrCacheHit);
+            return VINF_SUCCESS;
+        }
+        STAM_COUNTER_INC(&pThis->StatIntrCacheMiss);
+#endif
+
+        STAM_PROFILE_ADV_START(&pThis->StatProfIrteLookup, a);
+        rc = iommuAmdIntrTableLookup(pDevIns, uDevId, IOMMUOP_INTR_REQ, pMsiIn, pMsiOut);
+        STAM_PROFILE_ADV_STOP(&pThis->StatProfIrteLookup, a);
+        return rc;
     }
…
         case IOMMU_CMD_INV_INTR_TABLE:
         {
-            /** @todo IOMMU: Implement this once we implement IOTLB. Pretend success until
-             *        then. */
             STAM_COUNTER_INC(&pThis->StatCmdInvIntrTable);
-            return VINF_SUCCESS;
+
+            PCCMD_INV_INTR_TABLE_T pCmdInvIntrTable = (PCCMD_INV_INTR_TABLE_T)pCmd;
+            AssertCompile(sizeof(*pCmdInvIntrTable) == sizeof(*pCmd));
+
+            /* Validate reserved bits in the command. */
+            if (   !(pCmdInvIntrTable->au64[0] & ~IOMMU_CMD_INV_INTR_TABLE_QWORD_0_VALID_MASK)
+                && !(pCmdInvIntrTable->au64[1] & ~IOMMU_CMD_INV_INTR_TABLE_QWORD_1_VALID_MASK))
+            {
+#ifdef IOMMU_WITH_IRTE_CACHE
+                iommuAmdIrteCacheRemove(pDevIns, pCmdInvIntrTable->u.u16DevId);
+#endif
+                return VINF_SUCCESS;
+            }
+            iommuAmdIllegalCmdEventInit(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
+            return VERR_IOMMU_CMD_INVALID_FORMAT;
         }
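The qword-0 valid mask 0xf00000000000ffff keeps exactly the 16-bit DeviceID (bits 15:0) and the 4-bit opcode (bits 63:60, value 5 for INVALIDATE_INTERRUPT_TABLE per the AMD spec); any other set bit fails the check. A worked check with invented qwords:

    uint64_t const fQw0Valid = UINT64_C(0xf00000000000ffff);
    uint64_t const uQw0Ok    = UINT64_C(0x5000000000000010); /* Opcode 5, device 00:02.0.  */
    uint64_t const uQw0Bad   = UINT64_C(0x5000000000010010); /* Stray reserved bit 16 set. */
    /* (uQw0Ok  & ~fQw0Valid) == 0  -> accepted.
       (uQw0Bad & ~fQw0Valid) != 0  -> ILLEGAL_COMMAND_ERROR event is raised. */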
…
             }
             iommuAmdIllegalCmdEventInit(GCPhysCmd, (PEVT_ILLEGAL_CMD_ERR_T)pEvtError);
-            return VERR_IOMMU_CMD_NOT_SUPPORTED;
+            return VERR_IOMMU_CMD_INVALID_FORMAT;
 #else
             return VINF_SUCCESS;
…
-# if defined(IN_RING3) && defined(IOMMU_WITH_IOTLBE_CACHE)
+# ifdef IOMMU_WITH_IOTLBE_CACHE
 /**
  * @callback_method_impl{FNDBGFHANDLERDEV}
…
     else
         pHlp->pfnPrintf(pHlp, "Missing domain ID.\n");
 }
 # endif
+
+
+#ifdef IOMMU_WITH_IRTE_CACHE
+/**
+ * Gets the interrupt type name for an interrupt type in the IRTE.
+ *
+ * @returns The interrupt type name.
+ * @param   uIntrType   The interrupt type (as specified in the IRTE).
+ */
+static const char *iommuAmdIrteGetIntrTypeName(uint8_t uIntrType)
+{
+    switch (uIntrType)
+    {
+        case VBOX_MSI_DELIVERY_MODE_FIXED:       return "Fixed";
+        case VBOX_MSI_DELIVERY_MODE_LOWEST_PRIO: return "Arbitrated";
+        default:                                 return "<Reserved>";
+    }
+}
+
+
+/**
+ * @callback_method_impl{FNDBGFHANDLERDEV}
+ */
+static DECLCALLBACK(void) iommuAmdR3DbgInfoIrtes(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+    RT_NOREF(pszArgs);
+
+    PIOMMU pThis = PDMDEVINS_2_DATA(pDevIns, PIOMMU);
+    IOMMU_LOCK_CACHE_NORET(pDevIns, pThis);
+
+    uint16_t const cIrteCache = pThis->cIrteCache;
+    pHlp->pfnPrintf(pHlp, "IRTE Cache: Capacity=%u entries\n", cIrteCache);
+    for (uint16_t idxIrte = 0; idxIrte < cIrteCache; idxIrte++)
+    {
+        PCIRTECACHE pIrteCache = &pThis->paIrteCache[idxIrte];
+        uint32_t const uKey = pIrteCache->uKey;
+        if (uKey != IOMMU_IRTE_CACHE_KEY_NIL)
+        {
+            uint16_t const uDeviceId = IOMMU_IRTE_CACHE_KEY_GET_DEVICE_ID(uKey);
+            uint16_t const offIrte   = IOMMU_IRTE_CACHE_KEY_GET_OFF(uKey);
+            pHlp->pfnPrintf(pHlp, " Entry[%u]: Offset=%#x Device=%#x (BDF %02x:%02x.%d)\n",
+                            idxIrte, offIrte, uDeviceId,
+                            (uDeviceId >> VBOX_PCI_BUS_SHIFT) & VBOX_PCI_BUS_MASK,
+                            (uDeviceId >> VBOX_PCI_DEVFN_DEV_SHIFT) & VBOX_PCI_DEVFN_DEV_MASK,
+                            uDeviceId & VBOX_PCI_DEVFN_FUN_MASK);
+
+            PCIRTE_T pIrte = &pIrteCache->Irte;
+            pHlp->pfnPrintf(pHlp, "  Remap Enable     = %RTbool\n", pIrte->n.u1RemapEnable);
+            pHlp->pfnPrintf(pHlp, "  Suppress IOPF    = %RTbool\n", pIrte->n.u1SuppressIoPf);
+            pHlp->pfnPrintf(pHlp, "  Interrupt Type   = %#x (%s)\n", pIrte->n.u3IntrType,
+                            iommuAmdIrteGetIntrTypeName(pIrte->n.u3IntrType));
+            pHlp->pfnPrintf(pHlp, "  Request EOI      = %RTbool\n", pIrte->n.u1ReqEoi);
+            pHlp->pfnPrintf(pHlp, "  Destination mode = %s\n", pIrte->n.u1DestMode ? "Logical" : "Physical");
+            pHlp->pfnPrintf(pHlp, "  Destination Id   = %u\n", pIrte->n.u8Dest);
+            pHlp->pfnPrintf(pHlp, "  Vector           = %#x (%u)\n", pIrte->n.u8Vector, pIrte->n.u8Vector);
+            pHlp->pfnPrintf(pHlp, "\n");
+        }
+    }
+    IOMMU_UNLOCK_CACHE(pDevIns, pThis);
+}
+#endif
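Once the construction code below registers this handler under the name "iommuirtes", it can be driven from the host like any other DBGF info item, e.g. via `VBoxManage debugvm "MyVM" info iommuirtes` (VM name invented; exact CLI availability depends on the VBoxManage build).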
…
     IOMMU_UNLOCK(pDevIns, pThisCC);

+#ifdef IOMMU_WITH_DTE_CACHE
+    iommuAmdDteCacheRemoveAll(pDevIns);
+#endif
 #ifdef IOMMU_WITH_IOTLBE_CACHE
-    iommuAmdDteCacheRemoveAll(pDevIns);
     iommuAmdIotlbRemoveAll(pDevIns);
+#endif
+#ifdef IOMMU_WITH_IRTE_CACHE
+    iommuAmdIrteCacheRemoveAll(pDevIns);
 #endif
 }
…
     }

+#ifdef IOMMU_WITH_DTE_CACHE
+    /* Destroy the DTE cache. */
+    if (pThis->paDteCache)
+    {
+        PDMDevHlpMMHeapFree(pDevIns, pThis->paDteCache);
+        pThis->paDteCache = NULL;
+    }
+#endif
+
 #ifdef IOMMU_WITH_IOTLBE_CACHE
-    /* Destroy level 1 cache. */
-    if (pThis->paDevices)
-    {
-        PDMDevHlpMMHeapFree(pDevIns, pThis->paDevices);
-        pThis->paDevices = NULL;
-    }
-
-    /* Destroy level 2 cache. */
+    /* Destroy the IOTLB cache. */
     if (pThis->paIotlbes)
     {
         PDMDevHlpMMHeapFree(pDevIns, pThis->paIotlbes);
         pThis->paIotlbes = NULL;
+        pThis->idxUnusedIotlbe = 0;
+    }
+#endif
+
+#ifdef IOMMU_WITH_IRTE_CACHE
+    /* Destroy the interrupt cache. */
+    if (pThis->paIrteCache)
+    {
+        PDMDevHlpMMHeapFree(pDevIns, pThis->paIrteCache);
+        pThis->paIrteCache = NULL;
     }
 #endif
…
     PDMDevHlpDBGFInfoRegister(pDevIns, "iommutlb", "Display IOTLBs for a domain. Arguments: DomainID.", iommuAmdR3DbgInfoIotlb);
 #endif
+#ifdef IOMMU_WITH_IRTE_CACHE
+    PDMDevHlpDBGFInfoRegister(pDevIns, "iommuirtes", "Display the IRTE cache.", iommuAmdR3DbgInfoIrtes);
+#endif

 # ifdef VBOX_WITH_STATISTICS
…
     PDMDevHlpSTAMRegister(pDevIns, &pThis->StatProfIotlbeLookup, STAMTYPE_PROFILE, "Profile/IotlbeLookup", STAMUNIT_TICKS_PER_CALL, "Profiling IOTLBE lookup.");

+    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatProfIrteLookup, STAMTYPE_PROFILE, "Profile/IrteLookup", STAMUNIT_TICKS_PER_CALL, "Profiling IRTE lookup.");
+    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatProfIrteCacheLookup, STAMTYPE_PROFILE, "Profile/IrteCacheLookup", STAMUNIT_TICKS_PER_CALL, "Profiling IRTE cache lookup.");
+
     PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCacheHit, STAMTYPE_COUNTER, "Access/CacheHit", STAMUNIT_OCCURENCES, "Number of cache hits.");
     PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessCacheMiss, STAMTYPE_COUNTER, "Access/CacheMiss", STAMUNIT_OCCURENCES, "Number of cache misses.");
…
     PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessDteNonContig, STAMTYPE_COUNTER, "Access/DteNonContig", STAMUNIT_OCCURENCES, "Number of DTE accesses that resulted in non-contiguous translated regions.");
     PDMDevHlpSTAMRegister(pDevIns, &pThis->StatAccessDtePermDenied, STAMTYPE_COUNTER, "Access/DtePermDenied", STAMUNIT_OCCURENCES, "Number of DTE accesses that resulted in denied permissions.");
+
+    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntrCacheHit, STAMTYPE_COUNTER, "Intr/CacheHit", STAMUNIT_OCCURENCES, "Number of cache hits.");
+    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntrCacheMiss, STAMTYPE_COUNTER, "Intr/CacheMiss", STAMUNIT_OCCURENCES, "Number of cache misses.");
 # endif
…
     AssertLogRelRCReturn(rc, rc);

-#ifdef IOMMU_WITH_IOTLBE_CACHE
+#ifdef IOMMU_WITH_DTE_CACHE
     /*
      * Initialize the critsect of the cache.
…
     /*
-     * Allocate the level 1 cache (device ID to domain ID mapping).
-     * PCI devices are hotpluggable, plus we don't have a way of querying the bus for all
+     * Allocate the device table entry cache.
+     * PCI devices are hotpluggable and we don't have a way of querying the bus for all
      * assigned PCI BDF slots. So while this wastes some memory, it should work regardless
-     * of how code, features and devices around the IOMMU changes.
+     * of how code, features and devices around the IOMMU change.
      */
-    size_t const cbDevices = sizeof(IODEVICE) * IOMMU_DTE_CACHE_MAX;
+    size_t cbCache = 0;
+    size_t const cbDteCache = sizeof(DTECACHE) * IOMMU_DTE_CACHE_MAX;
     AssertCompile(IOMMU_DTE_CACHE_MAX >= UINT16_MAX);
-    pThis->paDevices = (PIODEVICE)PDMDevHlpMMHeapAllocZ(pDevIns, cbDevices);
-    if (!pThis->paDevices)
-    {
+    pThis->paDteCache = (PDTECACHE)PDMDevHlpMMHeapAllocZ(pDevIns, cbDteCache);
+    if (!pThis->paDteCache)
         return PDMDevHlpVMSetError(pDevIns, VERR_NO_MEMORY, RT_SRC_POS,
-                                   N_("Failed to allocate %zu bytes from the hyperheap for the IOMMU level 1 cache."), cbDevices);
-    }
+                                   N_("Failed to allocate %zu bytes from the hyperheap for the DTE cache."), cbDteCache);
+    cbCache += cbDteCache;
+#endif
+
+#ifdef IOMMU_WITH_IOTLBE_CACHE
     /*
-     * Allocate the level 2 cache (IOTLB entries).
+     * Allocate IOTLB entries.
      * This is allocated upfront since we expect a relatively small number of entries,
      * is more cache-line efficient and easier to track least recently used entries for
-     * eviction when the cache is full. This also prevents unpredictable behavior during
-     * the lifetime of the VM if the hyperheap gets full as allocation would fail upfront
-     * or not at all.
+     * eviction when the cache is full. This also avoids unpredictable behavior during
+     * the lifetime of the VM if the hyperheap gets full.
      */
     size_t const cbIotlbes = sizeof(IOTLBE) * IOMMU_IOTLBE_MAX;
     pThis->paIotlbes = (PIOTLBE)PDMDevHlpMMHeapAllocZ(pDevIns, cbIotlbes);
     if (!pThis->paIotlbes)
-    {
         return PDMDevHlpVMSetError(pDevIns, VERR_NO_MEMORY, RT_SRC_POS,
-                                   N_("Failed to allocate %zu bytes from the hyperheap for the IOMMU level 2 cache."),
-                                   cbIotlbes);
-    }
+                                   N_("Failed to allocate %zu bytes from the hyperheap for the IOTLB cache."), cbIotlbes);
     RTListInit(&pThis->LstLruIotlbe);
-
-    LogRel(("%s: Allocated %zu bytes from the hyperheap for the IOTLB cache\n", IOMMU_LOG_PFX, cbDevices + cbIotlbes));
+    cbCache += cbIotlbes;
+#endif
+
+#ifdef IOMMU_WITH_IRTE_CACHE
+    /* Maximum number of elements in the IRTE cache. */
+    PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
+    rc = pHlp->pfnCFGMQueryU16Def(pCfg, "InterruptCacheCount", &pThis->cIrteCache, IOMMU_IRTE_CACHE_DEFAULT);
+    if (RT_FAILURE(rc))
+        return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS, N_("IOMMU: failed to read InterruptCacheCount as integer"));
+    AssertCompile(IOMMU_IRTE_CACHE_DEFAULT >= IOMMU_IRTE_CACHE_MIN);
+    AssertCompile(IOMMU_IRTE_CACHE_DEFAULT <= IOMMU_IRTE_CACHE_MAX);
+    if (   pThis->cIrteCache < IOMMU_IRTE_CACHE_MIN
+        || pThis->cIrteCache > IOMMU_IRTE_CACHE_MAX)
+        return PDMDevHlpVMSetError(pDevIns, VERR_INVALID_PARAMETER, RT_SRC_POS,
+                                   N_("IOMMU: InterruptCacheCount invalid (must be between %u and %u)."),
+                                   IOMMU_IRTE_CACHE_MIN, IOMMU_IRTE_CACHE_MAX);
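The new InterruptCacheCount CFGM key can therefore be overridden per VM within the 8..32 range; assuming the device instance is registered as "iommu-amd" (the registration is not part of this diff), something like `VBoxManage setextradata "MyVM" "VBoxInternal/Devices/iommu-amd/0/Config/InterruptCacheCount" 24` would do it.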
6529 */ 6530 size_t const cbIrteCache = sizeof(IRTECACHE) * pThis->cIrteCache; 6531 pThis->paIrteCache = (PIRTECACHE)PDMDevHlpMMHeapAllocZ(pDevIns, cbIrteCache); 6532 if (!pThis->paIrteCache) 6533 return PDMDevHlpVMSetError(pDevIns, VERR_NO_MEMORY, RT_SRC_POS, 6534 N_("Failed to allocate %zu bytes from the hyperheap for the interrupt cache."), cbIrteCache); 6535 cbCache += cbIrteCache; 6536 #endif 6537 6538 #ifdef IOMMU_WITH_DTE_CACHE 6539 LogRel(("%s: Allocated %zu bytes from the hyperheap for the IOMMU cache\n", IOMMU_LOG_PFX, cbCache)); 6080 6540 #endif 6081 6541 … … 6141 6601 /* 6142 6602 * Initialize parts of the IOMMU state as it would during reset. 6603 * Also initializes non-zero initial values like IRTE cache keys. 6143 6604 * Must be called -after- initializing PCI config. space registers. 6144 6605 */