VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/GICAll.cpp@109206

Last change on this file was 109206, checked in by vboxsync, 3 weeks ago:

VMM/GIC: bugref:10877 GITS command-queue, work-in-progress.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 145.6 KB
1/* $Id: GICAll.cpp 109206 2025-05-08 12:02:48Z vboxsync $ */
2/** @file
3 * GIC - Generic Interrupt Controller Architecture (GIC) - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_gic GIC - Generic Interrupt Controller
29 *
30 * The GIC is an interrupt controller device that lives in VMM but also registers
31 * itself with PDM, similar to the APIC. The reason for this is that it needs to
32 * access per-VCPU data and is an integral part of any ARMv8 VM.
33 *
34 * The GIC is made up of 3 main components:
35 * - Distributor
36 * - Redistributor
37 * - Interrupt Translation Service (ITS)
38 *
39 * The distributor is per-VM while the redistributors are per-VCPU. PEs (Processing
40 * Elements) and CIs (CPU Interfaces) correspond to VCPUs. The distributor and
41 * redistributor each have their memory mapped I/O regions. The redistributor is
42 * accessible via CPU system registers as well. The distributor and redistributor
43 * code lives in GICAll.cpp and GICR3.cpp.
44 *
45 * The ITS is the interrupt translation service component of the GIC and its
46 * presence is optional. It provides MSI support along with routing interrupt
47 * sources to specific PEs. The ITS is only accessible via its memory mapped I/O
48 * region. When the MMIO handle for the ITS region is NIL_IOMMMIOHANDLE it's
49 * considered to be disabled for the VM. Most of the ITS code lives in GITSAll.cpp.
50 *
51 * This implementation only targets GICv3. It does not support dual security
52 * states, nor the higher exception levels (EL2, EL3). Earlier GIC versions are
53 * considered legacy and not important enough to be emulated.
54 * GICv4 primarily adds support for virtualizing the GIC and its necessity will be
55 * evaluated in the future if/when there is support for nested virtualization on
56 * ARMv8 hosts.
57 */
58
59
60/*********************************************************************************************************************************
61* Header Files *
62*********************************************************************************************************************************/
63#define LOG_GROUP LOG_GROUP_DEV_GIC
64#include "GICInternal.h"
65#include <VBox/vmm/pdmgic.h>
66#include <VBox/vmm/pdmdev.h>
67#include <VBox/vmm/pdmapi.h>
68#include <VBox/vmm/vmcc.h>
69#include <VBox/vmm/vmm.h>
70#include <VBox/vmm/vmcpuset.h>
71
72
73/*********************************************************************************************************************************
74* Defined Constants And Macros *
75*********************************************************************************************************************************/
76#define GIC_IDLE_PRIORITY 0xff
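/* The range-check macros below rely on unsigned arithmetic: (x - start) wraps around
   for x < start, so a single '< size' comparison rejects IDs below and above the range. */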
77#define GIC_IS_INTR_SGI(a_uIntId) ((a_uIntId) - GIC_INTID_RANGE_SGI_START < GIC_INTID_SGI_RANGE_SIZE)
78#define GIC_IS_INTR_PPI(a_uIntId) ((a_uIntId) - GIC_INTID_RANGE_PPI_START < GIC_INTID_PPI_RANGE_SIZE)
79#define GIC_IS_INTR_SGI_OR_PPI(a_uIntId) ((a_uIntId) - GIC_INTID_RANGE_SGI_START < GIC_INTID_PPI_RANGE_SIZE)
80#define GIC_IS_INTR_SPI(a_uIntId) ((a_uIntId) - GIC_INTID_RANGE_SPI_START < GIC_INTID_SPI_RANGE_SIZE)
81#define GIC_IS_INTR_SPECIAL(a_uIntId) ((a_uIntId) - GIC_INTID_RANGE_SPECIAL_START < GIC_INTID_EXT_PPI_RANGE_SIZE)
82#define GIC_IS_INTR_EXT_PPI(a_uIntId) ((a_uIntId) - GIC_INTID_RANGE_EXT_PPI_START < GIC_INTID_EXT_PPI_RANGE_SIZE)
83#define GIC_IS_INTR_EXT_SPI(a_uIntId) ((a_uIntId) - GIC_INTID_RANGE_EXT_SPI_START < GIC_INTID_EXT_SPI_RANGE_SIZE)
84#define GIC_IS_INTR_LPI(a_pGicDev, a_uIntId) ((a_uIntId) - GIC_INTID_RANGE_LPI_START < RT_ELEMENTS((a_pGicDev)->abLpiConfig))
85
86
87#ifdef LOG_ENABLED
88/**
89 * Gets the description of a CPU interface register.
90 *
91 * @returns The description.
92 * @param u32Reg The CPU interface register offset.
93 */
94static const char *gicIccGetRegDescription(uint32_t u32Reg)
95{
96 switch (u32Reg)
97 {
98#define GIC_ICC_REG_CASE(a_Reg) case ARMV8_AARCH64_SYSREG_ ## a_Reg: return #a_Reg
99 GIC_ICC_REG_CASE(ICC_PMR_EL1);
100 GIC_ICC_REG_CASE(ICC_IAR0_EL1);
101 GIC_ICC_REG_CASE(ICC_EOIR0_EL1);
102 GIC_ICC_REG_CASE(ICC_HPPIR0_EL1);
103 GIC_ICC_REG_CASE(ICC_BPR0_EL1);
104 GIC_ICC_REG_CASE(ICC_AP0R0_EL1);
105 GIC_ICC_REG_CASE(ICC_AP0R1_EL1);
106 GIC_ICC_REG_CASE(ICC_AP0R2_EL1);
107 GIC_ICC_REG_CASE(ICC_AP0R3_EL1);
108 GIC_ICC_REG_CASE(ICC_AP1R0_EL1);
109 GIC_ICC_REG_CASE(ICC_AP1R1_EL1);
110 GIC_ICC_REG_CASE(ICC_AP1R2_EL1);
111 GIC_ICC_REG_CASE(ICC_AP1R3_EL1);
112 GIC_ICC_REG_CASE(ICC_DIR_EL1);
113 GIC_ICC_REG_CASE(ICC_RPR_EL1);
114 GIC_ICC_REG_CASE(ICC_SGI1R_EL1);
115 GIC_ICC_REG_CASE(ICC_ASGI1R_EL1);
116 GIC_ICC_REG_CASE(ICC_SGI0R_EL1);
117 GIC_ICC_REG_CASE(ICC_IAR1_EL1);
118 GIC_ICC_REG_CASE(ICC_EOIR1_EL1);
119 GIC_ICC_REG_CASE(ICC_HPPIR1_EL1);
120 GIC_ICC_REG_CASE(ICC_BPR1_EL1);
121 GIC_ICC_REG_CASE(ICC_CTLR_EL1);
122 GIC_ICC_REG_CASE(ICC_SRE_EL1);
123 GIC_ICC_REG_CASE(ICC_IGRPEN0_EL1);
124 GIC_ICC_REG_CASE(ICC_IGRPEN1_EL1);
125#undef GIC_ICC_REG_CASE
126 default:
127 return "<UNKNOWN>";
128 }
129}
130
131
132/**
133 * Gets the description of a distributor register given its register offset.
134 *
135 * @returns The register description.
136 * @param offReg The distributor register offset.
137 */
138static const char *gicDistGetRegDescription(uint16_t offReg)
139{
140 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRn_OFF_START, GIC_DIST_REG_IGROUPRn_RANGE_SIZE)) return "GICD_IGROUPRn";
141 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRnE_OFF_START, GIC_DIST_REG_IGROUPRnE_RANGE_SIZE)) return "GICD_IGROUPRnE";
142 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERn_OFF_START, GIC_DIST_REG_IROUTERn_RANGE_SIZE)) return "GICD_IROUTERn";
143 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERnE_OFF_START, GIC_DIST_REG_IROUTERnE_RANGE_SIZE)) return "GICD_IROUTERnE";
144 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERn_OFF_START, GIC_DIST_REG_ISENABLERn_RANGE_SIZE)) return "GICD_ISENABLERn";
145 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERnE_OFF_START, GIC_DIST_REG_ISENABLERnE_RANGE_SIZE)) return "GICD_ISENABLERnE";
146 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERn_OFF_START, GIC_DIST_REG_ICENABLERn_RANGE_SIZE)) return "GICD_ICENABLERn";
147 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERnE_OFF_START, GIC_DIST_REG_ICENABLERnE_RANGE_SIZE)) return "GICD_ICENABLERnE";
148 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERn_OFF_START, GIC_DIST_REG_ISACTIVERn_RANGE_SIZE)) return "GICD_ISACTIVERn";
149 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERnE_OFF_START, GIC_DIST_REG_ISACTIVERnE_RANGE_SIZE)) return "GICD_ISACTIVERnE";
150 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERn_OFF_START, GIC_DIST_REG_ICACTIVERn_RANGE_SIZE)) return "GICD_ICACTIVERn";
151 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERnE_OFF_START, GIC_DIST_REG_ICACTIVERnE_RANGE_SIZE)) return "GICD_ICACTIVERnE";
152 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRn_OFF_START, GIC_DIST_REG_IPRIORITYRn_RANGE_SIZE)) return "GICD_IPRIORITYRn";
153 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRnE_OFF_START, GIC_DIST_REG_IPRIORITYRnE_RANGE_SIZE)) return "GICD_IPRIORITYRnE";
154 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRn_OFF_START, GIC_DIST_REG_ISPENDRn_RANGE_SIZE)) return "GICD_ISPENDRn";
155 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRnE_OFF_START, GIC_DIST_REG_ISPENDRnE_RANGE_SIZE)) return "GICD_ISPENDRnE";
156 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRn_OFF_START, GIC_DIST_REG_ICPENDRn_RANGE_SIZE)) return "GICD_ICPENDRn";
157 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRnE_OFF_START, GIC_DIST_REG_ICPENDRnE_RANGE_SIZE)) return "GICD_ICPENDRnE";
158 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRn_OFF_START, GIC_DIST_REG_ICFGRn_RANGE_SIZE)) return "GICD_ICFGRn";
159 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRnE_OFF_START, GIC_DIST_REG_ICFGRnE_RANGE_SIZE)) return "GICD_ICFGRnE";
160 switch (offReg)
161 {
162 case GIC_DIST_REG_CTLR_OFF: return "GICD_CTLR";
163 case GIC_DIST_REG_TYPER_OFF: return "GICD_TYPER";
164 case GIC_DIST_REG_STATUSR_OFF: return "GICD_STATUSR";
165 case GIC_DIST_REG_ITARGETSRn_OFF_START: return "GICD_ITARGETSRn";
166 case GIC_DIST_REG_IGRPMODRn_OFF_START: return "GICD_IGRPMODRn";
167 case GIC_DIST_REG_NSACRn_OFF_START: return "GICD_NSACRn";
168 case GIC_DIST_REG_SGIR_OFF: return "GICD_SGIR";
169 case GIC_DIST_REG_CPENDSGIRn_OFF_START: return "GICD_CPENDSGIRn";
170 case GIC_DIST_REG_SPENDSGIRn_OFF_START: return "GICD_SPENDSGIRn";
171 case GIC_DIST_REG_INMIn_OFF_START: return "GICD_INMIn";
172 case GIC_DIST_REG_PIDR2_OFF: return "GICD_PIDR2";
173 case GIC_DIST_REG_IIDR_OFF: return "GICD_IIDR";
174 case GIC_DIST_REG_TYPER2_OFF: return "GICD_TYPER2";
175 default:
176 return "<UNKNOWN>";
177 }
178}
179#endif /* LOG_ENABLED */
180
181
182/**
183 * Gets the description of a redistributor register given its register offset.
184 *
185 * @returns The register description.
186 * @param offReg The redistributor register offset.
187 */
188static const char *gicReDistGetRegDescription(uint16_t offReg)
189{
190 switch (offReg)
191 {
192 case GIC_REDIST_REG_CTLR_OFF: return "GICR_CTLR";
193 case GIC_REDIST_REG_IIDR_OFF: return "GICR_IIDR";
194 case GIC_REDIST_REG_TYPER_OFF: return "GICR_TYPER";
195 case GIC_REDIST_REG_TYPER_AFFINITY_OFF: return "GICR_TYPER_AFF";
196 case GIC_REDIST_REG_STATUSR_OFF: return "GICR_STATUSR";
197 case GIC_REDIST_REG_WAKER_OFF: return "GICR_WAKER";
198 case GIC_REDIST_REG_MPAMIDR_OFF: return "GICR_MPAMIDR";
199 case GIC_REDIST_REG_PARTIDR_OFF: return "GICR_PARTIDR";
200 case GIC_REDIST_REG_SETLPIR_OFF: return "GICR_SETLPIR";
201 case GIC_REDIST_REG_CLRLPIR_OFF: return "GICR_CLRLPIR";
202 case GIC_REDIST_REG_PROPBASER_OFF: return "GICR_PROPBASER";
203 case GIC_REDIST_REG_PENDBASER_OFF: return "GICR_PENDBASER";
204 case GIC_REDIST_REG_INVLPIR_OFF: return "GICR_INVLPIR";
205 case GIC_REDIST_REG_INVALLR_OFF: return "GICR_INVALLR";
206 case GIC_REDIST_REG_SYNCR_OFF: return "GICR_SYNCR";
207 case GIC_REDIST_REG_PIDR2_OFF: return "GICR_PIDR2";
208 default:
209 return "<UNKNOWN>";
210 }
211}
212
213
214/**
215 * Gets the description of an SGI/PPI redistributor register given its register
216 * offset.
217 *
218 * @returns The register description.
219 * @param offReg The redistributor register offset.
220 */
221static const char *gicReDistGetSgiPpiRegDescription(uint16_t offReg)
222{
223 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF, GIC_REDIST_SGI_PPI_REG_IGROUPRnE_RANGE_SIZE)) return "GICR_IGROUPRn";
224 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ISENABLERnE_RANGE_SIZE)) return "GICR_ISENABLERn";
225 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ICENABLERnE_RANGE_SIZE)) return "GICR_ICENABLERn";
226 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ISACTIVERnE_RANGE_SIZE)) return "GICR_ISACTIVERn";
227 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ICACTIVERnE_RANGE_SIZE)) return "GICR_ICACTIVERn";
228 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ISPENDRnE_RANGE_SIZE)) return "GICR_ISPENDRn";
229 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ICPENDRnE_RANGE_SIZE)) return "GICR_ICPENDRn";
230 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START, GIC_REDIST_SGI_PPI_REG_IPRIORITYRnE_RANGE_SIZE)) return "GICR_IPRIORITYRn";
231 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF, GIC_REDIST_SGI_PPI_REG_ICFGRnE_RANGE_SIZE)) return "GICR_ICFGRn";
232 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_INMIR0_OFF, GIC_REDIST_SGI_PPI_REG_INMIRnE_RANGE_SIZE)) return "GICR_INMIRn";
233 switch (offReg)
234 {
235 case GIC_REDIST_SGI_PPI_REG_NSACR_OFF: return "GICR_NSACR";
236 case GIC_REDIST_SGI_PPI_REG_IGRPMODR0_OFF: return "GICR_IGRPMODR0";
237 case GIC_REDIST_SGI_PPI_REG_IGRPMODR1E_OFF: return "GICR_IGRPMODR1E";
238 case GIC_REDIST_SGI_PPI_REG_IGRPMODR2E_OFF: return "GICR_IGRPMODR2E";
239 default:
240 return "<UNKNOWN>";
241 }
242}
243
244
245/**
246 * Gets the interrupt ID given a distributor interrupt index.
247 *
248 * @returns The interrupt ID.
249 * @param idxIntr The distributor interrupt index.
250 * @remarks A distributor interrupt is an interrupt type that belongs to the
251 * distributor (e.g. SPIs, extended SPIs).
252 */
253DECLHIDDEN(uint16_t) gicDistGetIntIdFromIndex(uint16_t idxIntr)
254{
255 /*
256 * Distributor interrupts bits to interrupt ID mapping:
257 * +--------------------------------------------------------+
258 * | Range (incl) | SGI | PPI | SPI | Ext SPI |
259 * |--------------+--------+--------+----------+------------|
260 * | Bit | 0..15 | 16..31 | 32..1023 | 1024..2047 |
261 * | Int Id | 0..15 | 16..31 | 32..1023 | 4096..5119 |
262 * +--------------------------------------------------------+
263 */
264 uint16_t uIntId;
265 /* SGIs, PPIs, SPIs and specials. */
266 if (idxIntr < 1024)
267 uIntId = idxIntr;
268 /* Extended SPIs. */
269 else if (idxIntr < 2048)
270 uIntId = GIC_INTID_RANGE_EXT_SPI_START + idxIntr - 1024;
271 else
272 {
273 uIntId = 0;
274 AssertReleaseMsgFailed(("idxIntr=%u\n", idxIntr));
275 }
276 Assert( GIC_IS_INTR_SGI_OR_PPI(uIntId)
277 || GIC_IS_INTR_SPI(uIntId)
278 || GIC_IS_INTR_SPECIAL(uIntId)
279 || GIC_IS_INTR_EXT_SPI(uIntId));
280 return uIntId;
281}
282
283
284/**
285 * Gets the distributor interrupt index given an interrupt ID.
286 *
287 * @returns The distributor interrupt index.
288 * @param uIntId The interrupt ID.
289 * @remarks A distributor interrupt is an interrupt type that belongs to the
290 * distributor (e.g. SPIs, extended SPIs).
291 */
292static uint16_t gicDistGetIndexFromIntId(uint16_t uIntId)
293{
294 uint16_t idxIntr;
295 /* SGIs, PPIs, SPIs and specials. */
296 if (uIntId <= GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT)
297 idxIntr = uIntId;
298 /* Extended SPIs. */
299 else if (uIntId - GIC_INTID_RANGE_EXT_SPI_START < GIC_INTID_EXT_SPI_RANGE_SIZE)
300 idxIntr = 1024 + uIntId - GIC_INTID_RANGE_EXT_SPI_START;
301 else
302 {
303 idxIntr = 0;
304 AssertReleaseMsgFailed(("uIntId=%u\n", uIntId));
305 }
306 Assert(idxIntr < sizeof(GICDEV::bmIntrPending) * 8);
307 return idxIntr;
308}
309
310
311/**
312 * Gets the interrupt ID given a redistributor interrupt index.
313 *
314 * @returns The interrupt ID.
315 * @param idxIntr The redistributor interrupt index.
316 * @remarks A redistributor interrupt is an interrupt type that belongs to the
317 * redistributor (e.g. SGIs, PPIs, extended PPIs).
318 */
319DECLHIDDEN(uint16_t) gicReDistGetIntIdFromIndex(uint16_t idxIntr)
320{
321 /*
322 * Redistributor interrupts bits to interrupt ID mapping:
323 * +---------------------------------------------+
324 * | Range (incl) | SGI | PPI | Ext PPI |
325 * +---------------------------------------------+
326 * | Bit | 0..15 | 16..31 | 32..95 |
327 * | Int Id | 0..15 | 16..31 | 1056..1119 |
328 * +---------------------------------------------+
329 */
330 uint16_t uIntId;
331 /* SGIs and PPIs. */
332 if (idxIntr < 32)
333 uIntId = idxIntr;
334 /* Extended PPIs. */
335 else if (idxIntr < 96)
336 uIntId = GIC_INTID_RANGE_EXT_PPI_START + idxIntr - 32;
337 else
338 {
339 uIntId = 0;
340 AssertReleaseMsgFailed(("idxIntr=%u\n", idxIntr));
341 }
342 Assert(GIC_IS_INTR_SGI_OR_PPI(uIntId) || GIC_IS_INTR_EXT_PPI(uIntId));
343 return uIntId;
344}
345
346
347/**
348 * Gets the redistributor interrupt index given an interrupt ID.
349 *
350 * @returns The redistributor interrupt index.
351 * @param uIntId The interrupt ID.
352 * @remarks A redistributor interrupt is an interrupt type that belongs to the
353 * redistributor (e.g. SGIs, PPIs, extended PPIs).
354 */
355static uint16_t gicReDistGetIndexFromIntId(uint16_t uIntId)
356{
357 /* SGIs and PPIs. */
358 uint16_t idxIntr;
359 if (uIntId <= GIC_INTID_RANGE_PPI_LAST)
360 idxIntr = uIntId;
361 /* Extended PPIs. */
362 else if (uIntId - GIC_INTID_RANGE_EXT_PPI_START < GIC_INTID_EXT_PPI_RANGE_SIZE)
363 idxIntr = 32 + uIntId - GIC_INTID_RANGE_EXT_PPI_START;
364 else
365 {
366 idxIntr = 0;
367 AssertReleaseMsgFailed(("uIntId=%u\n", uIntId));
368 }
369 Assert(idxIntr < sizeof(GICCPU::bmIntrPending) * 8);
370 return idxIntr;
371}
372
373
374/**
375 * Sets the interrupt pending force-flag and pokes the EMT if required.
376 *
377 * @param pVCpu The cross context virtual CPU structure.
378 * @param fIrq Flag whether to assert the IRQ line or leave it alone.
379 * @param fFiq Flag whether to assert the FIQ line or leave it alone.
380 */
381static void gicSetInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
382{
383 Assert(fIrq || fFiq);
384 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n", pVCpu, pVCpu->idCpu, fIrq, fFiq));
385
386#ifdef IN_RING3
387 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
388 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
389#endif
390
391 if (fIrq)
392 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ);
393 if (fFiq)
394 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ);
395
396 /*
397 * We need to wake up the target CPU if we're not on EMT.
398 */
399 /** @todo We could just use RTThreadNativeSelf() here, couldn't we? */
400#if defined(IN_RING0)
401# error "Implement me!"
402#elif defined(IN_RING3)
403 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
404 VMCPUID idCpu = pVCpu->idCpu;
405 if (VMMGetCpuId(pVM) != idCpu)
406 {
407 Log7Func(("idCpu=%u enmState=%d\n", idCpu, pVCpu->enmState));
408 VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
409 }
410#endif
411}
412
413
414/**
415 * Clears the interrupt pending force-flag.
416 *
417 * @param pVCpu The cross context virtual CPU structure.
418 * @param fIrq Flag whether to clear the IRQ flag.
419 * @param fFiq Flag whether to clear the FIQ flag.
420 */
421DECLINLINE(void) gicClearInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
422{
423 Assert(fIrq || fFiq);
424 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n", pVCpu, pVCpu->idCpu, fIrq, fFiq));
425
426#ifdef IN_RING3
427 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
428 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
429#endif
430
431 if (fIrq)
432 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_IRQ);
433 if (fFiq)
434 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_FIQ);
435}
436
437
438/**
439 * Updates the interrupt force-flag.
440 *
441 * @param pVCpu The cross context virtual CPU structure.
442 * @param fIrq Flag whether to set (true) or clear (false) the IRQ flag.
443 * @param fFiq Flag whether to set (true) or clear (false) the FIQ flag.
444 */
445DECLINLINE(void) gicUpdateInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
446{
447 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n", pVCpu, pVCpu->idCpu, fIrq, fFiq));
448
449 if (fIrq || fFiq)
450 gicSetInterruptFF(pVCpu, fIrq, fFiq);
451
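    /* Any line that is not being asserted gets its force-flag cleared. */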
452 if (!fIrq || !fFiq)
453 gicClearInterruptFF(pVCpu, !fIrq, !fFiq);
454}
455
456
457/**
458 * Gets whether the redistributor has pending interrupts with sufficient priority to
459 * be signalled to the PE.
460 *
461 * @param pGicCpu The GIC redistributor and CPU interface state.
462 * @param pfIrq Where to store whether IRQs can be signalled.
463 * @param pfFiq Where to store whether FIQs can be signalled.
464 */
465static void gicReDistHasIrqPending(PCGICCPU pGicCpu, bool *pfIrq, bool *pfFiq)
466{
467 bool const fIsGroup1Enabled = pGicCpu->fIntrGroup1Enabled;
468 bool const fIsGroup0Enabled = pGicCpu->fIntrGroup0Enabled;
469 LogFlowFunc(("fIsGroup0Enabled=%RTbool fIsGroup1Enabled=%RTbool\n", fIsGroup0Enabled, fIsGroup1Enabled));
470
471# if 1
472 uint32_t bmIntrs[3];
473 for (uint8_t i = 0; i < RT_ELEMENTS(bmIntrs); i++)
474 {
475 /* Collect interrupts that are pending, enabled and inactive. */
476 bmIntrs[i] = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
477
478 /* Discard interrupts if the group they belong to is disabled. */
479 if (!fIsGroup1Enabled)
480 bmIntrs[i] &= ~pGicCpu->bmIntrGroup[i];
481 if (!fIsGroup0Enabled)
482 bmIntrs[i] &= pGicCpu->bmIntrGroup[i];
483 }
484
485 uint32_t const cIntrs = sizeof(bmIntrs) * 8;
486 int32_t idxIntr = ASMBitFirstSet(&bmIntrs[0], cIntrs);
487 AssertCompile(!(cIntrs % 32));
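    /* ASMBitFirstSet returns the index of the first set bit, or -1 if no bit is set. */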
488 if (idxIntr >= 0)
489 {
490 /* Only allow interrupts with higher priority than the current configured and running one. */
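        /* Note: lower numerical priority values mean higher priority on the GIC. */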
491 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
492 do
493 {
494 Assert((uint32_t)idxIntr < RT_ELEMENTS(pGicCpu->abIntrPriority));
495 if (pGicCpu->abIntrPriority[idxIntr] < bPriority)
496 {
497 bool const fInGroup1 = ASMBitTest(&pGicCpu->bmIntrGroup[0], idxIntr);
498 bool const fInGroup0 = !fInGroup1;
499 *pfIrq = fInGroup1 && fIsGroup1Enabled;
500 *pfFiq = fInGroup0 && fIsGroup0Enabled;
501 return;
502 }
503 idxIntr = ASMBitNextSet(&bmIntrs[0], cIntrs, idxIntr);
504 } while (idxIntr != -1);
505 }
506#else /** @todo Measure and pick the faster version. */
507 /* Only allow interrupts with higher priority than the current configured and running one. */
508 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
509
510 for (uint8_t i = 0; i < RT_ELEMENTS(pGicCpu->bmIntrPending); i++)
511 {
512 /* Collect interrupts that are pending, enabled and inactive. */
513 uint32_t bmIntr = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
514
515 /* Discard interrupts if the group they belong to is disabled. */
516 if (!fIsGroup1Enabled)
517 bmIntr &= ~pGicCpu->bmIntrGroup[i];
518 if (!fIsGroup0Enabled)
519 bmIntr &= pGicCpu->bmIntrGroup[i];
520
521 /* If the interrupt is higher priority than the running interrupt, return whether to signal an IRQ, FIQ or neither. */
522 uint16_t const idxPending = ASMBitFirstSetU32(bmIntr);
523 if (idxPending > 0)
524 {
525 uint16_t const idxIntr = 32 * i + idxPending - 1;
526 AssertRelease(idxIntr < RT_ELEMENTS(pGicCpu->abIntrPriority));
527 if (pGicCpu->abIntrPriority[idxIntr] < bPriority)
528 {
529 AssertRelease(idxIntr < sizeof(pGicCpu->bmIntrGroup) * 8);
530 bool const fInGroup1 = ASMBitTest(&pGicCpu->bmIntrGroup[0], idxIntr);
531 bool const fInGroup0 = !fInGroup1;
532 *pfIrq = fInGroup1 && fIsGroup1Enabled;
533 *pfFiq = fInGroup0 && fIsGroup0Enabled;
534 return;
535 }
536 }
537 }
538#endif
539 *pfIrq = false;
540 *pfFiq = false;
541}
542
543
544/**
545 * Gets whether the distributor has pending interrupts with sufficient priority to
546 * be signalled to the PE.
547 *
548 * @param pGicDev The GIC distributor state.
549 * @param pVCpu The cross context virtual CPU structure.
550 * @param idCpu The ID of the virtual CPU.
551 * @param pfIrq Where to store whether IRQs can be signalled.
552 * @param pfFiq Where to store whether FIQs can be signalled.
553 */
554static void gicDistHasIrqPendingForVCpu(PCGICDEV pGicDev, PCVMCPUCC pVCpu, VMCPUID idCpu, bool *pfIrq, bool *pfFiq)
555{
556 bool const fIsGroup1Enabled = pGicDev->fIntrGroup1Enabled;
557 bool const fIsGroup0Enabled = pGicDev->fIntrGroup0Enabled;
558 LogFlowFunc(("fIsGroup1Enabled=%RTbool fIsGroup0Enabled=%RTbool\n", fIsGroup1Enabled, fIsGroup0Enabled));
559
560#if 1
561 uint32_t bmIntrs[64];
562 for (uint8_t i = 0; i < RT_ELEMENTS(bmIntrs); i++)
563 {
564 /* Collect interrupts that are pending, enabled and inactive. */
565 bmIntrs[i] = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
566
567 /* Discard interrupts if the group they belong to is disabled. */
568 if (!fIsGroup1Enabled)
569 bmIntrs[i] &= ~pGicDev->bmIntrGroup[i];
570 if (!fIsGroup0Enabled)
571 bmIntrs[i] &= pGicDev->bmIntrGroup[i];
572 }
573
574 /*
575 * The distributor's interrupt pending/enabled/active bitmaps have 2048 bits which map
576 * SGIs (16), PPIs (16), SPIs (988), reserved SPIs (4) and extended SPIs (1024).
577 * Of these, the first 32 bits corresponding to SGIs and PPIs are RAZ/WI when affinity
578 * routing is enabled (which is currently always the case in our implementation).
579 */
580 Assert(pGicDev->fAffRoutingEnabled);
581 Assert(bmIntrs[0] == 0);
582 uint32_t const cIntrs = sizeof(bmIntrs) * 8;
583 int32_t idxIntr = ASMBitFirstSet(&bmIntrs[0], cIntrs);
584 AssertCompile(!(cIntrs % 32));
585 if (idxIntr >= 0)
586 {
587 /* Only allow interrupts with higher priority than the current configured and running one. */
588 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
589 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
590 do
591 {
592 AssertCompile(RT_ELEMENTS(pGicDev->abIntrPriority) == RT_ELEMENTS(pGicDev->au32IntrRouting));
593 Assert((uint32_t)idxIntr < RT_ELEMENTS(pGicDev->abIntrPriority));
594 Assert(idxIntr < GIC_INTID_RANGE_SPECIAL_START || idxIntr > GIC_INTID_RANGE_SPECIAL_LAST);
595 if ( pGicDev->abIntrPriority[idxIntr] < bPriority
596 && pGicDev->au32IntrRouting[idxIntr] == idCpu)
597 {
598 bool const fInGroup1 = ASMBitTest(&pGicDev->bmIntrGroup[0], idxIntr);
599 bool const fInGroup0 = !fInGroup1;
600 *pfFiq = fInGroup0 && fIsGroup0Enabled;
601 *pfIrq = fInGroup1 && fIsGroup1Enabled;
602 return;
603 }
604 idxIntr = ASMBitNextSet(&bmIntrs[0], cIntrs, idxIntr);
605 } while (idxIntr != -1);
606 }
607#else /** @todo Measure and pick the faster version. */
608 /* Only allow interrupts with higher priority than the running one. */
609 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
610 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
611
612 for (uint8_t i = 0; i < RT_ELEMENTS(pGicDev->bmIntrPending); i += 2)
613 {
614 /* Collect interrupts that are pending, enabled and inactive. */
615 uint32_t uLo = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
616 uint32_t uHi = (pGicDev->bmIntrPending[i + 1] & pGicDev->bmIntrEnabled[i + 1]) & ~pGicDev->bmIntrActive[i + 1];
617
618 /* Discard interrupts if the group they belong to is disabled. */
619 if (!fIsGroup1Enabled)
620 {
621 uLo &= ~pGicDev->bmIntrGroup[i];
622 uHi &= ~pGicDev->bmIntrGroup[i + 1];
623 }
624 if (!fIsGroup0Enabled)
625 {
626 uLo &= pGicDev->bmIntrGroup[i];
627 uHi &= pGicDev->bmIntrGroup[i + 1];
628 }
629
630 /* If the interrupt is higher priority than the running interrupt, return whether to signal an IRQ, FIQ or neither. */
631 Assert(pGicDev->fAffRoutingEnabled);
632 uint64_t const bmIntrPending = RT_MAKE_U64(uLo, uHi);
633 uint16_t const idxPending = ASMBitFirstSetU64(bmIntrPending);
634 if (idxPending > 0)
635 {
636 /*
637 * The distributor's interrupt pending/enabled/active bitmaps have 2048 bits which map
638 * SGIs (16), PPIs (16), SPIs (988), reserved SPIs (4) and extended SPIs (1024).
639 * Of these, the first 32 bits corresponding to SGIs and PPIs are RAZ/WI when affinity
640 * routing is enabled (which it always is in our implementation).
641 */
642 uint32_t const idxIntr = 64 * i + idxPending - 1;
643 AssertRelease(idxIntr < RT_ELEMENTS(pGicDev->abIntrPriority));
644 if ( pGicDev->abIntrPriority[idxIntr] < bPriority
645 && pGicDev->au32IntrRouting[idxIntr] == idCpu)
646 {
647 Assert(idxIntr > GIC_INTID_RANGE_PPI_LAST);
648 AssertRelease(idxIntr < sizeof(pGicDev->bmIntrGroup) * 8);
649 bool const fInGroup1 = ASMBitTest(&pGicDev->bmIntrGroup[0], idxIntr);
650 bool const fInGroup0 = !fInGroup1;
651 *pfFiq = fInGroup0 && fIsGroup0Enabled;
652 *pfIrq = fInGroup1 && fIsGroup1Enabled;
653 return;
654 }
655 }
656 }
657#endif
658 *pfIrq = false;
659 *pfFiq = false;
660}
661
662
663DECLHIDDEN(bool) gicDistIsLpiValid(PPDMDEVINS pDevIns, uint16_t uIntId)
664{
665 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
666 return GIC_IS_INTR_LPI(pGicDev, uIntId);
667}
668
669
670DECLHIDDEN(void) gicDistReadLpiConfigTableFromMem(PPDMDEVINS pDevIns)
671{
672 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
673 Assert(pGicDev->fEnableLpis);
674 LogFlowFunc(("\n"));
675
676 /* Check if the guest is disabling LPIs by setting the number of LPI INTID bits below the minimum required bits. */
677 uint8_t const cIdBits = RT_BF_GET(pGicDev->uLpiConfigBaseReg.u, GIC_BF_REDIST_REG_PROPBASER_ID_BITS) + 1;
678 if (cIdBits < GIC_LPI_ID_BITS_MIN)
679 {
680 RT_ZERO(pGicDev->abLpiConfig);
681 return;
682 }
683
684 /* Copy the LPI config table from guest memory to our internal cache. */
685 Assert(UINT32_C(2) << pGicDev->uMaxLpi == RT_ELEMENTS(pGicDev->abLpiConfig));
686 RTGCPHYS const GCPhysLpiConfigTable = pGicDev->uLpiConfigBaseReg.u & GIC_BF_REDIST_REG_PROPBASER_PHYS_ADDR_MASK;
687 uint32_t const cbLpiConfigTable = sizeof(pGicDev->abLpiConfig);
688
689 /** @todo Try releasing and re-acquiring the device critical section here.
690 * Probably safe, but haven't verified this... */
691 int const rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysLpiConfigTable, (void *)&pGicDev->abLpiConfig[0], cbLpiConfigTable);
692 AssertRC(rc);
693}
694
695
696static void gicReDistReadLpiPendingBitmapFromMem(PPDMDEVINS pDevIns, PVMCPU pVCpu)
697{
698 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
699 Assert(pGicDev->fEnableLpis);
700 LogFlowFunc(("\n"));
701
702 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
703 bool const fIsZeroed = RT_BF_GET(pGicDev->uLpiPendingBaseReg.u, GIC_BF_REDIST_REG_PENDBASER_PTZ);
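    /* PTZ (Pending Table Zero): the guest guarantees the pending table is all zeroes,
       so there is nothing to read from guest memory. */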
704 if (!fIsZeroed)
705 {
706 /* Copy the LPI pending bitmap from guest memory to our internal cache. */
707 RTGCPHYS const GCPhysLpiPendingBitmap = (pGicDev->uLpiPendingBaseReg.u & GIC_BF_REDIST_REG_PENDBASER_PHYS_ADDR_MASK)
708 + GIC_INTID_RANGE_LPI_START; /* Skip first 1KB (since LPI INTIDs start at 8192). */
709 uint32_t const cbLpiPendingBitmap = sizeof(pGicCpu->bmLpiPending);
710
711 /** @todo Try releasing and re-acquiring the device critical section here.
712 * Probably safe, but haven't verified this... */
713 int const rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysLpiPendingBitmap, (void *)&pGicCpu->bmLpiPending[0],
714 cbLpiPendingBitmap);
715 AssertRC(rc);
716 }
717 else
718 RT_ZERO(pGicCpu->bmLpiPending); /* Paranoia. */
719}
720
721
722/**
723 * Updates the internal IRQ state and sets or clears the appropriate force action
724 * flags.
725 *
726 * @returns Strict VBox status code.
727 * @param pGicDev The GIC distributor state.
728 * @param pVCpu The cross context virtual CPU structure.
729 */
730static VBOXSTRICTRC gicReDistUpdateIrqState(PCGICDEV pGicDev, PVMCPUCC pVCpu)
731{
732 LogFlowFunc(("\n"));
733 bool fIrq;
734 bool fFiq;
735 gicReDistHasIrqPending(VMCPU_TO_GICCPU(pVCpu), &fIrq, &fFiq);
736
737 bool fIrqDist;
738 bool fFiqDist;
739 gicDistHasIrqPendingForVCpu(pGicDev, pVCpu, pVCpu->idCpu, &fIrqDist, &fFiqDist);
740 LogFlowFunc(("fIrq=%RTbool fFiq=%RTbool fIrqDist=%RTbool fFiqDist=%RTbool\n", fIrq, fFiq, fIrqDist, fFiqDist));
741
742 fIrq |= fIrqDist;
743 fFiq |= fFiqDist;
744 gicUpdateInterruptFF(pVCpu, fIrq, fFiq);
745 return VINF_SUCCESS;
746}
747
748
749/**
750 * Updates the internal IRQ state of the distributor and sets or clears the appropriate force action flags.
751 *
752 * @returns Strict VBox status code.
753 * @param pVM The cross context VM state.
754 * @param pGicDev The GIC distributor state.
755 */
756static VBOXSTRICTRC gicDistUpdateIrqState(PCVMCC pVM, PCGICDEV pGicDev)
757{
758 LogFlowFunc(("\n"));
759 for (uint32_t i = 0; i < pVM->cCpus; i++)
760 {
761 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[i];
762 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
763
764 bool fIrq, fFiq;
765 gicReDistHasIrqPending(pGicCpu, &fIrq, &fFiq);
766
767 bool fIrqDist, fFiqDist;
768 gicDistHasIrqPendingForVCpu(pGicDev, pVCpu, i, &fIrqDist, &fFiqDist);
769 fIrq |= fIrqDist;
770 fFiq |= fFiqDist;
771
772 gicUpdateInterruptFF(pVCpu, fIrq, fFiq);
773 }
774 return VINF_SUCCESS;
775}
776
777
778/**
779 * Reads the distributor's interrupt routing register (GICD_IROUTER).
780 *
781 * @returns Strict VBox status code.
782 * @param pGicDev The GIC distributor state.
783 * @param idxReg The index of the register in the GICD_IROUTER range.
784 * @param puValue Where to store the register's value.
785 */
786static VBOXSTRICTRC gicDistReadIntrRoutingReg(PCGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
787{
788 /* When affinity routing is disabled, reads return 0. */
789 Assert(pGicDev->fAffRoutingEnabled);
790
791 /* Hardware does not map the first 32 registers (corresponding to SGIs and PPIs). */
792 idxReg += GIC_INTID_RANGE_SPI_START;
793 AssertReturn(idxReg < RT_ELEMENTS(pGicDev->au32IntrRouting), VERR_BUFFER_OVERFLOW);
794 Assert(idxReg < sizeof(pGicDev->bmIntrRoutingMode) * 8);
795 if (!(idxReg % 2))
796 {
797 /* Lower 32-bits. */
798 uint8_t const fIrm = ASMBitTest(&pGicDev->bmIntrRoutingMode[0], idxReg);
799 *puValue = GIC_DIST_REG_IROUTERn_SET(fIrm, pGicDev->au32IntrRouting[idxReg]);
800 }
801 else
802 {
803 /* Upper 32-bits. */
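        /* Aff3 is packed into bits 31:24 of the 32-bit routing entry; report it in the low byte of the upper register half. */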
804 *puValue = pGicDev->au32IntrRouting[idxReg] >> 24;
805 }
806
807 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
808 return VINF_SUCCESS;
809}
810
811
812/**
813 * Writes the distributor's interrupt routing register (GICD_IROUTER).
814 *
815 * @returns Strict VBox status code.
816 * @param pGicDev The GIC distributor state.
817 * @param idxReg The index of the register in the GICD_IROUTER range.
818 * @param uValue The value to write to the register.
819 */
820static VBOXSTRICTRC gicDistWriteIntrRoutingReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
821{
822 /* When affinity routing is disabled, writes are ignored. */
823 Assert(pGicDev->fAffRoutingEnabled);
824
825 AssertMsgReturn(idxReg < RT_ELEMENTS(pGicDev->au32IntrRouting), ("idxReg=%u\n", idxReg), VERR_BUFFER_OVERFLOW);
826 Assert(idxReg < sizeof(pGicDev->bmIntrRoutingMode) * 8);
827 if (!(idxReg % 2))
828 {
829 /* Lower 32-bits. */
830 bool const fIrm = GIC_DIST_REG_IROUTERn_IRM_GET(uValue);
831 if (fIrm)
832 ASMBitSet(&pGicDev->bmIntrRoutingMode[0], idxReg);
833 else
834 ASMBitClear(&pGicDev->bmIntrRoutingMode[0], idxReg);
835 uint32_t const fAff3 = pGicDev->au32IntrRouting[idxReg] & 0xff000000;
836 pGicDev->au32IntrRouting[idxReg] = fAff3 | (uValue & 0x00ffffff);
837 }
838 else
839 {
840 /* Upper 32-bits. */
841 uint32_t const fAffOthers = pGicDev->au32IntrRouting[idxReg] & 0x00ffffff;
842 pGicDev->au32IntrRouting[idxReg] = (uValue << 24) | fAffOthers;
843 }
844
845 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->au32IntrRouting[idxReg]));
846 return VINF_SUCCESS;
847}
848
849
850/**
851 * Reads the distributor's interrupt (set/clear) enable register (GICD_ISENABLER and
852 * GICD_ICENABLER).
853 *
854 * @returns Strict VBox status code.
855 * @param pGicDev The GIC distributor state.
856 * @param idxReg The index of the register in the GICD_ISENABLER and
857 * GICD_ICENABLER range.
858 * @param puValue Where to store the register's value.
859 */
860static VBOXSTRICTRC gicDistReadIntrEnableReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
861{
862 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrEnabled));
863 *puValue = pGicDev->bmIntrEnabled[idxReg];
864 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrEnabled[idxReg]));
865 return VINF_SUCCESS;
866}
867
868
869/**
870 * Writes the distributor's interrupt set-enable register (GICD_ISENABLER).
871 *
872 * @returns Strict VBox status code.
873 * @param pVM The cross context VM structure.
874 * @param pGicDev The GIC distributor state.
875 * @param idxReg The index of the register in the GICD_ISENABLER range.
876 * @param uValue The value to write to the register.
877 */
878static VBOXSTRICTRC gicDistWriteIntrSetEnableReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
879{
880 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
881 Assert(pGicDev->fAffRoutingEnabled);
882 if (idxReg > 0)
883 {
884 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrEnabled));
885 pGicDev->bmIntrEnabled[idxReg] |= uValue;
886 return gicDistUpdateIrqState(pVM, pGicDev);
887 }
888 else
889 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
890 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrEnabled[idxReg]));
891 return VINF_SUCCESS;
892}
893
894
895/**
896 * Writes the distributor's interrupt clear-enable register (GICD_ICENABLER).
897 *
898 * @returns Strict VBox status code.
899 * @param pVM The cross context VM structure.
900 * @param pGicDev The GIC distributor state.
901 * @param idxReg The index of the register in the GICD_ICENABLER range.
902 * @param uValue The value to write to the register.
903 */
904static VBOXSTRICTRC gicDistWriteIntrClearEnableReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
905{
906 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
907 Assert(pGicDev->fAffRoutingEnabled);
908 if (idxReg > 0)
909 {
910 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrEnabled));
911 pGicDev->bmIntrEnabled[idxReg] &= ~uValue;
912 return gicDistUpdateIrqState(pVM, pGicDev);
913 }
914 else
915 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
916 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrEnabled[idxReg]));
917 return VINF_SUCCESS;
918}
919
920
921/**
922 * Reads the distributor's interrupt active register (GICD_ISACTIVER and
923 * GICD_ICACTIVER).
924 *
925 * @returns Strict VBox status code.
926 * @param pGicDev The GIC distributor state.
927 * @param idxReg The index of the register in the GICD_ISACTIVER and
928 * GICD_ICACTIVER range.
929 * @param puValue Where to store the register's value.
930 */
931static VBOXSTRICTRC gicDistReadIntrActiveReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
932{
933 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrActive));
934 *puValue = pGicDev->bmIntrActive[idxReg];
935 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrActive[idxReg]));
936 return VINF_SUCCESS;
937}
938
939
940/**
941 * Writes the distributor's interrupt set-active register (GICD_ISACTIVER).
942 *
943 * @returns Strict VBox status code.
944 * @param pVM The cross context VM structure.
945 * @param pGicDev The GIC distributor state.
946 * @param idxReg The index of the register in the GICD_ISACTIVER range.
947 * @param uValue The value to write to the register.
948 */
949static VBOXSTRICTRC gicDistWriteIntrSetActiveReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
950{
951 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
952 Assert(pGicDev->fAffRoutingEnabled);
953 if (idxReg > 0)
954 {
955 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrActive));
956 pGicDev->bmIntrActive[idxReg] |= uValue;
957 return gicDistUpdateIrqState(pVM, pGicDev);
958 }
959 else
960 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
961 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrActive[idxReg]));
962 return VINF_SUCCESS;
963}
964
965
966/**
967 * Writes the distributor's interrupt clear-active register (GICD_ICACTIVER).
968 *
969 * @returns Strict VBox status code.
970 * @param pVM The cross context VM structure.
971 * @param pGicDev The GIC distributor state.
972 * @param idxReg The index of the register in the GICD_ICACTIVER range.
973 * @param uValue The value to write to the register.
974 */
975static VBOXSTRICTRC gicDistWriteIntrClearActiveReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
976{
977 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
978 Assert(pGicDev->fAffRoutingEnabled);
979 if (idxReg > 0)
980 {
981 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrActive));
982 pGicDev->bmIntrActive[idxReg] &= ~uValue;
983 return gicDistUpdateIrqState(pVM, pGicDev);
984 }
985 else
986 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
987 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrActive[idxReg]));
988 return VINF_SUCCESS;
989}
990
991
992/**
993 * Reads the distributor's interrupt priority register (GICD_IPRIORITYR).
994 *
995 * @returns Strict VBox status code.
996 * @param pGicDev The GIC distributor state.
997 * @param idxReg The index of the register in the GICD_IPRIORITY range.
998 * @param puValue Where to store the register's value.
999 */
1000static VBOXSTRICTRC gicDistReadIntrPriorityReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
1001{
1002 /* When affinity routing is enabled, reads to registers 0..7 (pertaining to SGIs and PPIs) return 0. */
1003 Assert(pGicDev->fAffRoutingEnabled);
1004 Assert(idxReg < RT_ELEMENTS(pGicDev->abIntrPriority) / sizeof(uint32_t));
1005 Assert(idxReg != 255);
1006 if (idxReg > 7)
1007 {
1008 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
1009 AssertReturn(idxPriority <= RT_ELEMENTS(pGicDev->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
1010 AssertCompile(sizeof(*puValue) == sizeof(uint32_t));
1011 *puValue = *(uint32_t *)&pGicDev->abIntrPriority[idxPriority];
1012 }
1013 else
1014 {
1015 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
1016 *puValue = 0;
1017 }
1018 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
1019 return VINF_SUCCESS;
1020}
1021
1022
1023/**
1024 * Writes the distributor's interrupt priority register (GICD_IPRIORITYR).
1025 *
1026 * @returns Strict VBox status code.
1027 * @param pGicDev The GIC distributor state.
1028 * @param idxReg The index of the register in the GICD_IPRIORITY range.
1029 * @param uValue The value to write to the register.
1030 */
1031static VBOXSTRICTRC gicDistWriteIntrPriorityReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1032{
1033 /* When affinity routing is enabled, writes to registers 0..7 are ignored. */
1034 Assert(pGicDev->fAffRoutingEnabled);
1035 Assert(idxReg < RT_ELEMENTS(pGicDev->abIntrPriority) / sizeof(uint32_t));
1036 Assert(idxReg != 255);
1037 if (idxReg > 7)
1038 {
1039 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
1040 AssertReturn(idxPriority <= RT_ELEMENTS(pGicDev->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
1041 AssertCompile(sizeof(uValue) == sizeof(uint32_t));
1042 *(uint32_t *)&pGicDev->abIntrPriority[idxPriority] = uValue;
1043 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, *(uint32_t *)&pGicDev->abIntrPriority[idxPriority]));
1044 }
1045 else
1046 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1047 return VINF_SUCCESS;
1048}
1049
1050
1051/**
1052 * Reads the distributor's interrupt pending register (GICD_ISPENDR and
1053 * GICD_ICPENDR).
1054 *
1055 * @returns Strict VBox status code.
1056 * @param pGicDev The GIC distributor state.
1057 * @param idxReg The index of the register in the GICD_ISPENDR and
1058 * GICD_ICPENDR range.
1059 * @param puValue Where to store the register's value.
1060 */
1061static VBOXSTRICTRC gicDistReadIntrPendingReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
1062{
1063 /* When affinity routing is enabled, reads for SGIs and PPIs return 0. */
1064 Assert(pGicDev->fAffRoutingEnabled);
1065 if (idxReg > 0)
1066 {
1067 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrPending));
1068 *puValue = pGicDev->bmIntrPending[idxReg];
1069 }
1070 else
1071 {
1072 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
1073 *puValue = 0;
1074 }
1075 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrPending[idxReg]));
1076 return VINF_SUCCESS;
1077}
1078
1079
1080/**
1081 * Writes the distributor's interrupt set-pending register (GICD_ISPENDR).
1082 *
1083 * @returns Strict VBox status code.
1084 * @param pVM The cross context VM structure.
1085 * @param pGicDev The GIC distributor state.
1086 * @param idxReg The index of the register in the GICD_ISPENDR range.
1087 * @param uValue The value to write to the register.
1088 */
1089static VBOXSTRICTRC gicDistWriteIntrSetPendingReg(PVMCC pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1090{
1091 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
1092 Assert(pGicDev->fAffRoutingEnabled);
1093 if (idxReg > 0)
1094 {
1095 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrPending));
1096 pGicDev->bmIntrPending[idxReg] |= uValue;
1097 return gicDistUpdateIrqState(pVM, pGicDev);
1098 }
1099 else
1100 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1101 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrPending[idxReg]));
1102 return VINF_SUCCESS;
1103}
1104
1105
1106/**
1107 * Writes the distributor's interrupt clear-pending register (GICD_ICPENDR).
1108 *
1109 * @returns Strict VBox status code.
1110 * @param pVM The cross context VM structure.
1111 * @param pGicDev The GIC distributor state.
1112 * @param idxReg The index of the register in the GICD_ICPENDR range.
1113 * @param uValue The value to write to the register.
1114 */
1115static VBOXSTRICTRC gicDistWriteIntrClearPendingReg(PVMCC pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1116{
1117 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
1118 Assert(pGicDev->fAffRoutingEnabled);
1119 if (idxReg > 0)
1120 {
1121 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrPending));
1122 pGicDev->bmIntrPending[idxReg] &= ~uValue;
1123 return gicDistUpdateIrqState(pVM, pGicDev);
1124 }
1125 else
1126 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1127 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrPending[idxReg]));
1128 return VINF_SUCCESS;
1129}
1130
1131
1132/**
1133 * Reads the distributor's interrupt config register (GICD_ICFGR).
1134 *
1135 * @returns Strict VBox status code.
1136 * @param pGicDev The GIC distributor state.
1137 * @param idxReg The index of the register in the GICD_ICFGR range.
1138 * @param puValue Where to store the register's value.
1139 */
1140static VBOXSTRICTRC gicDistReadIntrConfigReg(PCGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
1141{
1142 /* When affinity routing is enabled, reads for SGIs and PPIs return 0. */
1143 Assert(pGicDev->fAffRoutingEnabled);
1144 if (idxReg >= 2)
1145 {
1146 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrConfig));
1147 *puValue = pGicDev->bmIntrConfig[idxReg];
1148 }
1149 else
1150 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
1151 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrConfig[idxReg]));
1152 return VINF_SUCCESS;
1153}
1154
1155
1156/**
1157 * Writes the distributor's interrupt config register (GICD_ICFGR).
1158 *
1159 * @returns Strict VBox status code.
1160 * @param pGicDev The GIC distributor state.
1161 * @param idxReg The index of the register in the GICD_ICFGR range.
1162 * @param uValue The value to write to the register.
1163 */
1164static VBOXSTRICTRC gicDistWriteIntrConfigReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1165{
1166 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
1167 Assert(pGicDev->fAffRoutingEnabled);
1168 if (idxReg >= 2)
1169 {
1170 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrConfig));
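        /* Only bit 1 of each 2-bit field is programmable (0 = level-sensitive, 1 = edge-triggered);
           bit 0 is reserved, hence the 0xaaaaaaaa mask. */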
1171 pGicDev->bmIntrConfig[idxReg] = uValue & 0xaaaaaaaa;
1172 }
1173 else
1174 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1175 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrConfig[idxReg]));
1176 return VINF_SUCCESS;
1177}
1178
1179
1180/**
1181 * Reads the distributor's interrupt group register (GICD_IGROUPR).
1182 *
1183 * @returns Strict VBox status code.
1184 * @param pGicDev The GIC distributor state.
1185 * @param idxReg The index of the register in the GICD_IGROUPR range.
1186 * @param puValue Where to store the register's value.
1187 */
1188static VBOXSTRICTRC gicDistReadIntrGroupReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
1189{
1190 /* When affinity routing is enabled, reads to SGIs and PPIs return 0. */
1191 Assert(pGicDev->fAffRoutingEnabled);
1192 if (idxReg > 0)
1193 {
1194 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrGroup));
1195 *puValue = pGicDev->bmIntrGroup[idxReg];
1196 }
1197 else
1198 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
1199 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
1200 return VINF_SUCCESS;
1201}
1202
1203
1204/**
1205 * Writes the distributor's interrupt group register (GICD_IGROUPR).
1206 *
1207 * @returns Strict VBox status code.
1208 * @param pVM The cross context VM structure.
1209 * @param pGicDev The GIC distributor state.
1210 * @param idxReg The index of the register in the GICD_IGROUPR range.
1211 * @param uValue The value to write to the register.
1212 */
1213static VBOXSTRICTRC gicDistWriteIntrGroupReg(PCVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1214{
1215 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
1216 Assert(pGicDev->fAffRoutingEnabled);
1217 if (idxReg > 0)
1218 {
1219 pGicDev->bmIntrGroup[idxReg] = uValue;
1220 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrGroup[idxReg]));
1221 }
1222 else
1223 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1224 return gicDistUpdateIrqState(pVM, pGicDev);
1225}
1226
1227
1228/**
1229 * Reads the redistributor's interrupt priority register (GICR_IPRIORITYR).
1230 *
1231 * @returns Strict VBox status code.
1232 * @param pGicDev The GIC distributor state.
1233 * @param pGicCpu The GIC redistributor and CPU interface state.
1234 * @param idxReg The index of the register in the GICR_IPRIORITY range.
1235 * @param puValue Where to store the register's value.
1236 */
1237static VBOXSTRICTRC gicReDistReadIntrPriorityReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1238{
1239 /* When affinity routing is disabled, reads return 0. */
1240 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1241 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
1242 AssertReturn(idxPriority <= RT_ELEMENTS(pGicCpu->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
1243 AssertCompile(sizeof(*puValue) == sizeof(uint32_t));
1244 *puValue = *(uint32_t *)&pGicCpu->abIntrPriority[idxPriority];
1245 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
1246 return VINF_SUCCESS;
1247}
1248
1249
1250/**
1251 * Writes the redistributor's interrupt priority register (GICR_IPRIORITYR).
1252 *
1253 * @returns Strict VBox status code.
1254 * @param pGicDev The GIC distributor state.
1255 * @param pVCpu The cross context virtual CPU structure.
1256 * @param idxReg The index of the register in the GICR_IPRIORITY range.
1257 * @param uValue The value to write to the register.
1258 */
1259static VBOXSTRICTRC gicReDistWriteIntrPriorityReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1260{
1261 /* When affinity routing is disabled, writes are ignored. */
1262 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1263 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1264 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
1265 AssertReturn(idxPriority <= RT_ELEMENTS(pGicCpu->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
1266 AssertCompile(sizeof(uValue) == sizeof(uint32_t));
1267 *(uint32_t *)&pGicCpu->abIntrPriority[idxPriority] = uValue;
1268 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, *(uint32_t *)&pGicCpu->abIntrPriority[idxPriority]));
1269 return VINF_SUCCESS;
1270}
1271
1272
1273/**
1274 * Reads the redistributor's interrupt pending register (GICR_ISPENDR and
1275 * GICR_ICPENDR).
1276 *
1277 * @returns Strict VBox status code.
1278 * @param pGicDev The GIC distributor state.
1279 * @param pGicCpu The GIC redistributor and CPU interface state.
1280 * @param idxReg The index of the register in the GICR_ISPENDR and
1281 * GICR_ICPENDR range.
1282 * @param puValue Where to store the register's value.
1283 */
1284static VBOXSTRICTRC gicReDistReadIntrPendingReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1285{
1286 /* When affinity routing is disabled, reads return 0. */
1287 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1288 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrPending));
1289 *puValue = pGicCpu->bmIntrPending[idxReg];
1290 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrPending[idxReg]));
1291 return VINF_SUCCESS;
1292}
1293
1294
1295/**
1296 * Writes the redistributor's interrupt set-pending register (GICR_ISPENDR).
1297 *
1298 * @returns Strict VBox status code.
1299 * @param pGicDev The GIC distributor state.
1300 * @param pVCpu The cross context virtual CPU structure.
1301 * @param idxReg The index of the register in the GICR_ISPENDR range.
1302 * @param uValue The value to write to the register.
1303 */
1304static VBOXSTRICTRC gicReDistWriteIntrSetPendingReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1305{
1306 /* When affinity routing is disabled, writes are ignored. */
1307 Assert(pGicDev->fAffRoutingEnabled);
1308 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1309 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrPending));
1310 pGicCpu->bmIntrPending[idxReg] |= uValue;
1311 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrPending[idxReg]));
1312 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1313}
1314
1315
1316/**
1317 * Writes the redistributor's interrupt clear-pending register (GICR_ICPENDR).
1318 *
1319 * @returns Strict VBox status code.
1320 * @param pGicDev The GIC distributor state.
1321 * @param pVCpu The cross context virtual CPU structure.
1322 * @param idxReg The index of the register in the GICR_ICPENDR range.
1323 * @param uValue The value to write to the register.
1324 */
1325static VBOXSTRICTRC gicReDistWriteIntrClearPendingReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1326{
1327 /* When affinity routing is disabled, writes are ignored. */
1328 Assert(pGicDev->fAffRoutingEnabled);
1329 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1330 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrPending));
1331 pGicCpu->bmIntrPending[idxReg] &= ~uValue;
1332 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrPending[idxReg]));
1333 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1334}
1335
1336
1337/**
1338 * Reads the redistributor's interrupt enable register (GICR_ISENABLER and
1339 * GICR_ICENABLER).
1340 *
1341 * @returns Strict VBox status code.
1342 * @param pGicDev The GIC distributor state.
1343 * @param pGicCpu The GIC redistributor and CPU interface state.
1344 * @param idxReg The index of the register in the GICR_ISENABLER and
1345 * GICR_ICENABLER range.
1346 * @param puValue Where to store the register's value.
1347 */
1348static VBOXSTRICTRC gicReDistReadIntrEnableReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1349{
1350 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1351 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrEnabled));
1352 *puValue = pGicCpu->bmIntrEnabled[idxReg];
1353 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrEnabled[idxReg]));
1354 return VINF_SUCCESS;
1355}
1356
1357
1358/**
1359 * Writes the redistributor's interrupt set-enable register (GICR_ISENABLER).
1360 *
1361 * @returns Strict VBox status code.
1362 * @param pGicDev The GIC distributor state.
1363 * @param pVCpu The cross context virtual CPU structure.
1364 * @param idxReg The index of the register in the GICR_ISENABLER range.
1365 * @param uValue The value to write to the register.
1366 */
1367static VBOXSTRICTRC gicReDistWriteIntrSetEnableReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1368{
1369 Assert(pGicDev->fAffRoutingEnabled);
1370 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1371 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrEnabled));
1372 pGicCpu->bmIntrEnabled[idxReg] |= uValue;
1373 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrEnabled[idxReg]));
1374 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1375}
1376
1377
1378/**
1379 * Writes the redistributor's interrupt clear-enable register (GICR_ICENABLER).
1380 *
1381 * @returns Strict VBox status code.
1382 * @param pGicDev The GIC distributor state.
1383 * @param pVCpu The cross context virtual CPU structure.
1384 * @param idxReg The index of the register in the GICR_ICENABLER range.
1385 * @param uValue The value to write to the register.
1386 */
1387static VBOXSTRICTRC gicReDistWriteIntrClearEnableReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1388{
1389 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1390 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrEnabled));
1391 pGicCpu->bmIntrEnabled[idxReg] &= ~uValue;
1392 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrEnabled[idxReg]));
1393 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1394}
1395
1396
1397/**
1398 * Reads the redistributor's interrupt active register (GICR_ISACTIVER and
1399 * GICR_ICACTIVER).
1400 *
1401 * @returns Strict VBox status code.
1402 * @param pGicCpu The GIC redistributor and CPU interface state.
1403 * @param idxReg The index of the register in the GICR_ISACTIVER and
1404 * GICR_ICACTIVER range.
1405 * @param puValue Where to store the register's value.
1406 */
1407static VBOXSTRICTRC gicReDistReadIntrActiveReg(PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1408{
1409 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrActive));
1410 *puValue = pGicCpu->bmIntrActive[idxReg];
1411 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrActive[idxReg]));
1412 return VINF_SUCCESS;
1413}
1414
1415
1416/**
1417 * Writes the redistributor's interrupt set-active register (GICR_ISACTIVER).
1418 *
1419 * @returns Strict VBox status code.
1420 * @param pGicDev The GIC distributor state.
1421 * @param pVCpu The cross context virtual CPU structure.
1422 * @param idxReg The index of the register in the GICR_ISACTIVER range.
1423 * @param uValue The value to write to the register.
1424 */
1425static VBOXSTRICTRC gicReDistWriteIntrSetActiveReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1426{
1427 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1428 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrActive));
1429 pGicCpu->bmIntrActive[idxReg] |= uValue;
1430 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrActive[idxReg]));
1431 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1432}
1433
1434
1435/**
1436 * Writes the redistributor's interrupt clear-active register (GICR_ICACTIVER).
1437 *
1438 * @returns Strict VBox status code.
1439 * @param pGicDev The GIC distributor state.
1440 * @param pVCpu The cross context virtual CPU structure.
1441 * @param idxReg The index of the register in the GICR_ICACTIVER range.
1442 * @param uValue The value to write to the register.
1443 */
1444static VBOXSTRICTRC gicReDistWriteIntrClearActiveReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1445{
1446 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1447 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrActive));
1448 pGicCpu->bmIntrActive[idxReg] &= ~uValue;
1449 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrActive[idxReg]));
1450 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1451}
1452
1453
1454/**
1455 * Reads the redistributor's interrupt config register (GICR_ICFGR).
1456 *
1457 * @returns Strict VBox status code.
1458 * @param pGicDev The GIC distributor state.
1459 * @param pGicCpu The GIC redistributor and CPU interface state.
1460 * @param idxReg The index of the register in the GICR_ICFGR range.
1461 * @param puValue Where to store the register's value.
1462 */
1463static VBOXSTRICTRC gicReDistReadIntrConfigReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1464{
1465 /* When affinity routing is disabled, reads return 0. */
1466 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1467 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrConfig));
1468 *puValue = pGicCpu->bmIntrConfig[idxReg];
1469 /* Ensure SGIs are read-only and remain configured as edge-triggered. */
1470 Assert(idxReg > 0 || *puValue == 0xaaaaaaaa);
1471 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
1472 return VINF_SUCCESS;
1473}
1474
1475
1476/**
1477 * Writes the redistributor's interrupt config register (GICR_ICFGR).
1478 *
1479 * @returns Strict VBox status code.
1480 * @param pGicDev The GIC distributor state.
1481 * @param pVCpu The cross context virtual CPU structure.
1482 * @param idxReg The index of the register in the GICR_ICFGR range.
1483 * @param uValue The value to write to the register.
1484 */
1485static VBOXSTRICTRC gicReDistWriteIntrConfigReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1486{
1487 /* When affinity routing is disabled, writes are ignored. */
1488 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1489 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1490 if (idxReg > 0)
1491 {
1492 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrConfig));
1493 pGicCpu->bmIntrConfig[idxReg] = uValue & 0xaaaaaaaa;
1494 }
1495 else
1496 {
1497        /* SGIs are always edge-triggered, so writes are ignored. Windows 11 (24H2) arm64 guests write these. */
1498 Assert(uValue == 0xaaaaaaaa);
1499 Assert(pGicCpu->bmIntrConfig[0] == uValue);
1500 }
1501 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrConfig[idxReg]));
1502 return VINF_SUCCESS;
1503}
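
/*
 * Worked example (illustrative): each GICR_ICFGR register holds 2 configuration bits per
 * interrupt, of which only the upper bit is meaningful here (0 = level-sensitive,
 * 1 = edge-triggered). A register value of 0xaaaaaaaa (binary 10 repeated 16 times) therefore
 * marks all 16 interrupts it covers as edge-triggered, which is why SGIs are pinned to that
 * value above and why the acknowledge path tests bit (2 * idxIntr + 1) of bmIntrConfig.
 */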
1504
1505
1506/**
1507 * Reads the redistributor's interrupt group register (GICR_IGROUPR).
1508 *
1509 * @returns Strict VBox status code.
1510 * @param pGicDev The GIC distributor state.
1511 * @param pGicCpu The GIC redistributor and CPU interface state.
1512 * @param idxReg The index of the register in the GICR_IGROUPR range.
1513 * @param puValue Where to store the register's value.
1514 */
1515static VBOXSTRICTRC gicReDistReadIntrGroupReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1516{
1517 /* When affinity routing is disabled, reads return 0. */
1518 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1519 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrGroup));
1520 *puValue = pGicCpu->bmIntrGroup[idxReg];
1521 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrGroup[idxReg]));
1522 return VINF_SUCCESS;
1523}
1524
1525
1526/**
1527 * Writes the redistributor's interrupt group register (GICR_IGROUPR).
1528 *
1529 * @returns Strict VBox status code.
1530 * @param pGicDev The GIC distributor state.
1531 * @param pVCpu The cross context virtual CPU structure.
1532 * @param idxReg The index of the register in the GICR_IGROUPR range.
1533 * @param uValue The value to write to the register.
1534 */
1535static VBOXSTRICTRC gicReDistWriteIntrGroupReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1536{
1537 /* When affinity routing is disabled, writes are ignored. */
1538 Assert(pGicDev->fAffRoutingEnabled);
1539 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1540 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrGroup));
1541 pGicCpu->bmIntrGroup[idxReg] = uValue;
1542 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrGroup[idxReg]));
1543 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1544}
1545
1546
1547/**
1548 * Gets the virtual CPUID given the affinity values.
1549 *
1550 * @returns The virtual CPUID.
1551 * @param idCpuInterface The virtual CPUID within the PE cluster (0..15).
1552 * @param uAff1 The affinity 1 value.
1553 * @param uAff2 The affinity 2 value.
1554 * @param uAff3 The affinity 3 value.
1555 */
1556DECL_FORCE_INLINE(VMCPUID) gicGetCpuIdFromAffinity(uint8_t idCpuInterface, uint8_t uAff1, uint8_t uAff2, uint8_t uAff3)
1557{
1558 AssertReturn(idCpuInterface < 16, 0);
1559 return (uAff3 * 1048576) + (uAff2 * 4096) + (uAff1 * 16) + idCpuInterface;
1560}
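
/*
 * Worked example (illustrative): with the linear 16-CPUs-per-Aff1-cluster layout used above
 * (16 VCPUs per Aff1 value, 4096 = 256 * 16 VCPUs per Aff2 value, 1048576 = 256 * 4096 VCPUs
 * per Aff3 value), the affinity tuple Aff3.Aff2.Aff1.CI = 0.1.2.3 selects
 * VCPU 0 * 1048576 + 1 * 4096 + 2 * 16 + 3 = 4131.
 */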
1561
1562
1563/**
1564 * Gets the highest priority pending interrupt that can be signalled to the PE.
1565 *
1566 * @returns The interrupt ID or GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT if no interrupt
1567 * is pending or not in a state to be signalled to the PE.
1568 * @param pGicDev The GIC distributor state.
1569 * @param pGicCpu The GIC redistributor and CPU interface state.
1570 * @param fGroup0 Whether to consider group 0 interrupts.
1571 * @param fGroup1 Whether to consider group 1 interrupts.
1572 * @param pidxIntr Where to store the distributor interrupt index for the
1573 * returned interrupt ID. UINT16_MAX if this function returns
1574 * GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT. Optional, can be
1575 * NULL.
1576 * @param pbPriority Where to store the priority of the returned interrupt ID.
1577 * GIC_IDLE_PRIORITY if this function returns
1578 * GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT.
1579 */
1580static uint16_t gicGetHighestPriorityPendingIntr(PCGICDEV pGicDev, PCGICCPU pGicCpu, bool fGroup0, bool fGroup1,
1581 uint16_t *pidxIntr, uint8_t *pbPriority)
1582{
1583#if 1
1584 uint16_t idxIntr = UINT16_MAX;
1585 uint16_t uIntId = GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1586 uint8_t uPriority = GIC_IDLE_PRIORITY;
1587
1588 /* Redistributor. */
1589 {
1590 uint32_t bmReDistIntrs[RT_ELEMENTS(pGicCpu->bmIntrPending)];
1591 AssertCompile(sizeof(pGicCpu->bmIntrPending) == sizeof(bmReDistIntrs));
1592 for (uint16_t i = 0; i < RT_ELEMENTS(bmReDistIntrs); i++)
1593 {
1594 /* Collect interrupts that are pending, enabled and inactive. */
1595 bmReDistIntrs[i] = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
1596 /* Discard interrupts if the group they belong to is disabled. */
1597 if (!fGroup1)
1598 bmReDistIntrs[i] &= ~pGicCpu->bmIntrGroup[i];
1599 if (!fGroup0)
1600 bmReDistIntrs[i] &= pGicCpu->bmIntrGroup[i];
1601 }
1602 /* Among the collected interrupts, pick the one with the highest, non-idle priority. */
1603 uint16_t idxHighest = UINT16_MAX;
1604 const void *pvIntrs = &bmReDistIntrs[0];
1605 uint32_t const cIntrs = sizeof(bmReDistIntrs) * 8; AssertCompile(!(cIntrs % 32));
1606 int16_t idxPending = ASMBitFirstSet(pvIntrs, cIntrs);
1607 if (idxPending >= 0)
1608 {
1609 do
1610 {
1611 if (pGicCpu->abIntrPriority[idxPending] < uPriority)
1612 {
1613 idxHighest = (uint16_t)idxPending;
1614 uPriority = pGicCpu->abIntrPriority[idxPending];
1615 }
1616 idxPending = ASMBitNextSet(pvIntrs, cIntrs, idxPending);
1617 } while (idxPending != -1);
1618 if (idxHighest != UINT16_MAX)
1619 {
1620 uIntId = gicReDistGetIntIdFromIndex(idxHighest);
1621 idxIntr = idxHighest;
1622 Assert( GIC_IS_INTR_SGI_OR_PPI(uIntId)
1623 || GIC_IS_INTR_EXT_PPI(uIntId));
1624 }
1625 }
1626 }
1627
1628 /* Distributor */
1629 {
1630 uint32_t bmDistIntrs[RT_ELEMENTS(pGicDev->bmIntrPending)];
1631 AssertCompile(sizeof(pGicDev->bmIntrPending) == sizeof(bmDistIntrs));
1632 for (uint16_t i = 0; i < RT_ELEMENTS(bmDistIntrs); i++)
1633 {
1634 /* Collect interrupts that are pending, enabled and inactive. */
1635 bmDistIntrs[i] = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
1636 /* Discard interrupts if the group they belong to is disabled. */
1637 if (!fGroup1)
1638 bmDistIntrs[i] &= ~pGicDev->bmIntrGroup[i];
1639 if (!fGroup0)
1640 bmDistIntrs[i] &= pGicDev->bmIntrGroup[i];
1641 }
1642 /* Among the collected interrupts, pick one with priority higher than what we picked from the redistributor. */
1643 {
1644 uint16_t idxHighest = UINT16_MAX;
1645 const void *pvIntrs = &bmDistIntrs[0];
1646 uint32_t const cIntrs = sizeof(bmDistIntrs) * 8; AssertCompile(!(cIntrs % 32));
1647 int16_t idxPending = ASMBitFirstSet(pvIntrs, cIntrs);
1648 if (idxPending >= 0)
1649 {
1650 do
1651 {
1652 if (pGicDev->abIntrPriority[idxPending] < uPriority)
1653 {
1654 idxHighest = (uint16_t)idxPending;
1655 uPriority = pGicDev->abIntrPriority[idxPending];
1656 }
1657 idxPending = ASMBitNextSet(pvIntrs, cIntrs, idxPending);
1658 } while (idxPending != -1);
1659 if (idxHighest != UINT16_MAX)
1660 {
1661 uIntId = gicDistGetIntIdFromIndex(idxHighest);
1662 idxIntr = idxHighest;
1663 Assert( GIC_IS_INTR_SPI(uIntId)
1664 || GIC_IS_INTR_EXT_SPI(uIntId));
1665 }
1666 }
1667 }
1668 }
1669#else /** @todo Measure and pick the faster version. */
1670 /*
1671 * Collect interrupts that are pending, enabled and inactive.
1672 * Discard interrupts if the group they belong to is disabled.
1673 * While collecting the interrupts, pick the one with the highest, non-idle priority.
1674 */
1675 uint16_t uIntId = GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1676 uint16_t idxIntr = UINT16_MAX;
1677 uint8_t uPriority = GIC_IDLE_PRIORITY;
1678
1679 /* Redistributor. */
1680 {
1681 uint16_t idxHighest = UINT16_MAX;
1682 for (uint16_t i = 0; i < RT_ELEMENTS(pGicCpu->bmIntrPending); i++)
1683 {
1684 uint32_t uIntrPending = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
1685 if (!fGroup1)
1686 uIntrPending &= ~pGicCpu->bmIntrGroup[i];
1687 if (!fGroup0)
1688 uIntrPending &= pGicCpu->bmIntrGroup[i];
1689
1690 uint16_t const idxPending = ASMBitFirstSetU32(uIntrPending);
1691 if (idxPending > 0)
1692 {
1693 uint32_t const idxPriority = 32 * i + idxPending - 1;
1694 Assert(idxPriority < RT_ELEMENTS(pGicCpu->abIntrPriority));
1695 if (pGicCpu->abIntrPriority[idxPriority] < uPriority)
1696 {
1697 idxHighest = idxPriority;
1698 uPriority = pGicCpu->abIntrPriority[idxPriority];
1699 }
1700 }
1701 }
1702 if (idxHighest != UINT16_MAX)
1703 {
1704 idxIntr = idxHighest;
1705 uIntId = gicReDistGetIntIdFromIndex(idxHighest);
1706 Assert( GIC_IS_INTR_SGI_OR_PPI(uIntId)
1707 || GIC_IS_INTR_EXT_PPI(uIntId));
1708 Assert(uPriority != GIC_IDLE_PRIORITY);
1709 }
1710 }
1711
1712 /* Distributor. */
1713 {
1714 uint16_t idxHighest = UINT16_MAX;
1715 for (uint16_t i = 0; i < RT_ELEMENTS(pGicDev->bmIntrPending); i += 2)
1716 {
1717 uint32_t uLo = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
1718 uint32_t uHi = (pGicDev->bmIntrPending[i + 1] & pGicDev->bmIntrEnabled[i + 1]) & ~pGicDev->bmIntrActive[i + 1];
1719 if (!fGroup1)
1720 {
1721 uLo &= ~pGicDev->bmIntrGroup[i];
1722 uHi &= ~pGicDev->bmIntrGroup[i + 1];
1723 }
1724 if (!fGroup0)
1725 {
1726 uLo &= pGicDev->bmIntrGroup[i];
1727 uHi &= pGicDev->bmIntrGroup[i + 1];
1728 }
1729
1730 uint64_t const uIntrPending = RT_MAKE_U64(uLo, uHi);
1731 uint16_t const idxPending = ASMBitFirstSetU64(uIntrPending);
1732 if (idxPending > 0)
1733 {
1734                uint32_t const idxPriority = 32 * i + idxPending - 1;
1735 if (pGicDev->abIntrPriority[idxPriority] < uPriority)
1736 {
1737 idxHighest = idxPriority;
1738 uPriority = pGicDev->abIntrPriority[idxPriority];
1739 }
1740 }
1741 }
1742 if (idxHighest != UINT16_MAX)
1743 {
1744 idxIntr = idxHighest;
1745 uIntId = gicDistGetIntIdFromIndex(idxHighest);
1746 Assert( GIC_IS_INTR_SPI(uIntId)
1747 || GIC_IS_INTR_EXT_SPI(uIntId));
1748 Assert(uPriority != GIC_IDLE_PRIORITY);
1749 }
1750 }
1751#endif
1752
1753 /* Ensure that if no interrupt is pending, the idle priority is returned. */
1754 Assert(uIntId != GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT || uPriority == GIC_IDLE_PRIORITY);
1755 if (pbPriority)
1756 *pbPriority = uPriority;
1757 if (pidxIntr)
1758 *pidxIntr = idxIntr;
1759
1760 LogFlowFunc(("uIntId=%u [idxIntr=%u uPriority=%u]\n", uIntId, idxIntr, uPriority));
1761 return uIntId;
1762}
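
/*
 * Minimal standalone sketch (illustrative only, not part of the device logic): the selection
 * above boils down to masking pending & enabled & ~active, discarding interrupts whose group
 * is not being considered, and keeping the candidate with the lowest numeric (i.e. highest)
 * priority. The helper name below is made up purely for illustration.
 */
#if 0 /* Never compiled; sketch only. */
static uint16_t gicExamplePickHighestPriorityIntr(uint32_t const *pbmPending, uint32_t const *pbmEnabled,
                                                  uint32_t const *pbmActive, uint32_t const *pbmGroup,
                                                  uint8_t const *pabPriority, uint16_t cWords,
                                                  bool fGroup0, bool fGroup1)
{
    uint16_t idxBest   = UINT16_MAX;    /* No deliverable interrupt found yet. */
    uint8_t  bBestPrio = 0xff;          /* Idle priority; lower values are higher priority. */
    for (uint16_t iWord = 0; iWord < cWords; iWord++)
    {
        /* Candidates: pending, enabled and not already active. */
        uint32_t fIntrs = (pbmPending[iWord] & pbmEnabled[iWord]) & ~pbmActive[iWord];
        /* A set group bit marks a group 1 interrupt; drop groups that are not considered. */
        if (!fGroup1)
            fIntrs &= ~pbmGroup[iWord];
        if (!fGroup0)
            fIntrs &= pbmGroup[iWord];
        for (uint16_t iBit = 0; fIntrs; iBit++, fIntrs >>= 1)
            if (   (fIntrs & 1)
                && pabPriority[iWord * 32 + iBit] < bBestPrio)
            {
                idxBest   = iWord * 32 + iBit;
                bBestPrio = pabPriority[iWord * 32 + iBit];
            }
    }
    return idxBest; /* UINT16_MAX when nothing can be signalled. */
}
#endif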
1763
1764
1765/**
1766 * Get and acknowledge the interrupt ID of a signalled interrupt.
1767 *
1768 * @returns The interrupt ID or GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT if no interrupt
1769 *          is pending or not in a state to be signalled.
1770 * @param pGicDev The GIC distributor state.
1771 * @param pVCpu The cross context virtual CPU structure.
1772 * @param fGroup0 Whether to consider group 0 interrupts.
1773 * @param fGroup1 Whether to consider group 1 interrupts.
1774 */
1775static uint16_t gicAckHighestPriorityPendingIntr(PGICDEV pGicDev, PVMCPUCC pVCpu, bool fGroup0, bool fGroup1)
1776{
1777 Assert(fGroup0 || fGroup1);
1778 LogFlowFunc(("fGroup0=%RTbool fGroup1=%RTbool\n", fGroup0, fGroup1));
1779
1780 /*
1781 * Get the pending interrupt with the highest priority for the given group.
1782 */
1783 uint8_t bIntrPriority;
1784 uint16_t idxIntr;
1785 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1786 STAM_PROFILE_START(&pGicCpu->StatProfIntrAck, x);
1787 uint16_t const uIntId = gicGetHighestPriorityPendingIntr(pGicDev, pGicCpu, fGroup0, fGroup1, &idxIntr, &bIntrPriority);
1788 if (uIntId != GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT)
1789 {
1790 /*
1791 * The interrupt priority must be higher than the priority mask of the CPU interface for the
1792 * interrupt to be signalled/acknowledged. Here, we must NOT use priority grouping when comparing
1793 * the priority of a pending interrupt with this priority mask (threshold).
1794 *
1795 * See ARM GIC spec. 4.8.6 "Priority masking".
1796 */
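        /*
         * Worked example (illustrative): lower values mean higher priority, so with a priority
         * mask (threshold) of 0x80, a pending interrupt of priority 0x45 is deliverable
         * (0x45 < 0x80) whereas one of priority 0x80 or 0xa0 is masked.
         */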
1797 if (bIntrPriority >= pGicCpu->bIntrPriorityMask)
1798 {
1799 STAM_PROFILE_STOP(&pGicCpu->StatProfIntrAck, x);
1800 return GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1801 }
1802
1803 /*
1804 * The group priority of the pending interrupt must be higher than that of the running priority.
1805         * The number of bits for the group priority depends on the binary point registers.
1806 * We mask the sub-priority bits and only compare the group priority.
1807 *
1808         * When the binary point register indicates no preemption, we must allow interrupts that have
1809 * a higher priority than idle. Hence, the use of two different masks below.
1810 *
1811 * See ARM GIC spec. 4.8.3 "Priority grouping".
1812 * See ARM GIC spec. 4.8.5 "Preemption".
1813 */
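        /*
         * Worked example (illustrative): a binary point value of 3 selects the group mask 0xf0
         * from the table below, i.e. bits [7:4] form the group priority and bits [3:0] the
         * sub-priority. A pending interrupt of priority 0x45 then has group priority 0x40 and
         * preempts a running priority of 0x52 (group 0x50), but not one of 0x43 (group 0x40).
         */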
1814 static uint8_t const s_afGroupPriorityMasks[8] = { 0xfe, 0xfc, 0xf8, 0xf0, 0xe0, 0xc0, 0x80, 0x00 };
1815 static uint8_t const s_afRunningPriorityMasks[8] = { 0xfe, 0xfc, 0xf8, 0xf0, 0xe0, 0xc0, 0x80, 0xff };
1816 uint8_t const idxPriorityMask = (fGroup0 || (pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_CBPR))
1817 ? pGicCpu->bBinaryPtGroup0 & 7
1818 : pGicCpu->bBinaryPtGroup1 & 7;
1819 uint8_t const bRunningPriority = pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority];
1820 uint8_t const bRunningGroupPriority = bRunningPriority & s_afRunningPriorityMasks[idxPriorityMask];
1821 uint8_t const bIntrGroupPriority = bIntrPriority & s_afGroupPriorityMasks[idxPriorityMask];
1822 if (bIntrGroupPriority >= bRunningGroupPriority)
1823 {
1824 STAM_PROFILE_STOP(&pGicCpu->StatProfIntrAck, x);
1825 return GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1826 }
1827
1828 /*
1829 * Acknowledge the interrupt.
1830 */
1831 bool const fIsRedistIntId = GIC_IS_INTR_SGI_OR_PPI(uIntId) || GIC_IS_INTR_EXT_PPI(uIntId);
1832 if (fIsRedistIntId)
1833 {
1834 /* Mark the interrupt as active. */
1835 AssertMsg(idxIntr < sizeof(pGicCpu->bmIntrActive) * 8, ("idxIntr=%u\n", idxIntr));
1836 ASMBitSet(&pGicCpu->bmIntrActive[0], idxIntr);
1837
1838 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (start). */
1839 /* Update the active priorities bitmap. */
1840 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup0) * 8 >= 128);
1841 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup1) * 8 >= 128);
1842 uint8_t const idxPreemptionLevel = bIntrPriority >> 1;
1843 if (fGroup0)
1844 ASMBitSet(&pGicCpu->bmActivePriorityGroup0[0], idxPreemptionLevel);
1845 if (fGroup1)
1846 ASMBitSet(&pGicCpu->bmActivePriorityGroup1[0], idxPreemptionLevel);
1847
1848 /* Drop priority. */
1849 if (RT_LIKELY(pGicCpu->idxRunningPriority < RT_ELEMENTS(pGicCpu->abRunningPriorities) - 1))
1850 {
1851 LogFlowFunc(("Dropping interrupt priority from %u -> %u (idxRunningPriority: %u -> %u)\n",
1852 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority],
1853 bIntrPriority,
1854 pGicCpu->idxRunningPriority, pGicCpu->idxRunningPriority + 1));
1855 ++pGicCpu->idxRunningPriority;
1856 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority] = bIntrPriority;
1857 }
1858 else
1859 AssertReleaseMsgFailed(("Index of running-interrupt priority out-of-bounds %u\n", pGicCpu->idxRunningPriority));
1860 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (end). */
1861
1862 /* If it is an edge-triggered interrupt, mark it as no longer pending. */
1863 AssertRelease(UINT32_C(2) * idxIntr + 1 < sizeof(pGicCpu->bmIntrConfig) * 8);
1864 bool const fEdgeTriggered = ASMBitTest(&pGicCpu->bmIntrConfig[0], 2 * idxIntr + 1);
1865 if (fEdgeTriggered)
1866 ASMBitClear(&pGicCpu->bmIntrPending[0], idxIntr);
1867
1868 /* Update the redistributor IRQ state to reflect change to the active interrupt. */
1869 gicReDistUpdateIrqState(pGicDev, pVCpu);
1870 }
1871 else
1872 {
1873 /* Sanity check if the interrupt ID belongs to the distributor. */
1874 Assert(GIC_IS_INTR_SPI(uIntId) || GIC_IS_INTR_EXT_SPI(uIntId));
1875
1876 /* Mark the interrupt as active. */
1877 Assert(idxIntr < sizeof(pGicDev->bmIntrActive) * 8);
1878 ASMBitSet(&pGicDev->bmIntrActive[0], idxIntr);
1879
1880 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (start). */
1881 /* Update the active priorities bitmap. */
1882 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup0) * 8 >= 128);
1883 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup1) * 8 >= 128);
1884 uint8_t const idxPreemptionLevel = bIntrPriority >> 1;
1885 if (fGroup0)
1886 ASMBitSet(&pGicCpu->bmActivePriorityGroup0[0], idxPreemptionLevel);
1887 if (fGroup1)
1888 ASMBitSet(&pGicCpu->bmActivePriorityGroup1[0], idxPreemptionLevel);
1889
1890 /* Drop priority. */
1891 if (RT_LIKELY(pGicCpu->idxRunningPriority < RT_ELEMENTS(pGicCpu->abRunningPriorities) - 1))
1892 {
1893 LogFlowFunc(("Dropping interrupt priority from %u -> %u (idxRunningPriority: %u -> %u)\n",
1894 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority],
1895 bIntrPriority,
1896 pGicCpu->idxRunningPriority, pGicCpu->idxRunningPriority + 1));
1897 ++pGicCpu->idxRunningPriority;
1898 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority] = bIntrPriority;
1899 }
1900 else
1901 AssertReleaseMsgFailed(("Index of running-interrupt priority out-of-bounds %u\n", pGicCpu->idxRunningPriority));
1902 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (end). */
1903
1904 /* If it is an edge-triggered interrupt, mark it as no longer pending. */
1905 AssertRelease(UINT32_C(2) * idxIntr + 1 < sizeof(pGicDev->bmIntrConfig) * 8);
1906 bool const fEdgeTriggered = ASMBitTest(&pGicDev->bmIntrConfig[0], 2 * idxIntr + 1);
1907 if (fEdgeTriggered)
1908 ASMBitClear(&pGicDev->bmIntrPending[0], idxIntr);
1909
1910 /* Update the distributor IRQ state to reflect change to the active interrupt. */
1911 gicDistUpdateIrqState(pVCpu->CTX_SUFF(pVM), pGicDev);
1912 }
1913 }
1914 else
1915 Assert(bIntrPriority == GIC_IDLE_PRIORITY);
1916
1917 LogFlowFunc(("uIntId=%u\n", uIntId));
1918 STAM_PROFILE_STOP(&pGicCpu->StatProfIntrAck, x);
1919 return uIntId;
1920}
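
/*
 * Illustrative note: each successful acknowledge above pushes the acknowledged priority onto
 * the abRunningPriorities stack (idxRunningPriority), raising the running priority that later
 * pending interrupts must preempt; the stack is presumably unwound again by the corresponding
 * end-of-interrupt / priority-drop handling, which is not shown here.
 */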
1921
1922
1923/**
1924 * Reads a distributor register.
1925 *
1926 * @returns VBox status code.
1927 * @param pDevIns The device instance.
1928 * @param pVCpu The cross context virtual CPU structure.
1929 * @param offReg The offset of the register being read.
1930 * @param puValue Where to store the register value.
1931 */
1932DECLINLINE(VBOXSTRICTRC) gicDistReadRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t *puValue)
1933{
1934 VMCPU_ASSERT_EMT(pVCpu); RT_NOREF(pVCpu);
1935 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
1936
1937 /*
1938 * 64-bit registers.
1939 */
1940 {
1941 /*
1942 * GICD_IROUTER<n> and GICD_IROUTER<n>E.
1943 */
1944 uint16_t const cbReg = sizeof(uint64_t);
1945 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERn_OFF_START, GIC_DIST_REG_IROUTERn_RANGE_SIZE))
1946 {
1947 /* Hardware does not map the first 32 registers (corresponding to SGIs and PPIs). */
1948 uint16_t const idxExt = GIC_INTID_RANGE_SPI_START;
1949 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERn_OFF_START) / cbReg;
1950 return gicDistReadIntrRoutingReg(pGicDev, idxReg, puValue);
1951 }
1952 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERnE_OFF_START, GIC_DIST_REG_IROUTERnE_RANGE_SIZE))
1953 {
1954 uint16_t const idxExt = RT_ELEMENTS(pGicDev->au32IntrRouting) / 2;
1955 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERnE_OFF_START) / cbReg;
1956 return gicDistReadIntrRoutingReg(pGicDev, idxReg, puValue);
1957 }
1958 }
1959
1960 /*
1961 * 32-bit registers.
1962 */
1963 {
1964 /*
1965 * GICD_IGROUPR<n> and GICD_IGROUPR<n>E.
1966 */
1967 uint16_t const cbReg = sizeof(uint32_t);
1968 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRn_OFF_START, GIC_DIST_REG_IGROUPRn_RANGE_SIZE))
1969 {
1970 uint16_t const idxReg = (offReg - GIC_DIST_REG_IGROUPRn_OFF_START) / cbReg;
1971 return gicDistReadIntrGroupReg(pGicDev, idxReg, puValue);
1972 }
1973 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRnE_OFF_START, GIC_DIST_REG_IGROUPRnE_RANGE_SIZE))
1974 {
1975 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrGroup) / 2;
1976 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IGROUPRnE_OFF_START) / cbReg;
1977 return gicDistReadIntrGroupReg(pGicDev, idxReg, puValue);
1978 }
1979
1980 /*
1981 * GICD_ISENABLER<n> and GICD_ISENABLER<n>E.
1982 * GICD_ICENABLER<n> and GICD_ICENABLER<n>E.
1983 */
1984 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERn_OFF_START, GIC_DIST_REG_ISENABLERn_RANGE_SIZE))
1985 {
1986 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISENABLERn_OFF_START) / cbReg;
1987 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
1988 }
1989 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERnE_OFF_START, GIC_DIST_REG_ISENABLERnE_RANGE_SIZE))
1990 {
1991 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
1992 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISENABLERnE_OFF_START) / cbReg;
1993 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
1994 }
1995 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERn_OFF_START, GIC_DIST_REG_ICENABLERn_RANGE_SIZE))
1996 {
1997 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICENABLERn_OFF_START) / cbReg;
1998 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
1999 }
2000 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERnE_OFF_START, GIC_DIST_REG_ICENABLERnE_RANGE_SIZE))
2001 {
2002 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
2003 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICENABLERnE_OFF_START) / cbReg;
2004 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
2005 }
2006
2007 /*
2008 * GICD_ISACTIVER<n> and GICD_ISACTIVER<n>E.
2009 * GICD_ICACTIVER<n> and GICD_ICACTIVER<n>E.
2010 */
2011 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERn_OFF_START, GIC_DIST_REG_ISACTIVERn_RANGE_SIZE))
2012 {
2013 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISACTIVERn_OFF_START) / cbReg;
2014 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
2015 }
2016 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERnE_OFF_START, GIC_DIST_REG_ISACTIVERnE_RANGE_SIZE))
2017 {
2018 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
2019 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISACTIVERnE_OFF_START) / cbReg;
2020 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
2021 }
2022 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERn_OFF_START, GIC_DIST_REG_ICACTIVERn_RANGE_SIZE))
2023 {
2024            uint16_t const idxReg = (offReg - GIC_DIST_REG_ICACTIVERn_OFF_START) / cbReg;
2025 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
2026 }
2027 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERnE_OFF_START, GIC_DIST_REG_ICACTIVERnE_RANGE_SIZE))
2028 {
2029 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
2030 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICACTIVERnE_OFF_START) / cbReg;
2031 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
2032 }
2033
2034 /*
2035 * GICD_IPRIORITYR<n> and GICD_IPRIORITYR<n>E.
2036 */
2037 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRn_OFF_START, GIC_DIST_REG_IPRIORITYRn_RANGE_SIZE))
2038 {
2039 uint16_t const idxReg = (offReg - GIC_DIST_REG_IPRIORITYRn_OFF_START) / cbReg;
2040 return gicDistReadIntrPriorityReg(pGicDev, idxReg, puValue);
2041 }
2042 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRnE_OFF_START, GIC_DIST_REG_IPRIORITYRnE_RANGE_SIZE))
2043 {
2044 uint16_t const idxExt = RT_ELEMENTS(pGicDev->abIntrPriority) / (2 * sizeof(uint32_t));
2045 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IPRIORITYRnE_OFF_START) / cbReg;
2046 return gicDistReadIntrPriorityReg(pGicDev, idxReg, puValue);
2047 }
2048
2049 /*
2050 * GICD_ISPENDR<n> and GICD_ISPENDR<n>E.
2051 * GICD_ICPENDR<n> and GICD_ICPENDR<n>E.
2052 */
2053 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRn_OFF_START, GIC_DIST_REG_ISPENDRn_RANGE_SIZE))
2054 {
2055 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISPENDRn_OFF_START) / cbReg;
2056 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2057 }
2058 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRnE_OFF_START, GIC_DIST_REG_ISPENDRnE_RANGE_SIZE))
2059 {
2060 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2061 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISPENDRnE_OFF_START) / cbReg;
2062 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2063 }
2064 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRn_OFF_START, GIC_DIST_REG_ICPENDRn_RANGE_SIZE))
2065 {
2066 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICPENDRn_OFF_START) / cbReg;
2067 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2068 }
2069 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRnE_OFF_START, GIC_DIST_REG_ICPENDRnE_RANGE_SIZE))
2070 {
2071 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2072 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICPENDRnE_OFF_START) / cbReg;
2073 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2074 }
2075
2076 /*
2077 * GICD_ICFGR<n> and GICD_ICFGR<n>E.
2078 */
2079 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRn_OFF_START, GIC_DIST_REG_ICFGRn_RANGE_SIZE))
2080 {
2081 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICFGRn_OFF_START) / cbReg;
2082 return gicDistReadIntrConfigReg(pGicDev, idxReg, puValue);
2083 }
2084 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRnE_OFF_START, GIC_DIST_REG_ICFGRnE_RANGE_SIZE))
2085 {
2086 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrConfig) / 2;
2087 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICFGRnE_OFF_START) / cbReg;
2088 return gicDistReadIntrConfigReg(pGicDev, idxReg, puValue);
2089 }
2090 }
2091
2092 switch (offReg)
2093 {
2094 case GIC_DIST_REG_CTLR_OFF:
2095 Assert(pGicDev->fAffRoutingEnabled);
2096 *puValue = (pGicDev->fIntrGroup0Enabled ? GIC_DIST_REG_CTRL_ENABLE_GRP0 : 0)
2097 | (pGicDev->fIntrGroup1Enabled ? GIC_DIST_REG_CTRL_ENABLE_GRP1_NS : 0)
2098 | GIC_DIST_REG_CTRL_DS /* We don't support multiple security states. */
2099 | GIC_DIST_REG_CTRL_ARE_S; /* We don't support GICv2 backwards compatibility, ARE is always enabled. */
2100 break;
2101 case GIC_DIST_REG_TYPER_OFF:
2102 {
2103 Assert(pGicDev->uMaxSpi > 0 && pGicDev->uMaxSpi <= GIC_DIST_REG_TYPER_NUM_ITLINES);
2104 Assert(pGicDev->fAffRoutingEnabled);
2105 *puValue = GIC_DIST_REG_TYPER_NUM_ITLINES_SET(pGicDev->uMaxSpi)
2106 | GIC_DIST_REG_TYPER_NUM_PES_SET(0) /* Affinity routing is always enabled, hence this MBZ. */
2107 /*| GIC_DIST_REG_TYPER_NMI*/ /** @todo Support non-maskable interrupts */
2108 /*| GIC_DIST_REG_TYPER_SECURITY_EXTN*/ /** @todo Support dual security states. */
2109 | (pGicDev->fMbi ? GIC_DIST_REG_TYPER_MBIS : 0)
2110 | (pGicDev->fRangeSel ? GIC_DIST_REG_TYPER_RSS : 0)
2111 | GIC_DIST_REG_TYPER_IDBITS_SET(15) /* We only support 16-bit interrupt IDs. */
2112 | (pGicDev->fAff3Levels ? GIC_DIST_REG_TYPER_A3V : 0);
2113 if (pGicDev->fExtSpi)
2114 *puValue |= GIC_DIST_REG_TYPER_ESPI
2115 | GIC_DIST_REG_TYPER_ESPI_RANGE_SET(pGicDev->uMaxExtSpi);
2116 if (pGicDev->fLpi)
2117 {
2118 Assert(pGicDev->uMaxLpi - 2 < 13);
2119 Assert(GIC_INTID_RANGE_LPI_START + (UINT32_C(2) << pGicDev->uMaxLpi) <= UINT16_MAX);
2120 *puValue |= GIC_DIST_REG_TYPER_LPIS
2121 | GIC_DIST_REG_TYPER_NUM_LPIS_SET(pGicDev->uMaxLpi);
2122 }
2123 AssertMsg( RT_BOOL(*puValue & GIC_DIST_REG_TYPER_MBIS)
2124 == RT_BOOL(*puValue & GIC_DIST_REG_TYPER_LPIS), ("%#RX32\n", *puValue));
2125 break;
2126 }
2127 case GIC_DIST_REG_PIDR2_OFF:
2128 Assert(pGicDev->uArchRev <= GIC_DIST_REG_PIDR2_ARCHREV_GICV4);
2129 *puValue = GIC_DIST_REG_PIDR2_ARCHREV_SET(pGicDev->uArchRev);
2130 break;
2131 case GIC_DIST_REG_IIDR_OFF:
2132 *puValue = GIC_DIST_REG_IIDR_IMPL_SET(GIC_JEDEC_JEP106_IDENTIFICATION_CODE, GIC_JEDEC_JEP106_CONTINUATION_CODE);
2133 break;
2134 case GIC_DIST_REG_TYPER2_OFF:
2135 *puValue = 0;
2136 break;
2137 default:
2138 AssertReleaseMsgFailed(("offReg=%#x\n", offReg));
2139 *puValue = 0;
2140 break;
2141 }
2142 return VINF_SUCCESS;
2143}
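
/*
 * Worked example (illustrative): the decode above maps an MMIO offset onto a register index
 * with (offReg - <range start>) / sizeof(register), each 32-bit register covering 32 INTIDs
 * (or 4 priority bytes for GICD_IPRIORITYR<n>). An access 4 bytes into the GICD_ISENABLER<n>
 * range therefore yields idxReg = 1, i.e. the enable bits for INTIDs 32..63; accesses to the
 * extended (GICD_*<n>E) ranges continue at idxExt, the second half of the same bitmap.
 */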
2144
2145
2146/**
2147 * Writes a distributor register.
2148 *
2149 * @returns Strict VBox status code.
2150 * @param pDevIns The device instance.
2151 * @param pVCpu The cross context virtual CPU structure.
2152 * @param offReg The offset of the register being written.
2153 * @param uValue The register value.
2154 */
2155DECLINLINE(VBOXSTRICTRC) gicDistWriteRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
2156{
2157 VMCPU_ASSERT_EMT(pVCpu); RT_NOREF(pVCpu);
2158 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2159 PVMCC pVM = PDMDevHlpGetVM(pDevIns);
2160
2161 /*
2162 * 64-bit registers.
2163 */
2164 {
2165 /*
2166 * GICD_IROUTER<n> and GICD_IROUTER<n>E.
2167 */
2168 uint16_t const cbReg = sizeof(uint64_t);
2169 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERn_OFF_START, GIC_DIST_REG_IROUTERn_RANGE_SIZE))
2170 {
2171 /* Hardware does not map the first 32 registers (corresponding to SGIs and PPIs). */
2172 uint16_t const idxExt = GIC_INTID_RANGE_SPI_START;
2173 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERn_OFF_START) / cbReg;
2174 return gicDistWriteIntrRoutingReg(pGicDev, idxReg, uValue);
2175 }
2176 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERnE_OFF_START, GIC_DIST_REG_IROUTERnE_RANGE_SIZE))
2177 {
2178 uint16_t const idxExt = RT_ELEMENTS(pGicDev->au32IntrRouting) / 2;
2179 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERnE_OFF_START) / cbReg;
2180 return gicDistWriteIntrRoutingReg(pGicDev, idxReg, uValue);
2181 }
2182
2183 }
2184
2185 /*
2186 * 32-bit registers.
2187 */
2188 {
2189 /*
2190 * GICD_IGROUPR<n> and GICD_IGROUPR<n>E.
2191 */
2192 uint16_t const cbReg = sizeof(uint32_t);
2193 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRn_OFF_START, GIC_DIST_REG_IGROUPRn_RANGE_SIZE))
2194 {
2195 uint16_t const idxReg = (offReg - GIC_DIST_REG_IGROUPRn_OFF_START) / cbReg;
2196 return gicDistWriteIntrGroupReg(pVM, pGicDev, idxReg, uValue);
2197 }
2198 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRnE_OFF_START, GIC_DIST_REG_IGROUPRnE_RANGE_SIZE))
2199 {
2200 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrGroup) / 2;
2201 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IGROUPRnE_OFF_START) / cbReg;
2202 return gicDistWriteIntrGroupReg(pVM, pGicDev, idxReg, uValue);
2203 }
2204
2205 /*
2206 * GICD_ISENABLER<n> and GICD_ISENABLER<n>E.
2207 * GICD_ICENABLER<n> and GICD_ICENABLER<n>E.
2208 */
2209 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERn_OFF_START, GIC_DIST_REG_ISENABLERn_RANGE_SIZE))
2210 {
2211 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISENABLERn_OFF_START) / cbReg;
2212 return gicDistWriteIntrSetEnableReg(pVM, pGicDev, idxReg, uValue);
2213 }
2214 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERnE_OFF_START, GIC_DIST_REG_ISENABLERnE_RANGE_SIZE))
2215 {
2216 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
2217 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISENABLERnE_OFF_START) / cbReg;
2218 return gicDistWriteIntrSetEnableReg(pVM, pGicDev, idxReg, uValue);
2219 }
2220 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERn_OFF_START, GIC_DIST_REG_ICENABLERn_RANGE_SIZE))
2221 {
2222 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICENABLERn_OFF_START) / cbReg;
2223 return gicDistWriteIntrClearEnableReg(pVM, pGicDev, idxReg, uValue);
2224 }
2225 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERnE_OFF_START, GIC_DIST_REG_ICENABLERnE_RANGE_SIZE))
2226 {
2227 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
2228 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICENABLERnE_OFF_START) / cbReg;
2229 return gicDistWriteIntrClearEnableReg(pVM, pGicDev, idxReg, uValue);
2230 }
2231
2232 /*
2233 * GICD_ISACTIVER<n> and GICD_ISACTIVER<n>E.
2234 * GICD_ICACTIVER<n> and GICD_ICACTIVER<n>E.
2235 */
2236 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERn_OFF_START, GIC_DIST_REG_ISACTIVERn_RANGE_SIZE))
2237 {
2238 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISACTIVERn_OFF_START) / cbReg;
2239 return gicDistWriteIntrSetActiveReg(pVM, pGicDev, idxReg, uValue);
2240 }
2241 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERnE_OFF_START, GIC_DIST_REG_ISACTIVERnE_RANGE_SIZE))
2242 {
2243 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
2244 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISACTIVERnE_OFF_START) / cbReg;
2245 return gicDistWriteIntrSetActiveReg(pVM, pGicDev, idxReg, uValue);
2246 }
2247 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERn_OFF_START, GIC_DIST_REG_ICACTIVERn_RANGE_SIZE))
2248 {
2249 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICACTIVERn_OFF_START) / cbReg;
2250 return gicDistWriteIntrClearActiveReg(pVM, pGicDev, idxReg, uValue);
2251 }
2252 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERnE_OFF_START, GIC_DIST_REG_ICACTIVERnE_RANGE_SIZE))
2253 {
2254 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
2255 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICACTIVERnE_OFF_START) / cbReg;
2256 return gicDistWriteIntrClearActiveReg(pVM, pGicDev, idxReg, uValue);
2257 }
2258
2259 /*
2260 * GICD_IPRIORITYR<n> and GICD_IPRIORITYR<n>E.
2261 */
2262 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRn_OFF_START, GIC_DIST_REG_IPRIORITYRn_RANGE_SIZE))
2263 {
2264 uint16_t const idxReg = (offReg - GIC_DIST_REG_IPRIORITYRn_OFF_START) / cbReg;
2265 return gicDistWriteIntrPriorityReg(pGicDev, idxReg, uValue);
2266 }
2267 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRnE_OFF_START, GIC_DIST_REG_IPRIORITYRnE_RANGE_SIZE))
2268 {
2269 uint16_t const idxExt = RT_ELEMENTS(pGicDev->abIntrPriority) / (2 * sizeof(uint32_t));
2270 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IPRIORITYRnE_OFF_START) / cbReg;
2271 return gicDistWriteIntrPriorityReg(pGicDev, idxReg, uValue);
2272 }
2273
2274 /*
2275 * GICD_ISPENDR<n> and GICD_ISPENDR<n>E.
2276 * GICD_ICPENDR<n> and GICD_ICPENDR<n>E.
2277 */
2278 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRn_OFF_START, GIC_DIST_REG_ISPENDRn_RANGE_SIZE))
2279 {
2280 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISPENDRn_OFF_START) / cbReg;
2281 return gicDistWriteIntrSetPendingReg(pVM, pGicDev, idxReg, uValue);
2282 }
2283 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRnE_OFF_START, GIC_DIST_REG_ISPENDRnE_RANGE_SIZE))
2284 {
2285 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2286 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISPENDRnE_OFF_START) / cbReg;
2287 return gicDistWriteIntrSetPendingReg(pVM, pGicDev, idxReg, uValue);
2288 }
2289 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRn_OFF_START, GIC_DIST_REG_ICPENDRn_RANGE_SIZE))
2290 {
2291 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICPENDRn_OFF_START) / cbReg;
2292 return gicDistWriteIntrClearPendingReg(pVM, pGicDev, idxReg, uValue);
2293 }
2294 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRnE_OFF_START, GIC_DIST_REG_ICPENDRnE_RANGE_SIZE))
2295 {
2296 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2297 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICPENDRnE_OFF_START) / cbReg;
2298 return gicDistWriteIntrClearPendingReg(pVM, pGicDev, idxReg, uValue);
2299 }
2300
2301 /*
2302 * GICD_ICFGR<n> and GICD_ICFGR<n>E.
2303 */
2304 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRn_OFF_START, GIC_DIST_REG_ICFGRn_RANGE_SIZE))
2305 {
2306 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICFGRn_OFF_START) / cbReg;
2307 return gicDistWriteIntrConfigReg(pGicDev, idxReg, uValue);
2308 }
2309 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRnE_OFF_START, GIC_DIST_REG_ICFGRnE_RANGE_SIZE))
2310 {
2311 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrConfig) / 2;
2312 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICFGRnE_OFF_START) / cbReg;
2313 return gicDistWriteIntrConfigReg(pGicDev, idxReg, uValue);
2314 }
2315 }
2316
2317 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2318 switch (offReg)
2319 {
2320 case GIC_DIST_REG_CTLR_OFF:
2321 Assert(!(uValue & GIC_DIST_REG_CTRL_ARE_NS));
2322 pGicDev->fIntrGroup0Enabled = RT_BOOL(uValue & GIC_DIST_REG_CTRL_ENABLE_GRP0);
2323 pGicDev->fIntrGroup1Enabled = RT_BOOL(uValue & GIC_DIST_REG_CTRL_ENABLE_GRP1_NS);
2324 rcStrict = gicDistUpdateIrqState(pVM, pGicDev);
2325 break;
2326 default:
2327 {
2328 /* Windows 11 arm64 (24H2) writes zeroes into these reserved registers. We ignore them. */
2329 if (offReg >= 0x7fe0 && offReg <= 0x7ffc)
2330                LogFlowFunc(("Bad guest writing to reserved GIC distributor register space [0x7fe0..0x7ffc] -- ignoring!\n"));
2331 else
2332 AssertReleaseMsgFailed(("offReg=%#x uValue=%#RX32\n", offReg, uValue));
2333 break;
2334 }
2335 }
2336
2337 return rcStrict;
2338}
2339
2340
2341/**
2342 * Reads a GIC redistributor register.
2343 *
2344 * @returns VBox status code.
2345 * @param pDevIns The device instance.
2346 * @param pVCpu The cross context virtual CPU structure.
2347 * @param idRedist The redistributor ID.
2348 * @param offReg The offset of the register being read.
2349 * @param puValue Where to store the register value.
2350 */
2351DECLINLINE(VBOXSTRICTRC) gicReDistReadRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint32_t idRedist, uint16_t offReg, uint32_t *puValue)
2352{
2353 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
2354 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2355 Assert(idRedist == pVCpu->idCpu);
2356
2357 switch (offReg)
2358 {
2359 case GIC_REDIST_REG_TYPER_OFF:
2360 *puValue = (pVCpu->idCpu == pVM->cCpus - 1 ? GIC_REDIST_REG_TYPER_LAST : 0)
2361 | GIC_REDIST_REG_TYPER_CPU_NUMBER_SET(idRedist)
2362 | GIC_REDIST_REG_TYPER_CMN_LPI_AFF_SET(GIC_REDIST_REG_TYPER_CMN_LPI_AFF_ALL)
2363 | (pGicDev->fExtPpi ? GIC_REDIST_REG_TYPER_PPI_NUM_SET(pGicDev->uMaxExtPpi) : 0)
2364 | (pGicDev->fLpi ? GIC_REDIST_REG_TYPER_PLPIS : 0);
2365 Assert(!pGicDev->fExtPpi || pGicDev->uMaxExtPpi > 0);
2366 break;
2367 case GIC_REDIST_REG_WAKER_OFF:
2368 *puValue = 0;
2369 break;
2370 case GIC_REDIST_REG_IIDR_OFF:
2371 *puValue = GIC_REDIST_REG_IIDR_IMPL_SET(GIC_JEDEC_JEP106_IDENTIFICATION_CODE, GIC_JEDEC_JEP106_CONTINUATION_CODE);
2372 break;
2373 case GIC_REDIST_REG_TYPER_AFFINITY_OFF:
2374 *puValue = idRedist;
2375 break;
2376 case GIC_REDIST_REG_PIDR2_OFF:
2377 Assert(pGicDev->uArchRev <= GIC_DIST_REG_PIDR2_ARCHREV_GICV4);
2378 *puValue = GIC_REDIST_REG_PIDR2_ARCHREV_SET(pGicDev->uArchRev);
2379 break;
2380 case GIC_REDIST_REG_CTLR_OFF:
2381 *puValue = (pGicDev->fEnableLpis ? GIC_REDIST_REG_CTLR_ENABLE_LPI : 0)
2382 | GIC_REDIST_REG_CTLR_CES_SET(1);
2383 break;
2384 case GIC_REDIST_REG_PROPBASER_OFF:
2385 *puValue = pGicDev->uLpiConfigBaseReg.s.Lo;
2386 break;
2387 case GIC_REDIST_REG_PROPBASER_OFF + 4:
2388 *puValue = pGicDev->uLpiConfigBaseReg.s.Hi;
2389 break;
2390 case GIC_REDIST_REG_PENDBASER_OFF:
2391 *puValue = pGicDev->uLpiPendingBaseReg.s.Lo;
2392 break;
2393 case GIC_REDIST_REG_PENDBASER_OFF + 4:
2394 *puValue = pGicDev->uLpiPendingBaseReg.s.Hi;
2395 break;
2396 default:
2397 AssertReleaseMsgFailed(("offReg=%#x\n", offReg));
2398 *puValue = 0;
2399 break;
2400 }
2401 return VINF_SUCCESS;
2402}
2403
2404
2405/**
2406 * Reads a GIC redistributor SGI/PPI frame register.
2407 *
2408 * @returns VBox status code.
2409 * @param pDevIns The device instance.
2410 * @param pVCpu The cross context virtual CPU structure.
2411 * @param offReg The offset of the register being read.
2412 * @param puValue Where to store the register value.
2413 */
2414DECLINLINE(VBOXSTRICTRC) gicReDistReadSgiPpiRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t *puValue)
2415{
2416 VMCPU_ASSERT_EMT(pVCpu);
2417 RT_NOREF(pDevIns);
2418
2419 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2420 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2421 uint16_t const cbReg = sizeof(uint32_t);
2422
2423 /*
2424 * GICR_IGROUPR0 and GICR_IGROUPR<n>E.
2425 */
2426 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF, GIC_REDIST_SGI_PPI_REG_IGROUPRnE_RANGE_SIZE))
2427 {
2428 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF) / cbReg;
2429 return gicReDistReadIntrGroupReg(pGicDev, pGicCpu, idxReg, puValue);
2430 }
2431
2432 /*
2433 * GICR_ISENABLER0 and GICR_ISENABLER<n>E.
2434 * GICR_ICENABLER0 and GICR_ICENABLER<n>E.
2435 */
2436 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ISENABLERnE_RANGE_SIZE))
2437 {
2438 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF) / cbReg;
2439 return gicReDistReadIntrEnableReg(pGicDev, pGicCpu, idxReg, puValue);
2440 }
2441 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ICENABLERnE_RANGE_SIZE))
2442 {
2443        uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF) / cbReg;
2444 return gicReDistReadIntrEnableReg(pGicDev, pGicCpu, idxReg, puValue);
2445 }
2446
2447 /*
2448 * GICR_ISACTIVER0 and GICR_ISACTIVER<n>E.
2449 * GICR_ICACTIVER0 and GICR_ICACTIVER<n>E.
2450 */
2451 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ISACTIVERnE_RANGE_SIZE))
2452 {
2453 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF) / cbReg;
2454 return gicReDistReadIntrActiveReg(pGicCpu, idxReg, puValue);
2455 }
2456 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ICACTIVERnE_RANGE_SIZE))
2457 {
2458 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF) / cbReg;
2459 return gicReDistReadIntrActiveReg(pGicCpu, idxReg, puValue);
2460 }
2461
2462 /*
2463 * GICR_ISPENDR0 and GICR_ISPENDR<n>E.
2464 * GICR_ICPENDR0 and GICR_ICPENDR<n>E.
2465 */
2466 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ISPENDRnE_RANGE_SIZE))
2467 {
2468 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF) / cbReg;
2469 return gicReDistReadIntrPendingReg(pGicDev, pGicCpu, idxReg, puValue);
2470 }
2471 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ICPENDRnE_RANGE_SIZE))
2472 {
2473 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF) / cbReg;
2474 return gicReDistReadIntrPendingReg(pGicDev, pGicCpu, idxReg, puValue);
2475 }
2476
2477 /*
2478 * GICR_IPRIORITYR<n> and GICR_IPRIORITYR<n>E.
2479 */
2480 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START, GIC_REDIST_SGI_PPI_REG_IPRIORITYRnE_RANGE_SIZE))
2481 {
2482 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START) / cbReg;
2483 return gicReDistReadIntrPriorityReg(pGicDev, pGicCpu, idxReg, puValue);
2484 }
2485
2486 /*
2487 * GICR_ICFGR0, GICR_ICFGR1 and GICR_ICFGR<n>E.
2488 */
2489 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF, GIC_REDIST_SGI_PPI_REG_ICFGRnE_RANGE_SIZE))
2490 {
2491 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF) / cbReg;
2492 return gicReDistReadIntrConfigReg(pGicDev, pGicCpu, idxReg, puValue);
2493 }
2494
2495 AssertReleaseMsgFailed(("offReg=%#x (%s)\n", offReg, gicReDistGetSgiPpiRegDescription(offReg)));
2496 *puValue = 0;
2497 return VINF_SUCCESS;
2498}
2499
2500
2501/**
2502 * Writes a GIC redistributor frame register.
2503 *
2504 * @returns Strict VBox status code.
2505 * @param pDevIns The device instance.
2506 * @param pVCpu The cross context virtual CPU structure.
2507 * @param offReg The offset of the register being written.
2508 * @param uValue The register value.
2509 */
2510DECLINLINE(VBOXSTRICTRC) gicReDistWriteRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
2511{
2512 VMCPU_ASSERT_EMT(pVCpu);
2513 RT_NOREF(pVCpu, uValue);
2514
2515 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2516 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2517 switch (offReg)
2518 {
2519 case GIC_REDIST_REG_WAKER_OFF:
2520 Assert(uValue == 0);
2521 break;
2522 case GIC_REDIST_REG_CTLR_OFF:
2523 {
2524 /* Check if LPIs are supported and whether the enable LPI bit changed. */
2525 uint32_t const uOldCtlr = pGicDev->fEnableLpis ? GIC_REDIST_REG_CTLR_ENABLE_LPI : 0;
2526 uint32_t const uNewCtlr = uValue & GIC_REDIST_REG_CTLR_ENABLE_LPI;
2527 if ( pGicDev->fLpi
2528 && ((uNewCtlr ^ uOldCtlr) & GIC_REDIST_REG_CTLR_ENABLE_LPI))
2529 {
2530 pGicDev->fEnableLpis = RT_BOOL(uNewCtlr & GIC_REDIST_REG_CTLR_ENABLE_LPI);
2531 if (pGicDev->fEnableLpis)
2532 {
2533 gicDistReadLpiConfigTableFromMem(pDevIns);
2534 gicReDistReadLpiPendingBitmapFromMem(pDevIns, pVCpu);
2535 }
2536 else
2537 {
2538 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2539 RT_ZERO(pGicCpu->bmLpiPending);
2540 }
2541 }
2542 break;
2543 }
2544 case GIC_REDIST_REG_PROPBASER_OFF:
2545 pGicDev->uLpiConfigBaseReg.s.Lo = uValue & RT_LO_U32(GIC_REDIST_REG_PROPBASER_RW_MASK);
2546 break;
2547 case GIC_REDIST_REG_PROPBASER_OFF + 4:
2548 pGicDev->uLpiConfigBaseReg.s.Hi = uValue & RT_HI_U32(GIC_REDIST_REG_PROPBASER_RW_MASK);
2549 break;
2550 case GIC_REDIST_REG_PENDBASER_OFF:
2551 pGicDev->uLpiPendingBaseReg.s.Lo = uValue & RT_LO_U32(GIC_REDIST_REG_PENDBASER_RW_MASK);
2552 break;
2553 case GIC_REDIST_REG_PENDBASER_OFF + 4:
2554 pGicDev->uLpiPendingBaseReg.s.Hi = uValue & RT_HI_U32(GIC_REDIST_REG_PENDBASER_RW_MASK);
2555 break;
2556 default:
2557 AssertReleaseMsgFailed(("offReg=%#x (%s) uValue=%#RX32\n", offReg, gicReDistGetRegDescription(offReg), uValue));
2558 break;
2559 }
2560
2561 return rcStrict;
2562}
2563
2564
2565/**
2566 * Writes a GIC redistributor SGI/PPI frame register.
2567 *
2568 * @returns Strict VBox status code.
2569 * @param pDevIns The device instance.
2570 * @param pVCpu The cross context virtual CPU structure.
2571 * @param offReg The offset of the register being written.
2572 * @param uValue The register value.
2573 */
2574DECLINLINE(VBOXSTRICTRC) gicReDistWriteSgiPpiRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
2575{
2576 VMCPU_ASSERT_EMT(pVCpu);
2577 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PCGICDEV);
2578 uint16_t const cbReg = sizeof(uint32_t);
2579
2580 /*
2581 * GICR_IGROUPR0 and GICR_IGROUPR<n>E.
2582 */
2583 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF, GIC_REDIST_SGI_PPI_REG_IGROUPRnE_RANGE_SIZE))
2584 {
2585 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF) / cbReg;
2586 return gicReDistWriteIntrGroupReg(pGicDev, pVCpu, idxReg, uValue);
2587 }
2588
2589 /*
2590 * GICR_ISENABLER0 and GICR_ISENABLER<n>E.
2591 * GICR_ICENABLER0 and GICR_ICENABLER<n>E.
2592 */
2593 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ISENABLERnE_RANGE_SIZE))
2594 {
2595 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF) / cbReg;
2596 return gicReDistWriteIntrSetEnableReg(pGicDev, pVCpu, idxReg, uValue);
2597 }
2598 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ICENABLERnE_RANGE_SIZE))
2599 {
2600 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF) / cbReg;
2601 return gicReDistWriteIntrClearEnableReg(pGicDev, pVCpu, idxReg, uValue);
2602 }
2603
2604 /*
2605 * GICR_ISACTIVER0 and GICR_ISACTIVER<n>E.
2606 * GICR_ICACTIVER0 and GICR_ICACTIVER<n>E.
2607 */
2608 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ISACTIVERnE_RANGE_SIZE))
2609 {
2610 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF) / cbReg;
2611 return gicReDistWriteIntrSetActiveReg(pGicDev, pVCpu, idxReg, uValue);
2612 }
2613 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ICACTIVERnE_RANGE_SIZE))
2614 {
2615 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF) / cbReg;
2616 return gicReDistWriteIntrClearActiveReg(pGicDev, pVCpu, idxReg, uValue);
2617 }
2618
2619 /*
2620 * GICR_ISPENDR0 and GICR_ISPENDR<n>E.
2621 * GICR_ICPENDR0 and GICR_ICPENDR<n>E.
2622 */
2623 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ISPENDRnE_RANGE_SIZE))
2624 {
2625 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF) / cbReg;
2626 return gicReDistWriteIntrSetPendingReg(pGicDev, pVCpu, idxReg, uValue);
2627 }
2628 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ICPENDRnE_RANGE_SIZE))
2629 {
2630 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF) / cbReg;
2631 return gicReDistWriteIntrClearPendingReg(pGicDev, pVCpu, idxReg, uValue);
2632 }
2633
2634 /*
2635 * GICR_IPRIORITYR<n> and GICR_IPRIORITYR<n>E.
2636 */
2637 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START, GIC_REDIST_SGI_PPI_REG_IPRIORITYRnE_RANGE_SIZE))
2638 {
2639 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START) / cbReg;
2640 return gicReDistWriteIntrPriorityReg(pGicDev, pVCpu, idxReg, uValue);
2641 }
2642
2643 /*
2644     * GICR_ICFGR0, GICR_ICFGR1 and GICR_ICFGR<n>E.
2645 */
2646 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF, GIC_REDIST_SGI_PPI_REG_ICFGRnE_RANGE_SIZE))
2647 {
2648 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF) / cbReg;
2649 return gicReDistWriteIntrConfigReg(pGicDev, pVCpu, idxReg, uValue);
2650 }
2651
2652 AssertReleaseMsgFailed(("offReg=%#RX16 (%s)\n", offReg, gicReDistGetSgiPpiRegDescription(offReg)));
2653 return VERR_INTERNAL_ERROR_2;
2654}
2655
2656
2657/**
2658 * @interface_method_impl{PDMGICBACKEND,pfnSetSpi}
2659 */
2660static DECLCALLBACK(int) gicSetSpi(PVMCC pVM, uint32_t uSpiIntId, bool fAsserted)
2661{
2662 LogFlowFunc(("pVM=%p uSpiIntId=%u fAsserted=%RTbool\n",
2663 pVM, uSpiIntId, fAsserted));
2664
2665 PGIC pGic = VM_TO_GIC(pVM);
2666 PPDMDEVINS pDevIns = pGic->CTX_SUFF(pDevIns);
2667 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2668
2669#ifdef VBOX_WITH_STATISTICS
2670 PVMCPU pVCpu = VMMGetCpuById(pVM, 0);
2671 STAM_COUNTER_INC(&pVCpu->gic.s.StatSetSpi);
2672 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2673#endif
2674 STAM_PROFILE_START(&pGicCpu->StatProfSetSpi, a);
2675
2676 uint16_t const uIntId = GIC_INTID_RANGE_SPI_START + uSpiIntId;
2677 uint16_t const idxIntr = gicDistGetIndexFromIntId(uIntId);
2678
2679 Assert(idxIntr >= GIC_INTID_RANGE_SPI_START);
2680 AssertMsgReturn(idxIntr < sizeof(pGicDev->bmIntrPending) * 8,
2681 ("out-of-range SPI interrupt ID %RU32 (%RU32)\n", uIntId, uSpiIntId),
2682 VERR_INVALID_PARAMETER);
2683
2684 GIC_CRIT_SECT_ENTER(pDevIns);
2685
2686 /* Update the interrupt pending state. */
2687 if (fAsserted)
2688 ASMBitSet(&pGicDev->bmIntrPending[0], idxIntr);
2689 else
2690 ASMBitClear(&pGicDev->bmIntrPending[0], idxIntr);
2691
2692 int const rc = VBOXSTRICTRC_VAL(gicDistUpdateIrqState(pVM, pGicDev));
2693 STAM_PROFILE_STOP(&pGicCpu->StatProfSetSpi, a);
2694
2695 GIC_CRIT_SECT_LEAVE(pDevIns);
2696 return rc;
2697}
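
/*
 * Illustrative note: gicSetSpi takes a zero-based SPI number, so uSpiIntId 5 corresponds to
 * INTID GIC_INTID_RANGE_SPI_START + 5 = 37, which gicDistGetIndexFromIntId then maps onto the
 * distributor's pending bitmap.
 */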
2698
2699
2700/**
2701 * @interface_method_impl{PDMGICBACKEND,pfnSetPpi}
2702 */
2703static DECLCALLBACK(int) gicSetPpi(PVMCPUCC pVCpu, uint32_t uPpiIntId, bool fAsserted)
2704{
2705 LogFlowFunc(("pVCpu=%p{.idCpu=%u} uPpiIntId=%u fAsserted=%RTbool\n", pVCpu, pVCpu->idCpu, uPpiIntId, fAsserted));
2706
2707 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2708 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PCGICDEV);
2709 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2710
2711 STAM_COUNTER_INC(&pVCpu->gic.s.StatSetPpi);
2712 STAM_PROFILE_START(&pGicCpu->StatProfSetPpi, b);
2713
2714 uint32_t const uIntId = GIC_INTID_RANGE_PPI_START + uPpiIntId;
2715 uint16_t const idxIntr = gicReDistGetIndexFromIntId(uIntId);
2716
2717 Assert(idxIntr >= GIC_INTID_RANGE_PPI_START);
2718 AssertMsgReturn(idxIntr < sizeof(pGicCpu->bmIntrPending) * 8,
2719 ("out-of-range PPI interrupt ID %RU32 (%RU32)\n", uIntId, uPpiIntId),
2720 VERR_INVALID_PARAMETER);
2721
2722 GIC_CRIT_SECT_ENTER(pDevIns);
2723
2724 /* Update the interrupt pending state. */
2725 if (fAsserted)
2726 ASMBitSet(&pGicCpu->bmIntrPending[0], idxIntr);
2727 else
2728 ASMBitClear(&pGicCpu->bmIntrPending[0], idxIntr);
2729
2730 int const rc = VBOXSTRICTRC_VAL(gicReDistUpdateIrqState(pGicDev, pVCpu));
2731 STAM_PROFILE_STOP(&pGicCpu->StatProfSetPpi, b);
2732
2733 GIC_CRIT_SECT_LEAVE(pDevIns);
2734 return rc;
2735}
2736
2737
2738/**
2739 * Sets the specified software generated interrupt (SGI).
2740 *
2741 * @returns Strict VBox status code.
2742 * @param pGicDev The GIC distributor state.
2743 * @param pVCpu The cross context virtual CPU structure.
2744 * @param pDestCpuSet Which CPUs to deliver the SGI to.
2745 * @param uIntId The SGI interrupt ID.
2746 */
2747static VBOXSTRICTRC gicSetSgi(PCGICDEV pGicDev, PVMCPUCC pVCpu, PCVMCPUSET pDestCpuSet, uint8_t uIntId)
2748{
2749 LogFlowFunc(("pVCpu=%p{.idCpu=%u} uIntId=%u\n", pVCpu, pVCpu->idCpu, uIntId));
2750
2751 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2752 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
2753 uint32_t const cCpus = pVM->cCpus;
2754 AssertReturn(uIntId <= GIC_INTID_RANGE_SGI_LAST, VERR_INVALID_PARAMETER);
2755 Assert(GIC_CRIT_SECT_IS_OWNER(pDevIns)); NOREF(pDevIns);
2756
2757 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
2758 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
2759 {
2760 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVM->CTX_SUFF(apCpus)[idCpu]);
2761 pGicCpu->bmIntrPending[0] |= RT_BIT_32(uIntId);
2762 }
2763
2764 return gicDistUpdateIrqState(pVM, pGicDev);
2765}
2766
2767
2768/**
2769 * Writes to the redistributor's SGI group 1 register (ICC_SGI1R_EL1).
2770 *
2771 * @returns Strict VBox status code.
2772 * @param pGicDev The GIC distributor state.
2773 * @param pVCpu The cross context virtual CPU structure.
2774 * @param uValue The value being written to the ICC_SGI1R_EL1 register.
2775 */
2776static VBOXSTRICTRC gicReDistWriteSgiReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint64_t uValue)
2777{
2778#ifdef VBOX_WITH_STATISTICS
2779 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2780 STAM_COUNTER_INC(&pVCpu->gic.s.StatSetSgi);
2781 STAM_PROFILE_START(&pGicCpu->StatProfSetSgi, c);
2782#else
2783 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2784#endif
2785
2786 VMCPUSET DestCpuSet;
2787 if (uValue & ARMV8_ICC_SGI1R_EL1_AARCH64_IRM)
2788 {
2789 /*
2790 * Deliver to all VCPUs but this one.
2791 */
2792 VMCPUSET_FILL(&DestCpuSet);
2793 VMCPUSET_DEL(&DestCpuSet, pVCpu->idCpu);
2794 }
2795 else
2796 {
2797 /*
2798 * Target specific VCPUs.
2799 * See ARM GICv3 and GICv4 Software Overview spec 3.3 "Affinity routing".
2800 */
2801 VMCPUSET_EMPTY(&DestCpuSet);
2802 bool const fRangeSelSupport = RT_BOOL(pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_RSS);
2803 uint8_t const idRangeStart = ARMV8_ICC_SGI1R_EL1_AARCH64_RS_GET(uValue) * 16;
2804 uint16_t const bmCpuInterfaces = ARMV8_ICC_SGI1R_EL1_AARCH64_TARGET_LIST_GET(uValue);
2805 uint8_t const uAff1 = ARMV8_ICC_SGI1R_EL1_AARCH64_AFF1_GET(uValue);
2806 uint8_t const uAff2 = ARMV8_ICC_SGI1R_EL1_AARCH64_AFF2_GET(uValue);
2807 uint8_t const uAff3 = (pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_A3V)
2808 ? ARMV8_ICC_SGI1R_EL1_AARCH64_AFF3_GET(uValue)
2809 : 0;
2810 uint32_t const cCpus = pVCpu->CTX_SUFF(pVM)->cCpus;
2811 for (uint8_t idCpuInterface = 0; idCpuInterface < 16; idCpuInterface++)
2812 {
2813 if (bmCpuInterfaces & RT_BIT(idCpuInterface))
2814 {
2815 VMCPUID idCpuTarget;
2816 if (fRangeSelSupport)
2817 idCpuTarget = RT_MAKE_U32_FROM_U8(idRangeStart + idCpuInterface, uAff1, uAff2, uAff3);
2818 else
2819 idCpuTarget = gicGetCpuIdFromAffinity(idCpuInterface, uAff1, uAff2, uAff3);
2820 if (RT_LIKELY(idCpuTarget < cCpus))
2821 VMCPUSET_ADD(&DestCpuSet, idCpuTarget);
2822 else
2823 AssertReleaseMsgFailed(("VCPU ID out-of-bounds %RU32, must be < %u\n", idCpuTarget, cCpus));
2824 }
2825 }
2826 }
2827
2828 if (!VMCPUSET_IS_EMPTY(&DestCpuSet))
2829 {
2830 uint8_t const uSgiIntId = ARMV8_ICC_SGI1R_EL1_AARCH64_INTID_GET(uValue);
2831 Assert(GIC_IS_INTR_SGI(uSgiIntId));
2832 VBOXSTRICTRC const rcStrict = gicSetSgi(pGicDev, pVCpu, &DestCpuSet, uSgiIntId);
2833 Assert(RT_SUCCESS(rcStrict)); RT_NOREF_PV(rcStrict);
2834 }
2835
2836 STAM_PROFILE_STOP(&pGicCpu->StatProfSetSgi, c);
2837 return VINF_SUCCESS;
2838}
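/*
 * Illustrative note (added; not part of the original source): rough ICC_SGI1R_EL1 field
 * layout as given by the ARM GICv3 architecture (the ARMV8_ICC_SGI1R_EL1_AARCH64_* getters
 * used above are authoritative for this code base):
 *
 *     [15:0]  TargetList    [23:16] Aff1    [27:24] INTID    [39:32] Aff2
 *     [40]    IRM           [47:44] RS      [55:48] Aff3
 *
 * For example, a guest write of 0x05000003 (IRM=0, Aff3/Aff2/Aff1=0, RS=0, INTID=5,
 * TargetList=0b0011) requests SGI 5 for the PEs with affinities 0.0.0.0 and 0.0.0.1. In the
 * common setup where VCPU n has affinity 0.0.0.n, gicSetSgi() then sets bit 5 in
 * bmIntrPending[0] of VCPUs 0 and 1.
 */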
2839
2840
2841/**
2842 * @interface_method_impl{PDMGICBACKEND,pfnReadSysReg}
2843 */
2844static DECLCALLBACK(VBOXSTRICTRC) gicReadSysReg(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
2845{
2846 /*
2847 * Validate.
2848 */
2849 VMCPU_ASSERT_EMT(pVCpu);
2850 Assert(pu64Value);
2851
2852 STAM_COUNTER_INC(&pVCpu->gic.s.StatSysRegRead);
2853
2854 *pu64Value = 0;
2855 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2856 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2857 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2858
2859 GIC_CRIT_SECT_ENTER(pDevIns);
2860
2861 switch (u32Reg)
2862 {
2863 case ARMV8_AARCH64_SYSREG_ICC_PMR_EL1:
2864 *pu64Value = pGicCpu->bIntrPriorityMask;
2865 break;
2866 case ARMV8_AARCH64_SYSREG_ICC_IAR0_EL1:
2867 AssertReleaseFailed();
2868 break;
2869 case ARMV8_AARCH64_SYSREG_ICC_EOIR0_EL1:
2870 AssertReleaseFailed();
2871 break;
2872 case ARMV8_AARCH64_SYSREG_ICC_HPPIR0_EL1:
2873 AssertReleaseFailed();
2874 break;
2875 case ARMV8_AARCH64_SYSREG_ICC_BPR0_EL1:
2876 *pu64Value = ARMV8_ICC_BPR0_EL1_AARCH64_BINARYPOINT_SET(pGicCpu->bBinaryPtGroup0);
2877 break;
2878 case ARMV8_AARCH64_SYSREG_ICC_AP0R0_EL1:
2879 AssertReleaseFailed();
2880 *pu64Value = pGicCpu->bmActivePriorityGroup0[0];
2881 break;
2882 case ARMV8_AARCH64_SYSREG_ICC_AP0R1_EL1:
2883 AssertReleaseFailed();
2884 *pu64Value = pGicCpu->bmActivePriorityGroup0[1];
2885 break;
2886 case ARMV8_AARCH64_SYSREG_ICC_AP0R2_EL1:
2887 AssertReleaseFailed();
2888 *pu64Value = pGicCpu->bmActivePriorityGroup0[2];
2889 break;
2890 case ARMV8_AARCH64_SYSREG_ICC_AP0R3_EL1:
2891 AssertReleaseFailed();
2892 *pu64Value = pGicCpu->bmActivePriorityGroup0[3];
2893 break;
2894 case ARMV8_AARCH64_SYSREG_ICC_AP1R0_EL1:
2895 AssertReleaseFailed();
2896 *pu64Value = pGicCpu->bmActivePriorityGroup1[0];
2897 break;
2898 case ARMV8_AARCH64_SYSREG_ICC_AP1R1_EL1:
2899 AssertReleaseFailed();
2900 *pu64Value = pGicCpu->bmActivePriorityGroup1[1];
2901 break;
2902 case ARMV8_AARCH64_SYSREG_ICC_AP1R2_EL1:
2903 AssertReleaseFailed();
2904 *pu64Value = pGicCpu->bmActivePriorityGroup1[2];
2905 break;
2906 case ARMV8_AARCH64_SYSREG_ICC_AP1R3_EL1:
2907 AssertReleaseFailed();
2908 *pu64Value = pGicCpu->bmActivePriorityGroup1[3];
2909 break;
2910 case ARMV8_AARCH64_SYSREG_ICC_NMIAR1_EL1:
2911 AssertReleaseFailed();
2912 break;
2913 case ARMV8_AARCH64_SYSREG_ICC_DIR_EL1:
2914 AssertReleaseFailed();
2915 break;
2916 case ARMV8_AARCH64_SYSREG_ICC_RPR_EL1:
2917 *pu64Value = pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority];
2918 break;
2919 case ARMV8_AARCH64_SYSREG_ICC_SGI1R_EL1:
2920 AssertReleaseFailed();
2921 break;
2922 case ARMV8_AARCH64_SYSREG_ICC_ASGI1R_EL1:
2923 AssertReleaseFailed();
2924 break;
2925 case ARMV8_AARCH64_SYSREG_ICC_SGI0R_EL1:
2926 AssertReleaseFailed();
2927 break;
2928 case ARMV8_AARCH64_SYSREG_ICC_IAR1_EL1:
2929 *pu64Value = gicAckHighestPriorityPendingIntr(pGicDev, pVCpu, false /*fGroup0*/, true /*fGroup1*/);
2930 break;
2931 case ARMV8_AARCH64_SYSREG_ICC_EOIR1_EL1:
2932 AssertReleaseFailed();
2933 break;
2934 case ARMV8_AARCH64_SYSREG_ICC_HPPIR1_EL1:
2935 {
2936 AssertReleaseFailed();
2937 *pu64Value = gicGetHighestPriorityPendingIntr(pGicDev, pGicCpu, false /*fGroup0*/, true /*fGroup1*/,
2938 NULL /*pidxIntr*/, NULL /*pbPriority*/);
2939 break;
2940 }
2941 case ARMV8_AARCH64_SYSREG_ICC_BPR1_EL1:
2942 *pu64Value = ARMV8_ICC_BPR1_EL1_AARCH64_BINARYPOINT_SET(pGicCpu->bBinaryPtGroup1);
2943 break;
2944 case ARMV8_AARCH64_SYSREG_ICC_CTLR_EL1:
2945 *pu64Value = pGicCpu->uIccCtlr;
2946 break;
2947 case ARMV8_AARCH64_SYSREG_ICC_SRE_EL1:
2948 AssertReleaseFailed();
2949 break;
2950 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN0_EL1:
2951 *pu64Value = pGicCpu->fIntrGroup0Enabled ? ARMV8_ICC_IGRPEN0_EL1_AARCH64_ENABLE : 0;
2952 break;
2953 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN1_EL1:
2954 *pu64Value = pGicCpu->fIntrGroup1Enabled ? ARMV8_ICC_IGRPEN1_EL1_AARCH64_ENABLE : 0;
2955 break;
2956 default:
2957 AssertReleaseMsgFailed(("u32Reg=%#RX32\n", u32Reg));
2958 break;
2959 }
2960
2961 GIC_CRIT_SECT_LEAVE(pDevIns);
2962
2963 LogFlowFunc(("pVCpu=%p u32Reg=%#x{%s} pu64Value=%RX64\n", pVCpu, u32Reg, gicIccGetRegDescription(u32Reg), *pu64Value));
2964 return VINF_SUCCESS;
2965}
2966
2967
2968/**
2969 * @interface_method_impl{PDMGICBACKEND,pfnWriteSysReg}
2970 */
2971static DECLCALLBACK(VBOXSTRICTRC) gicWriteSysReg(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t u64Value)
2972{
2973 /*
2974 * Validate.
2975 */
2976 VMCPU_ASSERT_EMT(pVCpu);
2977 LogFlowFunc(("pVCpu=%p u32Reg=%#x{%s} u64Value=%RX64\n", pVCpu, u32Reg, gicIccGetRegDescription(u32Reg), u64Value));
2978
2979 STAM_COUNTER_INC(&pVCpu->gic.s.StatSysRegWrite);
2980
2981 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2982 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2983 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2984
2985 GIC_CRIT_SECT_ENTER(pDevIns);
2986
2987 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2988 switch (u32Reg)
2989 {
2990 case ARMV8_AARCH64_SYSREG_ICC_PMR_EL1:
2991 LogFlowFunc(("ICC_PMR_EL1: Interrupt priority now %u\n", (uint8_t)u64Value));
2992 pGicCpu->bIntrPriorityMask = (uint8_t)u64Value;
2993 rcStrict = gicReDistUpdateIrqState(pGicDev, pVCpu);
2994 break;
2995 case ARMV8_AARCH64_SYSREG_ICC_IAR0_EL1:
2996 AssertReleaseFailed();
2997 break;
2998 case ARMV8_AARCH64_SYSREG_ICC_EOIR0_EL1:
2999 AssertReleaseFailed();
3000 break;
3001 case ARMV8_AARCH64_SYSREG_ICC_HPPIR0_EL1:
3002 AssertReleaseFailed();
3003 break;
3004 case ARMV8_AARCH64_SYSREG_ICC_BPR0_EL1:
3005 pGicCpu->bBinaryPtGroup0 = (uint8_t)ARMV8_ICC_BPR0_EL1_AARCH64_BINARYPOINT_GET(u64Value);
3006 break;
3007 case ARMV8_AARCH64_SYSREG_ICC_AP0R0_EL1:
3008 case ARMV8_AARCH64_SYSREG_ICC_AP0R1_EL1:
3009 case ARMV8_AARCH64_SYSREG_ICC_AP0R2_EL1:
3010 case ARMV8_AARCH64_SYSREG_ICC_AP0R3_EL1:
3011 case ARMV8_AARCH64_SYSREG_ICC_AP1R0_EL1:
3012 case ARMV8_AARCH64_SYSREG_ICC_AP1R1_EL1:
3013 case ARMV8_AARCH64_SYSREG_ICC_AP1R2_EL1:
3014 case ARMV8_AARCH64_SYSREG_ICC_AP1R3_EL1:
3015 /* Writes ignored; a well-behaved guest writes either all zeroes or the value it last read from the register. */
3016 break;
3017 case ARMV8_AARCH64_SYSREG_ICC_NMIAR1_EL1:
3018 AssertReleaseFailed();
3019 break;
3020 case ARMV8_AARCH64_SYSREG_ICC_DIR_EL1:
3021 AssertReleaseFailed();
3022 break;
3023 case ARMV8_AARCH64_SYSREG_ICC_RPR_EL1:
3024 AssertReleaseFailed();
3025 break;
3026 case ARMV8_AARCH64_SYSREG_ICC_SGI1R_EL1:
3027 {
3028 gicReDistWriteSgiReg(pGicDev, pVCpu, u64Value);
3029 break;
3030 }
3031 case ARMV8_AARCH64_SYSREG_ICC_ASGI1R_EL1:
3032 AssertReleaseFailed();
3033 break;
3034 case ARMV8_AARCH64_SYSREG_ICC_SGI0R_EL1:
3035 AssertReleaseFailed();
3036 break;
3037 case ARMV8_AARCH64_SYSREG_ICC_IAR1_EL1:
3038 AssertReleaseFailed();
3039 break;
3040 case ARMV8_AARCH64_SYSREG_ICC_EOIR1_EL1:
3041 {
3042 /*
3043 * We only support priority drop + interrupt deactivation with writes to this register.
3044 * This avoids an extra access which would be required by software for deactivation.
3045 */
3046 Assert(!(pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_EOIMODE));
3047
3048 /*
3049 * Mark the interrupt as inactive, though it might still be pending.
3050 * It is up to the guest to ensure the interrupt ID belongs to the right group as
3051 * failure to do so results in unpredictable behavior.
3052 *
3053 * See ARM GIC spec. 12.2.10 "ICC_EOIR1_EL1, Interrupt Controller End Of Interrupt Register 1".
3054 * NOTE! The order of the 'if' checks below is crucial.
3055 */
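            /*
             * Illustrative guest-side pseudo-code (added; not part of the original source) to
             * show the sequence this EOI handling pairs with when EOImode is 0:
             *
             *     uIntId = read ICC_IAR1_EL1;     // acknowledge; handled by gicReadSysReg() above
             *     ... service the interrupt ...
             *     write uIntId to ICC_EOIR1_EL1;  // lands here: priority drop and deactivation in one write
             */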
3056 uint16_t const uIntId = (uint16_t)u64Value;
3057 if (uIntId <= GIC_INTID_RANGE_PPI_LAST)
3058 {
3059 /* SGIs and PPIs. */
3060 AssertCompile(GIC_INTID_RANGE_PPI_LAST < 8 * sizeof(pGicDev->bmIntrActive[0]));
3061 Assert(pGicDev->fAffRoutingEnabled);
3062 pGicCpu->bmIntrActive[0] &= ~RT_BIT_32(uIntId);
3063 }
3064 else if (uIntId <= GIC_INTID_RANGE_SPI_LAST)
3065 {
3066 /* SPIs. */
3067 uint16_t const idxIntr = /*gicDistGetIndexFromIntId*/(uIntId);
3068 AssertReturn(idxIntr < sizeof(pGicDev->bmIntrActive) * 8, VERR_BUFFER_OVERFLOW);
3069 ASMBitClear(&pGicDev->bmIntrActive[0], idxIntr);
3070 }
3071 else if (uIntId <= GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT)
3072 {
3073 /* Special interrupt IDs, ignored. */
3074 Log(("Ignoring write to EOI with special interrupt ID.\n"));
3075 break;
3076 }
3077 else if (uIntId <= GIC_INTID_RANGE_EXT_PPI_LAST)
3078 {
3079 /* Extended PPIs. */
3080 uint16_t const idxIntr = gicReDistGetIndexFromIntId(uIntId);
3081 AssertReturn(idxIntr < sizeof(pGicCpu->bmIntrActive) * 8, VERR_BUFFER_OVERFLOW);
3082 ASMBitClear(&pGicCpu->bmIntrActive[0], idxIntr);
3083 }
3084 else if (uIntId <= GIC_INTID_RANGE_EXT_SPI_LAST)
3085 {
3086 /* Extended SPIs. */
3087 uint16_t const idxIntr = gicDistGetIndexFromIntId(uIntId);
3088 AssertReturn(idxIntr < sizeof(pGicDev->bmIntrActive) * 8, VERR_BUFFER_OVERFLOW);
3089 ASMBitClear(&pGicDev->bmIntrActive[0], idxIntr);
3090 }
3091 else
3092 {
3093 AssertMsgFailed(("Invalid INTID %u\n", uIntId));
3094 break;
3095 }
3096
3097 /*
3098 * Drop priority by restoring previous interrupt.
3099 */
3100 if (RT_LIKELY(pGicCpu->idxRunningPriority))
3101 {
3102 LogFlowFunc(("Restoring interrupt priority from %u -> %u (idxRunningPriority: %u -> %u)\n",
3103 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority],
3104 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority - 1],
3105 pGicCpu->idxRunningPriority, pGicCpu->idxRunningPriority - 1));
3106
3107 /*
3108 * Clear the interrupt priority from the active priorities bitmap.
3109 * It is up to the guest to ensure that writes to EOI registers are done in the exact
3110 * reverse order of the reads from the IAR registers.
3111 *
3112 * See ARM GIC spec 4.1.1 "Physical CPU interface".
3113 */
3114 uint8_t const idxPreemptionLevel = pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority] >> 1;
3115 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup1) * 8 >= 128);
3116 ASMBitClear(&pGicCpu->bmActivePriorityGroup1[0], idxPreemptionLevel);
3117
3118 pGicCpu->idxRunningPriority--;
3119 Assert(pGicCpu->abRunningPriorities[0] == GIC_IDLE_PRIORITY);
3120 }
3121 else
3122 AssertReleaseMsgFailed(("Index of running-priority interrupt out-of-bounds %u\n", pGicCpu->idxRunningPriority));
3123 rcStrict = gicReDistUpdateIrqState(pGicDev, pVCpu);
3124 break;
3125 }
3126 case ARMV8_AARCH64_SYSREG_ICC_HPPIR1_EL1:
3127 AssertReleaseFailed();
3128 break;
3129 case ARMV8_AARCH64_SYSREG_ICC_BPR1_EL1:
3130 pGicCpu->bBinaryPtGroup1 = (uint8_t)ARMV8_ICC_BPR1_EL1_AARCH64_BINARYPOINT_GET(u64Value);
3131 break;
3132 case ARMV8_AARCH64_SYSREG_ICC_CTLR_EL1:
3133 pGicCpu->uIccCtlr &= ARMV8_ICC_CTLR_EL1_RW;
3134 /** @todo */
3135 break;
3136 case ARMV8_AARCH64_SYSREG_ICC_SRE_EL1:
3137 AssertReleaseFailed();
3138 break;
3139 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN0_EL1:
3140 pGicCpu->fIntrGroup0Enabled = RT_BOOL(u64Value & ARMV8_ICC_IGRPEN0_EL1_AARCH64_ENABLE);
3141 break;
3142 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN1_EL1:
3143 pGicCpu->fIntrGroup1Enabled = RT_BOOL(u64Value & ARMV8_ICC_IGRPEN1_EL1_AARCH64_ENABLE);
3144 break;
3145 default:
3146 AssertReleaseMsgFailed(("u32Reg=%#RX32\n", u32Reg));
3147 break;
3148 }
3149
3150 GIC_CRIT_SECT_LEAVE(pDevIns);
3151 return rcStrict;
3152}
3153
3154
3155/**
3156 * Initializes the GIC distributor state.
3157 *
3158 * @param pDevIns The device instance.
3159 * @remarks This is also called during VM reset, so do NOT remove values that are
3160 * cleared to zero!
3161 */
3162static void gicInit(PPDMDEVINS pDevIns)
3163{
3164 LogFlowFunc(("\n"));
3165 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
3166
3167 /* Distributor. */
3168 RT_ZERO(pGicDev->bmIntrGroup);
3169 RT_ZERO(pGicDev->bmIntrConfig);
3170 RT_ZERO(pGicDev->bmIntrEnabled);
3171 RT_ZERO(pGicDev->bmIntrPending);
3172 RT_ZERO(pGicDev->bmIntrActive);
3173 RT_ZERO(pGicDev->abIntrPriority);
3174 RT_ZERO(pGicDev->au32IntrRouting);
3175 RT_ZERO(pGicDev->bmIntrRoutingMode);
3176 pGicDev->fIntrGroup0Enabled = false;
3177 pGicDev->fIntrGroup1Enabled = false;
3178 pGicDev->fAffRoutingEnabled = true; /* GICv2 backwards compatibility is not implemented, so this is RA1/WI. */
3179
3180 /* GITS. */
3181 PGITSDEV pGitsDev = &pGicDev->Gits;
3182 gitsInit(pGitsDev);
3183
3184 /* LPIs. */
3185 RT_ZERO(pGicDev->abLpiConfig);
3186 pGicDev->uLpiConfigBaseReg.u = 0;
3187 pGicDev->uLpiPendingBaseReg.u = 0;
3188 pGicDev->fEnableLpis = false;
3189}
3190
3191
3192/**
3193 * Initializes the GIC redistributor and CPU interface state.
3194 *
3195 * @param pDevIns The device instance.
3196 * @param pVCpu The cross context virtual CPU structure.
3197 * @remarks This is also called during VM reset, so do NOT remove values that are
3198 * cleared to zero!
3199 */
3200static void gicInitCpu(PPDMDEVINS pDevIns, PVMCPUCC pVCpu)
3201{
3202 LogFlowFunc(("[%u]\n", pVCpu->idCpu));
3203 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
3204 PGICCPU pGicCpu = &pVCpu->gic.s;
3205
3206 RT_ZERO(pGicCpu->bmIntrGroup);
3207 RT_ZERO(pGicCpu->bmIntrConfig);
3208 /* SGIs are always edge-triggered; writes to GICR_ICFGR0 are ignored. */
3209 pGicCpu->bmIntrConfig[0] = 0xaaaaaaaa;
3210 RT_ZERO(pGicCpu->bmIntrEnabled);
3211 RT_ZERO(pGicCpu->bmIntrPending);
3212 RT_ZERO(pGicCpu->bmIntrActive);
3213 RT_ZERO(pGicCpu->abIntrPriority);
3214
3215 pGicCpu->uIccCtlr = ARMV8_ICC_CTLR_EL1_AARCH64_PMHE
3216 | ARMV8_ICC_CTLR_EL1_AARCH64_PRIBITS_SET(4)
3217 | ARMV8_ICC_CTLR_EL1_AARCH64_IDBITS_SET(ARMV8_ICC_CTLR_EL1_AARCH64_IDBITS_16BITS)
3218 | (pGicDev->fRangeSel ? ARMV8_ICC_CTLR_EL1_AARCH64_RSS : 0)
3219 | (pGicDev->fAff3Levels ? ARMV8_ICC_CTLR_EL1_AARCH64_A3V : 0)
3220 | (pGicDev->fExtPpi || pGicDev->fExtSpi ? ARMV8_ICC_CTLR_EL1_AARCH64_EXTRANGE : 0);
3221
3222 pGicCpu->bIntrPriorityMask = 0; /* Means no interrupt gets through to the PE. */
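    /*
     * Illustrative note (added; not part of the original source): per the GIC architecture an
     * interrupt is only signalled to the PE when its priority value is numerically lower
     * (i.e. more urgent) than the mask. For instance, once the guest writes 0xf0 to
     * ICC_PMR_EL1 (see gicWriteSysReg), an interrupt of priority 0x80 can be delivered while
     * one of priority 0xf0 stays masked; the reset value 0 set above therefore masks
     * everything until the guest raises the mask.
     */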
3223 pGicCpu->idxRunningPriority = 0;
3224 memset((void *)&pGicCpu->abRunningPriorities[0], 0xff, sizeof(pGicCpu->abRunningPriorities));
3225 RT_ZERO(pGicCpu->bmActivePriorityGroup0);
3226 RT_ZERO(pGicCpu->bmActivePriorityGroup1);
3227 pGicCpu->bBinaryPtGroup0 = 0;
3228 pGicCpu->bBinaryPtGroup1 = 0;
3229 pGicCpu->fIntrGroup0Enabled = false;
3230 pGicCpu->fIntrGroup1Enabled = false;
3231 RT_ZERO(pGicCpu->bmLpiPending);
3232}
3233
3234
3235/**
3236 * Initializes per-VM GIC to the state following a power-up or hardware
3237 * reset.
3238 *
3239 * @param pDevIns The device instance.
3240 */
3241DECLHIDDEN(void) gicReset(PPDMDEVINS pDevIns)
3242{
3243 LogFlowFunc(("\n"));
3244 gicInit(pDevIns);
3245}
3246
3247
3248/**
3249 * Initializes per-VCPU GIC to the state following a power-up or hardware
3250 * reset.
3251 *
3252 * @param pDevIns The device instance.
3253 * @param pVCpu The cross context virtual CPU structure.
3254 */
3255DECLHIDDEN(void) gicResetCpu(PPDMDEVINS pDevIns, PVMCPUCC pVCpu)
3256{
3257 LogFlowFunc(("[%u]\n", pVCpu->idCpu));
3258 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
3259 gicInitCpu(pDevIns, pVCpu);
3260}
3261
3262
3263/**
3264 * @callback_method_impl{FNIOMMMIONEWREAD}
3265 */
3266DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicDistMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
3267{
3268 NOREF(pvUser);
3269 Assert(!(off & 0x3));
3270 Assert(cb == 4); RT_NOREF_PV(cb);
3271
3272 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
3273 uint16_t offReg = off & 0xfffc;
3274 uint32_t uValue = 0;
3275
3276 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioRead);
3277
3278 VBOXSTRICTRC rc = VBOXSTRICTRC_VAL(gicDistReadRegister(pDevIns, pVCpu, offReg, &uValue));
3279 *(uint32_t *)pv = uValue;
3280
3281 LogFlowFunc(("[%u]: offReg=%#RX16 (%s) uValue=%#RX32\n", pVCpu->idCpu, offReg, gicDistGetRegDescription(offReg), uValue));
3282 return rc;
3283}
3284
3285
3286/**
3287 * @callback_method_impl{FNIOMMMIONEWWRITE}
3288 */
3289DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicDistMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
3290{
3291 NOREF(pvUser);
3292 Assert(!(off & 0x3));
3293 Assert(cb == 4); RT_NOREF_PV(cb);
3294
3295 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
3296 uint16_t offReg = off & 0xfffc;
3297 uint32_t uValue = *(uint32_t *)pv;
3298
3299 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioWrite);
3300 LogFlowFunc(("[%u]: offReg=%#RX16 (%s) uValue=%#RX32\n", pVCpu->idCpu, offReg, gicDistGetRegDescription(offReg), uValue));
3301
3302 return gicDistWriteRegister(pDevIns, pVCpu, offReg, uValue);
3303}
3304
3305
3306/**
3307 * @callback_method_impl{FNIOMMMIONEWREAD}
3308 */
3309DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicReDistMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
3310{
3311 NOREF(pvUser);
3312 Assert(!(off & 0x3));
3313 Assert(cb == 4); RT_NOREF_PV(cb);
3314
3315 /*
3316 * Determine the redistributor being targeted. Each redistributor takes
3317 * GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE bytes
3318 * and the redistributors are adjacent.
3319 */
3320 uint32_t const idReDist = off / (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
3321 off %= (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
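    /*
     * Illustrative note (added; not part of the original source): assuming the architectural
     * 64 KiB size for both frames, each redistributor spans 128 KiB. A read at off = 0x20010
     * would then target redistributor 1 (0x20010 / 0x20000) at offset 0x10 within its RD
     * frame, whereas off = 0x30000 would fall into that redistributor's SGI/PPI frame below.
     */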
3322
3323 PVMCC pVM = PDMDevHlpGetVM(pDevIns);
3324 Assert(idReDist < pVM->cCpus);
3325 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[idReDist];
3326
3327 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioRead);
3328
3329 /* Redistributor or SGI/PPI frame? */
3330 uint16_t const offReg = off & 0xfffc;
3331 uint32_t uValue = 0;
3332 VBOXSTRICTRC rcStrict;
3333 if (off < GIC_REDIST_REG_FRAME_SIZE)
3334 rcStrict = gicReDistReadRegister(pDevIns, pVCpu, idReDist, offReg, &uValue);
3335 else
3336 rcStrict = gicReDistReadSgiPpiRegister(pDevIns, pVCpu, offReg, &uValue);
3337
3338 *(uint32_t *)pv = uValue;
3339 LogFlowFunc(("[%u]: off=%RGp idReDist=%u offReg=%#RX16 (%s) uValue=%#RX32 -> %Rrc\n", pVCpu->idCpu, off, idReDist, offReg,
3340 gicReDistGetRegDescription(offReg), uValue, VBOXSTRICTRC_VAL(rcStrict)));
3341 return rcStrict;
3342}
3343
3344
3345/**
3346 * @callback_method_impl{FNIOMMMIONEWWRITE}
3347 */
3348DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicReDistMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
3349{
3350 NOREF(pvUser);
3351 Assert(!(off & 0x3));
3352 Assert(cb == 4); RT_NOREF_PV(cb);
3353
3354 uint32_t uValue = *(uint32_t *)pv;
3355
3356 /*
3357 * Determine the redistributor being targeted. Each redistributor takes
3358 * GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE bytes
3359 * and the redistributors are adjacent.
3360 */
3361 uint32_t const idReDist = off / (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
3362 off %= (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
3363
3364 PCVMCC pVM = PDMDevHlpGetVM(pDevIns);
3365 Assert(idReDist < pVM->cCpus);
3366 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[idReDist];
3367
3368 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioWrite);
3369
3370 /* Redistributor or SGI/PPI frame? */
3371 uint16_t const offReg = off & 0xfffc;
3372 VBOXSTRICTRC rcStrict;
3373 if (off < GIC_REDIST_REG_FRAME_SIZE)
3374 rcStrict = gicReDistWriteRegister(pDevIns, pVCpu, offReg, uValue);
3375 else
3376 rcStrict = gicReDistWriteSgiPpiRegister(pDevIns, pVCpu, offReg, uValue);
3377
3378 LogFlowFunc(("[%u]: off=%RGp idReDist=%u offReg=%#RX16 (%s) uValue=%#RX32 -> %Rrc\n", pVCpu->idCpu, off, idReDist, offReg,
3379 gicReDistGetRegDescription(offReg), uValue, VBOXSTRICTRC_VAL(rcStrict)));
3380 return rcStrict;
3381}
3382
3383
3384/**
3385 * @callback_method_impl{FNIOMMMIONEWREAD}
3386 */
3387DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicItsMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
3388{
3389 RT_NOREF_PV(pvUser);
3390 Assert(!(off & 0x3));
3391 Assert(cb == 8 || cb == 4);
3392
3393 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PCGICDEV);
3394 PCGITSDEV pGitsDev = &pGicDev->Gits;
3395 uint64_t uReg;
3396 if (off < GITS_REG_FRAME_SIZE)
3397 {
3398 /* Control registers space. */
3399 uint16_t const offReg = off & 0xfffc;
3400 uReg = gitsMmioReadCtrl(pGitsDev, offReg, cb);
3401 LogFlowFunc(("offReg=%#RX16 (%s) read %#RX64\n", offReg, gitsGetCtrlRegDescription(offReg), uReg));
3402 }
3403 else
3404 {
3405 /* Translation registers space. */
3406 uint16_t const offReg = (off - GITS_REG_FRAME_SIZE) & 0xfffc;
3407 uReg = gitsMmioReadTranslate(pGitsDev, offReg, cb);
3408 LogFlowFunc(("offReg=%#RX16 (%s) read %#RX64\n", offReg, gitsGetTranslationRegDescription(offReg), uReg));
3409 }
3410
3411 if (cb == 8)
3412 *(uint64_t *)pv = uReg;
3413 else
3414 *(uint32_t *)pv = uReg;
3415 return VINF_SUCCESS;
3416}
3417
3418
3419/**
3420 * @callback_method_impl{FNIOMMMIONEWWRITE}
3421 */
3422DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicItsMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
3423{
3424 RT_NOREF_PV(pvUser);
3425 Assert(!(off & 0x3));
3426 Assert(cb == 8 || cb == 4);
3427
3428 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
3429 PGITSDEV pGitsDev = &pGicDev->Gits;
3430
3431 uint64_t const uValue = cb == 8 ? *(uint64_t *)pv : *(uint32_t *)pv;
3432 if (off < GITS_REG_FRAME_SIZE)
3433 {
3434 /* Control registers space. */
3435 uint16_t const offReg = off & 0xfffc;
3436 gitsMmioWriteCtrl(pDevIns, pGitsDev, offReg, uValue, cb);
3437 LogFlowFunc(("offReg=%#RX16 (%s) written %#RX64\n", offReg, gitsGetCtrlRegDescription(offReg), uValue));
3438 }
3439 else
3440 {
3441 /* Translation registers space. */
3442 uint16_t const offReg = (off - GITS_REG_FRAME_SIZE) & 0xfffc;
3443 gitsMmioWriteTranslate(pGitsDev, offReg, uValue, cb);
3444 LogFlowFunc(("offReg=%#RX16 (%s) written %#RX64\n", offReg, gitsGetTranslationRegDescription(offReg), uValue));
3445 }
3446 return VINF_SUCCESS;
3447}
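/*
 * Illustrative note (added; not part of the original source): the ITS exposes two register
 * frames. Control registers such as GITS_CTLR and GITS_CBASER live in the first frame and are
 * dispatched to gitsMmioReadCtrl()/gitsMmioWriteCtrl() above, while GITS_TRANSLATER (the
 * doorbell a device's MSI write lands in, architecturally at offset 0x0040 of the translation
 * frame) ends up in gitsMmioWriteTranslate(). Assuming GITS_REG_FRAME_SIZE is the
 * architectural 64 KiB, such an MSI write arrives here with off = GITS_REG_FRAME_SIZE + 0x0040.
 */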
3448
3449
3450/**
3451 * GIC device registration structure.
3452 */
3453const PDMDEVREG g_DeviceGIC =
3454{
3455 /* .u32Version = */ PDM_DEVREG_VERSION,
3456 /* .uReserved0 = */ 0,
3457 /* .szName = */ "gic",
3458 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
3459 /* .fClass = */ PDM_DEVREG_CLASS_PIC,
3460 /* .cMaxInstances = */ 1,
3461 /* .uSharedVersion = */ 42,
3462 /* .cbInstanceShared = */ sizeof(GICDEV),
3463 /* .cbInstanceCC = */ 0,
3464 /* .cbInstanceRC = */ 0,
3465 /* .cMaxPciDevices = */ 0,
3466 /* .cMaxMsixVectors = */ 0,
3467 /* .pszDescription = */ "Generic Interrupt Controller",
3468#if defined(IN_RING3)
3469 /* .szRCMod = */ "VMMRC.rc",
3470 /* .szR0Mod = */ "VMMR0.r0",
3471 /* .pfnConstruct = */ gicR3Construct,
3472 /* .pfnDestruct = */ gicR3Destruct,
3473 /* .pfnRelocate = */ NULL,
3474 /* .pfnMemSetup = */ NULL,
3475 /* .pfnPowerOn = */ NULL,
3476 /* .pfnReset = */ gicR3Reset,
3477 /* .pfnSuspend = */ NULL,
3478 /* .pfnResume = */ NULL,
3479 /* .pfnAttach = */ NULL,
3480 /* .pfnDetach = */ NULL,
3481 /* .pfnQueryInterface = */ NULL,
3482 /* .pfnInitComplete = */ NULL,
3483 /* .pfnPowerOff = */ NULL,
3484 /* .pfnSoftReset = */ NULL,
3485 /* .pfnReserved0 = */ NULL,
3486 /* .pfnReserved1 = */ NULL,
3487 /* .pfnReserved2 = */ NULL,
3488 /* .pfnReserved3 = */ NULL,
3489 /* .pfnReserved4 = */ NULL,
3490 /* .pfnReserved5 = */ NULL,
3491 /* .pfnReserved6 = */ NULL,
3492 /* .pfnReserved7 = */ NULL,
3493#elif defined(IN_RING0)
3494 /* .pfnEarlyConstruct = */ NULL,
3495 /* .pfnConstruct = */ NULL,
3496 /* .pfnDestruct = */ NULL,
3497 /* .pfnFinalDestruct = */ NULL,
3498 /* .pfnRequest = */ NULL,
3499 /* .pfnReserved0 = */ NULL,
3500 /* .pfnReserved1 = */ NULL,
3501 /* .pfnReserved2 = */ NULL,
3502 /* .pfnReserved3 = */ NULL,
3503 /* .pfnReserved4 = */ NULL,
3504 /* .pfnReserved5 = */ NULL,
3505 /* .pfnReserved6 = */ NULL,
3506 /* .pfnReserved7 = */ NULL,
3507#elif defined(IN_RC)
3508 /* .pfnConstruct = */ NULL,
3509 /* .pfnReserved0 = */ NULL,
3510 /* .pfnReserved1 = */ NULL,
3511 /* .pfnReserved2 = */ NULL,
3512 /* .pfnReserved3 = */ NULL,
3513 /* .pfnReserved4 = */ NULL,
3514 /* .pfnReserved5 = */ NULL,
3515 /* .pfnReserved6 = */ NULL,
3516 /* .pfnReserved7 = */ NULL,
3517#else
3518# error "Not in IN_RING3, IN_RING0 or IN_RC!"
3519#endif
3520 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
3521};
3522
3523
3524/**
3525 * The VirtualBox GIC backend.
3526 */
3527const PDMGICBACKEND g_GicBackend =
3528{
3529 /* .pfnReadSysReg = */ gicReadSysReg,
3530 /* .pfnWriteSysReg = */ gicWriteSysReg,
3531 /* .pfnSetSpi = */ gicSetSpi,
3532 /* .pfnSetPpi = */ gicSetPpi,
3533 /* .pfnSendMsi = */ gitsSendMsi,
3534};
3535