
source: vbox/trunk/src/VBox/VMM/VMMAll/GICAll.cpp@106432

Last change on this file since 106432 was 106370, checked in by vboxsync, 6 weeks ago

VMM/GIC: Some updates to the emulation to make Windows happy enough, bugref:10404 bugref:10732

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 61.4 KB
1/* $Id: GICAll.cpp 106370 2024-10-16 13:25:07Z vboxsync $ */
2/** @file
3 * GIC - Generic Interrupt Controller Architecture (GICv3) - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_APIC
33#include "GICInternal.h"
34#include <VBox/vmm/gic.h>
35#include <VBox/vmm/pdmdev.h>
36#include <VBox/vmm/pdmapi.h>
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/vmm.h>
39#include <VBox/vmm/vmcpuset.h>
40#ifdef IN_RING0
41# include <VBox/vmm/gvmm.h>
42#endif
43
44
45/*********************************************************************************************************************************
46* Internal Functions *
47*********************************************************************************************************************************/
48
49
50/*********************************************************************************************************************************
51* Global Variables *
52*********************************************************************************************************************************/
53
54#ifdef LOG_ENABLED
55/**
56 * Returns a human readable string of the given ICC system register.
57 *
58 * @returns Pointer to the string matching the given register.
59 * @param u32Reg The ICC system register to return the string for.
60 */
61static const char *gicIccRegisterStringify(uint32_t u32Reg)
62{
63 switch (u32Reg)
64 {
65#define GIC_ICC_REG_CASE(a_Reg) case ARMV8_AARCH64_SYSREG_ ## a_Reg: return #a_Reg
66 GIC_ICC_REG_CASE(ICC_PMR_EL1);
67 GIC_ICC_REG_CASE(ICC_IAR0_EL1);
68 GIC_ICC_REG_CASE(ICC_EOIR0_EL1);
69 GIC_ICC_REG_CASE(ICC_HPPIR0_EL1);
70 GIC_ICC_REG_CASE(ICC_BPR0_EL1);
71 GIC_ICC_REG_CASE(ICC_AP0R0_EL1);
72 GIC_ICC_REG_CASE(ICC_AP0R1_EL1);
73 GIC_ICC_REG_CASE(ICC_AP0R2_EL1);
74 GIC_ICC_REG_CASE(ICC_AP0R3_EL1);
75 GIC_ICC_REG_CASE(ICC_AP1R0_EL1);
76 GIC_ICC_REG_CASE(ICC_AP1R1_EL1);
77 GIC_ICC_REG_CASE(ICC_AP1R2_EL1);
78 GIC_ICC_REG_CASE(ICC_AP1R3_EL1);
79 GIC_ICC_REG_CASE(ICC_DIR_EL1);
80 GIC_ICC_REG_CASE(ICC_RPR_EL1);
81 GIC_ICC_REG_CASE(ICC_SGI1R_EL1);
82 GIC_ICC_REG_CASE(ICC_ASGI1R_EL1);
83 GIC_ICC_REG_CASE(ICC_SGI0R_EL1);
84 GIC_ICC_REG_CASE(ICC_IAR1_EL1);
85 GIC_ICC_REG_CASE(ICC_EOIR1_EL1);
86 GIC_ICC_REG_CASE(ICC_HPPIR1_EL1);
87 GIC_ICC_REG_CASE(ICC_BPR1_EL1);
88 GIC_ICC_REG_CASE(ICC_CTLR_EL1);
89 GIC_ICC_REG_CASE(ICC_SRE_EL1);
90 GIC_ICC_REG_CASE(ICC_IGRPEN0_EL1);
91 GIC_ICC_REG_CASE(ICC_IGRPEN1_EL1);
92#undef GIC_ICC_REG_CASE
93 default:
94 break;
95 }
96
97 return "<UNKNOWN>";
98}
99#endif
100
101
102/**
103 * Sets the interrupt pending force-flag and pokes the EMT if required.
104 *
105 * @param pVCpu The cross context virtual CPU structure.
106 * @param fIrq Flag whether to assert the IRQ line or leave it alone.
107 * @param fFiq Flag whether to assert the FIQ line or leave it alone.
108 */
109static void gicSetInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
110{
111 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n",
112 pVCpu, pVCpu->idCpu, fIrq, fFiq));
113
114 Assert(fIrq || fFiq);
115
116#ifdef IN_RING3
117 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
118 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
119#endif
120
121 if (fIrq)
122 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ);
123 if (fFiq)
124 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ);
125
126 /*
127 * We need to wake up the target CPU if we're not on EMT.
128 */
129 /** @todo We could just use RTThreadNativeSelf() here, couldn't we? */
130#if defined(IN_RING0)
131 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
132 VMCPUID idCpu = pVCpu->idCpu;
133 if (VMMGetCpuId(pVM) != idCpu)
134 {
135 switch (VMCPU_GET_STATE(pVCpu))
136 {
137 case VMCPUSTATE_STARTED_EXEC:
138 Log7Func(("idCpu=%u VMCPUSTATE_STARTED_EXEC\n", idCpu));
139 GVMMR0SchedPokeNoGVMNoLock(pVM, idCpu);
140 break;
141
142 case VMCPUSTATE_STARTED_HALTED:
143 Log7Func(("idCpu=%u VMCPUSTATE_STARTED_HALTED\n", idCpu));
144 GVMMR0SchedWakeUpNoGVMNoLock(pVM, idCpu);
145 break;
146
147 default:
148 Log7Func(("idCpu=%u enmState=%d\n", idCpu, pVCpu->enmState));
149 break; /* nothing to do in other states. */
150 }
151 }
152#elif defined(IN_RING3)
153 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
154 VMCPUID idCpu = pVCpu->idCpu;
155 if (VMMGetCpuId(pVM) != idCpu)
156 {
157 Log7Func(("idCpu=%u enmState=%d\n", idCpu, pVCpu->enmState));
158 VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
159 }
160#endif
161}
162
163
164/**
165 * Clears the interrupt pending force-flag.
166 *
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param fIrq Flag whether to clear the IRQ flag.
169 * @param fFiq Flag whether to clear the FIQ flag.
170 */
171DECLINLINE(void) gicClearInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
172{
173 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n",
174 pVCpu, pVCpu->idCpu, fIrq, fFiq));
175
176 Assert(fIrq || fFiq);
177
178#ifdef IN_RING3
179 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
180 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
181#endif
182
183 if (fIrq)
184 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_IRQ);
185 if (fFiq)
186 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_FIQ);
187}
188
189
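/**
 * Updates the IRQ and FIQ pending force-flags from the given line states, setting the
 * asserted lines and clearing the de-asserted ones.
 *
 * @param pVCpu The cross context virtual CPU structure.
 * @param fIrq Flag whether the IRQ line is asserted.
 * @param fFiq Flag whether the FIQ line is asserted.
 */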
190DECLINLINE(void) gicUpdateInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
191{
192 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n",
193 pVCpu, pVCpu->idCpu, fIrq, fFiq));
194
195 if (fIrq || fFiq)
196 gicSetInterruptFF(pVCpu, fIrq, fFiq);
197
198 if (!fIrq || !fFiq)
199 gicClearInterruptFF(pVCpu, !fIrq, !fFiq);
200}
201
202
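/**
 * Determines whether the re-distributor has an enabled pending SGI/PPI which should
 * assert the IRQ or FIQ line on the associated vCPU.
 *
 * @param pThis The GIC re-distributor state for the associated vCPU.
 * @param pfIrq Where to store whether the IRQ line needs to be asserted.
 * @param pfFiq Where to store whether the FIQ line needs to be asserted.
 */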
203DECLINLINE(void) gicReDistHasIrqPending(PGICCPU pThis, bool *pfIrq, bool *pfFiq)
204{
205 /* Read the interrupt state. */
206 uint32_t u32RegIGrp0 = ASMAtomicReadU32(&pThis->u32RegIGrp0);
207 uint32_t bmIntEnabled = ASMAtomicReadU32(&pThis->bmIntEnabled);
208 uint32_t bmIntPending = ASMAtomicReadU32(&pThis->bmIntPending);
209 uint32_t bmIntActive = ASMAtomicReadU32(&pThis->bmIntActive);
210 bool fIrqGrp0Enabled = ASMAtomicReadBool(&pThis->fIrqGrp0Enabled);
211 bool fIrqGrp1Enabled = ASMAtomicReadBool(&pThis->fIrqGrp1Enabled);
212
 213 /* Only allow interrupts with a higher priority than both the configured priority mask and the currently running priority. */
214 uint8_t bPriority = RT_MIN(pThis->bInterruptPriority, pThis->abRunningPriorities[pThis->idxRunningPriority]);
215
216 /* Is anything enabled at all? */
217 uint32_t bmIntForward = (bmIntPending & bmIntEnabled) & ~bmIntActive; /* Exclude the currently active interrupt. */
218 if (bmIntForward)
219 {
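 /* Walk the SGIs/PPIs from lowest to highest ID, dropping every pending interrupt which does
    not beat the current priority (lower value means higher priority) and stopping at the
    first one which does. */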
220 for (uint32_t i = 0; i < RT_ELEMENTS(pThis->abIntPriority); i++)
221 {
222 Log4(("SGI/PPI %u, configured priority %u, running priority %u\n", i, pThis->abIntPriority[i], bPriority));
223 if ( (bmIntForward & RT_BIT_32(i))
224 && pThis->abIntPriority[i] < bPriority)
225 break;
226 else
227 bmIntForward &= ~RT_BIT_32(i);
228
229 if (!bmIntForward)
230 break;
231 }
232 }
233
234 if (bmIntForward)
235 {
236 /* Determine whether we have to assert the IRQ or FIQ line. */
237 *pfIrq = RT_BOOL(bmIntForward & u32RegIGrp0) && fIrqGrp1Enabled;
238 *pfFiq = RT_BOOL(bmIntForward & ~u32RegIGrp0) && fIrqGrp0Enabled;
239 }
240 else
241 {
242 *pfIrq = false;
243 *pfFiq = false;
244 }
245
246 LogFlowFunc(("pThis=%p bPriority=%u bmIntEnabled=%#x bmIntPending=%#x bmIntActive=%#x fIrq=%RTbool fFiq=%RTbool\n",
247 pThis, bPriority, bmIntEnabled, bmIntPending, bmIntActive, *pfIrq, *pfFiq));
248}
249
250
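/**
 * Determines whether the distributor has an enabled pending SPI routed to the given vCPU
 * which should assert the IRQ or FIQ line.
 *
 * @param pThis The GIC distributor state.
 * @param pGicVCpu The GIC re-distributor state of the target vCPU (supplies the priority mask).
 * @param idCpu The ID of the target vCPU.
 * @param pfIrq Where to store whether the IRQ line needs to be asserted.
 * @param pfFiq Where to store whether the FIQ line needs to be asserted.
 */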
251DECLINLINE(void) gicDistHasIrqPendingForVCpu(PGICDEV pThis, PGICCPU pGicVCpu, VMCPUID idCpu, bool *pfIrq, bool *pfFiq)
252{
253 /* Read the interrupt state. */
254 uint32_t u32RegIGrp0 = ASMAtomicReadU32(&pThis->u32RegIGrp0);
255 uint32_t bmIntEnabled = ASMAtomicReadU32(&pThis->bmIntEnabled);
256 uint32_t bmIntPending = ASMAtomicReadU32(&pThis->bmIntPending);
257 uint32_t bmIntActive = ASMAtomicReadU32(&pThis->bmIntActive);
258 bool fIrqGrp0Enabled = ASMAtomicReadBool(&pThis->fIrqGrp0Enabled);
259 bool fIrqGrp1Enabled = ASMAtomicReadBool(&pThis->fIrqGrp1Enabled);
260
 261 /* Only allow interrupts with a higher priority than both the configured priority mask and the currently running priority. */
262 uint8_t bPriority = RT_MIN(pGicVCpu->bInterruptPriority, pGicVCpu->abRunningPriorities[pGicVCpu->idxRunningPriority]);
263
264 /* Is anything enabled at all? */
265 uint32_t bmIntForward = (bmIntPending & bmIntEnabled) & ~bmIntActive; /* Exclude the currently active interrupt. */
266 if (bmIntForward)
267 {
268 for (uint32_t i = 0; i < RT_ELEMENTS(pThis->abIntPriority); i++)
269 {
270 Log4(("SPI %u, configured priority %u (routing %#x), running priority %u\n", i + GIC_INTID_RANGE_SPI_START, pThis->abIntPriority[i],
271 pThis->au32IntRouting[i], bPriority));
272 if ( (bmIntForward & RT_BIT_32(i))
273 && pThis->abIntPriority[i] < bPriority
274 && pThis->au32IntRouting[i] == idCpu)
275 break;
276 else
277 bmIntForward &= ~RT_BIT_32(i);
278
279 if (!bmIntForward)
280 break;
281 }
282 }
283
284 if (bmIntForward)
285 {
286 /* Determine whether we have to assert the IRQ or FIQ line. */
287 *pfIrq = RT_BOOL(bmIntForward & u32RegIGrp0) && fIrqGrp1Enabled;
288 *pfFiq = RT_BOOL(bmIntForward & ~u32RegIGrp0) && fIrqGrp0Enabled;
289 }
290 else
291 {
292 *pfIrq = false;
293 *pfFiq = false;
294 }
295
296 LogFlowFunc(("pThis=%p bPriority=%u bmIntEnabled=%#x bmIntPending=%#x bmIntActive=%#x fIrq=%RTbool fFiq=%RTbool\n",
297 pThis, bPriority, bmIntEnabled, bmIntPending, bmIntActive, *pfIrq, *pfFiq));
298}
299
300
301/**
302 * Updates the internal IRQ state and sets or clears the appropriate force action flags.
303 *
304 * @returns Strict VBox status code.
305 * @param pThis The GIC re-distributor state for the associated vCPU.
306 * @param pVCpu The cross context virtual CPU structure.
307 */
308static VBOXSTRICTRC gicReDistUpdateIrqState(PGICCPU pThis, PVMCPUCC pVCpu)
309{
310 bool fIrq, fFiq;
311 gicReDistHasIrqPending(pThis, &fIrq, &fFiq);
312
313 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
314 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
315 bool fIrqDist, fFiqDist;
316 gicDistHasIrqPendingForVCpu(pGicDev, pThis, pVCpu->idCpu, &fIrqDist, &fFiqDist);
317 fIrq |= fIrqDist;
318 fFiq |= fFiqDist;
319
320 gicUpdateInterruptFF(pVCpu, fIrq, fFiq);
321 return VINF_SUCCESS;
322}
323
324
325/**
326 * Updates the internal IRQ state of the distributor and sets or clears the appropriate force action flags.
327 *
328 * @returns Strict VBox status code.
329 * @param pVM The cross context VM state.
330 * @param pThis The GIC distributor state.
331 */
332static VBOXSTRICTRC gicDistUpdateIrqState(PVMCC pVM, PGICDEV pThis)
333{
334 for (uint32_t i = 0; i < pVM->cCpus; i++)
335 {
336 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[i];
337 PGICCPU pGicVCpu = VMCPU_TO_GICCPU(pVCpu);
338
339 bool fIrq, fFiq;
340 gicReDistHasIrqPending(pGicVCpu, &fIrq, &fFiq);
341
342 bool fIrqDist, fFiqDist;
343 gicDistHasIrqPendingForVCpu(pThis, pGicVCpu, i, &fIrqDist, &fFiqDist);
344 fIrq |= fIrqDist;
345 fFiq |= fFiqDist;
346
347 gicUpdateInterruptFF(pVCpu, fIrq, fFiq);
348 }
349 return VINF_SUCCESS;
350}
351
352
353/**
354 * Sets or clears the given SGI/PPI interrupt on the re-distributor of the given vCPU.
355 *
356 * @returns VBox status code.
357 * @param pVCpu The cross context virtual CPU structure.
358 * @param uIntId The SGI/PPI interrupt identifier.
359 * @param fAsserted Flag whether the SGI/PPI interrupt is asserted or not.
360 */
361static int gicReDistInterruptSet(PVMCPUCC pVCpu, uint32_t uIntId, bool fAsserted)
362{
363 PGICCPU pThis = VMCPU_TO_GICCPU(pVCpu);
364
365 /* Update the interrupts pending state. */
366 if (fAsserted)
367 ASMAtomicOrU32(&pThis->bmIntPending, RT_BIT_32(uIntId));
368 else
369 ASMAtomicAndU32(&pThis->bmIntPending, ~RT_BIT_32(uIntId));
370
371 return VBOXSTRICTRC_VAL(gicReDistUpdateIrqState(pThis, pVCpu));
372}
373
374
375/**
376 * Reads a GIC distributor register.
377 *
378 * @returns Strict VBox status code.
379 * @param pDevIns The device instance.
380 * @param pVCpu The cross context virtual CPU structure.
381 * @param offReg The offset of the register being read.
382 * @param puValue Where to store the register value.
383 */
384DECLINLINE(VBOXSTRICTRC) gicDistRegisterRead(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t *puValue)
385{
386 VMCPU_ASSERT_EMT(pVCpu);
387 PGICDEV pThis = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
388
389 if (offReg >= GIC_DIST_REG_IROUTERn_OFF_START && offReg <= GIC_DIST_REG_IROUTERn_OFF_LAST)
390 {
391 *puValue = pThis->au32IntRouting[(offReg - GIC_DIST_REG_IROUTERn_OFF_START) / 4];
392 return VINF_SUCCESS;
393 }
394
395 switch (offReg)
396 {
397 case GIC_DIST_REG_CTLR_OFF:
398 *puValue = (ASMAtomicReadBool(&pThis->fIrqGrp0Enabled) ? GIC_DIST_REG_CTRL_ENABLE_GRP0 : 0)
399 | (ASMAtomicReadBool(&pThis->fIrqGrp1Enabled) ? GIC_DIST_REG_CTRL_ENABLE_GRP1_NS : 0)
400 | GIC_DIST_REG_CTRL_DS
401 | GIC_DIST_REG_CTRL_ARE_S;
402 break;
403 case GIC_DIST_REG_TYPER_OFF:
404 *puValue = GIC_DIST_REG_TYPER_NUM_ITLINES_SET(1) /** @todo 32 SPIs for now. */
405 | GIC_DIST_REG_TYPER_NUM_PES_SET(0) /* 1 PE */
406 /*| GIC_DIST_REG_TYPER_ESPI*/ /** @todo */
407 /*| GIC_DIST_REG_TYPER_NMI*/ /** @todo Non-maskable interrupts */
408 /*| GIC_DIST_REG_TYPER_SECURITY_EXTN */ /** @todo */
409 /*| GIC_DIST_REG_TYPER_MBIS */ /** @todo Message based interrupts */
410 /*| GIC_DIST_REG_TYPER_LPIS */ /** @todo Support LPIs */
411 | GIC_DIST_REG_TYPER_IDBITS_SET(16);
412 break;
413 case GIC_DIST_REG_STATUSR_OFF:
414 AssertReleaseFailed();
415 break;
416 case GIC_DIST_REG_IGROUPRn_OFF_START: /* Only 32 lines for now. */
417 AssertReleaseFailed();
418 break;
419 case GIC_DIST_REG_ISENABLERn_OFF_START + 4: /* Only 32 lines for now. */
420 case GIC_DIST_REG_ICENABLERn_OFF_START + 4: /* Only 32 lines for now. */
421 *puValue = ASMAtomicReadU32(&pThis->bmIntEnabled);
422 break;
423 case GIC_DIST_REG_ISPENDRn_OFF_START: /* Only 32 lines for now. */
424 AssertReleaseFailed();
425 break;
426 case GIC_DIST_REG_ICPENDRn_OFF_START: /* Only 32 lines for now. */
427 AssertReleaseFailed();
428 break;
429 case GIC_DIST_REG_ISACTIVERn_OFF_START: /* Only 32 lines for now. */
430 AssertReleaseFailed();
431 break;
432 case GIC_DIST_REG_ICACTIVERn_OFF_START: /* Only 32 lines for now. */
433 AssertReleaseFailed();
434 break;
435 case GIC_DIST_REG_IPRIORITYn_OFF_START:
436 case GIC_DIST_REG_IPRIORITYn_OFF_START + 4: /* These are banked for the PEs and access the redistributor. */
437 {
438 PGICCPU pGicVCpu = VMCPU_TO_GICCPU(pVCpu);
439
 440 /* Figure out the register which is read. */
441 uint8_t idxPrio = offReg - GIC_DIST_REG_IPRIORITYn_OFF_START;
442 Assert(idxPrio <= RT_ELEMENTS(pThis->abIntPriority) - sizeof(uint32_t));
443
444 uint32_t u32Value = 0;
445 for (uint32_t i = idxPrio; i < idxPrio + sizeof(uint32_t); i++)
446 u32Value |= pGicVCpu->abIntPriority[i] << ((i - idxPrio) * 8);
447
448 *puValue = u32Value;
449 break;
450 }
451 case GIC_DIST_REG_IPRIORITYn_OFF_START + 32: /* Only 32 lines for now. */
452 {
 453 /* Figure out the register which is read. */
454 uint8_t idxPrio = offReg - GIC_DIST_REG_IPRIORITYn_OFF_START - 32;
455 Assert(idxPrio <= RT_ELEMENTS(pThis->abIntPriority) - sizeof(uint32_t));
456
457 uint32_t u32Value = 0;
458 for (uint32_t i = idxPrio; i < idxPrio + sizeof(uint32_t); i++)
459 u32Value |= pThis->abIntPriority[i] << ((i - idxPrio) * 8);
460
461 *puValue = u32Value;
462 break;
463 }
464 case GIC_DIST_REG_ITARGETSRn_OFF_START: /* Only 32 lines for now. */
465 AssertReleaseFailed();
466 break;
467 case GIC_DIST_REG_ICFGRn_OFF_START: /* Only 32 lines for now. */
468 AssertReleaseFailed();
469 break;
470 case GIC_DIST_REG_IGRPMODRn_OFF_START: /* Only 32 lines for now. */
471 AssertReleaseFailed();
472 break;
473 case GIC_DIST_REG_NSACRn_OFF_START: /* Only 32 lines for now. */
474 AssertReleaseFailed();
475 break;
476 case GIC_DIST_REG_SGIR_OFF:
477 AssertReleaseFailed();
478 break;
479 case GIC_DIST_REG_CPENDSGIRn_OFF_START:
480 AssertReleaseFailed();
481 break;
482 case GIC_DIST_REG_SPENDSGIRn_OFF_START:
483 AssertReleaseFailed();
484 break;
485 case GIC_DIST_REG_INMIn_OFF_START:
486 AssertReleaseFailed();
487 break;
488 case GIC_DIST_REG_PIDR2_OFF:
489 *puValue = GIC_REDIST_REG_PIDR2_ARCH_REV_SET(GIC_REDIST_REG_PIDR2_ARCH_REV_GICV3);
490 break;
491 case GIC_DIST_REG_IIDR_OFF:
492 case GIC_DIST_REG_TYPER2_OFF:
493 *puValue = 0;
494 break;
495 case GIC_DIST_REG_IROUTERn_OFF_START:
496 AssertFailed();
497 break;
498 default:
499 *puValue = 0;
500 }
501 return VINF_SUCCESS;
502}
503
504
505/**
506 * Writes a GIC distributor register.
507 *
508 * @returns Strict VBox status code.
509 * @param pDevIns The device instance.
510 * @param pVCpu The cross context virtual CPU structure.
511 * @param offReg The offset of the register being written.
512 * @param uValue The register value.
513 */
514DECLINLINE(VBOXSTRICTRC) gicDistRegisterWrite(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
515{
516 VMCPU_ASSERT_EMT(pVCpu); RT_NOREF(pVCpu);
517 PGICDEV pThis = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
518 PVMCC pVM = PDMDevHlpGetVM(pDevIns);
519
520 if (offReg >= GIC_DIST_REG_IROUTERn_OFF_START && offReg <= GIC_DIST_REG_IROUTERn_OFF_LAST)
521 {
522 uint32_t idxReg = (offReg - GIC_DIST_REG_IROUTERn_OFF_START) / 4;
523 LogFlowFunc(("GicDist: idxIRouter=%u uValue=%#x\n", idxReg, uValue));
524 if (idxReg < RT_ELEMENTS(pThis->au32IntRouting))
525 pThis->au32IntRouting[idxReg] = uValue;
526 return VINF_SUCCESS;
527 }
528
529 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
530 switch (offReg)
531 {
532 case GIC_DIST_REG_CTLR_OFF:
533 ASMAtomicWriteBool(&pThis->fIrqGrp0Enabled, RT_BOOL(uValue & GIC_DIST_REG_CTRL_ENABLE_GRP0));
534 ASMAtomicWriteBool(&pThis->fIrqGrp1Enabled, RT_BOOL(uValue & GIC_DIST_REG_CTRL_ENABLE_GRP1_NS));
535 Assert(!(uValue & GIC_DIST_REG_CTRL_ARE_NS));
536 rcStrict = gicDistUpdateIrqState(pVM, pThis);
537 break;
538 case GIC_DIST_REG_STATUSR_OFF:
539 AssertReleaseFailed();
540 break;
541 case GIC_DIST_REG_SETSPI_NSR_OFF:
542 AssertReleaseFailed();
543 break;
544 case GIC_DIST_REG_CLRSPI_NSR_OFF:
545 AssertReleaseFailed();
546 break;
547 case GIC_DIST_REG_SETSPI_SR_OFF:
548 AssertReleaseFailed();
549 break;
550 case GIC_DIST_REG_CLRSPI_SR_OFF:
551 AssertReleaseFailed();
552 break;
553 case GIC_DIST_REG_IGROUPRn_OFF_START: /* Only 32 lines for now. */
554 AssertReleaseFailed();
555 break;
556 case GIC_DIST_REG_IGROUPRn_OFF_START + 4: /* Only 32 lines for now. */
557 ASMAtomicOrU32(&pThis->u32RegIGrp0, uValue);
558 rcStrict = gicDistUpdateIrqState(pVM, pThis);
559 break;
560 case GIC_DIST_REG_ISENABLERn_OFF_START + 4: /* Only 32 lines for now. */
561 ASMAtomicOrU32(&pThis->bmIntEnabled, uValue);
562 rcStrict = gicDistUpdateIrqState(pVM, pThis);
563 break;
564 case GIC_DIST_REG_ICENABLERn_OFF_START:
565 AssertReleaseFailed();
566 break;
567 case GIC_DIST_REG_ICENABLERn_OFF_START + 4: /* Only 32 lines for now. */
568 ASMAtomicAndU32(&pThis->bmIntEnabled, ~uValue);
569 rcStrict = gicDistUpdateIrqState(pVM, pThis);
570 break;
571 case GIC_DIST_REG_ISPENDRn_OFF_START: /* Only 32 lines for now. */
572 AssertReleaseFailed();
573 break;
574 case GIC_DIST_REG_ICPENDRn_OFF_START: /* Only 32 lines for now. */
575 AssertReleaseFailed();
576 break;
577 case GIC_DIST_REG_ISACTIVERn_OFF_START: /* Only 32 lines for now. */
578 AssertReleaseFailed();
579 break;
580 case GIC_DIST_REG_ICACTIVERn_OFF_START + 4: /* Only 32 lines for now. */
581 ASMAtomicAndU32(&pThis->bmIntActive, ~uValue);
582 rcStrict = gicDistUpdateIrqState(pVM, pThis);
583 break;
584 case GIC_DIST_REG_IPRIORITYn_OFF_START: /* These are banked for the PEs and access the redistributor. */
585 case GIC_DIST_REG_IPRIORITYn_OFF_START + 4:
586 case GIC_DIST_REG_IPRIORITYn_OFF_START + 8:
587 case GIC_DIST_REG_IPRIORITYn_OFF_START + 12:
588 case GIC_DIST_REG_IPRIORITYn_OFF_START + 16:
589 case GIC_DIST_REG_IPRIORITYn_OFF_START + 20:
590 case GIC_DIST_REG_IPRIORITYn_OFF_START + 24:
591 case GIC_DIST_REG_IPRIORITYn_OFF_START + 28:
592 {
593 PGICCPU pGicVCpu = VMCPU_TO_GICCPU(pVCpu);
594
595 /* Figure out the register which is written. */
596 uint8_t idxPrio = offReg - GIC_DIST_REG_IPRIORITYn_OFF_START;
597 Assert(idxPrio <= RT_ELEMENTS(pGicVCpu->abIntPriority) - sizeof(uint32_t));
598 for (uint32_t i = idxPrio; i < idxPrio + sizeof(uint32_t); i++)
599 {
600 pGicVCpu->abIntPriority[i] = (uint8_t)(uValue & 0xff);
601 uValue >>= 8;
602 }
603 break;
604 }
605 case GIC_DIST_REG_IPRIORITYn_OFF_START + 32: /* Only 32 lines for now. */
606 case GIC_DIST_REG_IPRIORITYn_OFF_START + 36:
607 case GIC_DIST_REG_IPRIORITYn_OFF_START + 40:
608 case GIC_DIST_REG_IPRIORITYn_OFF_START + 44:
609 case GIC_DIST_REG_IPRIORITYn_OFF_START + 48:
610 case GIC_DIST_REG_IPRIORITYn_OFF_START + 52:
611 case GIC_DIST_REG_IPRIORITYn_OFF_START + 56:
612 case GIC_DIST_REG_IPRIORITYn_OFF_START + 60:
613 {
614 /* Figure out the register which is written. */
615 uint8_t idxPrio = offReg - GIC_DIST_REG_IPRIORITYn_OFF_START - 32;
616 Assert(idxPrio <= RT_ELEMENTS(pThis->abIntPriority) - sizeof(uint32_t));
617 for (uint32_t i = idxPrio; i < idxPrio + sizeof(uint32_t); i++)
618 {
619#if 1
620 /** @todo r=aeichner This gross hack prevents Windows from hanging during boot because
621 * it tries to set the interrupt priority for PCI interrupt lines to 0 which will cause an interrupt
622 * storm later on because the lowest interrupt priority Windows seems to use is 32 for the per vCPU
623 * timer.
624 */
625 if ((uValue & 0xff) == 0)
626 continue;
627#endif
628 pThis->abIntPriority[i] = (uint8_t)(uValue & 0xff);
629 uValue >>= 8;
630 }
631 break;
632 }
633 case GIC_DIST_REG_ITARGETSRn_OFF_START: /* Only 32 lines for now. */
634 AssertReleaseFailed();
635 break;
636 case GIC_DIST_REG_ICFGRn_OFF_START + 8: /* Only 32 lines for now. */
637 ASMAtomicWriteU32(&pThis->u32RegICfg0, uValue);
638 break;
 639 case GIC_DIST_REG_ICFGRn_OFF_START + 12:
640 ASMAtomicWriteU32(&pThis->u32RegICfg1, uValue);
641 break;
642 case GIC_DIST_REG_IGRPMODRn_OFF_START: /* Only 32 lines for now. */
643 AssertReleaseFailed();
644 break;
645 case GIC_DIST_REG_NSACRn_OFF_START: /* Only 32 lines for now. */
646 AssertReleaseFailed();
647 break;
648 case GIC_DIST_REG_SGIR_OFF:
649 AssertReleaseFailed();
650 break;
651 case GIC_DIST_REG_CPENDSGIRn_OFF_START:
652 AssertReleaseFailed();
653 break;
654 case GIC_DIST_REG_SPENDSGIRn_OFF_START:
655 AssertReleaseFailed();
656 break;
657 case GIC_DIST_REG_INMIn_OFF_START:
658 AssertReleaseFailed();
659 break;
660 default:
661 //AssertReleaseFailed();
662 break;
663 }
664
665 return rcStrict;
666}
667
668
669/**
670 * Reads a GIC redistributor register.
671 *
672 * @returns Strict VBox status code.
673 * @param pDevIns The device instance.
674 * @param pVCpu The cross context virtual CPU structure.
675 * @param idRedist The redistributor ID.
676 * @param offReg The offset of the register being read.
677 * @param puValue Where to store the register value.
678 */
679DECLINLINE(VBOXSTRICTRC) gicReDistRegisterRead(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint32_t idRedist, uint16_t offReg, uint32_t *puValue)
680{
681 RT_NOREF(pDevIns);
682
683 switch (offReg)
684 {
685 case GIC_REDIST_REG_TYPER_OFF:
686 {
687 PVMCC pVM = PDMDevHlpGetVM(pDevIns);
688 *puValue = ((pVCpu->idCpu == pVM->cCpus - 1) ? GIC_REDIST_REG_TYPER_LAST : 0)
689 | GIC_REDIST_REG_TYPER_CPU_NUMBER_SET(idRedist)
690 | GIC_REDIST_REG_TYPER_CMN_LPI_AFF_SET(GIC_REDIST_REG_TYPER_CMN_LPI_AFF_ALL);
691 break;
692 }
693 case GIC_REDIST_REG_TYPER_AFFINITY_OFF:
694 *puValue = idRedist;
695 break;
696 case GIC_REDIST_REG_PIDR2_OFF:
697 *puValue = GIC_REDIST_REG_PIDR2_ARCH_REV_SET(GIC_REDIST_REG_PIDR2_ARCH_REV_GICV3);
698 break;
699 default:
700 *puValue = 0;
701 }
702
703 return VINF_SUCCESS;
704}
705
706
707/**
708 * Reads a GIC redistributor SGI/PPI frame register.
709 *
710 * @returns Strict VBox status code.
711 * @param pDevIns The device instance.
712 * @param pVCpu The cross context virtual CPU structure.
713 * @param offReg The offset of the register being read.
714 * @param puValue Where to store the register value.
715 */
716DECLINLINE(VBOXSTRICTRC) gicReDistSgiPpiRegisterRead(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t *puValue)
717{
718 VMCPU_ASSERT_EMT(pVCpu);
719 RT_NOREF(pDevIns);
720
721 PGICCPU pThis = VMCPU_TO_GICCPU(pVCpu);
722 switch (offReg)
723 {
724 case GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF:
725 case GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF:
726 *puValue = ASMAtomicReadU32(&pThis->bmIntEnabled);
727 break;
728 case GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF:
729 case GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF:
730 *puValue = ASMAtomicReadU32(&pThis->bmIntPending);
731 break;
732 case GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF:
733 case GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF:
734 *puValue = ASMAtomicReadU32(&pThis->bmIntActive);
735 break;
736 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START:
737 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 4:
738 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 8:
739 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 12:
740 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 16:
741 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 20:
742 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 24:
743 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 28:
744 {
 745 /* Figure out the register which is read. */
746 uint8_t idxPrio = offReg - GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START;
747 Assert(idxPrio <= RT_ELEMENTS(pThis->abIntPriority) - sizeof(uint32_t));
748
749 uint32_t u32Value = 0;
750 for (uint32_t i = idxPrio; i < idxPrio + sizeof(uint32_t); i++)
751 u32Value |= pThis->abIntPriority[i] << ((i - idxPrio) * 8);
752
753 *puValue = u32Value;
754 break;
755 }
756 case GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF:
757 *puValue = ASMAtomicReadU32(&pThis->u32RegICfg0);
758 break;
759 case GIC_REDIST_SGI_PPI_REG_ICFGR1_OFF:
760 *puValue = ASMAtomicReadU32(&pThis->u32RegICfg1);
761 break;
762 default:
763 AssertReleaseFailed();
764 *puValue = 0;
765 }
766
767 return VINF_SUCCESS;
768}
769
770
771/**
772 * Writes a GIC redistributor frame register.
773 *
774 * @returns Strict VBox status code.
775 * @param pDevIns The device instance.
776 * @param pVCpu The cross context virtual CPU structure.
777 * @param offReg The offset of the register being written.
778 * @param uValue The register value.
779 */
780DECLINLINE(VBOXSTRICTRC) gicReDistRegisterWrite(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
781{
782 VMCPU_ASSERT_EMT(pVCpu);
783 RT_NOREF(pDevIns, pVCpu, uValue);
784
785 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
786 switch (offReg)
787 {
788 case GIC_REDIST_REG_STATUSR_OFF:
789 AssertReleaseFailed();
790 break;
791 case GIC_REDIST_REG_WAKER_OFF:
792 Assert(uValue == 0);
793 break;
794 case GIC_REDIST_REG_PARTIDR_OFF:
795 AssertReleaseFailed();
796 break;
797 case GIC_REDIST_REG_SETLPIR_OFF:
798 AssertReleaseFailed();
799 break;
800 case GIC_REDIST_REG_CLRLPIR_OFF:
801 AssertReleaseFailed();
802 break;
803 case GIC_REDIST_REG_PROPBASER_OFF:
804 AssertReleaseFailed();
805 break;
806 case GIC_REDIST_REG_PENDBASER_OFF:
807 AssertReleaseFailed();
808 break;
809 case GIC_REDIST_REG_INVLPIR_OFF:
810 AssertReleaseFailed();
811 break;
812 case GIC_REDIST_REG_INVALLR_OFF:
813 AssertReleaseFailed();
814 break;
815 default:
816 AssertReleaseFailed();
817 break;
818 }
819
820 return rcStrict;
821}
822
823
824/**
825 * Writes a GIC redistributor SGI/PPI frame register.
826 *
827 * @returns Strict VBox status code.
828 * @param pDevIns The device instance.
829 * @param pVCpu The cross context virtual CPU structure.
830 * @param offReg The offset of the register being written.
831 * @param uValue The register value.
832 */
833DECLINLINE(VBOXSTRICTRC) gicReDistSgiPpiRegisterWrite(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
834{
835 VMCPU_ASSERT_EMT(pVCpu);
836 RT_NOREF(pDevIns);
837
838 PGICCPU pThis = VMCPU_TO_GICCPU(pVCpu);
839 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
840 switch (offReg)
841 {
842 case GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF:
843 ASMAtomicOrU32(&pThis->u32RegIGrp0, uValue);
844 rcStrict = gicReDistUpdateIrqState(pThis, pVCpu);
845 break;
846 case GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF:
847 ASMAtomicOrU32(&pThis->bmIntEnabled, uValue);
848 rcStrict = gicReDistUpdateIrqState(pThis, pVCpu);
849 break;
850 case GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF:
851 ASMAtomicAndU32(&pThis->bmIntEnabled, ~uValue);
852 rcStrict = gicReDistUpdateIrqState(pThis, pVCpu);
853 break;
854 case GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF:
855 ASMAtomicOrU32(&pThis->bmIntPending, uValue);
856 rcStrict = gicReDistUpdateIrqState(pThis, pVCpu);
857 break;
858 case GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF:
859 ASMAtomicAndU32(&pThis->bmIntPending, ~uValue);
860 rcStrict = gicReDistUpdateIrqState(pThis, pVCpu);
861 break;
862 case GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF:
863 ASMAtomicOrU32(&pThis->bmIntActive, uValue);
864 rcStrict = gicReDistUpdateIrqState(pThis, pVCpu);
865 break;
866 case GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF:
867 ASMAtomicAndU32(&pThis->bmIntActive, ~uValue);
868 rcStrict = gicReDistUpdateIrqState(pThis, pVCpu);
869 break;
870 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START:
871 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 4:
872 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 8:
873 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 12:
874 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 16:
875 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 20:
876 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 24:
877 case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 28:
878 {
 879 /* Figure out the register which is written. */
880 uint8_t idxPrio = offReg - GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START;
881 Assert(idxPrio <= RT_ELEMENTS(pThis->abIntPriority) - sizeof(uint32_t));
882 for (uint32_t i = idxPrio; i < idxPrio + sizeof(uint32_t); i++)
883 {
884 pThis->abIntPriority[i] = (uint8_t)(uValue & 0xff);
885 uValue >>= 8;
886 }
887 break;
888 }
889 case GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF:
890 ASMAtomicWriteU32(&pThis->u32RegICfg0, uValue);
891 break;
892 case GIC_REDIST_SGI_PPI_REG_ICFGR1_OFF:
893 ASMAtomicWriteU32(&pThis->u32RegICfg1, uValue);
894 break;
895 default:
896 //AssertReleaseFailed();
897 break;
898 }
899
900 return rcStrict;
901}
902
903
904/**
905 * Reads a GIC system register.
906 *
907 * @returns Strict VBox status code.
908 * @param pVCpu The cross context virtual CPU structure.
909 * @param u32Reg The system register being read.
910 * @param pu64Value Where to store the read value.
911 */
912VMM_INT_DECL(VBOXSTRICTRC) GICReadSysReg(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
913{
914 /*
915 * Validate.
916 */
917 VMCPU_ASSERT_EMT(pVCpu);
918 Assert(pu64Value);
919
920 *pu64Value = 0;
921 PGICCPU pThis = VMCPU_TO_GICCPU(pVCpu);
922 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
923 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
924
925 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, pDevIns->pCritSectRoR3, VERR_IGNORED);
926 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, pDevIns->pCritSectRoR3, rcLock);
927
928 switch (u32Reg)
929 {
930 case ARMV8_AARCH64_SYSREG_ICC_PMR_EL1:
931 *pu64Value = pThis->bInterruptPriority;
932 break;
933 case ARMV8_AARCH64_SYSREG_ICC_IAR0_EL1:
934 AssertReleaseFailed();
935 break;
936 case ARMV8_AARCH64_SYSREG_ICC_EOIR0_EL1:
937 AssertReleaseFailed();
938 break;
939 case ARMV8_AARCH64_SYSREG_ICC_HPPIR0_EL1:
940 AssertReleaseFailed();
941 break;
942 case ARMV8_AARCH64_SYSREG_ICC_BPR0_EL1:
943 *pu64Value = pThis->bBinaryPointGrp0 & 0x7;
944 break;
945 case ARMV8_AARCH64_SYSREG_ICC_AP0R0_EL1:
946 AssertReleaseFailed();
947 break;
948 case ARMV8_AARCH64_SYSREG_ICC_AP0R1_EL1:
949 AssertReleaseFailed();
950 break;
951 case ARMV8_AARCH64_SYSREG_ICC_AP0R2_EL1:
952 AssertReleaseFailed();
953 break;
954 case ARMV8_AARCH64_SYSREG_ICC_AP0R3_EL1:
955 AssertReleaseFailed();
956 break;
957 case ARMV8_AARCH64_SYSREG_ICC_AP1R0_EL1:
958 AssertReleaseFailed();
959 break;
960 case ARMV8_AARCH64_SYSREG_ICC_AP1R1_EL1:
961 AssertReleaseFailed();
962 break;
963 case ARMV8_AARCH64_SYSREG_ICC_AP1R2_EL1:
964 AssertReleaseFailed();
965 break;
966 case ARMV8_AARCH64_SYSREG_ICC_AP1R3_EL1:
967 AssertReleaseFailed();
968 break;
969 case ARMV8_AARCH64_SYSREG_ICC_NMIAR1_EL1:
970 AssertReleaseFailed();
971 break;
972 case ARMV8_AARCH64_SYSREG_ICC_DIR_EL1:
973 AssertReleaseFailed();
974 break;
975 case ARMV8_AARCH64_SYSREG_ICC_RPR_EL1:
976 *pu64Value = pThis->abRunningPriorities[pThis->idxRunningPriority];
977 break;
978 case ARMV8_AARCH64_SYSREG_ICC_SGI1R_EL1:
979 AssertReleaseFailed();
980 break;
981 case ARMV8_AARCH64_SYSREG_ICC_ASGI1R_EL1:
982 AssertReleaseFailed();
983 break;
984 case ARMV8_AARCH64_SYSREG_ICC_SGI0R_EL1:
985 AssertReleaseFailed();
986 break;
987 case ARMV8_AARCH64_SYSREG_ICC_IAR1_EL1:
988 {
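 /* Interrupt acknowledge: pick a deliverable pending group 1 interrupt (SGI/PPI first, then SPI),
    mark it active, push its priority onto the running priority stack and return its INTID. */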
989 /** @todo Figure out the highest priority interrupt. */
990 uint32_t bmIntActive = ASMAtomicReadU32(&pThis->bmIntActive);
991 uint32_t bmIntEnabled = ASMAtomicReadU32(&pThis->bmIntEnabled);
992 uint32_t bmPending = (ASMAtomicReadU32(&pThis->bmIntPending) & bmIntEnabled) & ~bmIntActive;
993 int32_t idxIntPending = ASMBitFirstSet(&bmPending, sizeof(bmPending) * 8);
994 if (idxIntPending > -1)
995 {
996 /* Mark the interrupt as active. */
997 ASMAtomicOrU32(&pThis->bmIntActive, RT_BIT_32(idxIntPending));
998 /* Drop priority. */
999 Assert((uint32_t)idxIntPending < RT_ELEMENTS(pThis->abIntPriority));
1000 Assert(pThis->idxRunningPriority < RT_ELEMENTS(pThis->abRunningPriorities) - 1);
1001
1002 LogFlowFunc(("Dropping interrupt priority from %u -> %u (idxRunningPriority: %u -> %u)\n",
1003 pThis->abRunningPriorities[pThis->idxRunningPriority],
1004 pThis->abIntPriority[idxIntPending],
1005 pThis->idxRunningPriority, pThis->idxRunningPriority + 1));
1006
1007 pThis->abRunningPriorities[++pThis->idxRunningPriority] = pThis->abIntPriority[idxIntPending];
1008
 1009 /* Clear edge-triggered interrupts such as SGIs from the pending state. */
1010 if (idxIntPending <= GIC_INTID_RANGE_SGI_LAST)
1011 ASMAtomicBitClear(&pThis->bmIntPending, idxIntPending);
1012 *pu64Value = idxIntPending;
1013 gicReDistUpdateIrqState(pThis, pVCpu);
1014 }
1015 else
1016 {
1017 /** @todo This is wrong as the guest might decide to prioritize PPIs and SPIs differently. */
1018 bmIntActive = ASMAtomicReadU32(&pGicDev->bmIntActive);
1019 bmIntEnabled = ASMAtomicReadU32(&pGicDev->bmIntEnabled);
1020 bmPending = (ASMAtomicReadU32(&pGicDev->bmIntPending) & bmIntEnabled) & ~bmIntActive;
1021 idxIntPending = ASMBitFirstSet(&bmPending, sizeof(bmPending) * 8);
1022 if ( idxIntPending > -1
1023 && pGicDev->abIntPriority[idxIntPending] < pThis->bInterruptPriority)
1024 {
1025 /* Mark the interrupt as active. */
1026 ASMAtomicOrU32(&pGicDev->bmIntActive, RT_BIT_32(idxIntPending));
1027
1028 /* Drop priority. */
1029 Assert((uint32_t)idxIntPending < RT_ELEMENTS(pGicDev->abIntPriority));
1030 Assert(pThis->idxRunningPriority < RT_ELEMENTS(pThis->abRunningPriorities) - 1);
1031
1032 LogFlowFunc(("Dropping interrupt priority from %u -> %u (idxRunningPriority: %u -> %u)\n",
1033 pThis->abRunningPriorities[pThis->idxRunningPriority],
1034 pThis->abIntPriority[idxIntPending],
1035 pThis->idxRunningPriority, pThis->idxRunningPriority + 1));
1036
1037 pThis->abRunningPriorities[++pThis->idxRunningPriority] = pGicDev->abIntPriority[idxIntPending];
1038
1039 *pu64Value = idxIntPending + GIC_INTID_RANGE_SPI_START;
1040 gicReDistUpdateIrqState(pThis, pVCpu);
1041 }
1042 else
1043 *pu64Value = GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1044 }
1045 break;
1046 }
1047 case ARMV8_AARCH64_SYSREG_ICC_EOIR1_EL1:
1048 AssertReleaseFailed();
1049 break;
1050 case ARMV8_AARCH64_SYSREG_ICC_HPPIR1_EL1:
1051 {
1052 /** @todo Figure out the highest priority interrupt. */
1053 uint32_t bmIntActive = ASMAtomicReadU32(&pThis->bmIntActive);
1054 uint32_t bmIntEnabled = ASMAtomicReadU32(&pThis->bmIntEnabled);
1055 uint32_t bmPending = (ASMAtomicReadU32(&pThis->bmIntPending) & bmIntEnabled) & ~bmIntActive;
1056 int32_t idxIntPending = ASMBitFirstSet(&bmPending, sizeof(bmPending) * 8);
1057 if (idxIntPending > -1)
1058 *pu64Value = idxIntPending;
1059 else
1060 {
1061 /** @todo This is wrong as the guest might decide to prioritize PPIs and SPIs differently. */
1062 bmIntActive = ASMAtomicReadU32(&pGicDev->bmIntActive);
1063 bmIntEnabled = ASMAtomicReadU32(&pGicDev->bmIntEnabled);
1064 bmPending = (ASMAtomicReadU32(&pGicDev->bmIntPending) & bmIntEnabled) & ~bmIntActive;
1065 idxIntPending = ASMBitFirstSet(&bmPending, sizeof(bmPending) * 8);
1066 if (idxIntPending > -1)
1067 *pu64Value = idxIntPending + GIC_INTID_RANGE_SPI_START;
1068 else
1069 *pu64Value = GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1070 }
1071 break;
1072 }
1073 case ARMV8_AARCH64_SYSREG_ICC_BPR1_EL1:
1074 *pu64Value = pThis->bBinaryPointGrp1 & 0x7;
1075 break;
1076 case ARMV8_AARCH64_SYSREG_ICC_CTLR_EL1:
1077 *pu64Value = ARMV8_ICC_CTLR_EL1_AARCH64_PMHE
1078 | ARMV8_ICC_CTLR_EL1_AARCH64_PRIBITS_SET(4)
1079 | ARMV8_ICC_CTLR_EL1_AARCH64_IDBITS_SET(ARMV8_ICC_CTLR_EL1_AARCH64_IDBITS_16BITS);
1080 break;
1081 case ARMV8_AARCH64_SYSREG_ICC_SRE_EL1:
1082 AssertReleaseFailed();
1083 break;
1084 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN0_EL1:
1085 *pu64Value = ASMAtomicReadBool(&pThis->fIrqGrp0Enabled) ? ARMV8_ICC_IGRPEN0_EL1_AARCH64_ENABLE : 0;
1086 break;
1087 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN1_EL1:
1088 *pu64Value = ASMAtomicReadBool(&pThis->fIrqGrp1Enabled) ? ARMV8_ICC_IGRPEN1_EL1_AARCH64_ENABLE : 0;
1089 break;
1090 default:
1091 AssertReleaseFailed();
1092 break;
1093 }
1094
1095 PDMDevHlpCritSectLeave(pDevIns, pDevIns->pCritSectRoR3);
1096
1097 LogFlowFunc(("pVCpu=%p u32Reg=%#x{%s} pu64Value=%RX64\n", pVCpu, u32Reg, gicIccRegisterStringify(u32Reg), *pu64Value));
1098 return VINF_SUCCESS;
1099}
1100
1101
1102/**
1103 * Writes a GIC system register.
1104 *
1105 * @returns Strict VBox status code.
1106 * @param pVCpu The cross context virtual CPU structure.
1107 * @param u32Reg The system register being written (IPRT system register identifier).
1108 * @param u64Value The value to write.
1109 */
1110VMM_INT_DECL(VBOXSTRICTRC) GICWriteSysReg(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t u64Value)
1111{
1112 /*
1113 * Validate.
1114 */
1115 VMCPU_ASSERT_EMT(pVCpu);
1116 LogFlowFunc(("pVCpu=%p u32Reg=%#x{%s} u64Value=%RX64\n", pVCpu, u32Reg, gicIccRegisterStringify(u32Reg), u64Value));
1117
1118 PGICCPU pThis = VMCPU_TO_GICCPU(pVCpu);
1119 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
1120 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
1121
1122 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, pDevIns->pCritSectRoR3, VERR_IGNORED);
1123 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, pDevIns->pCritSectRoR3, rcLock);
1124
1125 switch (u32Reg)
1126 {
1127 case ARMV8_AARCH64_SYSREG_ICC_PMR_EL1:
1128 LogFlowFunc(("ICC_PMR_EL1: Interrupt priority now %u\n", (uint8_t)u64Value));
1129 ASMAtomicWriteU8(&pThis->bInterruptPriority, (uint8_t)u64Value);
1130 gicReDistUpdateIrqState(pThis, pVCpu);
1131 break;
1132 case ARMV8_AARCH64_SYSREG_ICC_IAR0_EL1:
1133 AssertReleaseFailed();
1134 break;
1135 case ARMV8_AARCH64_SYSREG_ICC_EOIR0_EL1:
1136 AssertReleaseFailed();
1137 break;
1138 case ARMV8_AARCH64_SYSREG_ICC_HPPIR0_EL1:
1139 AssertReleaseFailed();
1140 break;
1141 case ARMV8_AARCH64_SYSREG_ICC_BPR0_EL1:
1142 pThis->bBinaryPointGrp0 = (uint8_t)(u64Value & 0x7);
1143 break;
1144 case ARMV8_AARCH64_SYSREG_ICC_AP0R0_EL1:
1145 /** @todo */
1146 break;
1147 case ARMV8_AARCH64_SYSREG_ICC_AP0R1_EL1:
1148 AssertReleaseFailed();
1149 break;
1150 case ARMV8_AARCH64_SYSREG_ICC_AP0R2_EL1:
1151 AssertReleaseFailed();
1152 break;
1153 case ARMV8_AARCH64_SYSREG_ICC_AP0R3_EL1:
1154 AssertReleaseFailed();
1155 break;
1156 case ARMV8_AARCH64_SYSREG_ICC_AP1R0_EL1:
1157 /** @todo */
1158 break;
1159 case ARMV8_AARCH64_SYSREG_ICC_AP1R1_EL1:
1160 AssertReleaseFailed();
1161 break;
1162 case ARMV8_AARCH64_SYSREG_ICC_AP1R2_EL1:
1163 AssertReleaseFailed();
1164 break;
1165 case ARMV8_AARCH64_SYSREG_ICC_AP1R3_EL1:
1166 AssertReleaseFailed();
1167 break;
1168 case ARMV8_AARCH64_SYSREG_ICC_NMIAR1_EL1:
1169 AssertReleaseFailed();
1170 break;
1171 case ARMV8_AARCH64_SYSREG_ICC_DIR_EL1:
1172 AssertReleaseFailed();
1173 break;
1174 case ARMV8_AARCH64_SYSREG_ICC_RPR_EL1:
1175 AssertReleaseFailed();
1176 break;
1177 case ARMV8_AARCH64_SYSREG_ICC_SGI1R_EL1:
1178 {
1179 uint32_t uIntId = ARMV8_ICC_SGI1R_EL1_AARCH64_INTID_GET(u64Value) - GIC_INTID_RANGE_SGI_START;
1180 if (u64Value & ARMV8_ICC_SGI1R_EL1_AARCH64_IRM)
1181 {
1182 /* Route to all but this vCPU. */
1183 for (uint32_t i = 0; i < pVCpu->pVMR3->cCpus; i++)
1184 {
1185 if (i != pVCpu->idCpu)
1186 {
1187 PVMCPUCC pVCpuDst = VMMGetCpuById(pVCpu->CTX_SUFF(pVM), i);
1188 if (pVCpuDst)
1189 GICSgiSet(pVCpuDst, uIntId, true /*fAsserted*/);
1190 else
1191 AssertFailed();
1192 }
1193 }
1194 }
1195 else
1196 {
1197 /* Examine target list. */
1198 /** @todo Range selector support. */
1199 VMCPUID idCpu = 0;
1200 uint16_t uTgtList = ARMV8_ICC_SGI1R_EL1_AARCH64_TARGET_LIST_GET(u64Value);
1201 /** @todo rewrite using ASMBitFirstSetU16. */
1202 while (uTgtList)
1203 {
1204 if (uTgtList & 0x1)
1205 {
1206 PVMCPUCC pVCpuDst = VMMGetCpuById(pVCpu->CTX_SUFF(pVM), idCpu);
1207 if (pVCpuDst)
1208 GICSgiSet(pVCpuDst, uIntId, true /*fAsserted*/);
1209 else
1210 AssertFailed();
1211 }
1212 uTgtList >>= 1;
1213 idCpu++;
1214 }
1215 }
1216 break;
1217 }
1218 case ARMV8_AARCH64_SYSREG_ICC_ASGI1R_EL1:
1219 AssertReleaseFailed();
1220 break;
1221 case ARMV8_AARCH64_SYSREG_ICC_SGI0R_EL1:
1222 AssertReleaseFailed();
1223 break;
1224 case ARMV8_AARCH64_SYSREG_ICC_IAR1_EL1:
1225 AssertReleaseFailed();
1226 break;
1227 case ARMV8_AARCH64_SYSREG_ICC_EOIR1_EL1:
1228 {
1229 /* Mark the interrupt as not active anymore, though it might still be pending. */
1230 if (u64Value < GIC_INTID_RANGE_SPI_START)
1231 ASMAtomicAndU32(&pThis->bmIntActive, ~RT_BIT_32((uint32_t)u64Value));
1232 else
1233 ASMAtomicAndU32(&pGicDev->bmIntActive, ~RT_BIT_32((uint32_t)(u64Value - GIC_INTID_RANGE_SPI_START)));
1234
1235 /* Restore previous interrupt priority. */
1236 Assert(pThis->idxRunningPriority > 0);
1237 if (RT_LIKELY(pThis->idxRunningPriority))
1238 {
1239 LogFlowFunc(("Restoring interrupt priority from %u -> %u (idxRunningPriority: %u -> %u)\n",
1240 pThis->abRunningPriorities[pThis->idxRunningPriority],
1241 pThis->abRunningPriorities[pThis->idxRunningPriority - 1],
1242 pThis->idxRunningPriority, pThis->idxRunningPriority - 1));
1243 pThis->idxRunningPriority--;
1244 }
1245 gicReDistUpdateIrqState(pThis, pVCpu);
1246 break;
1247 }
1248 case ARMV8_AARCH64_SYSREG_ICC_HPPIR1_EL1:
1249 AssertReleaseFailed();
1250 break;
1251 case ARMV8_AARCH64_SYSREG_ICC_BPR1_EL1:
 1252 pThis->bBinaryPointGrp1 = (uint8_t)(u64Value & 0x7);
1253 break;
1254 case ARMV8_AARCH64_SYSREG_ICC_CTLR_EL1:
1255 u64Value &= ARMV8_ICC_CTLR_EL1_RW;
1256 /** @todo */
1257 break;
1258 case ARMV8_AARCH64_SYSREG_ICC_SRE_EL1:
1259 AssertReleaseFailed();
1260 break;
1261 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN0_EL1:
1262 ASMAtomicWriteBool(&pThis->fIrqGrp0Enabled, RT_BOOL(u64Value & ARMV8_ICC_IGRPEN0_EL1_AARCH64_ENABLE));
1263 break;
1264 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN1_EL1:
1265 ASMAtomicWriteBool(&pThis->fIrqGrp1Enabled, RT_BOOL(u64Value & ARMV8_ICC_IGRPEN1_EL1_AARCH64_ENABLE));
1266 break;
1267 default:
1268 AssertReleaseFailed();
1269 break;
1270 }
1271
1272 PDMDevHlpCritSectLeave(pDevIns, pDevIns->pCritSectRoR3);
1273 return VINF_SUCCESS;
1274}
1275
1276
1277/**
1278 * Sets the pending state of the specified shared peripheral interrupt (SPI).
1279 *
1280 * @returns VBox status code.
1281 * @param pVM The cross context virtual machine structure.
1282 * @param uIntId The SPI ID (minus GIC_INTID_RANGE_SPI_START) to assert/de-assert.
1283 * @param fAsserted Flag whether to mark the interrupt as asserted/de-asserted.
1284 */
1285VMM_INT_DECL(int) GICSpiSet(PVMCC pVM, uint32_t uIntId, bool fAsserted)
1286{
1287 LogFlowFunc(("pVM=%p uIntId=%u fAsserted=%RTbool\n",
1288 pVM, uIntId, fAsserted));
1289
1290 AssertReturn(uIntId < GIC_SPI_MAX, VERR_INVALID_PARAMETER);
1291
1292 PGIC pGic = VM_TO_GIC(pVM);
1293
1294 /** @todo r=aeichner There must be another way to do this better, maybe create some callback interface
1295 * the GIC can register. */
1296#ifdef IN_RING3
1297 if (pGic->fNemGic)
1298 return GICR3NemSpiSet(pVM, uIntId, fAsserted);
1299#else
1300# error "Impossible to call the NEM in-kernel GIC from this context!"
1301#endif
1302
1303 PPDMDEVINS pDevIns = pGic->CTX_SUFF(pDevIns);
1304 PGICDEV pThis = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
1305
1306 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, pDevIns->pCritSectRoR3, VERR_IGNORED);
1307 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, pDevIns->pCritSectRoR3, rcLock);
1308
1309 /* Update the interrupts pending state. */
1310 if (fAsserted)
1311 ASMAtomicOrU32(&pThis->bmIntPending, RT_BIT_32(uIntId));
1312 else
1313 ASMAtomicAndU32(&pThis->bmIntPending, ~RT_BIT_32(uIntId));
1314
1315 int rc = VBOXSTRICTRC_VAL(gicDistUpdateIrqState(pVM, pThis));
1316 PDMDevHlpCritSectLeave(pDevIns, pDevIns->pCritSectRoR3);
1317 return rc;
1318}
1319
1320
1321/**
1322 * Sets the pending state of the specified private peripheral interrupt (PPI).
1323 *
1324 * @returns VBox status code.
1325 * @param pVCpu The cross context virtual CPU structure.
1326 * @param uIntId The PPI ID (minus GIC_INTID_RANGE_PPI_START) to assert/de-assert.
1327 * @param fAsserted Flag whether to mark the interrupt as asserted/de-asserted.
1328 */
1329VMM_INT_DECL(int) GICPpiSet(PVMCPUCC pVCpu, uint32_t uIntId, bool fAsserted)
1330{
1331 LogFlowFunc(("pVCpu=%p{.idCpu=%u} uIntId=%u fAsserted=%RTbool\n",
1332 pVCpu, pVCpu->idCpu, uIntId, fAsserted));
1333
1334 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
1335
1336 /** @todo r=aeichner There must be another way to do this better, maybe create some callback interface
1337 * the GIC can register. */
1338#ifdef IN_RING3
1339 PGIC pGic = VM_TO_GIC(pVCpu->pVMR3);
1340 if (pGic->fNemGic)
1341 return GICR3NemPpiSet(pVCpu, uIntId, fAsserted);
1342#else
1343# error "Impossible to call the NEM in-kernel GIC from this context!"
1344#endif
1345
1346 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, pDevIns->pCritSectRoR3, VERR_IGNORED);
1347 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, pDevIns->pCritSectRoR3, rcLock);
1348
1349 AssertReturn(uIntId <= (GIC_INTID_RANGE_PPI_LAST - GIC_INTID_RANGE_PPI_START), VERR_INVALID_PARAMETER);
1350 int rc = gicReDistInterruptSet(pVCpu, uIntId + GIC_INTID_RANGE_PPI_START, fAsserted);
1351 PDMDevHlpCritSectLeave(pDevIns, pDevIns->pCritSectRoR3);
1352
1353 return rc;
1354}
1355
1356
1357/**
1358 * Sets the pending state of the specified software generated interrupt (SGI).
1359 *
1360 * @returns VBox status code.
1361 * @param pVCpu The cross context virtual CPU structure.
1362 * @param uIntId The SGI ID (minus GIC_INTID_RANGE_SGI_START) to assert/de-assert.
1363 * @param fAsserted Flag whether to mark the interrupt as asserted/de-asserted.
1364 */
1365VMM_INT_DECL(int) GICSgiSet(PVMCPUCC pVCpu, uint32_t uIntId, bool fAsserted)
1366{
1367 LogFlowFunc(("pVCpu=%p{.idCpu=%u} uIntId=%u fAsserted=%RTbool\n",
1368 pVCpu, pVCpu->idCpu, uIntId, fAsserted));
1369
1370 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
1371
1372 /** @todo r=aeichner There must be another way to do this better, maybe create some callback interface
1373 * the GIC can register. */
1374#ifdef IN_RING3
1375 PGIC pGic = VM_TO_GIC(pVCpu->pVMR3);
1376 /* These should be handled in the kernel and never be set from here. */
1377 AssertReturn(!pGic->fNemGic, VERR_NEM_IPE_6);
1378#else
1379# error "Impossible to call the in-kernel GIC from this context!"
1380#endif
1381
1382 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, pDevIns->pCritSectRoR3, VERR_IGNORED);
1383 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, pDevIns->pCritSectRoR3, rcLock);
1384
1385 AssertReturn(uIntId <= (GIC_INTID_RANGE_SGI_LAST - GIC_INTID_RANGE_SGI_START), VERR_INVALID_PARAMETER);
1386 int rc = gicReDistInterruptSet(pVCpu, uIntId + GIC_INTID_RANGE_SGI_START, fAsserted);
1387 PDMDevHlpCritSectLeave(pDevIns, pDevIns->pCritSectRoR3);
1388
1389 return rc;
1390}
1391
1392
1393/**
1394 * Initializes per-VCPU GIC to the state following a power-up or hardware
1395 * reset.
1396 *
1397 * @param pVCpu The cross context virtual CPU structure.
1398 */
1399DECLHIDDEN(void) gicResetCpu(PVMCPUCC pVCpu)
1400{
1401 LogFlowFunc(("GIC%u\n", pVCpu->idCpu));
1402 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
1403
1404 memset((void *)&pVCpu->gic.s.abRunningPriorities[0], 0xff, sizeof(pVCpu->gic.s.abRunningPriorities));
1405 pVCpu->gic.s.idxRunningPriority = 0;
1406 pVCpu->gic.s.bInterruptPriority = 0; /* Means no interrupt gets through to the PE. */
1407}
1408
1409
1410/**
1411 * @callback_method_impl{FNIOMMMIONEWREAD}
1412 */
1413DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicDistMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
1414{
1415 NOREF(pvUser);
1416 Assert(!(off & 0x3));
1417 Assert(cb == 4); RT_NOREF_PV(cb);
1418
1419 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
1420 uint16_t offReg = off & 0xfffc;
1421 uint32_t uValue = 0;
1422
1423 STAM_COUNTER_INC(&pVCpu->gic.s.CTX_SUFF_Z(StatMmioRead));
1424
1425 VBOXSTRICTRC rc = VBOXSTRICTRC_VAL(gicDistRegisterRead(pDevIns, pVCpu, offReg, &uValue));
1426 *(uint32_t *)pv = uValue;
1427
1428 Log2(("GIC%u: gicDistMmioRead: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
1429 return rc;
1430}
1431
1432
1433/**
1434 * @callback_method_impl{FNIOMMMIONEWWRITE}
1435 */
1436DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicDistMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
1437{
1438 NOREF(pvUser);
1439 Assert(!(off & 0x3));
1440 Assert(cb == 4); RT_NOREF_PV(cb);
1441
1442 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
1443 uint16_t offReg = off & 0xfffc;
1444 uint32_t uValue = *(uint32_t *)pv;
1445
1446 STAM_COUNTER_INC(&pVCpu->gic.s.CTX_SUFF_Z(StatMmioWrite));
1447
1448 Log2(("GIC%u: gicDistMmioWrite: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
1449 return gicDistRegisterWrite(pDevIns, pVCpu, offReg, uValue);
1450}
1451
1452
1453/**
1454 * @callback_method_impl{FNIOMMMIONEWREAD}
1455 */
1456DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicReDistMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
1457{
1458 NOREF(pvUser);
1459 Assert(!(off & 0x3));
1460 Assert(cb == 4); RT_NOREF_PV(cb);
1461
1462 /*
1463 * Determine the redistributor being targeted. Each redistributor takes GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE bytes
1464 * and the redistributors are adjacent.
1465 */
1466 uint32_t idReDist = off / (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
1467 off %= (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
1468
1469 PVMCC pVM = PDMDevHlpGetVM(pDevIns);
1470 Assert(idReDist < pVM->cCpus);
1471 PVMCPUCC pVCpu = pVM->apCpusR3[idReDist];
1472
1473 STAM_COUNTER_INC(&pVCpu->gic.s.CTX_SUFF_Z(StatMmioRead));
1474
1475 /* Redistributor or SGI/PPI frame? */
1476 uint16_t offReg = off & 0xfffc;
1477 uint32_t uValue = 0;
1478 VBOXSTRICTRC rcStrict;
1479 if (off < GIC_REDIST_REG_FRAME_SIZE)
1480 rcStrict = gicReDistRegisterRead(pDevIns, pVCpu, idReDist, offReg, &uValue);
1481 else
1482 rcStrict = gicReDistSgiPpiRegisterRead(pDevIns, pVCpu, offReg, &uValue);
1483
1484 *(uint32_t *)pv = uValue;
1485 Log2(("GICReDist%u: gicReDistMmioRead: off=%RGp idReDist=%u offReg=%#RX16 uValue=%#RX32 -> %Rrc\n",
1486 pVCpu->idCpu, off, idReDist, offReg, uValue, VBOXSTRICTRC_VAL(rcStrict)));
1487 return rcStrict;
1488}
1489
1490
1491/**
1492 * @callback_method_impl{FNIOMMMIONEWWRITE}
1493 */
1494DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicReDistMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
1495{
1496 NOREF(pvUser);
1497 Assert(!(off & 0x3));
1498 Assert(cb == 4); RT_NOREF_PV(cb);
1499
1500 uint32_t uValue = *(uint32_t *)pv;
1501
1502 /*
1503 * Determine the redistributor being targeted. Each redistributor takes GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE bytes
1504 * and the redistributors are adjacent.
1505 */
1506 uint32_t idReDist = off / (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
1507 off %= (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
1508
1509 PVMCC pVM = PDMDevHlpGetVM(pDevIns);
1510 Assert(idReDist < pVM->cCpus);
1511 PVMCPUCC pVCpu = pVM->apCpusR3[idReDist];
1512
1513 STAM_COUNTER_INC(&pVCpu->gic.s.CTX_SUFF_Z(StatMmioWrite));
1514
1515 /* Redistributor or SGI/PPI frame? */
1516 uint16_t offReg = off & 0xfffc;
1517 VBOXSTRICTRC rcStrict;
1518 if (off < GIC_REDIST_REG_FRAME_SIZE)
1519 rcStrict = gicReDistRegisterWrite(pDevIns, pVCpu, offReg, uValue);
1520 else
1521 rcStrict = gicReDistSgiPpiRegisterWrite(pDevIns, pVCpu, offReg, uValue);
1522
1523 Log2(("GICReDist%u: gicReDistMmioWrite: off=%RGp idReDist=%u offReg=%#RX16 uValue=%#RX32 -> %Rrc\n",
1524 pVCpu->idCpu, off, idReDist, offReg, uValue, VBOXSTRICTRC_VAL(rcStrict)));
1525 return rcStrict;
1526}
1527
1528
1529#ifndef IN_RING3
1530
1531/**
1532 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
1533 */
1534static DECLCALLBACK(int) gicRZConstruct(PPDMDEVINS pDevIns)
1535{
1536 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
1537 AssertReleaseFailed();
1538 return VINF_SUCCESS;
1539}
1540#endif /* !IN_RING3 */
1541
1542/**
1543 * GIC device registration structure.
1544 */
1545const PDMDEVREG g_DeviceGIC =
1546{
1547 /* .u32Version = */ PDM_DEVREG_VERSION,
1548 /* .uReserved0 = */ 0,
1549 /* .szName = */ "gic",
1550 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
1551 /* .fClass = */ PDM_DEVREG_CLASS_PIC,
1552 /* .cMaxInstances = */ 1,
1553 /* .uSharedVersion = */ 42,
1554 /* .cbInstanceShared = */ sizeof(GICDEV),
1555 /* .cbInstanceCC = */ 0,
1556 /* .cbInstanceRC = */ 0,
1557 /* .cMaxPciDevices = */ 0,
1558 /* .cMaxMsixVectors = */ 0,
1559 /* .pszDescription = */ "Generic Interrupt Controller",
1560#if defined(IN_RING3)
1561 /* .szRCMod = */ "VMMRC.rc",
1562 /* .szR0Mod = */ "VMMR0.r0",
1563 /* .pfnConstruct = */ gicR3Construct,
1564 /* .pfnDestruct = */ gicR3Destruct,
1565 /* .pfnRelocate = */ gicR3Relocate,
1566 /* .pfnMemSetup = */ NULL,
1567 /* .pfnPowerOn = */ NULL,
1568 /* .pfnReset = */ gicR3Reset,
1569 /* .pfnSuspend = */ NULL,
1570 /* .pfnResume = */ NULL,
1571 /* .pfnAttach = */ NULL,
1572 /* .pfnDetach = */ NULL,
1573 /* .pfnQueryInterface = */ NULL,
1574 /* .pfnInitComplete = */ NULL,
1575 /* .pfnPowerOff = */ NULL,
1576 /* .pfnSoftReset = */ NULL,
1577 /* .pfnReserved0 = */ NULL,
1578 /* .pfnReserved1 = */ NULL,
1579 /* .pfnReserved2 = */ NULL,
1580 /* .pfnReserved3 = */ NULL,
1581 /* .pfnReserved4 = */ NULL,
1582 /* .pfnReserved5 = */ NULL,
1583 /* .pfnReserved6 = */ NULL,
1584 /* .pfnReserved7 = */ NULL,
1585#elif defined(IN_RING0)
1586 /* .pfnEarlyConstruct = */ NULL,
1587 /* .pfnConstruct = */ gicRZConstruct,
1588 /* .pfnDestruct = */ NULL,
1589 /* .pfnFinalDestruct = */ NULL,
1590 /* .pfnRequest = */ NULL,
1591 /* .pfnReserved0 = */ NULL,
1592 /* .pfnReserved1 = */ NULL,
1593 /* .pfnReserved2 = */ NULL,
1594 /* .pfnReserved3 = */ NULL,
1595 /* .pfnReserved4 = */ NULL,
1596 /* .pfnReserved5 = */ NULL,
1597 /* .pfnReserved6 = */ NULL,
1598 /* .pfnReserved7 = */ NULL,
1599#elif defined(IN_RC)
1600 /* .pfnConstruct = */ gicRZConstruct,
1601 /* .pfnReserved0 = */ NULL,
1602 /* .pfnReserved1 = */ NULL,
1603 /* .pfnReserved2 = */ NULL,
1604 /* .pfnReserved3 = */ NULL,
1605 /* .pfnReserved4 = */ NULL,
1606 /* .pfnReserved5 = */ NULL,
1607 /* .pfnReserved6 = */ NULL,
1608 /* .pfnReserved7 = */ NULL,
1609#else
1610# error "Not in IN_RING3, IN_RING0 or IN_RC!"
1611#endif
1612 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
1613};
1614