VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/APICAll.cpp@62601

Last change on this file since 62601 was 62601, checked in by vboxsync, 9 years ago

VMM: Unused parameters.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 103.4 KB
1/* $Id: APICAll.cpp 62601 2016-07-27 15:46:22Z vboxsync $ */
2/** @file
3 * APIC - Advanced Programmable Interrupt Controller - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_APIC
23#include "APICInternal.h"
24#include <VBox/vmm/pdmdev.h>
25#include <VBox/vmm/vm.h>
26#include <VBox/vmm/vmcpuset.h>
27
28
29/*********************************************************************************************************************************
30* Global Variables *
31*********************************************************************************************************************************/
32#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
33/** An ordered array of valid LVT masks. */
34static const uint32_t g_au32LvtValidMasks[] =
35{
36 XAPIC_LVT_TIMER_VALID,
37 XAPIC_LVT_THERMAL_VALID,
38 XAPIC_LVT_PERF_VALID,
39 XAPIC_LVT_LINT_VALID, /* LINT0 */
40 XAPIC_LVT_LINT_VALID, /* LINT1 */
41 XAPIC_LVT_ERROR_VALID
42};
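/* Note: the entries above are ordered to mirror the LVT register layout in the
   xAPIC page; apicSetLvtEntry() indexes this array with
   (offLvt - XAPIC_OFF_LVT_START) >> 4. */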
43#endif
44
45#if 0
46/** @todo CMCI */
47static const uint32_t g_au32LvtExtValidMask[] =
48{
49 XAPIC_LVT_CMCI_VALID
50};
51#endif
52
53
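/* The APIC 256-bit sparse registers (ISR, TMR, IRR) are stored as eight 32-bit
   fragments, one per 16-byte xAPIC register slot; the XAPIC_REG256_VECTOR_OFF and
   XAPIC_REG256_VECTOR_BIT macros used below translate a vector number into the
   corresponding byte offset and bit position of that layout. */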
54/**
55 * Checks if a vector is set in an APIC 256-bit sparse register.
56 *
57 * @returns true if the specified vector is set, false otherwise.
58 * @param   pApicReg        The APIC 256-bit sparse register.
59 * @param uVector The vector to check if set.
60 */
61DECLINLINE(bool) apicTestVectorInReg(const volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
62{
63 const volatile uint8_t *pbBitmap = (const volatile uint8_t *)&pApicReg->u[0];
64 return ASMBitTest(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
65}
66
67
68/**
69 * Sets the vector in an APIC 256-bit sparse register.
70 *
71 * @param   pApicReg        The APIC 256-bit sparse register.
72 * @param uVector The vector to set.
73 */
74DECLINLINE(void) apicSetVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
75{
76 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
77 ASMAtomicBitSet(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
78}
79
80
81/**
82 * Clears the vector in an APIC 256-bit sparse register.
83 *
84 * @param   pApicReg        The APIC 256-bit sparse register.
85 * @param uVector The vector to clear.
86 */
87DECLINLINE(void) apicClearVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
88{
89 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
90 ASMAtomicBitClear(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
91}
92
93
94/**
95 * Checks if a vector is set in an APIC Pending-Interrupt Bitmap (PIB).
96 *
97 * @returns true if the specified vector is set, false otherwise.
98 * @param pvPib Opaque pointer to the PIB.
99 * @param uVector The vector to check if set.
100 */
101DECLINLINE(bool) apicTestVectorInPib(volatile void *pvPib, uint8_t uVector)
102{
103 return ASMBitTest(pvPib, uVector);
104}
105
106
107/**
108 * Atomically sets the PIB notification bit.
109 *
110 * @returns non-zero if the bit was already set, 0 otherwise.
111 * @param pApicPib Pointer to the PIB.
112 */
113DECLINLINE(uint32_t) apicSetNotificationBitInPib(PAPICPIB pApicPib)
114{
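    /* Bit 31 is used as the outstanding-notification flag; the atomic exchange
       returns the previous value, so the caller can tell whether a notification
       was already pending and need not be re-sent. */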
115 return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, RT_BIT_32(31));
116}
117
118
119/**
120 * Atomically tests and clears the PIB notification bit.
121 *
122 * @returns non-zero if the bit was already set, 0 otherwise.
123 * @param pApicPib Pointer to the PIB.
124 */
125DECLINLINE(uint32_t) apicClearNotificationBitInPib(PAPICPIB pApicPib)
126{
127 return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, UINT32_C(0));
128}
129
130
131/**
132 * Sets the vector in an APIC Pending-Interrupt Bitmap (PIB).
133 *
134 * @param pvPib Opaque pointer to the PIB.
135 * @param uVector The vector to set.
136 */
137DECLINLINE(void) apicSetVectorInPib(volatile void *pvPib, uint8_t uVector)
138{
139 ASMAtomicBitSet(pvPib, uVector);
140}
141
142
143/**
144 * Clears the vector in an APIC Pending-Interrupt Bitmap (PIB).
145 *
146 * @param pvPib Opaque pointer to the PIB.
147 * @param uVector The vector to clear.
148 */
149DECLINLINE(void) apicClearVectorInPib(volatile void *pvPib, uint8_t uVector)
150{
151 ASMAtomicBitClear(pvPib, uVector);
152}
153
154
155/**
156 * Atomically OR's a fragment (32 vectors) into an APIC 256-bit sparse
157 * register.
158 *
159 * @param   pApicReg        The APIC 256-bit sparse register.
160 * @param idxFragment The index of the 32-bit fragment in @a
161 * pApicReg.
162 * @param u32Fragment The 32-bit vector fragment to OR.
163 */
164DECLINLINE(void) apicOrVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
165{
166 Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
167 ASMAtomicOrU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
168}
169
170
171/**
172 * Atomically AND's a fragment (32 vectors) into an APIC
173 * 256-bit sparse register.
174 *
175 * @param   pApicReg        The APIC 256-bit sparse register.
176 * @param idxFragment The index of the 32-bit fragment in @a
177 * pApicReg.
178 * @param u32Fragment The 32-bit vector fragment to AND.
179 */
180DECLINLINE(void) apicAndVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
181{
182 Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
183 ASMAtomicAndU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
184}
185
186
187/**
188 * Reports and returns appropriate error code for invalid MSR accesses.
189 *
190 * @returns Strict VBox status code.
191 * @retval VINF_CPUM_R3_MSR_WRITE if the MSR write could not be serviced in the
192 * current context (raw-mode or ring-0).
193 * @retval VINF_CPUM_R3_MSR_READ if the MSR read could not be serviced in the
194 * current context (raw-mode or ring-0).
195 * @retval VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
196 * appropriate actions.
197 *
198 * @param pVCpu The cross context virtual CPU structure.
199 * @param u32Reg The MSR being accessed.
200 * @param enmAccess The invalid-access type.
201 */
202static VBOXSTRICTRC apicMsrAccessError(PVMCPU pVCpu, uint32_t u32Reg, APICMSRACCESS enmAccess)
203{
204 static struct
205 {
206 const char *pszBefore; /* The error message before printing the MSR index */
207 const char *pszAfter; /* The error message after printing the MSR index */
208 int rcRZ; /* The RZ error code */
209 } const s_aAccess[] =
210 {
211 { "read MSR", " while not in x2APIC mode", VINF_CPUM_R3_MSR_READ },
212 { "write MSR", " while not in x2APIC mode", VINF_CPUM_R3_MSR_WRITE },
213 { "read reserved/unknown MSR", "", VINF_CPUM_R3_MSR_READ },
214 { "write reserved/unknown MSR", "", VINF_CPUM_R3_MSR_WRITE },
215 { "read write-only MSR", "", VINF_CPUM_R3_MSR_READ },
216 { "write read-only MSR", "", VINF_CPUM_R3_MSR_WRITE },
217 { "read reserved bits of MSR", "", VINF_CPUM_R3_MSR_READ },
218 { "write reserved bits of MSR", "", VINF_CPUM_R3_MSR_WRITE },
219 { "write an invalid value to MSR", "", VINF_CPUM_R3_MSR_WRITE },
220 { "write MSR", "disallowed by configuration", VINF_CPUM_R3_MSR_WRITE }
221 };
222 AssertCompile(RT_ELEMENTS(s_aAccess) == APICMSRACCESS_COUNT);
223
224 size_t const i = enmAccess;
225 Assert(i < RT_ELEMENTS(s_aAccess));
226#ifdef IN_RING3
227 LogRelMax(5, ("APIC%u: Attempt to %s (%#x)%s -> #GP(0)\n", pVCpu->idCpu, s_aAccess[i].pszBefore, u32Reg,
228 s_aAccess[i].pszAfter));
229 return VERR_CPUM_RAISE_GP_0;
230#else
231 RT_NOREF_PV(u32Reg); RT_NOREF_PV(pVCpu);
232 return s_aAccess[i].rcRZ;
233#endif
234}
235
236
237/**
238 * Gets the descriptive APIC mode.
239 *
240 * @returns The name.
241 * @param enmMode The xAPIC mode.
242 */
243const char *apicGetModeName(APICMODE enmMode)
244{
245 switch (enmMode)
246 {
247 case APICMODE_DISABLED: return "Disabled";
248 case APICMODE_XAPIC: return "xAPIC";
249 case APICMODE_X2APIC: return "x2APIC";
250 default: break;
251 }
252 return "Invalid";
253}
254
255
256/**
257 * Gets the descriptive destination format name.
258 *
259 * @returns The destination format name.
260 * @param enmDestFormat The destination format.
261 */
262const char *apicGetDestFormatName(XAPICDESTFORMAT enmDestFormat)
263{
264 switch (enmDestFormat)
265 {
266 case XAPICDESTFORMAT_FLAT: return "Flat";
267 case XAPICDESTFORMAT_CLUSTER: return "Cluster";
268 default: break;
269 }
270 return "Invalid";
271}
272
273
274/**
275 * Gets the descriptive delivery mode name.
276 *
277 * @returns The delivery mode name.
278 * @param enmDeliveryMode The delivery mode.
279 */
280const char *apicGetDeliveryModeName(XAPICDELIVERYMODE enmDeliveryMode)
281{
282 switch (enmDeliveryMode)
283 {
284 case XAPICDELIVERYMODE_FIXED: return "Fixed";
285 case XAPICDELIVERYMODE_LOWEST_PRIO: return "Lowest-priority";
286 case XAPICDELIVERYMODE_SMI: return "SMI";
287 case XAPICDELIVERYMODE_NMI: return "NMI";
288 case XAPICDELIVERYMODE_INIT: return "INIT";
289 case XAPICDELIVERYMODE_STARTUP: return "SIPI";
290 case XAPICDELIVERYMODE_EXTINT: return "ExtINT";
291 default: break;
292 }
293 return "Invalid";
294}
295
296
297/**
298 * Gets the descriptive destination mode name.
299 *
300 * @returns The destination mode name.
301 * @param enmDestMode The destination mode.
302 */
303const char *apicGetDestModeName(XAPICDESTMODE enmDestMode)
304{
305 switch (enmDestMode)
306 {
307 case XAPICDESTMODE_PHYSICAL: return "Physical";
308 case XAPICDESTMODE_LOGICAL: return "Logical";
309 default: break;
310 }
311 return "Invalid";
312}
313
314
315/**
316 * Gets the descriptive trigger mode name.
317 *
318 * @returns The trigger mode name.
319 * @param enmTriggerMode The trigger mode.
320 */
321const char *apicGetTriggerModeName(XAPICTRIGGERMODE enmTriggerMode)
322{
323 switch (enmTriggerMode)
324 {
325 case XAPICTRIGGERMODE_EDGE: return "Edge";
326 case XAPICTRIGGERMODE_LEVEL: return "Level";
327 default: break;
328 }
329 return "Invalid";
330}
331
332
333/**
334 * Gets the destination shorthand name.
335 *
336 * @returns The destination shorthand name.
337 * @param enmDestShorthand The destination shorthand.
338 */
339const char *apicGetDestShorthandName(XAPICDESTSHORTHAND enmDestShorthand)
340{
341 switch (enmDestShorthand)
342 {
343 case XAPICDESTSHORTHAND_NONE: return "None";
344 case XAPICDESTSHORTHAND_SELF: return "Self";
345 case XAPIDDESTSHORTHAND_ALL_INCL_SELF: return "All including self";
346 case XAPICDESTSHORTHAND_ALL_EXCL_SELF: return "All excluding self";
347 default: break;
348 }
349 return "Invalid";
350}
351
352
353/**
354 * Gets the timer mode name.
355 *
356 * @returns The timer mode name.
357 * @param enmTimerMode The timer mode.
358 */
359const char *apicGetTimerModeName(XAPICTIMERMODE enmTimerMode)
360{
361 switch (enmTimerMode)
362 {
363 case XAPICTIMERMODE_ONESHOT: return "One-shot";
364 case XAPICTIMERMODE_PERIODIC: return "Periodic";
365 case XAPICTIMERMODE_TSC_DEADLINE: return "TSC deadline";
366 default: break;
367 }
368 return "Invalid";
369}
370
371
372/**
373 * Gets the APIC mode given the base MSR value.
374 *
375 * @returns The APIC mode.
376 * @param uApicBaseMsr The APIC Base MSR value.
377 */
378APICMODE apicGetMode(uint64_t uApicBaseMsr)
379{
380 uint32_t const uMode = (uApicBaseMsr >> 10) & UINT64_C(3);
381 APICMODE const enmMode = (APICMODE)uMode;
382#ifdef VBOX_STRICT
383 /* Paranoia. */
384 switch (uMode)
385 {
386 case APICMODE_DISABLED:
387 case APICMODE_INVALID:
388 case APICMODE_XAPIC:
389 case APICMODE_X2APIC:
390 break;
391 default:
392 AssertMsgFailed(("Invalid mode"));
393 }
394#endif
395 return enmMode;
396}
397
398
399/**
400 * Returns whether the APIC is hardware enabled or not.
401 *
402 * @returns true if enabled, false otherwise.
403 */
404DECLINLINE(bool) apicIsEnabled(PVMCPU pVCpu)
405{
406 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
407 return RT_BOOL(pApicCpu->uApicBaseMsr & MSR_IA32_APICBASE_EN);
408}
409
410
411/**
412 * Finds the most significant set bit in an APIC 256-bit sparse register.
413 *
414 * @returns @a rcNotFound if no bit was set, 0-255 otherwise.
415 * @param pReg The APIC 256-bit sparse register.
416 * @param rcNotFound What to return when no bit is set.
417 */
418static int apicGetHighestSetBitInReg(volatile const XAPIC256BITREG *pReg, int rcNotFound)
419{
420 ssize_t const cFragments = RT_ELEMENTS(pReg->u);
421 unsigned const uFragmentShift = 5;
422 AssertCompile(1 << uFragmentShift == sizeof(pReg->u[0].u32Reg) * 8);
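    /* Example: vector 0x53 (83) lives in fragment 83 >> 5 = 2 at bit 83 & 31 = 19;
       recombining (2 << 5) | 19 below yields 83 again. */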
423 for (ssize_t i = cFragments - 1; i >= 0; i--)
424 {
425 uint32_t const uFragment = pReg->u[i].u32Reg;
426 if (uFragment)
427 {
428 unsigned idxSetBit = ASMBitLastSetU32(uFragment);
429 --idxSetBit;
430 idxSetBit |= i << uFragmentShift;
431 return idxSetBit;
432 }
433 }
434 return rcNotFound;
435}
436
437
438/**
439 * Reads a 32-bit register at a specified offset.
440 *
441 * @returns The value at the specified offset.
442 * @param pXApicPage The xAPIC page.
443 * @param offReg The offset of the register being read.
444 */
445DECLINLINE(uint32_t) apicReadRaw32(PCXAPICPAGE pXApicPage, uint16_t offReg)
446{
447 Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
448 uint8_t const *pbXApic = (const uint8_t *)pXApicPage;
449 uint32_t const uValue = *(const uint32_t *)(pbXApic + offReg);
450 return uValue;
451}
452
453
454/**
455 * Writes a 32-bit register at a specified offset.
456 *
457 * @param pXApicPage The xAPIC page.
458 * @param offReg The offset of the register being written.
459 * @param uReg The value of the register.
460 */
461DECLINLINE(void) apicWriteRaw32(PXAPICPAGE pXApicPage, uint16_t offReg, uint32_t uReg)
462{
463 Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
464 uint8_t *pbXApic = (uint8_t *)pXApicPage;
465 *(uint32_t *)(pbXApic + offReg) = uReg;
466}
467
468
469/**
470 * Broadcasts the EOI to the I/O APICs.
471 *
472 * @param pVCpu The cross context virtual CPU structure.
473 * @param uVector The interrupt vector corresponding to the EOI.
474 */
475DECLINLINE(int) apicBusBroadcastEoi(PVMCPU pVCpu, uint8_t uVector)
476{
477 PVM pVM = pVCpu->CTX_SUFF(pVM);
478 PAPICDEV pApicDev = VM_TO_APICDEV(pVM);
479 return pApicDev->CTX_SUFF(pApicHlp)->pfnBusBroadcastEoi(pApicDev->CTX_SUFF(pDevIns), uVector);
480}
481
482
483/**
484 * Sets an error in the internal ESR of the specified APIC.
485 *
486 * @param pVCpu The cross context virtual CPU structure.
487 * @param uError The error.
488 * @thread Any.
489 */
490DECLINLINE(void) apicSetError(PVMCPU pVCpu, uint32_t uError)
491{
492 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
493 ASMAtomicOrU32(&pApicCpu->uEsrInternal, uError);
494}
495
496
497/**
498 * Clears all errors in the internal ESR.
499 *
500 * @returns The value of the internal ESR before clearing.
501 * @param pVCpu The cross context virtual CPU structure.
502 */
503DECLINLINE(uint32_t) apicClearAllErrors(PVMCPU pVCpu)
504{
505 VMCPU_ASSERT_EMT(pVCpu);
506 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
507 return ASMAtomicXchgU32(&pApicCpu->uEsrInternal, 0);
508}
509
510
511/**
512 * Signals the guest if a pending interrupt is ready to be serviced.
513 *
514 * @param pVCpu The cross context virtual CPU structure.
515 */
516static void apicSignalNextPendingIntr(PVMCPU pVCpu)
517{
518 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
519
520 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
521 if (pXApicPage->svr.u.fApicSoftwareEnable)
522 {
523 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1 /* rcNotFound */);
524 if (irrv >= 0)
525 {
526 Assert(irrv <= (int)UINT8_MAX);
527 uint8_t const uVector = irrv;
528 uint8_t const uPpr = pXApicPage->ppr.u8Ppr;
529 if ( !uPpr
530 || XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
531 {
532 Log2(("APIC%u: apicSignalNextPendingIntr: Signaling pending interrupt. uVector=%#x\n", pVCpu->idCpu, uVector));
533 apicSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
534 }
535 else
536 {
537 Log2(("APIC%u: apicSignalNextPendingIntr: Nothing to signal. uVector=%#x uPpr=%#x uTpr=%#x\n", pVCpu->idCpu,
538 uVector, uPpr, pXApicPage->tpr.u8Tpr));
539 }
540 }
541 }
542 else
543 {
544 Log2(("APIC%u: apicSignalNextPendingIntr: APIC software-disabled, clearing pending interrupt\n", pVCpu->idCpu));
545 apicClearInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
546 }
547}
548
549
550/**
551 * Sets the Spurious-Interrupt Vector Register (SVR).
552 *
553 * @returns Strict VBox status code.
554 * @param pVCpu The cross context virtual CPU structure.
555 * @param uSvr The SVR value.
556 */
557static VBOXSTRICTRC apicSetSvr(PVMCPU pVCpu, uint32_t uSvr)
558{
559 VMCPU_ASSERT_EMT(pVCpu);
560
561 uint32_t uValidMask = XAPIC_SVR_VALID;
562 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
563 if (pXApicPage->version.u.fEoiBroadcastSupression)
564 uValidMask |= XAPIC_SVR_SUPRESS_EOI_BROADCAST;
565
566 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
567 && (uSvr & ~uValidMask))
568 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_SVR, APICMSRACCESS_WRITE_RSVD_BITS);
569
570 Log2(("APIC%u: apicSetSvr: uSvr=%#RX32\n", pVCpu->idCpu, uSvr));
571 apicWriteRaw32(pXApicPage, XAPIC_OFF_SVR, uSvr);
572 if (!pXApicPage->svr.u.fApicSoftwareEnable)
573 {
574 /** @todo CMCI. */
575 pXApicPage->lvt_timer.u.u1Mask = 1;
576#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
577 pXApicPage->lvt_thermal.u.u1Mask = 1;
578#endif
579 pXApicPage->lvt_perf.u.u1Mask = 1;
580 pXApicPage->lvt_lint0.u.u1Mask = 1;
581 pXApicPage->lvt_lint1.u.u1Mask = 1;
582 pXApicPage->lvt_error.u.u1Mask = 1;
583 }
584
585 apicSignalNextPendingIntr(pVCpu);
586 return VINF_SUCCESS;
587}
588
589
590/**
591 * Sends an interrupt to one or more APICs.
592 *
593 * @returns Strict VBox status code.
594 * @param pVM The cross context VM structure.
595 * @param pVCpu The cross context virtual CPU structure, can be
596 * NULL if the source of the interrupt is not an
597 *                          APIC (e.g. a bus).
598 * @param uVector The interrupt vector.
599 * @param enmTriggerMode The trigger mode.
600 * @param enmDeliveryMode The delivery mode.
601 * @param pDestCpuSet The destination CPU set.
602 * @param pfIntrAccepted Where to store whether this interrupt was
603 * accepted by the target APIC(s) or not.
604 * Optional, can be NULL.
605 * @param rcRZ The return code if the operation cannot be
606 * performed in the current context.
607 */
608static VBOXSTRICTRC apicSendIntr(PVM pVM, PVMCPU pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode,
609 XAPICDELIVERYMODE enmDeliveryMode, PCVMCPUSET pDestCpuSet, bool *pfIntrAccepted, int rcRZ)
610{
611 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
612 VMCPUID const cCpus = pVM->cCpus;
613 bool fAccepted = false;
614 switch (enmDeliveryMode)
615 {
616 case XAPICDELIVERYMODE_FIXED:
617 {
618 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
619 {
620 if ( VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
621 && apicIsEnabled(&pVM->aCpus[idCpu]))
622 fAccepted = apicPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
623 }
624 break;
625 }
626
627 case XAPICDELIVERYMODE_LOWEST_PRIO:
628 {
629 VMCPUID const idCpu = VMCPUSET_FIND_FIRST_PRESENT(pDestCpuSet);
630 if ( idCpu < pVM->cCpus
631 && apicIsEnabled(&pVM->aCpus[idCpu]))
632 fAccepted = apicPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
633 else
634 AssertMsgFailed(("APIC: apicSendIntr: No CPU found for lowest-priority delivery mode! idCpu=%u\n", idCpu));
635 break;
636 }
637
638 case XAPICDELIVERYMODE_SMI:
639 {
640 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
641 {
642 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
643 {
644 Log2(("APIC: apicSendIntr: Raising SMI on VCPU%u\n", idCpu));
645 apicSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_SMI);
646 fAccepted = true;
647 }
648 }
649 break;
650 }
651
652 case XAPICDELIVERYMODE_NMI:
653 {
654 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
655 {
656 if ( VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
657 && apicIsEnabled(&pVM->aCpus[idCpu]))
658 {
659 Log2(("APIC: apicSendIntr: Raising NMI on VCPU%u\n", idCpu));
660 apicSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_NMI);
661 fAccepted = true;
662 }
663 }
664 break;
665 }
666
667 case XAPICDELIVERYMODE_INIT:
668 {
669#ifdef IN_RING3
670 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
671 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
672 {
673 Log2(("APIC: apicSendIntr: Issuing INIT to VCPU%u\n", idCpu));
674 VMMR3SendInitIpi(pVM, idCpu);
675 fAccepted = true;
676 }
677#else
678 /* We need to return to ring-3 to deliver the INIT. */
679 rcStrict = rcRZ;
680 fAccepted = true;
681#endif
682 break;
683 }
684
685 case XAPICDELIVERYMODE_STARTUP:
686 {
687#ifdef IN_RING3
688 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
689 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
690 {
691 Log2(("APIC: apicSendIntr: Issuing SIPI to VCPU%u\n", idCpu));
692 VMMR3SendStartupIpi(pVM, idCpu, uVector);
693 fAccepted = true;
694 }
695#else
696 /* We need to return to ring-3 to deliver the SIPI. */
697 rcStrict = rcRZ;
698 fAccepted = true;
699 Log2(("APIC: apicSendIntr: SIPI issued, returning to RZ. rc=%Rrc\n", rcRZ));
700#endif
701 break;
702 }
703
704 case XAPICDELIVERYMODE_EXTINT:
705 {
706 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
707 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
708 {
709 Log2(("APIC: apicSendIntr: Raising EXTINT on VCPU%u\n", idCpu));
710 apicSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_EXTINT);
711 fAccepted = true;
712 }
713 break;
714 }
715
716 default:
717 {
718 AssertMsgFailed(("APIC: apicSendIntr: Unsupported delivery mode %#x (%s)\n", enmDeliveryMode,
719 apicGetDeliveryModeName(enmDeliveryMode)));
720 break;
721 }
722 }
723
724 /*
725 * If an illegal vector is programmed, set the 'send illegal vector' error here if the
726 * interrupt is being sent by an APIC.
727 *
728 * The 'receive illegal vector' will be set on the target APIC when the interrupt
729 * gets generated, see APICPostInterrupt().
730 *
731 * See Intel spec. 10.5.3 "Error Handling".
732 */
733 if ( rcStrict != rcRZ
734 && pVCpu)
735 {
736 /*
737 * Flag only errors when the delivery mode is fixed and not others.
738 *
739 * Ubuntu 10.04-3 amd64 live CD with 2 VCPUs gets upset as it sends an SIPI to the
740 * 2nd VCPU with vector 6 and checks the ESR for no errors, see @bugref{8245#c86}.
741 */
742        /** @todo The spec says this for LVT, but not explicitly for ICR-lo;
743         *        it probably holds there too. */
744 if (enmDeliveryMode == XAPICDELIVERYMODE_FIXED)
745 {
746 if (RT_UNLIKELY(uVector <= XAPIC_ILLEGAL_VECTOR_END))
747 apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
748 }
749 }
750
751 if (pfIntrAccepted)
752 *pfIntrAccepted = fAccepted;
753
754 return rcStrict;
755}
756
757
758/**
759 * Checks if this APIC belongs to a logical destination.
760 *
761 * @returns true if the APIC belongs to the logical
762 * destination, false otherwise.
763 * @param pVCpu The cross context virtual CPU structure.
764 * @param fDest The destination mask.
765 *
766 * @thread Any.
767 */
768static bool apicIsLogicalDest(PVMCPU pVCpu, uint32_t fDest)
769{
770 if (XAPIC_IN_X2APIC_MODE(pVCpu))
771 {
772 /*
773 * Flat logical mode is not supported in x2APIC mode.
774 * In clustered logical mode, the 32-bit logical ID in the LDR is interpreted as follows:
775 * - High 16 bits is the cluster ID.
776 * - Low 16 bits: each bit represents a unique APIC within the cluster.
777 */
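        /* Example: an LDR of 0x00020001 denotes cluster 2, member bit 0; a
           destination of 0x00020003 matches it (same cluster, overlapping member
           bits) whereas 0x00030001 does not (different cluster). */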
778 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
779 uint32_t const u32Ldr = pX2ApicPage->ldr.u32LogicalApicId;
780 if (X2APIC_LDR_GET_CLUSTER_ID(u32Ldr) == (fDest & X2APIC_LDR_CLUSTER_ID))
781 return RT_BOOL(u32Ldr & fDest & X2APIC_LDR_LOGICAL_ID);
782 return false;
783 }
784
785#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
786 /*
787 * In both flat and clustered logical mode, a destination mask of all set bits indicates a broadcast.
788 * See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
789 */
790 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
791 if ((fDest & XAPIC_LDR_FLAT_LOGICAL_ID) == XAPIC_LDR_FLAT_LOGICAL_ID)
792 return true;
793
794 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
795 XAPICDESTFORMAT enmDestFormat = (XAPICDESTFORMAT)pXApicPage->dfr.u.u4Model;
796 if (enmDestFormat == XAPICDESTFORMAT_FLAT)
797 {
798 /* The destination mask is interpreted as a bitmap of 8 unique logical APIC IDs. */
799 uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
800 return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_FLAT_LOGICAL_ID);
801 }
802
803 /*
804 * In clustered logical mode, the 8-bit logical ID in the LDR is interpreted as follows:
805 * - High 4 bits is the cluster ID.
806 * - Low 4 bits: each bit represents a unique APIC within the cluster.
807 */
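    /* Example: an 8-bit LDR of 0x21 denotes cluster 2, member bit 1; a destination
       of 0x23 matches it, whereas 0x41 (cluster 4) does not. */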
808 Assert(enmDestFormat == XAPICDESTFORMAT_CLUSTER);
809 uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
810 if (XAPIC_LDR_CLUSTERED_GET_CLUSTER_ID(u8Ldr) == (fDest & XAPIC_LDR_CLUSTERED_CLUSTER_ID))
811 return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_CLUSTERED_LOGICAL_ID);
812 return false;
813#else
814# error "Implement Pentium and P6 family APIC architectures"
815#endif
816}
817
818
819/**
820 * Figures out the set of destination CPUs for a given destination mode, format
821 * and delivery mode setting.
822 *
823 * @param pVM The cross context VM structure.
824 * @param fDestMask The destination mask.
825 * @param fBroadcastMask The broadcast mask.
826 * @param enmDestMode The destination mode.
827 * @param enmDeliveryMode The delivery mode.
828 * @param pDestCpuSet The destination CPU set to update.
829 */
830static void apicGetDestCpuSet(PVM pVM, uint32_t fDestMask, uint32_t fBroadcastMask, XAPICDESTMODE enmDestMode,
831 XAPICDELIVERYMODE enmDeliveryMode, PVMCPUSET pDestCpuSet)
832{
833 VMCPUSET_EMPTY(pDestCpuSet);
834
835 /*
836 * Physical destination mode only supports either a broadcast or a single target.
837     *   - Broadcast with lowest-priority delivery mode is not supported[1]; we deliver it
838 * as a regular broadcast like in fixed delivery mode.
839 * - For a single target, lowest-priority delivery mode makes no sense. We deliver
840 * to the target like in fixed delivery mode.
841 *
842 * [1] See Intel spec. 10.6.2.1 "Physical Destination Mode".
843 */
844 if ( enmDestMode == XAPICDESTMODE_PHYSICAL
845 && enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
846 {
847 AssertMsgFailed(("APIC: Lowest-priority delivery using physical destination mode!"));
848 enmDeliveryMode = XAPICDELIVERYMODE_FIXED;
849 }
850
851 uint32_t const cCpus = pVM->cCpus;
852 if (enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
853 {
854 Assert(enmDestMode == XAPICDESTMODE_LOGICAL);
855#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
856 VMCPUID idCpuLowestTpr = NIL_VMCPUID;
857 uint8_t u8LowestTpr = UINT8_C(0xff);
858 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
859 {
860 PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
861 if (apicIsLogicalDest(pVCpuDest, fDestMask))
862 {
863 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDest);
864 uint8_t const u8Tpr = pXApicPage->tpr.u8Tpr; /* PAV */
865
866 /*
867 * If there is a tie for lowest priority, the local APIC with the highest ID is chosen.
868 * Hence the use of "<=" in the check below.
869 * See AMD spec. 16.6.2 "Lowest Priority Messages and Arbitration".
870 */
871 if (u8Tpr <= u8LowestTpr)
872 {
873 u8LowestTpr = u8Tpr;
874 idCpuLowestTpr = idCpu;
875 }
876 }
877 }
878 if (idCpuLowestTpr != NIL_VMCPUID)
879 VMCPUSET_ADD(pDestCpuSet, idCpuLowestTpr);
880#else
881# error "Implement Pentium and P6 family APIC architectures"
882#endif
883 return;
884 }
885
886 /*
887 * x2APIC:
888 * - In both physical and logical destination mode, a destination mask of 0xffffffff implies a broadcast[1].
889 * xAPIC:
890 * - In physical destination mode, a destination mask of 0xff implies a broadcast[2].
891 * - In both flat and clustered logical mode, a destination mask of 0xff implies a broadcast[3].
892 *
893 * [1] See Intel spec. 10.12.9 "ICR Operation in x2APIC Mode".
894 * [2] See Intel spec. 10.6.2.1 "Physical Destination Mode".
895     * [3] See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
896 */
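    /* Example: in xAPIC mode fBroadcastMask is 0xff, so an IPI with a destination
       of 0xff lands here and gets delivered to every VCPU. */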
897 if ((fDestMask & fBroadcastMask) == fBroadcastMask)
898 {
899 VMCPUSET_FILL(pDestCpuSet);
900 return;
901 }
902
903 if (enmDestMode == XAPICDESTMODE_PHYSICAL)
904 {
905 /* The destination mask is interpreted as the physical APIC ID of a single target. */
906#if 1
907 /* Since our physical APIC ID is read-only to software, set the corresponding bit in the CPU set. */
908 if (RT_LIKELY(fDestMask < cCpus))
909 VMCPUSET_ADD(pDestCpuSet, fDestMask);
910#else
911 /* The physical APIC ID may not match our VCPU ID, search through the list of targets. */
912 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
913 {
914 PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
915 if (XAPIC_IN_X2APIC_MODE(pVCpuDest))
916 {
917 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpuDest);
918 if (pX2ApicPage->id.u32ApicId == fDestMask)
919 VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
920 }
921 else
922 {
923 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDest);
924 if (pXApicPage->id.u8ApicId == (uint8_t)fDestMask)
925 VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
926 }
927 }
928#endif
929 }
930 else
931 {
932 Assert(enmDestMode == XAPICDESTMODE_LOGICAL);
933
934 /* A destination mask of all 0's implies no target APICs (since it's interpreted as a bitmap or partial bitmap). */
935 if (RT_UNLIKELY(!fDestMask))
936 return;
937
938 /* The destination mask is interpreted as a bitmap of software-programmable logical APIC ID of the target APICs. */
939 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
940 {
941 PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
942 if (apicIsLogicalDest(pVCpuDest, fDestMask))
943 VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
944 }
945 }
946}
947
948
949/**
950 * Sends an Interprocessor Interrupt (IPI) using values from the Interrupt
951 * Command Register (ICR).
952 *
953 * @returns VBox status code.
954 * @param pVCpu The cross context virtual CPU structure.
955 * @param rcRZ The return code if the operation cannot be
956 * performed in the current context.
957 */
958DECLINLINE(VBOXSTRICTRC) apicSendIpi(PVMCPU pVCpu, int rcRZ)
959{
960 VMCPU_ASSERT_EMT(pVCpu);
961
962 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
963 XAPICDELIVERYMODE const enmDeliveryMode = (XAPICDELIVERYMODE)pXApicPage->icr_lo.u.u3DeliveryMode;
964 XAPICDESTMODE const enmDestMode = (XAPICDESTMODE)pXApicPage->icr_lo.u.u1DestMode;
965 XAPICINITLEVEL const enmInitLevel = (XAPICINITLEVEL)pXApicPage->icr_lo.u.u1Level;
966 XAPICTRIGGERMODE const enmTriggerMode = (XAPICTRIGGERMODE)pXApicPage->icr_lo.u.u1TriggerMode;
967 XAPICDESTSHORTHAND const enmDestShorthand = (XAPICDESTSHORTHAND)pXApicPage->icr_lo.u.u2DestShorthand;
968 uint8_t const uVector = pXApicPage->icr_lo.u.u8Vector;
969
970 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
971 uint32_t const fDest = XAPIC_IN_X2APIC_MODE(pVCpu) ? pX2ApicPage->icr_hi.u32IcrHi : pXApicPage->icr_hi.u.u8Dest;
972
973#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
974 /*
975     * INIT Level De-assert is not supported on Pentium 4 and Xeon processors.
976 * Apparently, this also applies to NMI, SMI, lowest-priority and fixed delivery modes,
977 * see @bugref{8245#c116}.
978 *
979 * See AMD spec. 16.5 "Interprocessor Interrupts (IPI)" for a table of valid ICR combinations.
980 */
981 if ( enmTriggerMode == XAPICTRIGGERMODE_LEVEL
982 && enmInitLevel == XAPICINITLEVEL_DEASSERT
983 && ( enmDeliveryMode == XAPICDELIVERYMODE_FIXED
984 || enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO
985 || enmDeliveryMode == XAPICDELIVERYMODE_SMI
986 || enmDeliveryMode == XAPICDELIVERYMODE_NMI
987 || enmDeliveryMode == XAPICDELIVERYMODE_INIT))
988 {
989        Log2(("APIC%u: %s level de-assert unsupported, ignoring!\n", pVCpu->idCpu, apicGetDeliveryModeName(enmDeliveryMode)));
990 return VINF_SUCCESS;
991 }
992#else
993# error "Implement Pentium and P6 family APIC architectures"
994#endif
995
996 /*
997 * The destination and delivery modes are ignored/by-passed when a destination shorthand is specified.
998 * See Intel spec. 10.6.2.3 "Broadcast/Self Delivery Mode".
999 */
1000 VMCPUSET DestCpuSet;
1001 switch (enmDestShorthand)
1002 {
1003 case XAPICDESTSHORTHAND_NONE:
1004 {
1005 PVM pVM = pVCpu->CTX_SUFF(pVM);
1006 uint32_t const fBroadcastMask = XAPIC_IN_X2APIC_MODE(pVCpu) ? X2APIC_ID_BROADCAST_MASK : XAPIC_ID_BROADCAST_MASK;
1007 apicGetDestCpuSet(pVM, fDest, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
1008 break;
1009 }
1010
1011 case XAPICDESTSHORTHAND_SELF:
1012 {
1013 VMCPUSET_EMPTY(&DestCpuSet);
1014 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
1015 break;
1016 }
1017
1018 case XAPIDDESTSHORTHAND_ALL_INCL_SELF:
1019 {
1020 VMCPUSET_FILL(&DestCpuSet);
1021 break;
1022 }
1023
1024 case XAPICDESTSHORTHAND_ALL_EXCL_SELF:
1025 {
1026 VMCPUSET_FILL(&DestCpuSet);
1027 VMCPUSET_DEL(&DestCpuSet, pVCpu->idCpu);
1028 break;
1029 }
1030 }
1031
1032 return apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
1033 NULL /* pfIntrAccepted */, rcRZ);
1034}
1035
1036
1037/**
1038 * Sets the Interrupt Command Register (ICR) high dword.
1039 *
1040 * @returns Strict VBox status code.
1041 * @param pVCpu The cross context virtual CPU structure.
1042 * @param uIcrHi The ICR high dword.
1043 */
1044static VBOXSTRICTRC apicSetIcrHi(PVMCPU pVCpu, uint32_t uIcrHi)
1045{
1046 VMCPU_ASSERT_EMT(pVCpu);
1047 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1048
1049 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1050 pXApicPage->icr_hi.all.u32IcrHi = uIcrHi & XAPIC_ICR_HI_DEST;
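    /* Only the destination field (bits 31:24) is writable in xAPIC mode; e.g.
       writing 0x03000000 selects destination 3. */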
1051 Log2(("APIC%u: apicSetIcrHi: uIcrHi=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_hi.all.u32IcrHi));
1052
1053 return VINF_SUCCESS;
1054}
1055
1056
1057/**
1058 * Sets the Interrupt Command Register (ICR) low dword.
1059 *
1060 * @returns Strict VBox status code.
1061 * @param pVCpu The cross context virtual CPU structure.
1062 * @param uIcrLo The ICR low dword.
1063 * @param rcRZ The return code if the operation cannot be performed
1064 * in the current context.
1065 */
1066static VBOXSTRICTRC apicSetIcrLo(PVMCPU pVCpu, uint32_t uIcrLo, int rcRZ)
1067{
1068 VMCPU_ASSERT_EMT(pVCpu);
1069
1070 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1071 pXApicPage->icr_lo.all.u32IcrLo = uIcrLo & XAPIC_ICR_LO_WR_VALID;
1072 Log2(("APIC%u: apicSetIcrLo: uIcrLo=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_lo.all.u32IcrLo));
1073 STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrLoWrite);
1074
1075 return apicSendIpi(pVCpu, rcRZ);
1076}
1077
1078
1079/**
1080 * Sets the Interrupt Command Register (ICR).
1081 *
1082 * @returns Strict VBox status code.
1083 * @param pVCpu The cross context virtual CPU structure.
1084 * @param u64Icr The ICR (High and Low combined).
1085 * @param rcRZ The return code if the operation cannot be performed
1086 * in the current context.
1087 */
1088static VBOXSTRICTRC apicSetIcr(PVMCPU pVCpu, uint64_t u64Icr, int rcRZ)
1089{
1090 VMCPU_ASSERT_EMT(pVCpu);
1091 Assert(XAPIC_IN_X2APIC_MODE(pVCpu));
1092
1093 /* Validate. */
1094 uint32_t const uLo = RT_LO_U32(u64Icr);
1095 if (RT_LIKELY(!(uLo & ~XAPIC_ICR_LO_WR_VALID)))
1096 {
1097 /* Update high dword first, then update the low dword which sends the IPI. */
1098 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
1099 pX2ApicPage->icr_hi.u32IcrHi = RT_HI_U32(u64Icr);
1100 return apicSetIcrLo(pVCpu, uLo, rcRZ);
1101 }
1102 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ICR, APICMSRACCESS_WRITE_RSVD_BITS);
1103}
1104
1105
1106/**
1107 * Sets the Error Status Register (ESR).
1108 *
1109 * @returns Strict VBox status code.
1110 * @param pVCpu The cross context virtual CPU structure.
1111 * @param uEsr The ESR value.
1112 */
1113static VBOXSTRICTRC apicSetEsr(PVMCPU pVCpu, uint32_t uEsr)
1114{
1115 VMCPU_ASSERT_EMT(pVCpu);
1116
1117 Log2(("APIC%u: apicSetEsr: uEsr=%#RX32\n", pVCpu->idCpu, uEsr));
1118
1119 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1120 && (uEsr & ~XAPIC_ESR_WO_VALID))
1121 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ESR, APICMSRACCESS_WRITE_RSVD_BITS);
1122
1123 /*
1124     * A write to the ESR causes the internal error state to be loaded into the register,
1125 * clearing the original state. See AMD spec. 16.4.6 "APIC Error Interrupts".
1126 */
1127 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1128 pXApicPage->esr.all.u32Errors = apicClearAllErrors(pVCpu);
1129 return VINF_SUCCESS;
1130}
1131
1132
1133/**
1134 * Updates the Processor Priority Register (PPR).
1135 *
1136 * @param pVCpu The cross context virtual CPU structure.
1137 */
1138static void apicUpdatePpr(PVMCPU pVCpu)
1139{
1140 VMCPU_ASSERT_EMT(pVCpu);
1141
1142 /* See Intel spec 10.8.3.1 "Task and Processor Priorities". */
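    /* Example: with TPR=0x45 (priority class 4) and highest in-service vector 0x32
       (class 3), 4 >= 3 so the PPR becomes the full TPR (0x45). With TPR=0x25
       (class 2 < 3), the PPR becomes the ISR class with a zero sub-class, 0x30. */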
1143 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1144 uint8_t const uIsrv = apicGetHighestSetBitInReg(&pXApicPage->isr, 0 /* rcNotFound */);
1145 uint8_t uPpr;
1146 if (XAPIC_TPR_GET_TP(pXApicPage->tpr.u8Tpr) >= XAPIC_PPR_GET_PP(uIsrv))
1147 uPpr = pXApicPage->tpr.u8Tpr;
1148 else
1149 uPpr = XAPIC_PPR_GET_PP(uIsrv);
1150 pXApicPage->ppr.u8Ppr = uPpr;
1151}
1152
1153
1154/**
1155 * Gets the Processor Priority Register (PPR).
1156 *
1157 * @returns The PPR value.
1158 * @param pVCpu The cross context virtual CPU structure.
1159 */
1160static uint8_t apicGetPpr(PVMCPU pVCpu)
1161{
1162 VMCPU_ASSERT_EMT(pVCpu);
1163 STAM_COUNTER_INC(&pVCpu->apic.s.StatTprRead);
1164
1165 /*
1166 * With virtualized APIC registers or with TPR virtualization, the hardware may
1167 * update ISR/TPR transparently. We thus re-calculate the PPR which may be out of sync.
1168 * See Intel spec. 29.2.2 "Virtual-Interrupt Delivery".
1169 *
1170 * In all other instances, whenever the TPR or ISR changes, we need to update the PPR
1171 * as well (e.g. like we do manually in apicR3InitIpi and by calling apicUpdatePpr).
1172 */
1173 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1174 if (pApic->fVirtApicRegsEnabled) /** @todo re-think this */
1175 apicUpdatePpr(pVCpu);
1176 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
1177 return pXApicPage->ppr.u8Ppr;
1178}
1179
1180
1181/**
1182 * Sets the Task Priority Register (TPR).
1183 *
1184 * @returns Strict VBox status code.
1185 * @param pVCpu The cross context virtual CPU structure.
1186 * @param uTpr The TPR value.
1187 */
1188static VBOXSTRICTRC apicSetTpr(PVMCPU pVCpu, uint32_t uTpr)
1189{
1190 VMCPU_ASSERT_EMT(pVCpu);
1191
1192 Log2(("APIC%u: apicSetTpr: uTpr=%#RX32\n", pVCpu->idCpu, uTpr));
1193 STAM_COUNTER_INC(&pVCpu->apic.s.StatTprWrite);
1194
1195 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1196 && (uTpr & ~XAPIC_TPR_VALID))
1197 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TPR, APICMSRACCESS_WRITE_RSVD_BITS);
1198
1199 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1200 pXApicPage->tpr.u8Tpr = uTpr;
1201 apicUpdatePpr(pVCpu);
1202 apicSignalNextPendingIntr(pVCpu);
1203 return VINF_SUCCESS;
1204}
1205
1206
1207/**
1208 * Sets the End-Of-Interrupt (EOI) register.
1209 *
1210 * @returns Strict VBox status code.
1211 * @param pVCpu The cross context virtual CPU structure.
1212 * @param uEoi The EOI value.
1213 */
1214static VBOXSTRICTRC apicSetEoi(PVMCPU pVCpu, uint32_t uEoi)
1215{
1216 VMCPU_ASSERT_EMT(pVCpu);
1217
1218 Log2(("APIC%u: apicSetEoi: uEoi=%#RX32\n", pVCpu->idCpu, uEoi));
1219 STAM_COUNTER_INC(&pVCpu->apic.s.StatEoiWrite);
1220
1221 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1222 && (uEoi & ~XAPIC_EOI_WO_VALID))
1223 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_EOI, APICMSRACCESS_WRITE_RSVD_BITS);
1224
1225 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1226 int isrv = apicGetHighestSetBitInReg(&pXApicPage->isr, -1 /* rcNotFound */);
1227 if (isrv >= 0)
1228 {
1229 /*
1230 * Broadcast the EOI to the I/O APIC(s).
1231 *
1232         * We'll handle the EOI broadcast first as there is a tiny chance we get rescheduled to
1233         * ring-3 due to contention on the I/O APIC lock. This way we don't touch the rest of
1234         * the APIC state and can simply restart the EOI write operation from ring-3.
1235 */
1236 Assert(isrv <= (int)UINT8_MAX);
1237 uint8_t const uVector = isrv;
1238 bool const fLevelTriggered = apicTestVectorInReg(&pXApicPage->tmr, uVector);
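            /* Only level-triggered interrupts (TMR bit set when the interrupt was
               accepted) need the EOI broadcast, so the I/O APIC can de-assert and
               re-sample the interrupt line. */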
1239 if (fLevelTriggered)
1240 {
1241 int rc = apicBusBroadcastEoi(pVCpu, uVector);
1242 if (rc == VINF_SUCCESS)
1243 { /* likely */ }
1244 else
1245 return XAPIC_IN_X2APIC_MODE(pVCpu) ? VINF_CPUM_R3_MSR_WRITE : VINF_IOM_R3_MMIO_WRITE;
1246
1247 /*
1248 * Clear the vector from the TMR.
1249 *
1250             * The broadcast to the I/O APIC can cause new interrupts to arrive via the bus. However,
1251             * APICUpdatePendingInterrupts(), which updates the TMR, can only run on the EMT we are
1252             * currently on, so there is no possibility of concurrent updates.
1253 */
1254 apicClearVectorInReg(&pXApicPage->tmr, uVector);
1255
1256 /*
1257 * Clear the remote IRR bit for level-triggered, fixed mode LINT0 interrupt.
1258 * The LINT1 pin does not support level-triggered interrupts.
1259 * See Intel spec. 10.5.1 "Local Vector Table".
1260 */
1261 uint32_t const uLvtLint0 = pXApicPage->lvt_lint0.all.u32LvtLint0;
1262 if ( XAPIC_LVT_GET_REMOTE_IRR(uLvtLint0)
1263 && XAPIC_LVT_GET_VECTOR(uLvtLint0) == uVector
1264 && XAPIC_LVT_GET_DELIVERY_MODE(uLvtLint0) == XAPICDELIVERYMODE_FIXED)
1265 {
1266 ASMAtomicAndU32((volatile uint32_t *)&pXApicPage->lvt_lint0.all.u32LvtLint0, ~XAPIC_LVT_REMOTE_IRR);
1267 Log2(("APIC%u: apicSetEoi: Cleared remote-IRR for LINT0. uVector=%#x\n", pVCpu->idCpu, uVector));
1268 }
1269
1270 Log2(("APIC%u: apicSetEoi: Cleared level triggered interrupt from TMR. uVector=%#x\n", pVCpu->idCpu, uVector));
1271 }
1272
1273 /*
1274 * Mark interrupt as serviced, update the PPR and signal pending interrupts.
1275 */
1276 Log2(("APIC%u: apicSetEoi: Clearing interrupt from ISR. uVector=%#x\n", pVCpu->idCpu, uVector));
1277 apicClearVectorInReg(&pXApicPage->isr, uVector);
1278 apicUpdatePpr(pVCpu);
1279 apicSignalNextPendingIntr(pVCpu);
1280 }
1281 else
1282 {
1283#ifdef DEBUG_ramshankar
1284 /** @todo Figure out if this is done intentionally by guests or is a bug
1285 * in our emulation. Happened with Win10 SMP VM during reboot after
1286 * installation of guest additions with 3D support. */
1287 AssertMsgFailed(("APIC%u: apicSetEoi: Failed to find any ISR bit\n", pVCpu->idCpu));
1288#endif
1289 }
1290
1291 return VINF_SUCCESS;
1292}
1293
1294
1295/**
1296 * Sets the Logical Destination Register (LDR).
1297 *
1298 * @returns Strict VBox status code.
1299 * @param pVCpu The cross context virtual CPU structure.
1300 * @param uLdr The LDR value.
1301 *
1302 * @remarks LDR is read-only in x2APIC mode.
1303 */
1304static VBOXSTRICTRC apicSetLdr(PVMCPU pVCpu, uint32_t uLdr)
1305{
1306 VMCPU_ASSERT_EMT(pVCpu);
1307 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1308
1309 Log2(("APIC%u: apicSetLdr: uLdr=%#RX32\n", pVCpu->idCpu, uLdr));
1310
1311 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1312 apicWriteRaw32(pXApicPage, XAPIC_OFF_LDR, uLdr & XAPIC_LDR_VALID);
1313 return VINF_SUCCESS;
1314}
1315
1316
1317/**
1318 * Sets the Destination Format Register (DFR).
1319 *
1320 * @returns Strict VBox status code.
1321 * @param pVCpu The cross context virtual CPU structure.
1322 * @param uDfr The DFR value.
1323 *
1324 * @remarks DFR is not available in x2APIC mode.
1325 */
1326static VBOXSTRICTRC apicSetDfr(PVMCPU pVCpu, uint32_t uDfr)
1327{
1328 VMCPU_ASSERT_EMT(pVCpu);
1329 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1330
1331 uDfr &= XAPIC_DFR_VALID;
1332 uDfr |= XAPIC_DFR_RSVD_MB1;
1333
1334 Log2(("APIC%u: apicSetDfr: uDfr=%#RX32\n", pVCpu->idCpu, uDfr));
1335
1336 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1337 apicWriteRaw32(pXApicPage, XAPIC_OFF_DFR, uDfr);
1338 return VINF_SUCCESS;
1339}
1340
1341
1342/**
1343 * Sets the Timer Divide Configuration Register (DCR).
1344 *
1345 * @returns Strict VBox status code.
1346 * @param pVCpu The cross context virtual CPU structure.
1347 * @param uTimerDcr The timer DCR value.
1348 */
1349static VBOXSTRICTRC apicSetTimerDcr(PVMCPU pVCpu, uint32_t uTimerDcr)
1350{
1351 VMCPU_ASSERT_EMT(pVCpu);
1352 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1353 && (uTimerDcr & ~XAPIC_TIMER_DCR_VALID))
1354 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TIMER_DCR, APICMSRACCESS_WRITE_RSVD_BITS);
1355
1356 Log2(("APIC%u: apicSetTimerDcr: uTimerDcr=%#RX32\n", pVCpu->idCpu, uTimerDcr));
1357
1358 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1359 apicWriteRaw32(pXApicPage, XAPIC_OFF_TIMER_DCR, uTimerDcr);
1360 return VINF_SUCCESS;
1361}
1362
1363
1364/**
1365 * Gets the timer's Current Count Register (CCR).
1366 *
1367 * @returns VBox status code.
1368 * @param pVCpu The cross context virtual CPU structure.
1369 * @param rcBusy The busy return code for the timer critical section.
1370 * @param puValue Where to store the LVT timer CCR.
1371 */
1372static VBOXSTRICTRC apicGetTimerCcr(PVMCPU pVCpu, int rcBusy, uint32_t *puValue)
1373{
1374 VMCPU_ASSERT_EMT(pVCpu);
1375 Assert(puValue);
1376
1377 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
1378 *puValue = 0;
1379
1380 /* In TSC-deadline mode, CCR returns 0, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
1381 if (pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
1382 return VINF_SUCCESS;
1383
1384 /* If the initial-count register is 0, CCR returns 0 as it cannot exceed the ICR. */
1385 uint32_t const uInitialCount = pXApicPage->timer_icr.u32InitialCount;
1386 if (!uInitialCount)
1387 return VINF_SUCCESS;
1388
1389 /*
1390 * Reading the virtual-sync clock requires locking its timer because it's not
1391 * a simple atomic operation, see tmVirtualSyncGetEx().
1392 *
1393 * We also need to lock before reading the timer CCR, see apicR3TimerCallback().
1394 */
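    /* The CCR is effectively: initial count - (elapsed virtual-sync ticks >> divider
       shift), clamped to 0 once the timer has expired. */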
1395 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
1396 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
1397
1398 int rc = TMTimerLock(pTimer, rcBusy);
1399 if (rc == VINF_SUCCESS)
1400 {
1401 /* If the current-count register is 0, it implies the timer expired. */
1402 uint32_t const uCurrentCount = pXApicPage->timer_ccr.u32CurrentCount;
1403 if (uCurrentCount)
1404 {
1405 uint64_t const cTicksElapsed = TMTimerGet(pApicCpu->CTX_SUFF(pTimer)) - pApicCpu->u64TimerInitial;
1406 TMTimerUnlock(pTimer);
1407 uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
1408 uint64_t const uDelta = cTicksElapsed >> uTimerShift;
1409 if (uInitialCount > uDelta)
1410 *puValue = uInitialCount - uDelta;
1411 }
1412 else
1413 TMTimerUnlock(pTimer);
1414 }
1415 return rc;
1416}
1417
1418
1419/**
1420 * Sets the timer's Initial-Count Register (ICR).
1421 *
1422 * @returns Strict VBox status code.
1423 * @param pVCpu The cross context virtual CPU structure.
1424 * @param rcBusy The busy return code for the timer critical section.
1425 * @param uInitialCount The timer ICR.
1426 */
1427static VBOXSTRICTRC apicSetTimerIcr(PVMCPU pVCpu, int rcBusy, uint32_t uInitialCount)
1428{
1429 VMCPU_ASSERT_EMT(pVCpu);
1430
1431 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1432 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
1433 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1434 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
1435
1436 Log2(("APIC%u: apicSetTimerIcr: uInitialCount=%#RX32\n", pVCpu->idCpu, uInitialCount));
1437 STAM_COUNTER_INC(&pApicCpu->StatTimerIcrWrite);
1438
1439 /* In TSC-deadline mode, timer ICR writes are ignored, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
1440 if ( pApic->fSupportsTscDeadline
1441 && pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
1442 return VINF_SUCCESS;
1443
1444 /*
1445 * The timer CCR may be modified by apicR3TimerCallback() in parallel,
1446 * so obtain the lock -before- updating it here to be consistent with the
1447 * timer ICR. We rely on CCR being consistent in apicGetTimerCcr().
1448 */
1449 int rc = TMTimerLock(pTimer, rcBusy);
1450 if (rc == VINF_SUCCESS)
1451 {
1452 pXApicPage->timer_icr.u32InitialCount = uInitialCount;
1453 pXApicPage->timer_ccr.u32CurrentCount = uInitialCount;
1454 if (uInitialCount)
1455 apicStartTimer(pVCpu, uInitialCount);
1456 else
1457 apicStopTimer(pVCpu);
1458 TMTimerUnlock(pTimer);
1459 }
1460 return rc;
1461}
1462
1463
1464/**
1465 * Sets an LVT entry.
1466 *
1467 * @returns Strict VBox status code.
1468 * @param pVCpu The cross context virtual CPU structure.
1469 * @param offLvt The LVT entry offset in the xAPIC page.
1470 * @param uLvt The LVT value to set.
1471 */
1472static VBOXSTRICTRC apicSetLvtEntry(PVMCPU pVCpu, uint16_t offLvt, uint32_t uLvt)
1473{
1474 VMCPU_ASSERT_EMT(pVCpu);
1475
1476#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1477 AssertMsg( offLvt == XAPIC_OFF_LVT_TIMER
1478 || offLvt == XAPIC_OFF_LVT_THERMAL
1479 || offLvt == XAPIC_OFF_LVT_PERF
1480 || offLvt == XAPIC_OFF_LVT_LINT0
1481 || offLvt == XAPIC_OFF_LVT_LINT1
1482 || offLvt == XAPIC_OFF_LVT_ERROR,
1483 ("APIC%u: apicSetLvtEntry: invalid offset, offLvt=%#RX16, uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));
1484
1485 /*
1486     * If TSC-deadline mode isn't supported, ignore the bit in xAPIC mode
1487 * and raise #GP(0) in x2APIC mode.
1488 */
1489 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1490 if (offLvt == XAPIC_OFF_LVT_TIMER)
1491 {
1492 if ( !pApic->fSupportsTscDeadline
1493 && (uLvt & XAPIC_LVT_TIMER_TSCDEADLINE))
1494 {
1495 if (XAPIC_IN_X2APIC_MODE(pVCpu))
1496 return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
1497 uLvt &= ~XAPIC_LVT_TIMER_TSCDEADLINE;
1498 /** @todo TSC-deadline timer mode transition */
1499 }
1500 }
1501
1502 /*
1503 * Validate rest of the LVT bits.
1504 */
1505 uint16_t const idxLvt = (offLvt - XAPIC_OFF_LVT_START) >> 4;
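    /* LVT registers are spaced 16 bytes apart in the xAPIC page, hence the shift by 4. */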
1506 AssertReturn(idxLvt < RT_ELEMENTS(g_au32LvtValidMasks), VERR_OUT_OF_RANGE);
1507
1508 /*
1509 * For x2APIC, disallow setting of invalid/reserved bits.
1510 * For xAPIC, mask out invalid/reserved bits (i.e. ignore them).
1511 */
1512 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1513 && (uLvt & ~g_au32LvtValidMasks[idxLvt]))
1514 return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
1515
1516 uLvt &= g_au32LvtValidMasks[idxLvt];
1517
1518 /*
1519 * In the software-disabled state, LVT mask-bit must remain set and attempts to clear the mask
1520 * bit must be ignored. See Intel spec. 10.4.7.2 "Local APIC State After It Has Been Software Disabled".
1521 */
1522 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1523 if (!pXApicPage->svr.u.fApicSoftwareEnable)
1524 uLvt |= XAPIC_LVT_MASK;
1525
1526 /*
1527     * It is unclear whether we should signal a 'send illegal vector' error here and skip updating
1528     * the LVT entry when the delivery mode is 'fixed'[1], update it while also signaling the
1529     * error, or not signal the error at all. For now, we'll allow setting illegal vectors into the LVT
1530 * but set the 'send illegal vector' error here. The 'receive illegal vector' error will be set if
1531 * the interrupt for the vector happens to be generated, see APICPostInterrupt().
1532 *
1533 * [1] See Intel spec. 10.5.2 "Valid Interrupt Vectors".
1534 */
1535 if (RT_UNLIKELY( XAPIC_LVT_GET_VECTOR(uLvt) <= XAPIC_ILLEGAL_VECTOR_END
1536 && XAPIC_LVT_GET_DELIVERY_MODE(uLvt) == XAPICDELIVERYMODE_FIXED))
1537 apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
1538
1539 Log2(("APIC%u: apicSetLvtEntry: offLvt=%#RX16 uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));
1540
1541 apicWriteRaw32(pXApicPage, offLvt, uLvt);
1542 return VINF_SUCCESS;
1543#else
1544# error "Implement Pentium and P6 family APIC architectures"
1545#endif /* XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4 */
1546}
1547
1548
1549#if 0
1550/**
1551 * Sets an LVT entry in the extended LVT range.
1552 *
1553 * @returns VBox status code.
1554 * @param pVCpu The cross context virtual CPU structure.
1555 * @param offLvt The LVT entry offset in the xAPIC page.
1556 * @param uValue The LVT value to set.
1557 */
1558static int apicSetLvtExtEntry(PVMCPU pVCpu, uint16_t offLvt, uint32_t uLvt)
1559{
1560 VMCPU_ASSERT_EMT(pVCpu);
1561 AssertMsg(offLvt == XAPIC_OFF_CMCI, ("APIC%u: apicSetLvt1Entry: invalid offset %#RX16\n", pVCpu->idCpu, offLvt));
1562
1563 /** @todo support CMCI. */
1564 return VERR_NOT_IMPLEMENTED;
1565}
1566#endif
1567
1568
1569/**
1570 * Hints TM about the APIC timer frequency.
1571 *
1572 * @param pApicCpu The APIC CPU state.
1573 * @param uInitialCount The new initial count.
1574 * @param uTimerShift The new timer shift.
1575 * @thread Any.
1576 */
1577void apicHintTimerFreq(PAPICCPU pApicCpu, uint32_t uInitialCount, uint8_t uTimerShift)
1578{
1579 Assert(pApicCpu);
1580
1581 if ( pApicCpu->uHintedTimerInitialCount != uInitialCount
1582 || pApicCpu->uHintedTimerShift != uTimerShift)
1583 {
1584 uint32_t uHz;
1585 if (uInitialCount)
1586 {
1587 uint64_t cTicksPerPeriod = (uint64_t)uInitialCount << uTimerShift;
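            /* E.g. an initial count of 1000000 with divide-by-16 (uTimerShift = 4)
               on a 1 GHz timer clock hints 10^9 / (10^6 * 16), roughly 62 Hz. */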
1588 uHz = TMTimerGetFreq(pApicCpu->CTX_SUFF(pTimer)) / cTicksPerPeriod;
1589 }
1590 else
1591 uHz = 0;
1592
1593 TMTimerSetFrequencyHint(pApicCpu->CTX_SUFF(pTimer), uHz);
1594 pApicCpu->uHintedTimerInitialCount = uInitialCount;
1595 pApicCpu->uHintedTimerShift = uTimerShift;
1596 }
1597}
1598
1599
1600/**
1601 * Reads an APIC register.
1602 *
1603 * @returns VBox status code.
1604 * @param pApicDev The APIC device instance.
1605 * @param pVCpu The cross context virtual CPU structure.
1606 * @param offReg The offset of the register being read.
1607 * @param puValue Where to store the register value.
1608 */
1609DECLINLINE(VBOXSTRICTRC) apicReadRegister(PAPICDEV pApicDev, PVMCPU pVCpu, uint16_t offReg, uint32_t *puValue)
1610{
1611 VMCPU_ASSERT_EMT(pVCpu);
1612 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1613
1614 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1615 uint32_t uValue = 0;
1616 VBOXSTRICTRC rc = VINF_SUCCESS;
1617 switch (offReg)
1618 {
1619 case XAPIC_OFF_ID:
1620 case XAPIC_OFF_VERSION:
1621 case XAPIC_OFF_TPR:
1622 case XAPIC_OFF_EOI:
1623 case XAPIC_OFF_RRD:
1624 case XAPIC_OFF_LDR:
1625 case XAPIC_OFF_DFR:
1626 case XAPIC_OFF_SVR:
1627 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1628 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1629 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1630 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1631 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1632 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1633 case XAPIC_OFF_ESR:
1634 case XAPIC_OFF_ICR_LO:
1635 case XAPIC_OFF_ICR_HI:
1636 case XAPIC_OFF_LVT_TIMER:
1637#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1638 case XAPIC_OFF_LVT_THERMAL:
1639#endif
1640 case XAPIC_OFF_LVT_PERF:
1641 case XAPIC_OFF_LVT_LINT0:
1642 case XAPIC_OFF_LVT_LINT1:
1643 case XAPIC_OFF_LVT_ERROR:
1644 case XAPIC_OFF_TIMER_ICR:
1645 case XAPIC_OFF_TIMER_DCR:
1646 {
1647 Assert( !XAPIC_IN_X2APIC_MODE(pVCpu)
1648 || ( offReg != XAPIC_OFF_DFR
1649 && offReg != XAPIC_OFF_ICR_HI
1650 && offReg != XAPIC_OFF_EOI));
1651 uValue = apicReadRaw32(pXApicPage, offReg);
1652 Log2(("APIC%u: apicReadRegister: offReg=%#x uValue=%#x\n", pVCpu->idCpu, offReg, uValue));
1653 break;
1654 }
1655
1656 case XAPIC_OFF_PPR:
1657 {
1658 uValue = apicGetPpr(pVCpu);
1659 break;
1660 }
1661
1662 case XAPIC_OFF_TIMER_CCR:
1663 {
1664 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1665 rc = apicGetTimerCcr(pVCpu, VINF_IOM_R3_MMIO_READ, &uValue);
1666 break;
1667 }
1668
1669 case XAPIC_OFF_APR:
1670 {
1671#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1672 /* Unsupported on Pentium 4 and Xeon CPUs, invalid in x2APIC mode. */
1673 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1674#else
1675# error "Implement Pentium and P6 family APIC architectures"
1676#endif
1677 break;
1678 }
1679
1680 default:
1681 {
1682 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1683 rc = PDMDevHlpDBGFStop(pApicDev->CTX_SUFF(pDevIns), RT_SRC_POS, "VCPU[%u]: offReg=%#RX16\n", pVCpu->idCpu,
1684 offReg);
1685 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1686 break;
1687 }
1688 }
1689
1690 *puValue = uValue;
1691 return rc;
1692}
1693
1694
1695/**
1696 * Writes an APIC register.
1697 *
1698 * @returns Strict VBox status code.
1699 * @param pApicDev The APIC device instance.
1700 * @param pVCpu The cross context virtual CPU structure.
1701 * @param offReg The offset of the register being written.
1702 * @param uValue The register value.
1703 */
1704DECLINLINE(VBOXSTRICTRC) apicWriteRegister(PAPICDEV pApicDev, PVMCPU pVCpu, uint16_t offReg, uint32_t uValue)
1705{
1706 VMCPU_ASSERT_EMT(pVCpu);
1707 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1708 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1709
1710 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1711 switch (offReg)
1712 {
1713 case XAPIC_OFF_TPR:
1714 {
1715 rcStrict = apicSetTpr(pVCpu, uValue);
1716 break;
1717 }
1718
1719 case XAPIC_OFF_LVT_TIMER:
1720#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1721 case XAPIC_OFF_LVT_THERMAL:
1722#endif
1723 case XAPIC_OFF_LVT_PERF:
1724 case XAPIC_OFF_LVT_LINT0:
1725 case XAPIC_OFF_LVT_LINT1:
1726 case XAPIC_OFF_LVT_ERROR:
1727 {
1728 rcStrict = apicSetLvtEntry(pVCpu, offReg, uValue);
1729 break;
1730 }
1731
1732 case XAPIC_OFF_TIMER_ICR:
1733 {
1734 rcStrict = apicSetTimerIcr(pVCpu, VINF_IOM_R3_MMIO_WRITE, uValue);
1735 break;
1736 }
1737
1738 case XAPIC_OFF_EOI:
1739 {
1740 rcStrict = apicSetEoi(pVCpu, uValue);
1741 break;
1742 }
1743
1744 case XAPIC_OFF_LDR:
1745 {
1746 rcStrict = apicSetLdr(pVCpu, uValue);
1747 break;
1748 }
1749
1750 case XAPIC_OFF_DFR:
1751 {
1752 rcStrict = apicSetDfr(pVCpu, uValue);
1753 break;
1754 }
1755
1756 case XAPIC_OFF_SVR:
1757 {
1758 rcStrict = apicSetSvr(pVCpu, uValue);
1759 break;
1760 }
1761
1762 case XAPIC_OFF_ICR_LO:
1763 {
1764 rcStrict = apicSetIcrLo(pVCpu, uValue, VINF_IOM_R3_MMIO_WRITE);
1765 break;
1766 }
1767
1768 case XAPIC_OFF_ICR_HI:
1769 {
1770 rcStrict = apicSetIcrHi(pVCpu, uValue);
1771 break;
1772 }
1773
1774 case XAPIC_OFF_TIMER_DCR:
1775 {
1776 rcStrict = apicSetTimerDcr(pVCpu, uValue);
1777 break;
1778 }
1779
1780 case XAPIC_OFF_ESR:
1781 {
1782 rcStrict = apicSetEsr(pVCpu, uValue);
1783 break;
1784 }
1785
1786 case XAPIC_OFF_APR:
1787 case XAPIC_OFF_RRD:
1788 {
1789#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1790 /* Unsupported on Pentium 4 and Xeon CPUs but writes do -not- set an illegal register access error. */
1791#else
1792# error "Implement Pentium and P6 family APIC architectures"
1793#endif
1794 break;
1795 }
1796
1797 /* Read-only, write ignored: */
1798 case XAPIC_OFF_VERSION:
1799 case XAPIC_OFF_ID:
1800 break;
1801
1802 /* Unavailable/reserved in xAPIC mode: */
1803 case X2APIC_OFF_SELF_IPI:
1804 /* Read-only registers: */
1805 case XAPIC_OFF_PPR:
1806 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1807 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1808 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1809 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1810 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1811 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1812 case XAPIC_OFF_TIMER_CCR:
1813 default:
1814 {
1815 rcStrict = PDMDevHlpDBGFStop(pApicDev->CTX_SUFF(pDevIns), RT_SRC_POS, "APIC%u: offReg=%#RX16\n", pVCpu->idCpu,
1816 offReg);
1817 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1818 break;
1819 }
1820 }
1821
1822 return rcStrict;
1823}
1824
1825
1826/**
1827 * @interface_method_impl{PDMAPICREG,pfnReadMsrR3}
1828 */
1829APICBOTHCBDECL(VBOXSTRICTRC) apicReadMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
1830{
1831 /*
1832 * Validate.
1833 */
1834 VMCPU_ASSERT_EMT(pVCpu);
1835 Assert(u32Reg >= MSR_IA32_X2APIC_ID && u32Reg <= MSR_IA32_X2APIC_SELF_IPI);
1836 Assert(pu64Value);
1837 RT_NOREF_PV(pDevIns);
1838
1839#ifndef IN_RING3
1840 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1841 if (pApic->fRZEnabled)
1842 { /* likely */ }
1843 else
1844 return VINF_CPUM_R3_MSR_READ;
1845#endif
1846
1847 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMsrRead));
1848
1849 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1850 if (RT_LIKELY(XAPIC_IN_X2APIC_MODE(pVCpu)))
1851 {
1852 switch (u32Reg)
1853 {
1854 /* Special handling for x2APIC: */
1855 case MSR_IA32_X2APIC_ICR:
1856 {
1857 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
1858 uint64_t const uHi = pX2ApicPage->icr_hi.u32IcrHi;
1859 uint64_t const uLo = pX2ApicPage->icr_lo.all.u32IcrLo;
1860 *pu64Value = RT_MAKE_U64(uLo, uHi);
1861 break;
1862 }
1863
1864 /* Special handling, compatible with xAPIC: */
1865 case MSR_IA32_X2APIC_TIMER_CCR:
1866 {
1867 uint32_t uValue;
1868 rcStrict = apicGetTimerCcr(pVCpu, VINF_CPUM_R3_MSR_READ, &uValue);
1869 *pu64Value = uValue;
1870 break;
1871 }
1872
1873 /* Special handling, compatible with xAPIC: */
1874 case MSR_IA32_X2APIC_PPR:
1875 {
1876 *pu64Value = apicGetPpr(pVCpu);
1877 break;
1878 }
1879
1880 /* Raw read, compatible with xAPIC: */
1881 case MSR_IA32_X2APIC_ID:
1882 case MSR_IA32_X2APIC_VERSION:
1883 case MSR_IA32_X2APIC_TPR:
1884 case MSR_IA32_X2APIC_LDR:
1885 case MSR_IA32_X2APIC_SVR:
1886 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
1887 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
1888 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
1889 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
1890 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
1891 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
1892 case MSR_IA32_X2APIC_ESR:
1893 case MSR_IA32_X2APIC_LVT_TIMER:
1894 case MSR_IA32_X2APIC_LVT_THERMAL:
1895 case MSR_IA32_X2APIC_LVT_PERF:
1896 case MSR_IA32_X2APIC_LVT_LINT0:
1897 case MSR_IA32_X2APIC_LVT_LINT1:
1898 case MSR_IA32_X2APIC_LVT_ERROR:
1899 case MSR_IA32_X2APIC_TIMER_ICR:
1900 case MSR_IA32_X2APIC_TIMER_DCR:
1901 {
1902 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1903 uint16_t const offReg = X2APIC_GET_XAPIC_OFF(u32Reg);
1904 *pu64Value = apicReadRaw32(pXApicPage, offReg);
1905 break;
1906 }
1907
1908 /* Write-only MSRs: */
1909 case MSR_IA32_X2APIC_SELF_IPI:
1910 case MSR_IA32_X2APIC_EOI:
1911 {
1912 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_WRITE_ONLY);
1913 break;
1914 }
1915
1916 /* Reserved MSRs: */
1917 case MSR_IA32_X2APIC_LVT_CMCI:
1918 default:
1919 {
1920 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1921 break;
1922 }
1923 }
1924 }
1925 else
1926 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_READ_MODE);
1927
1928 return rcStrict;
1929}
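
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * each x2APIC MSR covers 16 bytes of the legacy xAPIC page, so the mapping
 * X2APIC_GET_XAPIC_OFF relies on is assumed to be offset = (MSR - 0x800) << 4.
 * For example, the TPR:
 */
#if 0
uint32_t const uMsr   = 0x808;                /* MSR_IA32_X2APIC_TPR */
uint16_t const offReg = (uMsr - 0x800) << 4;  /* 0x80 == XAPIC_OFF_TPR */
#endif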
1930
1931
1932/**
1933 * @interface_method_impl{PDMAPICREG,pfnWriteMsrR3}
1934 */
1935APICBOTHCBDECL(VBOXSTRICTRC) apicWriteMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint32_t u32Reg, uint64_t u64Value)
1936{
1937 /*
1938 * Validate.
1939 */
1940 VMCPU_ASSERT_EMT(pVCpu);
1941 Assert(u32Reg >= MSR_IA32_X2APIC_ID && u32Reg <= MSR_IA32_X2APIC_SELF_IPI);
1942 RT_NOREF_PV(pDevIns);
1943
1944#ifndef IN_RING3
1945 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1946 if (pApic->fRZEnabled)
1947 { /* likely */ }
1948 else
1949 return VINF_CPUM_R3_MSR_WRITE;
1950#endif
1951
1952 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMsrWrite));
1953
1954 /*
1955 * In x2APIC mode, we need to raise #GP(0) for writes to reserved bits, unlike MMIO
1956 * accesses where they are ignored. Hence, we need to validate each register before
1957 * invoking the generic/xAPIC write functions.
1958 *
1959 * Bits 63:32 of all registers except the ICR are reserved; we'll handle this common
1960 * case first and handle validating the remaining bits on a per-register basis.
1961 * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
1962 */
1963 if ( u32Reg != MSR_IA32_X2APIC_ICR
1964 && RT_HI_U32(u64Value))
1965 return apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_BITS);
1966
1967 uint32_t u32Value = RT_LO_U32(u64Value);
1968 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1969 if (RT_LIKELY(XAPIC_IN_X2APIC_MODE(pVCpu)))
1970 {
1971 switch (u32Reg)
1972 {
1973 case MSR_IA32_X2APIC_TPR:
1974 {
1975 rcStrict = apicSetTpr(pVCpu, u32Value);
1976 break;
1977 }
1978
1979 case MSR_IA32_X2APIC_ICR:
1980 {
1981 rcStrict = apicSetIcr(pVCpu, u64Value, VINF_CPUM_R3_MSR_WRITE);
1982 break;
1983 }
1984
1985 case MSR_IA32_X2APIC_SVR:
1986 {
1987 rcStrict = apicSetSvr(pVCpu, u32Value);
1988 break;
1989 }
1990
1991 case MSR_IA32_X2APIC_ESR:
1992 {
1993 rcStrict = apicSetEsr(pVCpu, u32Value);
1994 break;
1995 }
1996
1997 case MSR_IA32_X2APIC_TIMER_DCR:
1998 {
1999 rcStrict = apicSetTimerDcr(pVCpu, u32Value);
2000 break;
2001 }
2002
2003 case MSR_IA32_X2APIC_LVT_TIMER:
2004 case MSR_IA32_X2APIC_LVT_THERMAL:
2005 case MSR_IA32_X2APIC_LVT_PERF:
2006 case MSR_IA32_X2APIC_LVT_LINT0:
2007 case MSR_IA32_X2APIC_LVT_LINT1:
2008 case MSR_IA32_X2APIC_LVT_ERROR:
2009 {
2010 rcStrict = apicSetLvtEntry(pVCpu, X2APIC_GET_XAPIC_OFF(u32Reg), u32Value);
2011 break;
2012 }
2013
2014 case MSR_IA32_X2APIC_TIMER_ICR:
2015 {
2016 rcStrict = apicSetTimerIcr(pVCpu, VINF_CPUM_R3_MSR_WRITE, u32Value);
2017 break;
2018 }
2019
2020 /* Write-only MSRs: */
2021 case MSR_IA32_X2APIC_SELF_IPI:
2022 {
2023 uint8_t const uVector = XAPIC_SELF_IPI_GET_VECTOR(u32Value);
2024 apicPostInterrupt(pVCpu, uVector, XAPICTRIGGERMODE_EDGE);
2025 rcStrict = VINF_SUCCESS;
2026 break;
2027 }
2028
2029 case MSR_IA32_X2APIC_EOI:
2030 {
2031 rcStrict = apicSetEoi(pVCpu, u32Value);
2032 break;
2033 }
2034
2035 /* Read-only MSRs: */
2036 case MSR_IA32_X2APIC_ID:
2037 case MSR_IA32_X2APIC_VERSION:
2038 case MSR_IA32_X2APIC_PPR:
2039 case MSR_IA32_X2APIC_LDR:
2040 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
2041 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
2042 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
2043 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
2044 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
2045 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
2046 case MSR_IA32_X2APIC_TIMER_CCR:
2047 {
2048 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_READ_ONLY);
2049 break;
2050 }
2051
2052 /* Reserved MSRs: */
2053 case MSR_IA32_X2APIC_LVT_CMCI:
2054 default:
2055 {
2056 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2057 break;
2058 }
2059 }
2060 }
2061 else
2062 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_WRITE_MODE);
2063
2064 return rcStrict;
2065}
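
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * because of the common reserved-bits check above, any non-ICR write with a
 * non-zero high dword fails before reaching the per-register logic:
 */
#if 0
/* WRMSR of MSR_IA32_X2APIC_TPR with bit 32 set -> APICMSRACCESS_WRITE_RSVD_BITS, i.e. #GP(0). */
VBOXSTRICTRC rcGp = apicWriteMsr(pDevIns, pVCpu, MSR_IA32_X2APIC_TPR, UINT64_C(0x100000000));
#endif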
2066
2067
2068/**
2069 * @interface_method_impl{PDMAPICREG,pfnSetBaseMsrR3}
2070 */
2071APICBOTHCBDECL(VBOXSTRICTRC) apicSetBaseMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint64_t u64BaseMsr)
2072{
2073 Assert(pVCpu);
2074 NOREF(pDevIns);
2075
2076#ifdef IN_RING3
2077 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2078 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2079 APICMODE enmOldMode = apicGetMode(pApicCpu->uApicBaseMsr);
2080 APICMODE enmNewMode = apicGetMode(u64BaseMsr);
2081 uint64_t uBaseMsr = pApicCpu->uApicBaseMsr;
2082
2083 Log2(("APIC%u: apicSetBaseMsr: u64BaseMsr=%#RX64 enmNewMode=%s enmOldMode=%s\n", pVCpu->idCpu, u64BaseMsr,
2084 apicGetModeName(enmNewMode), apicGetModeName(enmOldMode)));
2085
2086 /*
2087 * We do not support re-mapping the APIC base address because:
2088 * - We'd have to manage all the mappings ourselves in the APIC (reference-counting based unmapping etc.),
2089 * i.e. we can only unmap the MMIO region if no other APIC is mapped on that location.
2090 * - It's unclear how/if IOM can fall back to handling regions as regular memory (if the MMIO
2091 * region remains mapped but doesn't belong to the called VCPU's APIC).
2092 */
2093 /** @todo Handle per-VCPU APIC base relocation. */
2094 if (MSR_IA32_APICBASE_GET_ADDR(uBaseMsr) != MSR_IA32_APICBASE_ADDR)
2095 {
2096 LogRelMax(5, ("APIC%u: Attempt to relocate base to %#RGp, unsupported -> #GP(0)\n", pVCpu->idCpu,
2097 MSR_IA32_APICBASE_GET_ADDR(uBaseMsr)));
2098 return VERR_CPUM_RAISE_GP_0;
2099 }
2100
2101 /* Don't allow enabling xAPIC/x2APIC if the VM is configured with the APIC disabled. */
2102 if (pApic->enmMaxMode == PDMAPICMODE_NONE)
2103 {
2104 LogRel(("APIC%u: Disallowing APIC base MSR write as the VM is configured with APIC disabled!\n",
2105 pVCpu->idCpu));
2106 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_DISALLOWED_CONFIG);
2107 }
2108
2109 /*
2110 * Act on state transition.
2111 */
2112 if (enmNewMode != enmOldMode)
2113 {
2114 switch (enmNewMode)
2115 {
2116 case APICMODE_DISABLED:
2117 {
2118 /*
2119 * The APIC state needs to be reset (especially the APIC ID, as the x2APIC APIC ID bit layout
2120 * is different). We can start with a clean slate identical to the state after a power-up/reset.
2121 *
2122 * See Intel spec. 10.4.3 "Enabling or Disabling the Local APIC".
2123 *
2124 * We'll also manually manage the APIC base MSR here. We want a single point of commit
2125 * at the end of this function rather than updating it in apicR3ResetCpu. This means we also
2126 * need to update the CPUID leaf ourselves.
2127 */
2128 apicR3ResetCpu(pVCpu, false /* fResetApicBaseMsr */);
2129 uBaseMsr &= ~(MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD);
2130 CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, false /*fVisible*/);
2131 LogRel(("APIC%u: Switched mode to disabled\n", pVCpu->idCpu));
2132 break;
2133 }
2134
2135 case APICMODE_XAPIC:
2136 {
2137 if (enmOldMode != APICMODE_DISABLED)
2138 {
2139 LogRel(("APIC%u: Can only transition to xAPIC state from disabled state\n", pVCpu->idCpu));
2140 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2141 }
2142
2143 uBaseMsr |= MSR_IA32_APICBASE_EN;
2144 CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, true /*fVisible*/);
2145 LogRel(("APIC%u: Switched mode to xAPIC\n", pVCpu->idCpu));
2146 break;
2147 }
2148
2149 case APICMODE_X2APIC:
2150 {
2151 if (pApic->enmMaxMode != PDMAPICMODE_X2APIC)
2152 {
2153 LogRel(("APIC%u: Disallowing transition to x2APIC mode as the VM is configured with the x2APIC disabled!\n",
2154 pVCpu->idCpu));
2155 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2156 }
2157
2158 if (enmOldMode != APICMODE_XAPIC)
2159 {
2160 LogRel(("APIC%u: Can only transition to x2APIC state from xAPIC state\n", pVCpu->idCpu));
2161 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2162 }
2163
2164 uBaseMsr |= MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD;
2165
2166 /*
2167 * The APIC ID needs updating when entering x2APIC mode.
2168 * An APIC ID written by software in xAPIC mode isn't preserved.
2169 * The APIC ID becomes read-only to software in x2APIC mode.
2170 *
2171 * See Intel spec. 10.12.5.1 "x2APIC States".
2172 */
2173 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
2174 ASMMemZero32(&pX2ApicPage->id, sizeof(pX2ApicPage->id));
2175 pX2ApicPage->id.u32ApicId = pVCpu->idCpu;
2176
2177 /*
2178 * LDR initialization occurs when entering x2APIC mode.
2179 * See Intel spec. 10.12.10.2 "Deriving Logical x2APIC ID from the Local x2APIC ID".
2180 */
2181 pX2ApicPage->ldr.u32LogicalApicId = ((pX2ApicPage->id.u32ApicId & UINT32_C(0xffff0)) << 12)
2182 | (UINT32_C(1) << (pX2ApicPage->id.u32ApicId & UINT32_C(0xf)));
2183
2184 LogRel(("APIC%u: Switched mode to x2APIC\n", pVCpu->idCpu));
2185 break;
2186 }
2187
2188 case APICMODE_INVALID:
2189 default:
2190 {
2191 Log(("APIC%u: Invalid state transition attempted\n", pVCpu->idCpu));
2192 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2193 }
2194 }
2195 }
2196
2197 ASMAtomicWriteU64(&pApicCpu->uApicBaseMsr, uBaseMsr);
2198 return VINF_SUCCESS;
2199
2200#else /* !IN_RING3 */
2201 RT_NOREF_PV(pDevIns);
2202 RT_NOREF_PV(pVCpu);
2203 RT_NOREF_PV(u64BaseMsr);
2204 return VINF_CPUM_R3_MSR_WRITE;
2205#endif /* IN_RING3 */
2206}
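
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * per the Intel formula, logical x2APIC ID = (x2APIC ID[19:4] << 16) |
 * (1 << x2APIC ID[3:0]). A vCPU with x2APIC ID 0x25 thus lands in cluster 2
 * with logical bit 5:
 */
#if 0
uint32_t const uApicId    = 0x25;
uint32_t const uLogicalId = ((uApicId & UINT32_C(0xffff0)) << 12)
                          | (UINT32_C(1) << (uApicId & UINT32_C(0xf)));
/* uLogicalId == 0x00020020 */
#endif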
2207
2208
2209/**
2210 * @interface_method_impl{PDMAPICREG,pfnGetBaseMsrR3}
2211 */
2212APICBOTHCBDECL(uint64_t) apicGetBaseMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu)
2213{
2214 RT_NOREF_PV(pDevIns);
2215 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2216
2217 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2218 return pApicCpu->uApicBaseMsr;
2219}
2220
2221
2222/**
2223 * @interface_method_impl{PDMAPICREG,pfnSetTprR3}
2224 */
2225APICBOTHCBDECL(void) apicSetTpr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t u8Tpr)
2226{
2227 RT_NOREF_PV(pDevIns);
2228 apicSetTpr(pVCpu, u8Tpr);
2229}
2230
2231
2232/**
2233 * Gets the highest priority pending interrupt.
2234 *
2235 * @returns true if any interrupt is pending, false otherwise.
2236 * @param pVCpu The cross context virtual CPU structure.
2237 * @param pu8PendingIntr Where to store the interrupt vector if the
2238 * interrupt is pending (optional, can be NULL).
2239 */
2240static bool apicGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr)
2241{
2242 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2243 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1);
2244 if (irrv >= 0)
2245 {
2246 Assert(irrv <= (int)UINT8_MAX);
2247 if (pu8PendingIntr)
2248 *pu8PendingIntr = (uint8_t)irrv;
2249 return true;
2250 }
2251 return false;
2252}
2253
2254
2255/**
2256 * @interface_method_impl{PDMAPICREG,pfnGetTprR3}
2257 */
2258APICBOTHCBDECL(uint8_t) apicGetTpr(PPDMDEVINS pDevIns, PVMCPU pVCpu, bool *pfPending, uint8_t *pu8PendingIntr)
2259{
2260 RT_NOREF_PV(pDevIns);
2261 VMCPU_ASSERT_EMT(pVCpu);
2262 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2263
2264 if (pfPending)
2265 {
2266 /*
2267 * Just return whatever the highest pending interrupt is in the IRR.
2268 * The caller is responsible for figuring out if it's masked by the TPR etc.
2269 */
2270 *pfPending = apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
2271 }
2272
2273 return pXApicPage->tpr.u8Tpr;
2274}
2275
2276
2277/**
2278 * @interface_method_impl{PDMAPICREG,pfnGetTimerFreqR3}
2279 */
2280APICBOTHCBDECL(uint64_t) apicGetTimerFreq(PPDMDEVINS pDevIns)
2281{
2282 PVM pVM = PDMDevHlpGetVM(pDevIns);
2283 PVMCPU pVCpu = &pVM->aCpus[0];
2284 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2285 uint64_t uTimer = TMTimerGetFreq(pApicCpu->CTX_SUFF(pTimer));
2286 return uTimer;
2287}
2288
2289
2290/**
2291 * @interface_method_impl{PDMAPICREG,pfnBusDeliverR3}
2292 * @remarks This is a private interface between the IOAPIC and the APIC.
2293 */
2294APICBOTHCBDECL(int) apicBusDeliver(PPDMDEVINS pDevIns, uint8_t uDest, uint8_t uDestMode, uint8_t uDeliveryMode, uint8_t uVector,
2295 uint8_t uPolarity, uint8_t uTriggerMode, uint32_t uTagSrc)
2296{
2297 NOREF(uPolarity);
2298 NOREF(uTagSrc);
2299 PVM pVM = PDMDevHlpGetVM(pDevIns);
2300
2301 /*
2302 * The destination field (mask) in the I/O APIC redirection table entry is 8 bits wide.
2303 * Hence, the broadcast mask is 0xff.
2304 * See I/O APIC spec. 3.2.4. "IOREDTBL[23:0] - I/O Redirection Table Registers".
2305 */
2306 XAPICTRIGGERMODE enmTriggerMode = (XAPICTRIGGERMODE)uTriggerMode;
2307 XAPICDELIVERYMODE enmDeliveryMode = (XAPICDELIVERYMODE)uDeliveryMode;
2308 XAPICDESTMODE enmDestMode = (XAPICDESTMODE)uDestMode;
2309 uint32_t fDestMask = uDest;
2310 uint32_t fBroadcastMask = UINT32_C(0xff);
2311
2312 Log2(("APIC: apicBusDeliver: fDestMask=%#x enmDestMode=%s enmTriggerMode=%s enmDeliveryMode=%s uVector=%#x\n", fDestMask,
2313 apicGetDestModeName(enmDestMode), apicGetTriggerModeName(enmTriggerMode), apicGetDeliveryModeName(enmDeliveryMode),
2314 uVector));
2315
2316 bool fIntrAccepted;
2317 VMCPUSET DestCpuSet;
2318 apicGetDestCpuSet(pVM, fDestMask, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
2319 VBOXSTRICTRC rcStrict = apicSendIntr(pVM, NULL /* pVCpu */, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2320 &fIntrAccepted, VINF_SUCCESS /* rcRZ */);
2321 if (fIntrAccepted)
2322 return VBOXSTRICTRC_VAL(rcStrict);
2323 return VERR_APIC_INTR_DISCARDED;
2324}
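
/*
 * Illustrative sketch (editor's addition, not part of the original source): an
 * IOAPIC-style fixed, edge-triggered, physical-destination delivery of vector
 * 0x30 to the APIC with ID 1; a destination of 0xff would broadcast instead.
 */
#if 0
int rcDeliver = apicBusDeliver(pDevIns, 1 /* uDest */, 0 /* physical */, 0 /* fixed */,
                               0x30 /* uVector */, 0 /* uPolarity */, 0 /* edge */, 0 /* uTagSrc */);
#endif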
2325
2326
2327/**
2328 * @interface_method_impl{PDMAPICREG,pfnLocalInterruptR3}
2329 * @remarks This is a private interface between the PIC and the APIC.
2330 */
2331APICBOTHCBDECL(VBOXSTRICTRC) apicLocalInterrupt(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t u8Pin, uint8_t u8Level, int rcRZ)
2332{
2333 NOREF(pDevIns);
2334 AssertReturn(u8Pin <= 1, VERR_INVALID_PARAMETER);
2335 AssertReturn(u8Level <= 1, VERR_INVALID_PARAMETER);
2336
2337 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2338
2339 /* If the APIC is enabled, the interrupt is subject to LVT programming. */
2340 if (apicIsEnabled(pVCpu))
2341 {
2342 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2343
2344 /* Pick the LVT entry corresponding to the interrupt pin. */
2345 static const uint16_t s_au16LvtOffsets[] =
2346 {
2347 XAPIC_OFF_LVT_LINT0,
2348 XAPIC_OFF_LVT_LINT1
2349 };
2350 Assert(u8Pin < RT_ELEMENTS(s_au16LvtOffsets));
2351 uint16_t const offLvt = s_au16LvtOffsets[u8Pin];
2352 uint32_t const uLvt = apicReadRaw32(pXApicPage, offLvt);
2353
2354 /* If software hasn't masked the interrupt in the LVT entry, proceed with interrupt processing. */
2355 if (!XAPIC_LVT_IS_MASKED(uLvt))
2356 {
2357 XAPICDELIVERYMODE const enmDeliveryMode = XAPIC_LVT_GET_DELIVERY_MODE(uLvt);
2358 XAPICTRIGGERMODE enmTriggerMode = XAPIC_LVT_GET_TRIGGER_MODE(uLvt);
2359
2360 switch (enmDeliveryMode)
2361 {
2362 case XAPICDELIVERYMODE_INIT:
2363 {
2364 /** @todo won't work in R0/RC because callers don't care about rcRZ. */
2365 AssertMsgFailed(("INIT through LINT0/LINT1 is not yet supported\n"));
2366 /* fallthru */
2367 }
2368 case XAPICDELIVERYMODE_FIXED:
2369 {
2370 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2371 uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
2372 bool fActive = RT_BOOL(u8Level & 1);
2373 bool volatile *pfActiveLine = u8Pin == 0 ? &pApicCpu->fActiveLint0 : &pApicCpu->fActiveLint1;
2374 /** @todo Polarity is busted elsewhere, we need to fix that
2375 * first. See @bugref{8386#c7}. */
2376#if 0
2377 uint8_t const u8Polarity = XAPIC_LVT_GET_POLARITY(uLvt);
2378 fActive ^= u8Polarity;
2379#endif
2380 if (!fActive)
2381 {
2382 ASMAtomicCmpXchgBool(pfActiveLine, false, true);
2383 break;
2384 }
2385
2386 /* Level-sensitive interrupts are not supported for LINT1. See Intel spec. 10.5.1 "Local Vector Table". */
2387 if (offLvt == XAPIC_OFF_LVT_LINT1)
2388 enmTriggerMode = XAPICTRIGGERMODE_EDGE;
2389 /** @todo figure out what "If the local APIC is not used in conjunction with an I/O APIC and fixed
2390 delivery mode is selected; the Pentium 4, Intel Xeon, and P6 family processors will always
2391 use level-sensitive triggering, regardless if edge-sensitive triggering is selected."
2392 means. */
2393
2394 bool fSendIntr;
2395 if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
2396 {
2397 /* Recognize and send the interrupt only on an edge transition. */
2398 fSendIntr = ASMAtomicCmpXchgBool(pfActiveLine, true, false);
2399 }
2400 else
2401 {
2402 /* For level-triggered interrupts, redundant interrupts are not a problem. */
2403 Assert(enmTriggerMode == XAPICTRIGGERMODE_LEVEL);
2404 ASMAtomicCmpXchgBool(pfActiveLine, true, false);
2405
2406 /* Only when the remote IRR isn't set, set it and send the interrupt. */
2407 if (!(pXApicPage->lvt_lint0.all.u32LvtLint0 & XAPIC_LVT_REMOTE_IRR))
2408 {
2409 Assert(offLvt == XAPIC_OFF_LVT_LINT0);
2410 ASMAtomicOrU32((volatile uint32_t *)&pXApicPage->lvt_lint0.all.u32LvtLint0, XAPIC_LVT_REMOTE_IRR);
2411 fSendIntr = true;
2412 }
2413 else
2414 fSendIntr = false;
2415 }
2416
2417 if (fSendIntr)
2418 {
2419 VMCPUSET DestCpuSet;
2420 VMCPUSET_EMPTY(&DestCpuSet);
2421 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
2422 rcStrict = apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode,
2423 &DestCpuSet, NULL /* pfIntrAccepted */, rcRZ);
2424 }
2425 break;
2426 }
2427
2428 case XAPICDELIVERYMODE_SMI:
2429 case XAPICDELIVERYMODE_NMI:
2430 {
2431 VMCPUSET DestCpuSet;
2432 VMCPUSET_EMPTY(&DestCpuSet);
2433 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
2434 uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
2435 rcStrict = apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2436 NULL /* pfIntrAccepted */, rcRZ);
2437 break;
2438 }
2439
2440 case XAPICDELIVERYMODE_EXTINT:
2441 {
2442 Log2(("APIC%u: apicLocalInterrupt: %s ExtINT through LINT%u\n", pVCpu->idCpu,
2443 u8Level ? "Raising" : "Lowering", u8Pin));
2444 if (u8Level)
2445 apicSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2446 else
2447 apicClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2448 break;
2449 }
2450
2451 /* Reserved/unknown delivery modes: */
2452 case XAPICDELIVERYMODE_LOWEST_PRIO:
2453 case XAPICDELIVERYMODE_STARTUP:
2454 default:
2455 {
2456 rcStrict = VERR_INTERNAL_ERROR_3;
2457 AssertMsgFailed(("APIC%u: LocalInterrupt: Invalid delivery mode %#x (%s) on LINT%d\n", pVCpu->idCpu,
2458 enmDeliveryMode, apicGetDeliveryModeName(enmDeliveryMode), u8Pin));
2459 break;
2460 }
2461 }
2462 }
2463 }
2464 else
2465 {
2466 /* The APIC is hardware disabled. The CPU behaves as though there is no on-chip APIC. */
2467 if (u8Pin == 0)
2468 {
2469 /* LINT0 behaves as an external interrupt pin. */
2470 Log2(("APIC%u: apicLocalInterrupt: APIC hardware-disabled, %s INTR\n", pVCpu->idCpu,
2471 u8Level ? "raising" : "lowering"));
2472 if (u8Level)
2473 apicSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2474 else
2475 apicClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2476 }
2477 else
2478 {
2479 /* LINT1 behaves as NMI. */
2480 Log2(("APIC%u: apicLocalInterrupt: APIC hardware-disabled, raising NMI\n", pVCpu->idCpu));
2481 apicSetInterruptFF(pVCpu, PDMAPICIRQ_NMI);
2482 }
2483 }
2484
2485 return rcStrict;
2486}
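
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the edge detection above hinges on compare-and-exchange of the per-pin line
 * state, so only a low-to-high transition recognizes a new interrupt:
 */
#if 0
bool volatile fLine = false;
bool fSend1 = ASMAtomicCmpXchgBool(&fLine, true, false);  /* false -> true: returns true, send */
bool fSend2 = ASMAtomicCmpXchgBool(&fLine, true, false);  /* already true: returns false, skip */
#endif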
2487
2488
2489/**
2490 * @interface_method_impl{PDMAPICREG,pfnGetInterruptR3}
2491 */
2492APICBOTHCBDECL(int) apicGetInterrupt(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t *pu8Vector, uint32_t *pu32TagSrc)
2493{
2494 RT_NOREF_PV(pDevIns);
2495 VMCPU_ASSERT_EMT(pVCpu);
2496 Assert(pu8Vector);
2497 NOREF(pu32TagSrc);
2498
2499 LogFlow(("APIC%u: apicGetInterrupt:\n", pVCpu->idCpu));
2500
2501 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2502 bool const fApicHwEnabled = apicIsEnabled(pVCpu);
2503 if ( fApicHwEnabled
2504 && pXApicPage->svr.u.fApicSoftwareEnable)
2505 {
2506 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1);
2507 if (RT_LIKELY(irrv >= 0))
2508 {
2509 Assert(irrv <= (int)UINT8_MAX);
2510 uint8_t const uVector = irrv;
2511
2512 /*
2513 * An interrupt can be pending yet masked like this if the APIC received it while the
2514 * CPU had interrupts disabled and the guest raised the TPR before re-enabling them.
2515 */
2516 uint8_t const uTpr = pXApicPage->tpr.u8Tpr;
2517 if ( uTpr > 0
2518 && XAPIC_TPR_GET_TP(uVector) <= XAPIC_TPR_GET_TP(uTpr))
2519 {
2520 Log2(("APIC%u: apicGetInterrupt: Interrupt masked. uVector=%#x uTpr=%#x SpuriousVector=%#x\n", pVCpu->idCpu,
2521 uVector, uTpr, pXApicPage->svr.u.u8SpuriousVector));
2522 *pu8Vector = uVector;
2523 STAM_COUNTER_INC(&pVCpu->apic.s.StatMaskedByTpr);
2524 return VERR_APIC_INTR_MASKED_BY_TPR;
2525 }
2526
2527 /*
2528 * The PPR should be up-to-date at this point through apicSetEoi().
2529 * We're on EMT so no parallel updates possible.
2530 * Subject the pending vector to PPR prioritization.
2531 */
2532 uint8_t const uPpr = pXApicPage->ppr.u8Ppr;
2533 if ( !uPpr
2534 || XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
2535 {
2536 apicClearVectorInReg(&pXApicPage->irr, uVector);
2537 apicSetVectorInReg(&pXApicPage->isr, uVector);
2538 apicUpdatePpr(pVCpu);
2539 apicSignalNextPendingIntr(pVCpu);
2540
2541 Log2(("APIC%u: apicGetInterrupt: Valid Interrupt. uVector=%#x\n", pVCpu->idCpu, uVector));
2542 *pu8Vector = uVector;
2543 return VINF_SUCCESS;
2544 }
2545 else
2546 {
2547 STAM_COUNTER_INC(&pVCpu->apic.s.StatMaskedByPpr);
2548 Log2(("APIC%u: apicGetInterrupt: Interrupt's priority is not higher than the PPR. uVector=%#x PPR=%#x\n",
2549 pVCpu->idCpu, uVector, uPpr));
2550 }
2551 }
2552 else
2553 Log2(("APIC%u: apicGetInterrupt: No pending bits in IRR\n", pVCpu->idCpu));
2554 }
2555 else
2556 Log2(("APIC%u: apicGetInterrupt: APIC %s disabled\n", pVCpu->idCpu, !fApicHwEnabled ? "hardware" : "software"));
2557
2558 return VERR_APIC_INTR_NOT_PENDING;
2559}
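
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the TPR/PPR comparisons above operate on the priority class, assumed to be
 * bits 7:4 of the value. E.g. pending vector 0x51 (class 5) is masked by a
 * TPR of 0x60 (class 6):
 */
#if 0
uint8_t const uVec = 0x51;
uint8_t const uTpr = 0x60;
bool const fMasked = (uVec >> 4) <= (uTpr >> 4);   /* 5 <= 6 -> masked by TPR */
#endif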
2560
2561
2562/**
2563 * @callback_method_impl{FNIOMMMIOREAD}
2564 */
2565APICBOTHCBDECL(int) apicReadMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
2566{
2567 NOREF(pvUser);
2568 Assert(!(GCPhysAddr & 0xf));
2569 Assert(cb == 4);
2570
2571 PAPICDEV pApicDev = PDMINS_2_DATA(pDevIns, PAPICDEV);
2572 PVMCPU pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2573 uint16_t offReg = GCPhysAddr & 0xff0;
2574 uint32_t uValue = 0;
2575
2576 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMmioRead));
2577
2578 int rc = VBOXSTRICTRC_VAL(apicReadRegister(pApicDev, pVCpu, offReg, &uValue));
2579 *(uint32_t *)pv = uValue;
2580
2581 Log2(("APIC%u: apicReadMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2582 return rc;
2583}
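
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * with the APIC page at its default 0xfee00000 base, the register offset is
 * simply the low bits of the physical address, e.g. a 4-byte TPR read:
 */
#if 0
RTGCPHYS const GCPhysTpr = UINT64_C(0xfee00080);  /* default base + XAPIC_OFF_TPR */
uint16_t const offTpr    = GCPhysTpr & 0xff0;     /* 0x80 */
#endif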
2584
2585
2586/**
2587 * @callback_method_impl{FNIOMMMIOWRITE}
2588 */
2589APICBOTHCBDECL(int) apicWriteMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
2590{
2591 NOREF(pvUser);
2592 Assert(!(GCPhysAddr & 0xf));
2593 Assert(cb == 4);
2594
2595 PAPICDEV pApicDev = PDMINS_2_DATA(pDevIns, PAPICDEV);
2596 PVMCPU pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2597 uint16_t offReg = GCPhysAddr & 0xff0;
2598 uint32_t uValue = *(uint32_t *)pv;
2599
2600 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMmioWrite));
2601
2602 Log2(("APIC%u: apicWriteMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2603
2604 int rc = VBOXSTRICTRC_VAL(apicWriteRegister(pApicDev, pVCpu, offReg, uValue));
2605 return rc;
2606}
2607
2608
2609/**
2610 * Sets the interrupt pending force-flag and pokes the EMT if required.
2611 *
2612 * @param pVCpu The cross context virtual CPU structure.
2613 * @param enmType The IRQ type.
2614 */
2615VMM_INT_DECL(void) apicSetInterruptFF(PVMCPU pVCpu, PDMAPICIRQ enmType)
2616{
2617 PVM pVM = pVCpu->CTX_SUFF(pVM);
2618 PAPICDEV pApicDev = VM_TO_APICDEV(pVM);
2619 CTX_SUFF(pApicDev->pApicHlp)->pfnSetInterruptFF(pApicDev->CTX_SUFF(pDevIns), enmType, pVCpu->idCpu);
2620}
2621
2622
2623/**
2624 * Clears the interrupt pending force-flag.
2625 *
2626 * @param pVCpu The cross context virtual CPU structure.
2627 * @param enmType The IRQ type.
2628 */
2629VMM_INT_DECL(void) apicClearInterruptFF(PVMCPU pVCpu, PDMAPICIRQ enmType)
2630{
2631 PVM pVM = pVCpu->CTX_SUFF(pVM);
2632 PAPICDEV pApicDev = VM_TO_APICDEV(pVM);
2633 pApicDev->CTX_SUFF(pApicHlp)->pfnClearInterruptFF(pApicDev->CTX_SUFF(pDevIns), enmType, pVCpu->idCpu);
2634}
2635
2636
2637/**
2638 * Posts an interrupt to a target APIC.
2639 *
2640 * This function handles interrupts received from the system bus or
2641 * interrupts generated locally from the LVT or via a self IPI.
2642 *
2643 * Don't use this function to deliver ExtINT-style interrupts.
2644 *
2645 * @returns true if the interrupt was accepted, false otherwise.
2646 * @param pVCpu The cross context virtual CPU structure.
2647 * @param uVector The vector of the interrupt to be posted.
2648 * @param enmTriggerMode The trigger mode of the interrupt.
2649 *
2650 * @thread Any.
2651 */
2652VMM_INT_DECL(bool) apicPostInterrupt(PVMCPU pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode)
2653{
2654 Assert(pVCpu);
2655 Assert(uVector > XAPIC_ILLEGAL_VECTOR_END);
2656
2657 PVM pVM = pVCpu->CTX_SUFF(pVM);
2658 PCAPIC pApic = VM_TO_APIC(pVM);
2659 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2660 bool fAccepted = true;
2661
2662 STAM_PROFILE_START(&pApicCpu->StatPostIntr, a);
2663
2664 /*
2665 * Only post valid interrupt vectors.
2666 * See Intel spec. 10.5.2 "Valid Interrupt Vectors".
2667 */
2668 if (RT_LIKELY(uVector > XAPIC_ILLEGAL_VECTOR_END))
2669 {
2670 /*
2671 * If the interrupt is already pending in the IRR we can skip the
2672 * potentially expensive operation of poking the guest EMT out of execution.
2673 */
2674 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2675 if (!apicTestVectorInReg(&pXApicPage->irr, uVector)) /* PAV */
2676 {
2677 Log2(("APIC: apicPostInterrupt: SrcCpu=%u TargetCpu=%u uVector=%#x\n", VMMGetCpuId(pVM), pVCpu->idCpu, uVector));
2678 if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
2679 {
2680 if (pApic->fPostedIntrsEnabled)
2681 { /** @todo posted-interrupt call to hardware */ }
2682 else
2683 {
2684 apicSetVectorInPib(pApicCpu->CTX_SUFF(pvApicPib), uVector);
2685 uint32_t const fAlreadySet = apicSetNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
2686 if (!fAlreadySet)
2687 {
2688 Log2(("APIC: apicPostInterrupt: Setting UPDATE_APIC FF for edge-triggered intr. uVector=%#x\n", uVector));
2689 apicSetInterruptFF(pVCpu, PDMAPICIRQ_UPDATE_PENDING);
2690 }
2691 }
2692 }
2693 else
2694 {
2695 /*
2696 * Level-triggered interrupts require updating the TMR and thus cannot be
2697 * delivered asynchronously.
2698 */
2699 apicSetVectorInPib(&pApicCpu->ApicPibLevel, uVector);
2700 uint32_t const fAlreadySet = apicSetNotificationBitInPib(&pApicCpu->ApicPibLevel);
2701 if (!fAlreadySet)
2702 {
2703 Log2(("APIC: apicPostInterrupt: Setting UPDATE_APIC FF for level-triggered intr. uVector=%#x\n", uVector));
2704 apicSetInterruptFF(pVCpu, PDMAPICIRQ_UPDATE_PENDING);
2705 }
2706 }
2707 }
2708 else
2709 {
2710 Log2(("APIC: apicPostInterrupt: SrcCpu=%u TargetCpu=%u. Vector %#x Already in IRR, skipping\n", VMMGetCpuId(pVM),
2711 pVCpu->idCpu, uVector));
2712 STAM_COUNTER_INC(&pApicCpu->StatPostIntrAlreadyPending);
2713 }
2714 }
2715 else
2716 {
2717 fAccepted = false;
2718 apicSetError(pVCpu, XAPIC_ESR_RECV_ILLEGAL_VECTOR);
2719 }
2720
2721 STAM_PROFILE_STOP(&pApicCpu->StatPostIntr, a);
2722 return fAccepted;
2723}
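
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a cross-CPU post sets the vector in the target's pending-interrupt bitmap
 * (PIB) and pokes the target EMT only when the notification bit wasn't
 * already set; the target later folds the PIB into the IRR via
 * APICUpdatePendingInterrupts(). pVCpuTarget is a hypothetical target VCPU.
 */
#if 0
if (apicPostInterrupt(pVCpuTarget, 0x40 /* uVector */, XAPICTRIGGERMODE_EDGE))
    Log2(("Vector 0x40 accepted; visible in the IRR after the next PIB update\n"));
#endif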
2724
2725
2726/**
2727 * Starts the APIC timer.
2728 *
2729 * @param pVCpu The cross context virtual CPU structure.
2730 * @param uInitialCount The timer's Initial-Count Register (ICR);
2731 * must be greater than 0.
2732 * @thread Any.
2733 */
2734VMM_INT_DECL(void) apicStartTimer(PVMCPU pVCpu, uint32_t uInitialCount)
2735{
2736 Assert(pVCpu);
2737 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2738 Assert(TMTimerIsLockOwner(pApicCpu->CTX_SUFF(pTimer)));
2739 Assert(uInitialCount > 0);
2740
2741 PCXAPICPAGE pXApicPage = APICCPU_TO_CXAPICPAGE(pApicCpu);
2742 uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
2743 uint64_t const cTicksToNext = (uint64_t)uInitialCount << uTimerShift;
2744
2745 Log2(("APIC%u: apicStartTimer: uInitialCount=%#RX32 uTimerShift=%u cTicksToNext=%RU64\n", pVCpu->idCpu, uInitialCount,
2746 uTimerShift, cTicksToNext));
2747
2748 /*
2749 * The assumption here is that the timer doesn't tick during this call
2750 * and thus setting a relative time to fire next is accurate. The advantage
2751 * and thus setting a relative time to fire next is accurate. The advantage,
2752 * however, is updating u64TimerInitial 'atomically' while setting the next
2753 */
2754 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
2755 TMTimerSetRelative(pTimer, cTicksToNext, &pApicCpu->u64TimerInitial);
2756 apicHintTimerFreq(pApicCpu, uInitialCount, uTimerShift);
2757}
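
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the guest-visible sequence that lands here is a divide-configuration write
 * followed by an initial-count write, assuming the standard DCR encoding
 * (0x3 = divide by 16):
 */
#if 0
apicWriteRegister(pApicDev, pVCpu, XAPIC_OFF_TIMER_DCR, 0x3);     /* divide by 16 -> shift 4 */
apicWriteRegister(pApicDev, pVCpu, XAPIC_OFF_TIMER_ICR, 100000);  /* non-zero count starts the timer */
#endif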
2758
2759
2760/**
2761 * Stops the APIC timer.
2762 *
2763 * @param pVCpu The cross context virtual CPU structure.
2764 * @thread Any.
2765 */
2766VMM_INT_DECL(void) apicStopTimer(PVMCPU pVCpu)
2767{
2768 Assert(pVCpu);
2769 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2770 Assert(TMTimerIsLockOwner(pApicCpu->CTX_SUFF(pTimer)));
2771
2772 Log2(("APIC%u: apicStopTimer\n", pVCpu->idCpu));
2773
2774 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
2775 TMTimerStop(pTimer); /* This will reset the hint, no need to explicitly call TMTimerSetFrequencyHint(). */
2776 pApicCpu->uHintedTimerInitialCount = 0;
2777 pApicCpu->uHintedTimerShift = 0;
2778}
2779
2780
2781/**
2782 * Queues a pending interrupt as in-service.
2783 *
2784 * This function should only be needed without virtualized APIC
2785 * registers. With virtualized APIC registers, it's sufficient to keep
2786 * the interrupts pending in the IRR as the hardware takes care of
2787 * virtual interrupt delivery.
2788 *
2789 * @returns true if the interrupt was queued to in-service interrupts,
2790 * false otherwise.
2791 * @param pVCpu The cross context virtual CPU structure.
2792 * @param u8PendingIntr The pending interrupt to queue as
2793 * in-service.
2794 *
2795 * @remarks This assumes the caller has done the necessary checks and
2796 * is ready to actually service the interrupt (TPR,
2797 * interrupt shadow etc.)
2798 */
2799VMMDECL(bool) APICQueueInterruptToService(PVMCPU pVCpu, uint8_t u8PendingIntr)
2800{
2801 VMCPU_ASSERT_EMT(pVCpu);
2802
2803 PVM pVM = pVCpu->CTX_SUFF(pVM);
2804 PAPIC pApic = VM_TO_APIC(pVM);
2805 Assert(!pApic->fVirtApicRegsEnabled);
2806 NOREF(pApic);
2807
2808 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2809 bool const fIsPending = apicTestVectorInReg(&pXApicPage->irr, u8PendingIntr);
2810 if (fIsPending)
2811 {
2812 apicClearVectorInReg(&pXApicPage->irr, u8PendingIntr);
2813 apicSetVectorInReg(&pXApicPage->isr, u8PendingIntr);
2814 apicUpdatePpr(pVCpu);
2815 return true;
2816 }
2817 return false;
2818}
2819
2820
2821/**
2822 * De-queues a pending interrupt from in-service.
2823 *
2824 * This undoes APICQueueInterruptToService() for premature VM-exits before event
2825 * injection.
2826 *
2827 * @param pVCpu The cross context virtual CPU structure.
2828 * @param u8PendingIntr The pending interrupt to de-queue from
2829 * in-service.
2830 */
2831VMMDECL(void) APICDequeueInterruptFromService(PVMCPU pVCpu, uint8_t u8PendingIntr)
2832{
2833 VMCPU_ASSERT_EMT(pVCpu);
2834
2835 PVM pVM = pVCpu->CTX_SUFF(pVM);
2836 PAPIC pApic = VM_TO_APIC(pVM);
2837 Assert(!pApic->fVirtApicRegsEnabled);
2838 NOREF(pApic);
2839
2840 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2841 bool const fInService = apicTestVectorInReg(&pXApicPage->isr, u8PendingIntr);
2842 if (fInService)
2843 {
2844 apicClearVectorInReg(&pXApicPage->isr, u8PendingIntr);
2845 apicSetVectorInReg(&pXApicPage->irr, u8PendingIntr);
2846 apicUpdatePpr(pVCpu);
2847 }
2848}
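
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the queue/de-queue pair moves a vector IRR -> ISR and back again, e.g.
 * around a premature VM-exit; uVector is a hypothetical pending vector.
 */
#if 0
if (APICQueueInterruptToService(pVCpu, uVector))      /* IRR -> ISR, PPR updated */
{
    /* ... event injection aborted by a premature VM-exit ... */
    APICDequeueInterruptFromService(pVCpu, uVector);  /* ISR -> IRR, state undone */
}
#endif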
2849
2850
2851/**
2852 * Updates pending interrupts from the pending-interrupt bitmaps to the IRR.
2853 *
2854 * @param pVCpu The cross context virtual CPU structure.
2855 */
2856VMMDECL(void) APICUpdatePendingInterrupts(PVMCPU pVCpu)
2857{
2858 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2859
2860 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2861 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2862 bool fHasPendingIntrs = false;
2863
2864 Log3(("APIC%u: APICUpdatePendingInterrupts:\n", pVCpu->idCpu));
2865 STAM_PROFILE_START(&pApicCpu->StatUpdatePendingIntrs, a);
2866
2867 /* Update edge-triggered pending interrupts. */
2868 PAPICPIB pPib = (PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib);
2869 for (;;)
2870 {
2871 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
2872 if (!fAlreadySet)
2873 break;
2874
2875 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->aVectorBitmap));
2876 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->aVectorBitmap); idxPib++, idxReg += 2)
2877 {
2878 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->aVectorBitmap[idxPib], 0);
2879 if (u64Fragment)
2880 {
2881 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
2882 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
2883
2884 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
2885 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
2886
2887 pXApicPage->tmr.u[idxReg].u32Reg &= ~u32FragmentLo;
2888 pXApicPage->tmr.u[idxReg + 1].u32Reg &= ~u32FragmentHi;
2889 fHasPendingIntrs = true;
2890 }
2891 }
2892 }
2893
2894 /* Update level-triggered pending interrupts. */
2895 pPib = (PAPICPIB)&pApicCpu->ApicPibLevel;
2896 for (;;)
2897 {
2898 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)&pApicCpu->ApicPibLevel);
2899 if (!fAlreadySet)
2900 break;
2901
2902 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->aVectorBitmap));
2903 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->aVectorBitmap); idxPib++, idxReg += 2)
2904 {
2905 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->aVectorBitmap[idxPib], 0);
2906 if (u64Fragment)
2907 {
2908 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
2909 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
2910
2911 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
2912 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
2913
2914 pXApicPage->tmr.u[idxReg].u32Reg |= u32FragmentLo;
2915 pXApicPage->tmr.u[idxReg + 1].u32Reg |= u32FragmentHi;
2916 fHasPendingIntrs = true;
2917 }
2918 }
2919 }
2920
2921 STAM_PROFILE_STOP(&pApicCpu->StatUpdatePendingIntrs, a);
2922 Log3(("APIC%u: APICUpdatePendingInterrupts: fHasPendingIntrs=%RTbool\n", pVCpu->idCpu, fHasPendingIntrs));
2923
2924 if ( fHasPendingIntrs
2925 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC))
2926 apicSignalNextPendingIntr(pVCpu);
2927}
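
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * each 64-bit PIB fragment spans two 32-bit IRR registers. E.g. vector 0x31
 * (decimal 49) is bit 49 of aVectorBitmap[0], i.e. bit 17 of the high
 * fragment, landing in irr.u[1] (vectors 32..63):
 */
#if 0
uint8_t  const uVec   = 0x31;
size_t   const idxPib = uVec / 64;              /* 0: first 64-bit fragment */
size_t   const idxReg = uVec / 32;              /* 1: irr.u[1] */
uint32_t const fBit   = RT_BIT_32(uVec % 32);   /* bit 17 */
#endif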
2928
2929
2930/**
2931 * Gets the highest priority pending interrupt.
2932 *
2933 * @returns true if any interrupt is pending, false otherwise.
2934 * @param pVCpu The cross context virtual CPU structure.
2935 * @param pu8PendingIntr Where to store the interrupt vector if the
2936 * interrupt is pending.
2937 */
2938VMMDECL(bool) APICGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr)
2939{
2940 VMCPU_ASSERT_EMT(pVCpu);
2941 return apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
2942}
2943