VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/APICAll.cpp@63525

Last change on this file since 63525 was 63525, checked in by vboxsync, 8 years ago

nit

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 103.5 KB
1/* $Id: APICAll.cpp 63525 2016-08-16 08:41:33Z vboxsync $ */
2/** @file
3 * APIC - Advanced Programmable Interrupt Controller - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_APIC
23#include "APICInternal.h"
24#include <VBox/vmm/pdmdev.h>
25#include <VBox/vmm/vm.h>
26#include <VBox/vmm/vmcpuset.h>
27
28
29/*********************************************************************************************************************************
30* Global Variables *
31*********************************************************************************************************************************/
32#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
33/** An ordered array of valid LVT masks. */
34static const uint32_t g_au32LvtValidMasks[] =
35{
36 XAPIC_LVT_TIMER_VALID,
37 XAPIC_LVT_THERMAL_VALID,
38 XAPIC_LVT_PERF_VALID,
39 XAPIC_LVT_LINT_VALID, /* LINT0 */
40 XAPIC_LVT_LINT_VALID, /* LINT1 */
41 XAPIC_LVT_ERROR_VALID
42};
43#endif
44
45#if 0
46/** @todo CMCI */
47static const uint32_t g_au32LvtExtValidMask[] =
48{
49 XAPIC_LVT_CMCI_VALID
50};
51#endif
52
53
54/**
55 * Checks if a vector is set in an APIC 256-bit sparse register.
56 *
57 * @returns true if the specified vector is set, false otherwise.
58 * @param pApicReg The APIC 256-bit sparse register.
59 * @param uVector The vector to check if set.
60 */
61DECLINLINE(bool) apicTestVectorInReg(const volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
62{
63 const volatile uint8_t *pbBitmap = (const volatile uint8_t *)&pApicReg->u[0];
64 return ASMBitTest(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
65}
66
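/*
 * Illustrative sketch of how a vector indexes the 256-bit sparse register,
 * assuming the APICInternal.h layout where each 32-bit fragment sits at a
 * 16-byte stride: the high three bits of the vector pick the fragment and
 * the low five bits pick the bit within it. E.g. for uVector = 0x41 (65):
 *   XAPIC_REG256_VECTOR_OFF(0x41) = (0x41 & 0xe0) >> 1 = 0x20  (fragment 2)
 *   XAPIC_REG256_VECTOR_BIT(0x41) = 0x41 & 0x1f = 1            (bit 1)
 */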
67
68/**
69 * Sets the vector in an APIC 256-bit sparse register.
70 *
71 * @param pApicReg The APIC 256-bit sparse register.
72 * @param uVector The vector to set.
73 */
74DECLINLINE(void) apicSetVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
75{
76 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
77 ASMAtomicBitSet(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
78}
79
80
81/**
82 * Clears the vector in an APIC 256-bit sparse register.
83 *
84 * @param pApicReg The APIC 256-bit sparse register.
85 * @param uVector The vector to clear.
86 */
87DECLINLINE(void) apicClearVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
88{
89 volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
90 ASMAtomicBitClear(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
91}
92
93
94#if 0 /* unused */
95/**
96 * Checks if a vector is set in an APIC Pending-Interrupt Bitmap (PIB).
97 *
98 * @returns true if the specified vector is set, false otherwise.
99 * @param pvPib Opaque pointer to the PIB.
100 * @param uVector The vector to check if set.
101 */
102DECLINLINE(bool) apicTestVectorInPib(volatile void *pvPib, uint8_t uVector)
103{
104 return ASMBitTest(pvPib, uVector);
105}
106#endif /* unused */
107
108
109/**
110 * Atomically sets the PIB notification bit.
111 *
112 * @returns non-zero if the bit was already set, 0 otherwise.
113 * @param pApicPib Pointer to the PIB.
114 */
115DECLINLINE(uint32_t) apicSetNotificationBitInPib(PAPICPIB pApicPib)
116{
117 return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, RT_BIT_32(31));
118}
119
120
121/**
122 * Atomically tests and clears the PIB notification bit.
123 *
124 * @returns non-zero if the bit was already set, 0 otherwise.
125 * @param pApicPib Pointer to the PIB.
126 */
127DECLINLINE(uint32_t) apicClearNotificationBitInPib(PAPICPIB pApicPib)
128{
129 return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, UINT32_C(0));
130}
131
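/*
 * Since ASMAtomicXchgU32 returns the previous value, the two helpers above
 * double as atomic test-and-set / test-and-clear. A hypothetical poster
 * (sketch only, not a function in this file) would notify the target CPU
 * only when the notification bit was previously clear:
 */
#if 0 /* illustrative sketch */
static void apicExamplePostNotification(PAPICPIB pApicPib)
{
    if (!apicSetNotificationBitInPib(pApicPib))
    {
        /* The bit was clear before the exchange: we are the first poster,
           so kick the target VCPU here. */
    }
    /* Otherwise a notification is already outstanding; nothing to do. */
}
#endif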
132
133/**
134 * Sets the vector in an APIC Pending-Interrupt Bitmap (PIB).
135 *
136 * @param pvPib Opaque pointer to the PIB.
137 * @param uVector The vector to set.
138 */
139DECLINLINE(void) apicSetVectorInPib(volatile void *pvPib, uint8_t uVector)
140{
141 ASMAtomicBitSet(pvPib, uVector);
142}
143
144#if 0 /* unused */
145/**
146 * Clears the vector in an APIC Pending-Interrupt Bitmap (PIB).
147 *
148 * @param pvPib Opaque pointer to the PIB.
149 * @param uVector The vector to clear.
150 */
151DECLINLINE(void) apicClearVectorInPib(volatile void *pvPib, uint8_t uVector)
152{
153 ASMAtomicBitClear(pvPib, uVector);
154}
155#endif /* unused */
156
157#if 0 /* unused */
158/**
159 * Atomically OR's a fragment (32 vectors) into an APIC 256-bit sparse
160 * register.
161 *
162 * @param pApicReg The APIC 256-bit sparse register.
163 * @param idxFragment The index of the 32-bit fragment in @a
164 * pApicReg.
165 * @param u32Fragment The 32-bit vector fragment to OR.
166 */
167DECLINLINE(void) apicOrVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
168{
169 Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
170 ASMAtomicOrU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
171}
172#endif /* unused */
173
174
175#if 0 /* unused */
176/**
177 * Atomically AND's a fragment (32 vectors) into an APIC
178 * 256-bit sparse register.
179 *
180 * @param pApicReg The APIC 256-bit sparse register.
181 * @param idxFragment The index of the 32-bit fragment in @a
182 * pApicReg.
183 * @param u32Fragment The 32-bit vector fragment to AND.
184 */
185DECLINLINE(void) apicAndVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
186{
187 Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
188 ASMAtomicAndU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
189}
190#endif /* unused */
191
192
193/**
194 * Reports and returns appropriate error code for invalid MSR accesses.
195 *
196 * @returns Strict VBox status code.
197 * @retval VINF_CPUM_R3_MSR_WRITE if the MSR write could not be serviced in the
198 * current context (raw-mode or ring-0).
199 * @retval VINF_CPUM_R3_MSR_READ if the MSR read could not be serviced in the
200 * current context (raw-mode or ring-0).
201 * @retval VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
202 * appropriate actions.
203 *
204 * @param pVCpu The cross context virtual CPU structure.
205 * @param u32Reg The MSR being accessed.
206 * @param enmAccess The invalid-access type.
207 */
208static VBOXSTRICTRC apicMsrAccessError(PVMCPU pVCpu, uint32_t u32Reg, APICMSRACCESS enmAccess)
209{
210 static struct
211 {
212 const char *pszBefore; /* The error message before printing the MSR index */
213 const char *pszAfter; /* The error message after printing the MSR index */
214 int rcRZ; /* The RZ error code */
215 } const s_aAccess[] =
216 {
217 { "read MSR", " while not in x2APIC mode", VINF_CPUM_R3_MSR_READ },
218 { "write MSR", " while not in x2APIC mode", VINF_CPUM_R3_MSR_WRITE },
219 { "read reserved/unknown MSR", "", VINF_CPUM_R3_MSR_READ },
220 { "write reserved/unknown MSR", "", VINF_CPUM_R3_MSR_WRITE },
221 { "read write-only MSR", "", VINF_CPUM_R3_MSR_READ },
222 { "write read-only MSR", "", VINF_CPUM_R3_MSR_WRITE },
223 { "read reserved bits of MSR", "", VINF_CPUM_R3_MSR_READ },
224 { "write reserved bits of MSR", "", VINF_CPUM_R3_MSR_WRITE },
225 { "write an invalid value to MSR", "", VINF_CPUM_R3_MSR_WRITE },
226 { "write MSR", "disallowed by configuration", VINF_CPUM_R3_MSR_WRITE }
227 };
228 AssertCompile(RT_ELEMENTS(s_aAccess) == APICMSRACCESS_COUNT);
229
230 size_t const i = enmAccess;
231 Assert(i < RT_ELEMENTS(s_aAccess));
232#ifdef IN_RING3
233 LogRelMax(5, ("APIC%u: Attempt to %s (%#x)%s -> #GP(0)\n", pVCpu->idCpu, s_aAccess[i].pszBefore, u32Reg,
234 s_aAccess[i].pszAfter));
235 return VERR_CPUM_RAISE_GP_0;
236#else
237 RT_NOREF_PV(u32Reg); RT_NOREF_PV(pVCpu);
238 return s_aAccess[i].rcRZ;
239#endif
240}
241
242
243/**
244 * Gets the descriptive APIC mode.
245 *
246 * @returns The name.
247 * @param enmMode The xAPIC mode.
248 */
249const char *apicGetModeName(APICMODE enmMode)
250{
251 switch (enmMode)
252 {
253 case APICMODE_DISABLED: return "Disabled";
254 case APICMODE_XAPIC: return "xAPIC";
255 case APICMODE_X2APIC: return "x2APIC";
256 default: break;
257 }
258 return "Invalid";
259}
260
261
262/**
263 * Gets the descriptive destination format name.
264 *
265 * @returns The destination format name.
266 * @param enmDestFormat The destination format.
267 */
268const char *apicGetDestFormatName(XAPICDESTFORMAT enmDestFormat)
269{
270 switch (enmDestFormat)
271 {
272 case XAPICDESTFORMAT_FLAT: return "Flat";
273 case XAPICDESTFORMAT_CLUSTER: return "Cluster";
274 default: break;
275 }
276 return "Invalid";
277}
278
279
280/**
281 * Gets the descriptive delivery mode name.
282 *
283 * @returns The delivery mode name.
284 * @param enmDeliveryMode The delivery mode.
285 */
286const char *apicGetDeliveryModeName(XAPICDELIVERYMODE enmDeliveryMode)
287{
288 switch (enmDeliveryMode)
289 {
290 case XAPICDELIVERYMODE_FIXED: return "Fixed";
291 case XAPICDELIVERYMODE_LOWEST_PRIO: return "Lowest-priority";
292 case XAPICDELIVERYMODE_SMI: return "SMI";
293 case XAPICDELIVERYMODE_NMI: return "NMI";
294 case XAPICDELIVERYMODE_INIT: return "INIT";
295 case XAPICDELIVERYMODE_STARTUP: return "SIPI";
296 case XAPICDELIVERYMODE_EXTINT: return "ExtINT";
297 default: break;
298 }
299 return "Invalid";
300}
301
302
303/**
304 * Gets the descriptive destination mode name.
305 *
306 * @returns The destination mode name.
307 * @param enmDestMode The destination mode.
308 */
309const char *apicGetDestModeName(XAPICDESTMODE enmDestMode)
310{
311 switch (enmDestMode)
312 {
313 case XAPICDESTMODE_PHYSICAL: return "Physical";
314 case XAPICDESTMODE_LOGICAL: return "Logical";
315 default: break;
316 }
317 return "Invalid";
318}
319
320
321/**
322 * Gets the descriptive trigger mode name.
323 *
324 * @returns The trigger mode name.
325 * @param enmTriggerMode The trigger mode.
326 */
327const char *apicGetTriggerModeName(XAPICTRIGGERMODE enmTriggerMode)
328{
329 switch (enmTriggerMode)
330 {
331 case XAPICTRIGGERMODE_EDGE: return "Edge";
332 case XAPICTRIGGERMODE_LEVEL: return "Level";
333 default: break;
334 }
335 return "Invalid";
336}
337
338
339/**
340 * Gets the destination shorthand name.
341 *
342 * @returns The destination shorthand name.
343 * @param enmDestShorthand The destination shorthand.
344 */
345const char *apicGetDestShorthandName(XAPICDESTSHORTHAND enmDestShorthand)
346{
347 switch (enmDestShorthand)
348 {
349 case XAPICDESTSHORTHAND_NONE: return "None";
350 case XAPICDESTSHORTHAND_SELF: return "Self";
351 case XAPIDDESTSHORTHAND_ALL_INCL_SELF: return "All including self";
352 case XAPICDESTSHORTHAND_ALL_EXCL_SELF: return "All excluding self";
353 default: break;
354 }
355 return "Invalid";
356}
357
358
359/**
360 * Gets the timer mode name.
361 *
362 * @returns The timer mode name.
363 * @param enmTimerMode The timer mode.
364 */
365const char *apicGetTimerModeName(XAPICTIMERMODE enmTimerMode)
366{
367 switch (enmTimerMode)
368 {
369 case XAPICTIMERMODE_ONESHOT: return "One-shot";
370 case XAPICTIMERMODE_PERIODIC: return "Periodic";
371 case XAPICTIMERMODE_TSC_DEADLINE: return "TSC deadline";
372 default: break;
373 }
374 return "Invalid";
375}
376
377
378/**
379 * Gets the APIC mode given the base MSR value.
380 *
381 * @returns The APIC mode.
382 * @param uApicBaseMsr The APIC Base MSR value.
383 */
384APICMODE apicGetMode(uint64_t uApicBaseMsr)
385{
386 uint32_t const uMode = (uApicBaseMsr >> 10) & UINT64_C(3);
387 APICMODE const enmMode = (APICMODE)uMode;
388#ifdef VBOX_STRICT
389 /* Paranoia. */
390 switch (uMode)
391 {
392 case APICMODE_DISABLED:
393 case APICMODE_INVALID:
394 case APICMODE_XAPIC:
395 case APICMODE_X2APIC:
396 break;
397 default:
398 AssertMsgFailed(("Invalid mode"));
399 }
400#endif
401 return enmMode;
402}
403
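/*
 * Worked example for the decode above: bit 11 of the APIC base MSR is EN and
 * bit 10 is EXTD, so (uApicBaseMsr >> 10) & 3 yields:
 *   0xFEE00000 -> 0 -> APICMODE_DISABLED  (EN=0, EXTD=0)
 *   0xFEE00800 -> 2 -> APICMODE_XAPIC     (EN=1, EXTD=0)
 *   0xFEE00C00 -> 3 -> APICMODE_X2APIC    (EN=1, EXTD=1)
 */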
404
405/**
406 * Returns whether the APIC is hardware enabled or not.
407 *
408 * @returns true if enabled, false otherwise.
409 */
410DECLINLINE(bool) apicIsEnabled(PVMCPU pVCpu)
411{
412 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
413 return RT_BOOL(pApicCpu->uApicBaseMsr & MSR_IA32_APICBASE_EN);
414}
415
416
417/**
418 * Finds the most significant set bit in an APIC 256-bit sparse register.
419 *
420 * @returns @a rcNotFound if no bit was set, 0-255 otherwise.
421 * @param pReg The APIC 256-bit sparse register.
422 * @param rcNotFound What to return when no bit is set.
423 */
424static int apicGetHighestSetBitInReg(volatile const XAPIC256BITREG *pReg, int rcNotFound)
425{
426 ssize_t const cFragments = RT_ELEMENTS(pReg->u);
427 unsigned const uFragmentShift = 5;
428 AssertCompile(1 << uFragmentShift == sizeof(pReg->u[0].u32Reg) * 8);
429 for (ssize_t i = cFragments - 1; i >= 0; i--)
430 {
431 uint32_t const uFragment = pReg->u[i].u32Reg;
432 if (uFragment)
433 {
434 unsigned idxSetBit = ASMBitLastSetU32(uFragment);
435 --idxSetBit;
436 idxSetBit |= i << uFragmentShift;
437 return idxSetBit;
438 }
439 }
440 return rcNotFound;
441}
442
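/*
 * Worked example for the search above: if fragment i=4 holds 0x80000001 and
 * all higher fragments are zero, ASMBitLastSetU32 returns 32 (1-based), so
 * idxSetBit = 31 and 31 | (4 << 5) = 159, i.e. vector 0x9F is reported.
 */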
443
444/**
445 * Reads a 32-bit register at a specified offset.
446 *
447 * @returns The value at the specified offset.
448 * @param pXApicPage The xAPIC page.
449 * @param offReg The offset of the register being read.
450 */
451DECLINLINE(uint32_t) apicReadRaw32(PCXAPICPAGE pXApicPage, uint16_t offReg)
452{
453 Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
454 uint8_t const *pbXApic = (const uint8_t *)pXApicPage;
455 uint32_t const uValue = *(const uint32_t *)(pbXApic + offReg);
456 return uValue;
457}
458
459
460/**
461 * Writes a 32-bit register at a specified offset.
462 *
463 * @param pXApicPage The xAPIC page.
464 * @param offReg The offset of the register being written.
465 * @param uReg The value of the register.
466 */
467DECLINLINE(void) apicWriteRaw32(PXAPICPAGE pXApicPage, uint16_t offReg, uint32_t uReg)
468{
469 Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
470 uint8_t *pbXApic = (uint8_t *)pXApicPage;
471 *(uint32_t *)(pbXApic + offReg) = uReg;
472}
473
474
475/**
476 * Broadcasts the EOI to the I/O APICs.
477 *
478 * @param pVCpu The cross context virtual CPU structure.
479 * @param uVector The interrupt vector corresponding to the EOI.
480 */
481DECLINLINE(int) apicBusBroadcastEoi(PVMCPU pVCpu, uint8_t uVector)
482{
483 PVM pVM = pVCpu->CTX_SUFF(pVM);
484 PAPICDEV pApicDev = VM_TO_APICDEV(pVM);
485 return pApicDev->CTX_SUFF(pApicHlp)->pfnBusBroadcastEoi(pApicDev->CTX_SUFF(pDevIns), uVector);
486}
487
488
489/**
490 * Sets an error in the internal ESR of the specified APIC.
491 *
492 * @param pVCpu The cross context virtual CPU structure.
493 * @param uError The error.
494 * @thread Any.
495 */
496DECLINLINE(void) apicSetError(PVMCPU pVCpu, uint32_t uError)
497{
498 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
499 ASMAtomicOrU32(&pApicCpu->uEsrInternal, uError);
500}
501
502
503/**
504 * Clears all errors in the internal ESR.
505 *
506 * @returns The value of the internal ESR before clearing.
507 * @param pVCpu The cross context virtual CPU structure.
508 */
509DECLINLINE(uint32_t) apicClearAllErrors(PVMCPU pVCpu)
510{
511 VMCPU_ASSERT_EMT(pVCpu);
512 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
513 return ASMAtomicXchgU32(&pApicCpu->uEsrInternal, 0);
514}
515
516
517/**
518 * Signals the guest if a pending interrupt is ready to be serviced.
519 *
520 * @param pVCpu The cross context virtual CPU structure.
521 */
522static void apicSignalNextPendingIntr(PVMCPU pVCpu)
523{
524 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
525
526 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
527 if (pXApicPage->svr.u.fApicSoftwareEnable)
528 {
529 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1 /* rcNotFound */);
530 if (irrv >= 0)
531 {
532 Assert(irrv <= (int)UINT8_MAX);
533 uint8_t const uVector = irrv;
534 uint8_t const uPpr = pXApicPage->ppr.u8Ppr;
535 if ( !uPpr
536 || XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
537 {
538 Log2(("APIC%u: apicSignalNextPendingIntr: Signaling pending interrupt. uVector=%#x\n", pVCpu->idCpu, uVector));
539 apicSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
540 }
541 else
542 {
543 Log2(("APIC%u: apicSignalNextPendingIntr: Nothing to signal. uVector=%#x uPpr=%#x uTpr=%#x\n", pVCpu->idCpu,
544 uVector, uPpr, pXApicPage->tpr.u8Tpr));
545 }
546 }
547 }
548 else
549 {
550 Log2(("APIC%u: apicSignalNextPendingIntr: APIC software-disabled, clearing pending interrupt\n", pVCpu->idCpu));
551 apicClearInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
552 }
553}
554
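/*
 * Note on the check above: only the priority class (the high nibble) is
 * compared. E.g. with uPpr = 0x3A, a pending vector 0x41 is signaled
 * (class 4 > class 3) while a pending vector 0x35 is held back
 * (class 3 is not above class 3).
 */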
555
556/**
557 * Sets the Spurious-Interrupt Vector Register (SVR).
558 *
559 * @returns Strict VBox status code.
560 * @param pVCpu The cross context virtual CPU structure.
561 * @param uSvr The SVR value.
562 */
563static VBOXSTRICTRC apicSetSvr(PVMCPU pVCpu, uint32_t uSvr)
564{
565 VMCPU_ASSERT_EMT(pVCpu);
566
567 uint32_t uValidMask = XAPIC_SVR_VALID;
568 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
569 if (pXApicPage->version.u.fEoiBroadcastSupression)
570 uValidMask |= XAPIC_SVR_SUPRESS_EOI_BROADCAST;
571
572 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
573 && (uSvr & ~uValidMask))
574 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_SVR, APICMSRACCESS_WRITE_RSVD_BITS);
575
576 Log2(("APIC%u: apicSetSvr: uSvr=%#RX32\n", pVCpu->idCpu, uSvr));
577 apicWriteRaw32(pXApicPage, XAPIC_OFF_SVR, uSvr);
578 if (!pXApicPage->svr.u.fApicSoftwareEnable)
579 {
580 /** @todo CMCI. */
581 pXApicPage->lvt_timer.u.u1Mask = 1;
582#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
583 pXApicPage->lvt_thermal.u.u1Mask = 1;
584#endif
585 pXApicPage->lvt_perf.u.u1Mask = 1;
586 pXApicPage->lvt_lint0.u.u1Mask = 1;
587 pXApicPage->lvt_lint1.u.u1Mask = 1;
588 pXApicPage->lvt_error.u.u1Mask = 1;
589 }
590
591 apicSignalNextPendingIntr(pVCpu);
592 return VINF_SUCCESS;
593}
594
595
596/**
597 * Sends an interrupt to one or more APICs.
598 *
599 * @returns Strict VBox status code.
600 * @param pVM The cross context VM structure.
601 * @param pVCpu The cross context virtual CPU structure, can be
602 * NULL if the source of the interrupt is not an
603 * APIC (for e.g. a bus).
604 * @param uVector The interrupt vector.
605 * @param enmTriggerMode The trigger mode.
606 * @param enmDeliveryMode The delivery mode.
607 * @param pDestCpuSet The destination CPU set.
608 * @param pfIntrAccepted Where to store whether this interrupt was
609 * accepted by the target APIC(s) or not.
610 * Optional, can be NULL.
611 * @param rcRZ The return code if the operation cannot be
612 * performed in the current context.
613 */
614static VBOXSTRICTRC apicSendIntr(PVM pVM, PVMCPU pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode,
615 XAPICDELIVERYMODE enmDeliveryMode, PCVMCPUSET pDestCpuSet, bool *pfIntrAccepted, int rcRZ)
616{
617 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
618 VMCPUID const cCpus = pVM->cCpus;
619 bool fAccepted = false;
620 switch (enmDeliveryMode)
621 {
622 case XAPICDELIVERYMODE_FIXED:
623 {
624 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
625 {
626 if ( VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
627 && apicIsEnabled(&pVM->aCpus[idCpu]))
628 fAccepted = apicPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
629 }
630 break;
631 }
632
633 case XAPICDELIVERYMODE_LOWEST_PRIO:
634 {
635 VMCPUID const idCpu = VMCPUSET_FIND_FIRST_PRESENT(pDestCpuSet);
636 if ( idCpu < pVM->cCpus
637 && apicIsEnabled(&pVM->aCpus[idCpu]))
638 fAccepted = apicPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode);
639 else
640 AssertMsgFailed(("APIC: apicSendIntr: No CPU found for lowest-priority delivery mode! idCpu=%u\n", idCpu));
641 break;
642 }
643
644 case XAPICDELIVERYMODE_SMI:
645 {
646 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
647 {
648 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
649 {
650 Log2(("APIC: apicSendIntr: Raising SMI on VCPU%u\n", idCpu));
651 apicSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_SMI);
652 fAccepted = true;
653 }
654 }
655 break;
656 }
657
658 case XAPICDELIVERYMODE_NMI:
659 {
660 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
661 {
662 if ( VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
663 && apicIsEnabled(&pVM->aCpus[idCpu]))
664 {
665 Log2(("APIC: apicSendIntr: Raising NMI on VCPU%u\n", idCpu));
666 apicSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_NMI);
667 fAccepted = true;
668 }
669 }
670 break;
671 }
672
673 case XAPICDELIVERYMODE_INIT:
674 {
675#ifdef IN_RING3
676 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
677 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
678 {
679 Log2(("APIC: apicSendIntr: Issuing INIT to VCPU%u\n", idCpu));
680 VMMR3SendInitIpi(pVM, idCpu);
681 fAccepted = true;
682 }
683#else
684 /* We need to return to ring-3 to deliver the INIT. */
685 rcStrict = rcRZ;
686 fAccepted = true;
687#endif
688 break;
689 }
690
691 case XAPICDELIVERYMODE_STARTUP:
692 {
693#ifdef IN_RING3
694 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
695 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
696 {
697 Log2(("APIC: apicSendIntr: Issuing SIPI to VCPU%u\n", idCpu));
698 VMMR3SendStartupIpi(pVM, idCpu, uVector);
699 fAccepted = true;
700 }
701#else
702 /* We need to return to ring-3 to deliver the SIPI. */
703 rcStrict = rcRZ;
704 fAccepted = true;
705 Log2(("APIC: apicSendIntr: SIPI issued, returning to RZ. rc=%Rrc\n", rcRZ));
706#endif
707 break;
708 }
709
710 case XAPICDELIVERYMODE_EXTINT:
711 {
712 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
713 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
714 {
715 Log2(("APIC: apicSendIntr: Raising EXTINT on VCPU%u\n", idCpu));
716 apicSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_EXTINT);
717 fAccepted = true;
718 }
719 break;
720 }
721
722 default:
723 {
724 AssertMsgFailed(("APIC: apicSendIntr: Unsupported delivery mode %#x (%s)\n", enmDeliveryMode,
725 apicGetDeliveryModeName(enmDeliveryMode)));
726 break;
727 }
728 }
729
730 /*
731 * If an illegal vector is programmed, set the 'send illegal vector' error here if the
732 * interrupt is being sent by an APIC.
733 *
734 * The 'receive illegal vector' will be set on the target APIC when the interrupt
735 * gets generated, see apicPostInterrupt().
736 *
737 * See Intel spec. 10.5.3 "Error Handling".
738 */
739 if ( rcStrict != rcRZ
740 && pVCpu)
741 {
742 /*
743 * Flag only errors when the delivery mode is fixed and not others.
744 *
745 * Ubuntu 10.04-3 amd64 live CD with 2 VCPUs gets upset as it sends an SIPI to the
746 * 2nd VCPU with vector 6 and checks the ESR for no errors, see @bugref{8245#c86}.
747 */
748 /** @todo The spec says this for LVT, but not explicitly for ICR-lo;
749 * it probably holds there as well. */
750 if (enmDeliveryMode == XAPICDELIVERYMODE_FIXED)
751 {
752 if (RT_UNLIKELY(uVector <= XAPIC_ILLEGAL_VECTOR_END))
753 apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
754 }
755 }
756
757 if (pfIntrAccepted)
758 *pfIntrAccepted = fAccepted;
759
760 return rcStrict;
761}
762
763
764/**
765 * Checks if this APIC belongs to a logical destination.
766 *
767 * @returns true if the APIC belongs to the logical
768 * destination, false otherwise.
769 * @param pVCpu The cross context virtual CPU structure.
770 * @param fDest The destination mask.
771 *
772 * @thread Any.
773 */
774static bool apicIsLogicalDest(PVMCPU pVCpu, uint32_t fDest)
775{
776 if (XAPIC_IN_X2APIC_MODE(pVCpu))
777 {
778 /*
779 * Flat logical mode is not supported in x2APIC mode.
780 * In clustered logical mode, the 32-bit logical ID in the LDR is interpreted as follows:
781 * - High 16 bits is the cluster ID.
782 * - Low 16 bits: each bit represents a unique APIC within the cluster.
783 */
784 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
785 uint32_t const u32Ldr = pX2ApicPage->ldr.u32LogicalApicId;
786 if (X2APIC_LDR_GET_CLUSTER_ID(u32Ldr) == (fDest & X2APIC_LDR_CLUSTER_ID))
787 return RT_BOOL(u32Ldr & fDest & X2APIC_LDR_LOGICAL_ID);
788 return false;
789 }
790
791#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
792 /*
793 * In both flat and clustered logical mode, a destination mask of all set bits indicates a broadcast.
794 * See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
795 */
796 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
797 if ((fDest & XAPIC_LDR_FLAT_LOGICAL_ID) == XAPIC_LDR_FLAT_LOGICAL_ID)
798 return true;
799
800 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
801 XAPICDESTFORMAT enmDestFormat = (XAPICDESTFORMAT)pXApicPage->dfr.u.u4Model;
802 if (enmDestFormat == XAPICDESTFORMAT_FLAT)
803 {
804 /* The destination mask is interpreted as a bitmap of 8 unique logical APIC IDs. */
805 uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
806 return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_FLAT_LOGICAL_ID);
807 }
808
809 /*
810 * In clustered logical mode, the 8-bit logical ID in the LDR is interpreted as follows:
811 * - High 4 bits is the cluster ID.
812 * - Low 4 bits: each bit represents a unique APIC within the cluster.
813 */
814 Assert(enmDestFormat == XAPICDESTFORMAT_CLUSTER);
815 uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
816 if (XAPIC_LDR_CLUSTERED_GET_CLUSTER_ID(u8Ldr) == (fDest & XAPIC_LDR_CLUSTERED_CLUSTER_ID))
817 return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_CLUSTERED_LOGICAL_ID);
818 return false;
819#else
820# error "Implement Pentium and P6 family APIC architectures"
821#endif
822}
823
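/*
 * Clustered xAPIC example for the logic above, assuming the cluster macros
 * mask the high nibble of the LDR: with u8Ldr = 0x23 (cluster 2, intra-cluster
 * bits 0 and 1) and fDest = 0x21 (cluster 2, bit 0), the cluster nibbles match
 * and 0x23 & 0x21 & 0x0f = 0x01 is non-zero, so this APIC is targeted.
 */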
824
825/**
826 * Figures out the set of destination CPUs for a given destination mode, format
827 * and delivery mode setting.
828 *
829 * @param pVM The cross context VM structure.
830 * @param fDestMask The destination mask.
831 * @param fBroadcastMask The broadcast mask.
832 * @param enmDestMode The destination mode.
833 * @param enmDeliveryMode The delivery mode.
834 * @param pDestCpuSet The destination CPU set to update.
835 */
836static void apicGetDestCpuSet(PVM pVM, uint32_t fDestMask, uint32_t fBroadcastMask, XAPICDESTMODE enmDestMode,
837 XAPICDELIVERYMODE enmDeliveryMode, PVMCPUSET pDestCpuSet)
838{
839 VMCPUSET_EMPTY(pDestCpuSet);
840
841 /*
842 * Physical destination mode only supports either a broadcast or a single target.
843 * - Broadcast with lowest-priority delivery mode is not supported[1], we deliver it
844 * as a regular broadcast like in fixed delivery mode.
845 * - For a single target, lowest-priority delivery mode makes no sense. We deliver
846 * to the target like in fixed delivery mode.
847 *
848 * [1] See Intel spec. 10.6.2.1 "Physical Destination Mode".
849 */
850 if ( enmDestMode == XAPICDESTMODE_PHYSICAL
851 && enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
852 {
853 AssertMsgFailed(("APIC: Lowest-priority delivery using physical destination mode!"));
854 enmDeliveryMode = XAPICDELIVERYMODE_FIXED;
855 }
856
857 uint32_t const cCpus = pVM->cCpus;
858 if (enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
859 {
860 Assert(enmDestMode == XAPICDESTMODE_LOGICAL);
861#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
862 VMCPUID idCpuLowestTpr = NIL_VMCPUID;
863 uint8_t u8LowestTpr = UINT8_C(0xff);
864 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
865 {
866 PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
867 if (apicIsLogicalDest(pVCpuDest, fDestMask))
868 {
869 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDest);
870 uint8_t const u8Tpr = pXApicPage->tpr.u8Tpr; /* PAV */
871
872 /*
873 * If there is a tie for lowest priority, the local APIC with the highest ID is chosen.
874 * Hence the use of "<=" in the check below.
875 * See AMD spec. 16.6.2 "Lowest Priority Messages and Arbitration".
876 */
877 if (u8Tpr <= u8LowestTpr)
878 {
879 u8LowestTpr = u8Tpr;
880 idCpuLowestTpr = idCpu;
881 }
882 }
883 }
884 if (idCpuLowestTpr != NIL_VMCPUID)
885 VMCPUSET_ADD(pDestCpuSet, idCpuLowestTpr);
886#else
887# error "Implement Pentium and P6 family APIC architectures"
888#endif
889 return;
890 }
891
892 /*
893 * x2APIC:
894 * - In both physical and logical destination mode, a destination mask of 0xffffffff implies a broadcast[1].
895 * xAPIC:
896 * - In physical destination mode, a destination mask of 0xff implies a broadcast[2].
897 * - In both flat and clustered logical mode, a destination mask of 0xff implies a broadcast[3].
898 *
899 * [1] See Intel spec. 10.12.9 "ICR Operation in x2APIC Mode".
900 * [2] See Intel spec. 10.6.2.1 "Physical Destination Mode".
901 * [3] See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
902 */
903 if ((fDestMask & fBroadcastMask) == fBroadcastMask)
904 {
905 VMCPUSET_FILL(pDestCpuSet);
906 return;
907 }
908
909 if (enmDestMode == XAPICDESTMODE_PHYSICAL)
910 {
911 /* The destination mask is interpreted as the physical APIC ID of a single target. */
912#if 1
913 /* Since our physical APIC ID is read-only to software, set the corresponding bit in the CPU set. */
914 if (RT_LIKELY(fDestMask < cCpus))
915 VMCPUSET_ADD(pDestCpuSet, fDestMask);
916#else
917 /* The physical APIC ID may not match our VCPU ID, search through the list of targets. */
918 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
919 {
920 PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
921 if (XAPIC_IN_X2APIC_MODE(pVCpuDest))
922 {
923 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpuDest);
924 if (pX2ApicPage->id.u32ApicId == fDestMask)
925 VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
926 }
927 else
928 {
929 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDest);
930 if (pXApicPage->id.u8ApicId == (uint8_t)fDestMask)
931 VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
932 }
933 }
934#endif
935 }
936 else
937 {
938 Assert(enmDestMode == XAPICDESTMODE_LOGICAL);
939
940 /* A destination mask of all 0's implies no target APICs (since it's interpreted as a bitmap or partial bitmap). */
941 if (RT_UNLIKELY(!fDestMask))
942 return;
943
944 /* The destination mask is interpreted as a bitmap of software-programmable logical APIC ID of the target APICs. */
945 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
946 {
947 PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
948 if (apicIsLogicalDest(pVCpuDest, fDestMask))
949 VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
950 }
951 }
952}
953
954
955/**
956 * Sends an Interprocessor Interrupt (IPI) using values from the Interrupt
957 * Command Register (ICR).
958 *
959 * @returns VBox status code.
960 * @param pVCpu The cross context virtual CPU structure.
961 * @param rcRZ The return code if the operation cannot be
962 * performed in the current context.
963 */
964DECLINLINE(VBOXSTRICTRC) apicSendIpi(PVMCPU pVCpu, int rcRZ)
965{
966 VMCPU_ASSERT_EMT(pVCpu);
967
968 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
969 XAPICDELIVERYMODE const enmDeliveryMode = (XAPICDELIVERYMODE)pXApicPage->icr_lo.u.u3DeliveryMode;
970 XAPICDESTMODE const enmDestMode = (XAPICDESTMODE)pXApicPage->icr_lo.u.u1DestMode;
971 XAPICINITLEVEL const enmInitLevel = (XAPICINITLEVEL)pXApicPage->icr_lo.u.u1Level;
972 XAPICTRIGGERMODE const enmTriggerMode = (XAPICTRIGGERMODE)pXApicPage->icr_lo.u.u1TriggerMode;
973 XAPICDESTSHORTHAND const enmDestShorthand = (XAPICDESTSHORTHAND)pXApicPage->icr_lo.u.u2DestShorthand;
974 uint8_t const uVector = pXApicPage->icr_lo.u.u8Vector;
975
976 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
977 uint32_t const fDest = XAPIC_IN_X2APIC_MODE(pVCpu) ? pX2ApicPage->icr_hi.u32IcrHi : pXApicPage->icr_hi.u.u8Dest;
978
979#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
980 /*
981 * INIT Level De-assert is not supported on Pentium 4 and Xeon processors.
982 * Apparently, this also applies to NMI, SMI, lowest-priority and fixed delivery modes,
983 * see @bugref{8245#c116}.
984 *
985 * See AMD spec. 16.5 "Interprocessor Interrupts (IPI)" for a table of valid ICR combinations.
986 */
987 if ( enmTriggerMode == XAPICTRIGGERMODE_LEVEL
988 && enmInitLevel == XAPICINITLEVEL_DEASSERT
989 && ( enmDeliveryMode == XAPICDELIVERYMODE_FIXED
990 || enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO
991 || enmDeliveryMode == XAPICDELIVERYMODE_SMI
992 || enmDeliveryMode == XAPICDELIVERYMODE_NMI
993 || enmDeliveryMode == XAPICDELIVERYMODE_INIT))
994 {
995 Log2(("APIC%u: %s level de-assert unsupported, ignoring!\n", apicGetDeliveryModeName(enmDeliveryMode), pVCpu->idCpu));
996 return VINF_SUCCESS;
997 }
998#else
999# error "Implement Pentium and P6 family APIC architectures"
1000#endif
1001
1002 /*
1003 * The destination and delivery modes are ignored/by-passed when a destination shorthand is specified.
1004 * See Intel spec. 10.6.2.3 "Broadcast/Self Delivery Mode".
1005 */
1006 VMCPUSET DestCpuSet;
1007 switch (enmDestShorthand)
1008 {
1009 case XAPICDESTSHORTHAND_NONE:
1010 {
1011 PVM pVM = pVCpu->CTX_SUFF(pVM);
1012 uint32_t const fBroadcastMask = XAPIC_IN_X2APIC_MODE(pVCpu) ? X2APIC_ID_BROADCAST_MASK : XAPIC_ID_BROADCAST_MASK;
1013 apicGetDestCpuSet(pVM, fDest, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
1014 break;
1015 }
1016
1017 case XAPICDESTSHORTHAND_SELF:
1018 {
1019 VMCPUSET_EMPTY(&DestCpuSet);
1020 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
1021 break;
1022 }
1023
1024 case XAPIDDESTSHORTHAND_ALL_INCL_SELF:
1025 {
1026 VMCPUSET_FILL(&DestCpuSet);
1027 break;
1028 }
1029
1030 case XAPICDESTSHORTHAND_ALL_EXCL_SELF:
1031 {
1032 VMCPUSET_FILL(&DestCpuSet);
1033 VMCPUSET_DEL(&DestCpuSet, pVCpu->idCpu);
1034 break;
1035 }
1036 }
1037
1038 return apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
1039 NULL /* pfIntrAccepted */, rcRZ);
1040}
1041
1042
1043/**
1044 * Sets the Interrupt Command Register (ICR) high dword.
1045 *
1046 * @returns Strict VBox status code.
1047 * @param pVCpu The cross context virtual CPU structure.
1048 * @param uIcrHi The ICR high dword.
1049 */
1050static VBOXSTRICTRC apicSetIcrHi(PVMCPU pVCpu, uint32_t uIcrHi)
1051{
1052 VMCPU_ASSERT_EMT(pVCpu);
1053 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1054
1055 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1056 pXApicPage->icr_hi.all.u32IcrHi = uIcrHi & XAPIC_ICR_HI_DEST;
1057 Log2(("APIC%u: apicSetIcrHi: uIcrHi=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_hi.all.u32IcrHi));
1058
1059 return VINF_SUCCESS;
1060}
1061
1062
1063/**
1064 * Sets the Interrupt Command Register (ICR) low dword.
1065 *
1066 * @returns Strict VBox status code.
1067 * @param pVCpu The cross context virtual CPU structure.
1068 * @param uIcrLo The ICR low dword.
1069 * @param rcRZ The return code if the operation cannot be performed
1070 * in the current context.
1071 */
1072static VBOXSTRICTRC apicSetIcrLo(PVMCPU pVCpu, uint32_t uIcrLo, int rcRZ)
1073{
1074 VMCPU_ASSERT_EMT(pVCpu);
1075
1076 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1077 pXApicPage->icr_lo.all.u32IcrLo = uIcrLo & XAPIC_ICR_LO_WR_VALID;
1078 Log2(("APIC%u: apicSetIcrLo: uIcrLo=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_lo.all.u32IcrLo));
1079 STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrLoWrite);
1080
1081 return apicSendIpi(pVCpu, rcRZ);
1082}
1083
1084
1085/**
1086 * Sets the Interrupt Command Register (ICR).
1087 *
1088 * @returns Strict VBox status code.
1089 * @param pVCpu The cross context virtual CPU structure.
1090 * @param u64Icr The ICR (High and Low combined).
1091 * @param rcRZ The return code if the operation cannot be performed
1092 * in the current context.
1093 */
1094static VBOXSTRICTRC apicSetIcr(PVMCPU pVCpu, uint64_t u64Icr, int rcRZ)
1095{
1096 VMCPU_ASSERT_EMT(pVCpu);
1097 Assert(XAPIC_IN_X2APIC_MODE(pVCpu));
1098
1099 /* Validate. */
1100 uint32_t const uLo = RT_LO_U32(u64Icr);
1101 if (RT_LIKELY(!(uLo & ~XAPIC_ICR_LO_WR_VALID)))
1102 {
1103 /* Update high dword first, then update the low dword which sends the IPI. */
1104 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
1105 pX2ApicPage->icr_hi.u32IcrHi = RT_HI_U32(u64Icr);
1106 return apicSetIcrLo(pVCpu, uLo, rcRZ);
1107 }
1108 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ICR, APICMSRACCESS_WRITE_RSVD_BITS);
1109}
1110
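/*
 * Sketch of an x2APIC ICR write handled above: u64Icr = 0x0000000300004031
 * stores 3 into icr_hi (destination x2APIC ID 3) and 0x4031 into icr_lo
 * (vector 0x31, fixed delivery, level asserted); the low-dword write is what
 * actually sends the IPI.
 */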
1111
1112/**
1113 * Sets the Error Status Register (ESR).
1114 *
1115 * @returns Strict VBox status code.
1116 * @param pVCpu The cross context virtual CPU structure.
1117 * @param uEsr The ESR value.
1118 */
1119static VBOXSTRICTRC apicSetEsr(PVMCPU pVCpu, uint32_t uEsr)
1120{
1121 VMCPU_ASSERT_EMT(pVCpu);
1122
1123 Log2(("APIC%u: apicSetEsr: uEsr=%#RX32\n", pVCpu->idCpu, uEsr));
1124
1125 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1126 && (uEsr & ~XAPIC_ESR_WO_VALID))
1127 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ESR, APICMSRACCESS_WRITE_RSVD_BITS);
1128
1129 /*
1130 * Writes to the ESR cause the internal state to be updated in the register,
1131 * clearing the original state. See AMD spec. 16.4.6 "APIC Error Interrupts".
1132 */
1133 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1134 pXApicPage->esr.all.u32Errors = apicClearAllErrors(pVCpu);
1135 return VINF_SUCCESS;
1136}
1137
1138
1139/**
1140 * Updates the Processor Priority Register (PPR).
1141 *
1142 * @param pVCpu The cross context virtual CPU structure.
1143 */
1144static void apicUpdatePpr(PVMCPU pVCpu)
1145{
1146 VMCPU_ASSERT_EMT(pVCpu);
1147
1148 /* See Intel spec 10.8.3.1 "Task and Processor Priorities". */
1149 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1150 uint8_t const uIsrv = apicGetHighestSetBitInReg(&pXApicPage->isr, 0 /* rcNotFound */);
1151 uint8_t uPpr;
1152 if (XAPIC_TPR_GET_TP(pXApicPage->tpr.u8Tpr) >= XAPIC_PPR_GET_PP(uIsrv))
1153 uPpr = pXApicPage->tpr.u8Tpr;
1154 else
1155 uPpr = XAPIC_PPR_GET_PP(uIsrv);
1156 pXApicPage->ppr.u8Ppr = uPpr;
1157}
1158
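/*
 * Worked PPR example, assuming XAPIC_TPR_GET_TP and XAPIC_PPR_GET_PP both
 * extract the high nibble: with TPR = 0x3A and highest in-service vector
 * 0x52, 0x30 < 0x50 so PPR becomes 0x50; with TPR = 0x6A instead,
 * 0x60 >= 0x50 so PPR = TPR = 0x6A.
 */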
1159
1160/**
1161 * Gets the Processor Priority Register (PPR).
1162 *
1163 * @returns The PPR value.
1164 * @param pVCpu The cross context virtual CPU structure.
1165 */
1166static uint8_t apicGetPpr(PVMCPU pVCpu)
1167{
1168 VMCPU_ASSERT_EMT(pVCpu);
1169 STAM_COUNTER_INC(&pVCpu->apic.s.StatTprRead);
1170
1171 /*
1172 * With virtualized APIC registers or with TPR virtualization, the hardware may
1173 * update ISR/TPR transparently. We thus re-calculate the PPR which may be out of sync.
1174 * See Intel spec. 29.2.2 "Virtual-Interrupt Delivery".
1175 *
1176 * In all other instances, whenever the TPR or ISR changes, we need to update the PPR
1177 * as well (e.g. like we do manually in apicR3InitIpi and by calling apicUpdatePpr).
1178 */
1179 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1180 if (pApic->fVirtApicRegsEnabled) /** @todo re-think this */
1181 apicUpdatePpr(pVCpu);
1182 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
1183 return pXApicPage->ppr.u8Ppr;
1184}
1185
1186
1187/**
1188 * Sets the Task Priority Register (TPR).
1189 *
1190 * @returns Strict VBox status code.
1191 * @param pVCpu The cross context virtual CPU structure.
1192 * @param uTpr The TPR value.
1193 */
1194static VBOXSTRICTRC apicSetTpr(PVMCPU pVCpu, uint32_t uTpr)
1195{
1196 VMCPU_ASSERT_EMT(pVCpu);
1197
1198 Log2(("APIC%u: apicSetTpr: uTpr=%#RX32\n", pVCpu->idCpu, uTpr));
1199 STAM_COUNTER_INC(&pVCpu->apic.s.StatTprWrite);
1200
1201 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1202 && (uTpr & ~XAPIC_TPR_VALID))
1203 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TPR, APICMSRACCESS_WRITE_RSVD_BITS);
1204
1205 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1206 pXApicPage->tpr.u8Tpr = uTpr;
1207 apicUpdatePpr(pVCpu);
1208 apicSignalNextPendingIntr(pVCpu);
1209 return VINF_SUCCESS;
1210}
1211
1212
1213/**
1214 * Sets the End-Of-Interrupt (EOI) register.
1215 *
1216 * @returns Strict VBox status code.
1217 * @param pVCpu The cross context virtual CPU structure.
1218 * @param uEoi The EOI value.
1219 */
1220static VBOXSTRICTRC apicSetEoi(PVMCPU pVCpu, uint32_t uEoi)
1221{
1222 VMCPU_ASSERT_EMT(pVCpu);
1223
1224 Log2(("APIC%u: apicSetEoi: uEoi=%#RX32\n", pVCpu->idCpu, uEoi));
1225 STAM_COUNTER_INC(&pVCpu->apic.s.StatEoiWrite);
1226
1227 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1228 && (uEoi & ~XAPIC_EOI_WO_VALID))
1229 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_EOI, APICMSRACCESS_WRITE_RSVD_BITS);
1230
1231 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1232 int isrv = apicGetHighestSetBitInReg(&pXApicPage->isr, -1 /* rcNotFound */);
1233 if (isrv >= 0)
1234 {
1235 /*
1236 * Broadcast the EOI to the I/O APIC(s).
1237 *
1238 * We'll handle the EOI broadcast first as there is a tiny chance we get rescheduled to
1239 * ring-3 due to contention on the I/O APIC lock. This way we don't mess with the rest
1240 * of the APIC state and simply restart the EOI write operation from ring-3.
1241 */
1242 Assert(isrv <= (int)UINT8_MAX);
1243 uint8_t const uVector = isrv;
1244 bool const fLevelTriggered = apicTestVectorInReg(&pXApicPage->tmr, uVector);
1245 if (fLevelTriggered)
1246 {
1247 int rc = apicBusBroadcastEoi(pVCpu, uVector);
1248 if (rc == VINF_SUCCESS)
1249 { /* likely */ }
1250 else
1251 return XAPIC_IN_X2APIC_MODE(pVCpu) ? VINF_CPUM_R3_MSR_WRITE : VINF_IOM_R3_MMIO_WRITE;
1252
1253 /*
1254 * Clear the vector from the TMR.
1255 *
1256 * The broadcast to I/O APIC can re-trigger new interrupts to arrive via the bus. However,
1257 * APICUpdatePendingInterrupts() which updates TMR can only be done from EMT which we
1258 * currently are on, so no possibility of concurrent updates.
1259 */
1260 apicClearVectorInReg(&pXApicPage->tmr, uVector);
1261
1262 /*
1263 * Clear the remote IRR bit for level-triggered, fixed mode LINT0 interrupt.
1264 * The LINT1 pin does not support level-triggered interrupts.
1265 * See Intel spec. 10.5.1 "Local Vector Table".
1266 */
1267 uint32_t const uLvtLint0 = pXApicPage->lvt_lint0.all.u32LvtLint0;
1268 if ( XAPIC_LVT_GET_REMOTE_IRR(uLvtLint0)
1269 && XAPIC_LVT_GET_VECTOR(uLvtLint0) == uVector
1270 && XAPIC_LVT_GET_DELIVERY_MODE(uLvtLint0) == XAPICDELIVERYMODE_FIXED)
1271 {
1272 ASMAtomicAndU32((volatile uint32_t *)&pXApicPage->lvt_lint0.all.u32LvtLint0, ~XAPIC_LVT_REMOTE_IRR);
1273 Log2(("APIC%u: apicSetEoi: Cleared remote-IRR for LINT0. uVector=%#x\n", pVCpu->idCpu, uVector));
1274 }
1275
1276 Log2(("APIC%u: apicSetEoi: Cleared level triggered interrupt from TMR. uVector=%#x\n", pVCpu->idCpu, uVector));
1277 }
1278
1279 /*
1280 * Mark interrupt as serviced, update the PPR and signal pending interrupts.
1281 */
1282 Log2(("APIC%u: apicSetEoi: Clearing interrupt from ISR. uVector=%#x\n", pVCpu->idCpu, uVector));
1283 apicClearVectorInReg(&pXApicPage->isr, uVector);
1284 apicUpdatePpr(pVCpu);
1285 apicSignalNextPendingIntr(pVCpu);
1286 }
1287 else
1288 {
1289#ifdef DEBUG_ramshankar
1290 /** @todo Figure out if this is done intentionally by guests or is a bug
1291 * in our emulation. Happened with Win10 SMP VM during reboot after
1292 * installation of guest additions with 3D support. */
1293 AssertMsgFailed(("APIC%u: apicSetEoi: Failed to find any ISR bit\n", pVCpu->idCpu));
1294#endif
1295 }
1296
1297 return VINF_SUCCESS;
1298}
1299
1300
1301/**
1302 * Sets the Logical Destination Register (LDR).
1303 *
1304 * @returns Strict VBox status code.
1305 * @param pVCpu The cross context virtual CPU structure.
1306 * @param uLdr The LDR value.
1307 *
1308 * @remarks LDR is read-only in x2APIC mode.
1309 */
1310static VBOXSTRICTRC apicSetLdr(PVMCPU pVCpu, uint32_t uLdr)
1311{
1312 VMCPU_ASSERT_EMT(pVCpu);
1313 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1314
1315 Log2(("APIC%u: apicSetLdr: uLdr=%#RX32\n", pVCpu->idCpu, uLdr));
1316
1317 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1318 apicWriteRaw32(pXApicPage, XAPIC_OFF_LDR, uLdr & XAPIC_LDR_VALID);
1319 return VINF_SUCCESS;
1320}
1321
1322
1323/**
1324 * Sets the Destination Format Register (DFR).
1325 *
1326 * @returns Strict VBox status code.
1327 * @param pVCpu The cross context virtual CPU structure.
1328 * @param uDfr The DFR value.
1329 *
1330 * @remarks DFR is not available in x2APIC mode.
1331 */
1332static VBOXSTRICTRC apicSetDfr(PVMCPU pVCpu, uint32_t uDfr)
1333{
1334 VMCPU_ASSERT_EMT(pVCpu);
1335 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1336
1337 uDfr &= XAPIC_DFR_VALID;
1338 uDfr |= XAPIC_DFR_RSVD_MB1;
1339
1340 Log2(("APIC%u: apicSetDfr: uDfr=%#RX32\n", pVCpu->idCpu, uDfr));
1341
1342 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1343 apicWriteRaw32(pXApicPage, XAPIC_OFF_DFR, uDfr);
1344 return VINF_SUCCESS;
1345}
1346
1347
1348/**
1349 * Sets the Timer Divide Configuration Register (DCR).
1350 *
1351 * @returns Strict VBox status code.
1352 * @param pVCpu The cross context virtual CPU structure.
1353 * @param uTimerDcr The timer DCR value.
1354 */
1355static VBOXSTRICTRC apicSetTimerDcr(PVMCPU pVCpu, uint32_t uTimerDcr)
1356{
1357 VMCPU_ASSERT_EMT(pVCpu);
1358 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1359 && (uTimerDcr & ~XAPIC_TIMER_DCR_VALID))
1360 return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TIMER_DCR, APICMSRACCESS_WRITE_RSVD_BITS);
1361
1362 Log2(("APIC%u: apicSetTimerDcr: uTimerDcr=%#RX32\n", pVCpu->idCpu, uTimerDcr));
1363
1364 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1365 apicWriteRaw32(pXApicPage, XAPIC_OFF_TIMER_DCR, uTimerDcr);
1366 return VINF_SUCCESS;
1367}
1368
1369
1370/**
1371 * Gets the timer's Current Count Register (CCR).
1372 *
1373 * @returns VBox status code.
1374 * @param pVCpu The cross context virtual CPU structure.
1375 * @param rcBusy The busy return code for the timer critical section.
1376 * @param puValue Where to store the LVT timer CCR.
1377 */
1378static VBOXSTRICTRC apicGetTimerCcr(PVMCPU pVCpu, int rcBusy, uint32_t *puValue)
1379{
1380 VMCPU_ASSERT_EMT(pVCpu);
1381 Assert(puValue);
1382
1383 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
1384 *puValue = 0;
1385
1386 /* In TSC-deadline mode, CCR returns 0, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
1387 if (pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
1388 return VINF_SUCCESS;
1389
1390 /* If the initial-count register is 0, CCR returns 0 as it cannot exceed the ICR. */
1391 uint32_t const uInitialCount = pXApicPage->timer_icr.u32InitialCount;
1392 if (!uInitialCount)
1393 return VINF_SUCCESS;
1394
1395 /*
1396 * Reading the virtual-sync clock requires locking its timer because it's not
1397 * a simple atomic operation, see tmVirtualSyncGetEx().
1398 *
1399 * We also need to lock before reading the timer CCR, see apicR3TimerCallback().
1400 */
1401 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
1402 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
1403
1404 int rc = TMTimerLock(pTimer, rcBusy);
1405 if (rc == VINF_SUCCESS)
1406 {
1407 /* If the current-count register is 0, it implies the timer expired. */
1408 uint32_t const uCurrentCount = pXApicPage->timer_ccr.u32CurrentCount;
1409 if (uCurrentCount)
1410 {
1411 uint64_t const cTicksElapsed = TMTimerGet(pApicCpu->CTX_SUFF(pTimer)) - pApicCpu->u64TimerInitial;
1412 TMTimerUnlock(pTimer);
1413 uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
1414 uint64_t const uDelta = cTicksElapsed >> uTimerShift;
1415 if (uInitialCount > uDelta)
1416 *puValue = uInitialCount - uDelta;
1417 }
1418 else
1419 TMTimerUnlock(pTimer);
1420 }
1421 return rc;
1422}
1423
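/*
 * The CCR arithmetic above in numbers: with an initial count of 100000, a
 * divide configuration giving uTimerShift = 3 (divide by 8) and 400000
 * virtual-sync ticks elapsed, uDelta = 400000 >> 3 = 50000 and the returned
 * current count is 100000 - 50000 = 50000.
 */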
1424
1425/**
1426 * Sets the timer's Initial-Count Register (ICR).
1427 *
1428 * @returns Strict VBox status code.
1429 * @param pVCpu The cross context virtual CPU structure.
1430 * @param rcBusy The busy return code for the timer critical section.
1431 * @param uInitialCount The timer ICR.
1432 */
1433static VBOXSTRICTRC apicSetTimerIcr(PVMCPU pVCpu, int rcBusy, uint32_t uInitialCount)
1434{
1435 VMCPU_ASSERT_EMT(pVCpu);
1436
1437 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1438 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
1439 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1440 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
1441
1442 Log2(("APIC%u: apicSetTimerIcr: uInitialCount=%#RX32\n", pVCpu->idCpu, uInitialCount));
1443 STAM_COUNTER_INC(&pApicCpu->StatTimerIcrWrite);
1444
1445 /* In TSC-deadline mode, timer ICR writes are ignored, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
1446 if ( pApic->fSupportsTscDeadline
1447 && pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
1448 return VINF_SUCCESS;
1449
1450 /*
1451 * The timer CCR may be modified by apicR3TimerCallback() in parallel,
1452 * so obtain the lock -before- updating it here to be consistent with the
1453 * timer ICR. We rely on CCR being consistent in apicGetTimerCcr().
1454 */
1455 int rc = TMTimerLock(pTimer, rcBusy);
1456 if (rc == VINF_SUCCESS)
1457 {
1458 pXApicPage->timer_icr.u32InitialCount = uInitialCount;
1459 pXApicPage->timer_ccr.u32CurrentCount = uInitialCount;
1460 if (uInitialCount)
1461 apicStartTimer(pVCpu, uInitialCount);
1462 else
1463 apicStopTimer(pVCpu);
1464 TMTimerUnlock(pTimer);
1465 }
1466 return rc;
1467}
1468
1469
1470/**
1471 * Sets an LVT entry.
1472 *
1473 * @returns Strict VBox status code.
1474 * @param pVCpu The cross context virtual CPU structure.
1475 * @param offLvt The LVT entry offset in the xAPIC page.
1476 * @param uLvt The LVT value to set.
1477 */
1478static VBOXSTRICTRC apicSetLvtEntry(PVMCPU pVCpu, uint16_t offLvt, uint32_t uLvt)
1479{
1480 VMCPU_ASSERT_EMT(pVCpu);
1481
1482#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1483 AssertMsg( offLvt == XAPIC_OFF_LVT_TIMER
1484 || offLvt == XAPIC_OFF_LVT_THERMAL
1485 || offLvt == XAPIC_OFF_LVT_PERF
1486 || offLvt == XAPIC_OFF_LVT_LINT0
1487 || offLvt == XAPIC_OFF_LVT_LINT1
1488 || offLvt == XAPIC_OFF_LVT_ERROR,
1489 ("APIC%u: apicSetLvtEntry: invalid offset, offLvt=%#RX16, uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));
1490
1491 /*
1492 * If TSC-deadline mode isn't supported, ignore the bit in xAPIC mode
1493 * and raise #GP(0) in x2APIC mode.
1494 */
1495 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1496 if (offLvt == XAPIC_OFF_LVT_TIMER)
1497 {
1498 if ( !pApic->fSupportsTscDeadline
1499 && (uLvt & XAPIC_LVT_TIMER_TSCDEADLINE))
1500 {
1501 if (XAPIC_IN_X2APIC_MODE(pVCpu))
1502 return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
1503 uLvt &= ~XAPIC_LVT_TIMER_TSCDEADLINE;
1504 /** @todo TSC-deadline timer mode transition */
1505 }
1506 }
1507
1508 /*
1509 * Validate rest of the LVT bits.
1510 */
1511 uint16_t const idxLvt = (offLvt - XAPIC_OFF_LVT_START) >> 4;
1512 AssertReturn(idxLvt < RT_ELEMENTS(g_au32LvtValidMasks), VERR_OUT_OF_RANGE);
1513
1514 /*
1515 * For x2APIC, disallow setting of invalid/reserved bits.
1516 * For xAPIC, mask out invalid/reserved bits (i.e. ignore them).
1517 */
1518 if ( XAPIC_IN_X2APIC_MODE(pVCpu)
1519 && (uLvt & ~g_au32LvtValidMasks[idxLvt]))
1520 return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
1521
1522 uLvt &= g_au32LvtValidMasks[idxLvt];
1523
1524 /*
1525 * In the software-disabled state, the LVT mask-bit must remain set and attempts to clear the mask
1526 * bit must be ignored. See Intel spec. 10.4.7.2 "Local APIC State After It Has Been Software Disabled".
1527 */
1528 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1529 if (!pXApicPage->svr.u.fApicSoftwareEnable)
1530 uLvt |= XAPIC_LVT_MASK;
1531
1532 /*
1533 * It is unclear whether we should signal a 'send illegal vector' error here and ignore updating
1534 * the LVT entry when the delivery mode is 'fixed'[1] or update it in addition to signaling the
1535 * error or not signal the error at all. For now, we'll allow setting illegal vectors into the LVT
1536 * but set the 'send illegal vector' error here. The 'receive illegal vector' error will be set if
1537 * the interrupt for the vector happens to be generated, see apicPostInterrupt().
1538 *
1539 * [1] See Intel spec. 10.5.2 "Valid Interrupt Vectors".
1540 */
1541 if (RT_UNLIKELY( XAPIC_LVT_GET_VECTOR(uLvt) <= XAPIC_ILLEGAL_VECTOR_END
1542 && XAPIC_LVT_GET_DELIVERY_MODE(uLvt) == XAPICDELIVERYMODE_FIXED))
1543 apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
1544
1545 Log2(("APIC%u: apicSetLvtEntry: offLvt=%#RX16 uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));
1546
1547 apicWriteRaw32(pXApicPage, offLvt, uLvt);
1548 return VINF_SUCCESS;
1549#else
1550# error "Implement Pentium and P6 family APIC architectures"
1551#endif /* XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4 */
1552}
1553
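/*
 * Note on the index computation above: the LVT registers sit at 16-byte
 * strides, so (offLvt - XAPIC_OFF_LVT_START) >> 4 maps straight into
 * g_au32LvtValidMasks. E.g. with the usual offsets (timer at 0x320, LINT0 at
 * 0x350), LINT0 yields (0x350 - 0x320) >> 4 = 3, its slot in the array.
 */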
1554
1555#if 0
1556/**
1557 * Sets an LVT entry in the extended LVT range.
1558 *
1559 * @returns VBox status code.
1560 * @param pVCpu The cross context virtual CPU structure.
1561 * @param offLvt The LVT entry offset in the xAPIC page.
1562 * @param uLvt The LVT value to set.
1563 */
1564static int apicSetLvtExtEntry(PVMCPU pVCpu, uint16_t offLvt, uint32_t uLvt)
1565{
1566 VMCPU_ASSERT_EMT(pVCpu);
1567 AssertMsg(offLvt == XAPIC_OFF_CMCI, ("APIC%u: apicSetLvtExtEntry: invalid offset %#RX16\n", pVCpu->idCpu, offLvt));
1568
1569 /** @todo support CMCI. */
1570 return VERR_NOT_IMPLEMENTED;
1571}
1572#endif
1573
1574
1575/**
1576 * Hints TM about the APIC timer frequency.
1577 *
1578 * @param pApicCpu The APIC CPU state.
1579 * @param uInitialCount The new initial count.
1580 * @param uTimerShift The new timer shift.
1581 * @thread Any.
1582 */
1583void apicHintTimerFreq(PAPICCPU pApicCpu, uint32_t uInitialCount, uint8_t uTimerShift)
1584{
1585 Assert(pApicCpu);
1586
1587 if ( pApicCpu->uHintedTimerInitialCount != uInitialCount
1588 || pApicCpu->uHintedTimerShift != uTimerShift)
1589 {
1590 uint32_t uHz;
1591 if (uInitialCount)
1592 {
1593 uint64_t cTicksPerPeriod = (uint64_t)uInitialCount << uTimerShift;
1594 uHz = TMTimerGetFreq(pApicCpu->CTX_SUFF(pTimer)) / cTicksPerPeriod;
1595 }
1596 else
1597 uHz = 0;
1598
1599 TMTimerSetFrequencyHint(pApicCpu->CTX_SUFF(pTimer), uHz);
1600 pApicCpu->uHintedTimerInitialCount = uInitialCount;
1601 pApicCpu->uHintedTimerShift = uTimerShift;
1602 }
1603}
1604
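/*
 * The frequency hint above in numbers: with a 1 GHz timer clock,
 * uInitialCount = 1000000 and uTimerShift = 3, one timer period is
 * 1000000 << 3 = 8000000 ticks, so the hinted frequency is
 * 1000000000 / 8000000 = 125 Hz.
 */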
1605
1606/**
1607 * Reads an APIC register.
1608 *
1609 * @returns VBox status code.
1610 * @param pApicDev The APIC device instance.
1611 * @param pVCpu The cross context virtual CPU structure.
1612 * @param offReg The offset of the register being read.
1613 * @param puValue Where to store the register value.
1614 */
1615DECLINLINE(VBOXSTRICTRC) apicReadRegister(PAPICDEV pApicDev, PVMCPU pVCpu, uint16_t offReg, uint32_t *puValue)
1616{
1617 VMCPU_ASSERT_EMT(pVCpu);
1618 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1619
1620 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1621 uint32_t uValue = 0;
1622 VBOXSTRICTRC rc = VINF_SUCCESS;
1623 switch (offReg)
1624 {
1625 case XAPIC_OFF_ID:
1626 case XAPIC_OFF_VERSION:
1627 case XAPIC_OFF_TPR:
1628 case XAPIC_OFF_EOI:
1629 case XAPIC_OFF_RRD:
1630 case XAPIC_OFF_LDR:
1631 case XAPIC_OFF_DFR:
1632 case XAPIC_OFF_SVR:
1633 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1634 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1635 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1636 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1637 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1638 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1639 case XAPIC_OFF_ESR:
1640 case XAPIC_OFF_ICR_LO:
1641 case XAPIC_OFF_ICR_HI:
1642 case XAPIC_OFF_LVT_TIMER:
1643#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1644 case XAPIC_OFF_LVT_THERMAL:
1645#endif
1646 case XAPIC_OFF_LVT_PERF:
1647 case XAPIC_OFF_LVT_LINT0:
1648 case XAPIC_OFF_LVT_LINT1:
1649 case XAPIC_OFF_LVT_ERROR:
1650 case XAPIC_OFF_TIMER_ICR:
1651 case XAPIC_OFF_TIMER_DCR:
1652 {
1653 Assert( !XAPIC_IN_X2APIC_MODE(pVCpu)
1654 || ( offReg != XAPIC_OFF_DFR
1655 && offReg != XAPIC_OFF_ICR_HI
1656 && offReg != XAPIC_OFF_EOI));
1657 uValue = apicReadRaw32(pXApicPage, offReg);
1658 Log2(("APIC%u: apicReadRegister: offReg=%#x uValue=%#x\n", pVCpu->idCpu, offReg, uValue));
1659 break;
1660 }
1661
1662 case XAPIC_OFF_PPR:
1663 {
1664 uValue = apicGetPpr(pVCpu);
1665 break;
1666 }
1667
1668 case XAPIC_OFF_TIMER_CCR:
1669 {
1670 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1671 rc = apicGetTimerCcr(pVCpu, VINF_IOM_R3_MMIO_READ, &uValue);
1672 break;
1673 }
1674
1675 case XAPIC_OFF_APR:
1676 {
1677#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1678 /* Unsupported on Pentium 4 and Xeon CPUs, invalid in x2APIC mode. */
1679 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1680#else
1681# error "Implement Pentium and P6 family APIC architectures"
1682#endif
1683 break;
1684 }
1685
1686 default:
1687 {
1688 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1689 rc = PDMDevHlpDBGFStop(pApicDev->CTX_SUFF(pDevIns), RT_SRC_POS, "VCPU[%u]: offReg=%#RX16\n", pVCpu->idCpu,
1690 offReg);
1691 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1692 break;
1693 }
1694 }
1695
1696 *puValue = uValue;
1697 return rc;
1698}
1699
1700
1701/**
1702 * Writes an APIC register.
1703 *
1704 * @returns Strict VBox status code.
1705 * @param pApicDev The APIC device instance.
1706 * @param pVCpu The cross context virtual CPU structure.
1707 * @param offReg The offset of the register being written.
1708 * @param uValue The register value.
1709 */
1710DECLINLINE(VBOXSTRICTRC) apicWriteRegister(PAPICDEV pApicDev, PVMCPU pVCpu, uint16_t offReg, uint32_t uValue)
1711{
1712 VMCPU_ASSERT_EMT(pVCpu);
1713 Assert(offReg <= XAPIC_OFF_MAX_VALID);
1714 Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
1715
1716 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1717 switch (offReg)
1718 {
1719 case XAPIC_OFF_TPR:
1720 {
1721 rcStrict = apicSetTpr(pVCpu, uValue);
1722 break;
1723 }
1724
1725 case XAPIC_OFF_LVT_TIMER:
1726#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1727 case XAPIC_OFF_LVT_THERMAL:
1728#endif
1729 case XAPIC_OFF_LVT_PERF:
1730 case XAPIC_OFF_LVT_LINT0:
1731 case XAPIC_OFF_LVT_LINT1:
1732 case XAPIC_OFF_LVT_ERROR:
1733 {
1734 rcStrict = apicSetLvtEntry(pVCpu, offReg, uValue);
1735 break;
1736 }
1737
1738 case XAPIC_OFF_TIMER_ICR:
1739 {
1740 rcStrict = apicSetTimerIcr(pVCpu, VINF_IOM_R3_MMIO_WRITE, uValue);
1741 break;
1742 }
1743
1744 case XAPIC_OFF_EOI:
1745 {
1746 rcStrict = apicSetEoi(pVCpu, uValue);
1747 break;
1748 }
1749
1750 case XAPIC_OFF_LDR:
1751 {
1752 rcStrict = apicSetLdr(pVCpu, uValue);
1753 break;
1754 }
1755
1756 case XAPIC_OFF_DFR:
1757 {
1758 rcStrict = apicSetDfr(pVCpu, uValue);
1759 break;
1760 }
1761
1762 case XAPIC_OFF_SVR:
1763 {
1764 rcStrict = apicSetSvr(pVCpu, uValue);
1765 break;
1766 }
1767
1768 case XAPIC_OFF_ICR_LO:
1769 {
1770 rcStrict = apicSetIcrLo(pVCpu, uValue, VINF_IOM_R3_MMIO_WRITE);
1771 break;
1772 }
1773
1774 case XAPIC_OFF_ICR_HI:
1775 {
1776 rcStrict = apicSetIcrHi(pVCpu, uValue);
1777 break;
1778 }
1779
1780 case XAPIC_OFF_TIMER_DCR:
1781 {
1782 rcStrict = apicSetTimerDcr(pVCpu, uValue);
1783 break;
1784 }
1785
1786 case XAPIC_OFF_ESR:
1787 {
1788 rcStrict = apicSetEsr(pVCpu, uValue);
1789 break;
1790 }
1791
1792 case XAPIC_OFF_APR:
1793 case XAPIC_OFF_RRD:
1794 {
1795#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
1796 /* Unsupported on Pentium 4 and Xeon CPUs but writes do -not- set an illegal register access error. */
1797#else
1798# error "Implement Pentium and P6 family APIC architectures"
1799#endif
1800 break;
1801 }
1802
1803 /* Read-only, write ignored: */
1804 case XAPIC_OFF_VERSION:
1805 case XAPIC_OFF_ID:
1806 break;
1807
1808 /* Unavailable/reserved in xAPIC mode: */
1809 case X2APIC_OFF_SELF_IPI:
1810 /* Read-only registers: */
1811 case XAPIC_OFF_PPR:
1812 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1813 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1814 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1815 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1816 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1817 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1818 case XAPIC_OFF_TIMER_CCR:
1819 default:
1820 {
1821 rcStrict = PDMDevHlpDBGFStop(pApicDev->CTX_SUFF(pDevIns), RT_SRC_POS, "APIC%u: offReg=%#RX16\n", pVCpu->idCpu,
1822 offReg);
1823 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1824 break;
1825 }
1826 }
1827
1828 return rcStrict;
1829}
1830
1831
1832/**
1833 * @interface_method_impl{PDMAPICREG,pfnReadMsrR3}
1834 */
1835APICBOTHCBDECL(VBOXSTRICTRC) apicReadMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
1836{
1837 /*
1838 * Validate.
1839 */
1840 VMCPU_ASSERT_EMT(pVCpu);
1841 Assert(u32Reg >= MSR_IA32_X2APIC_ID && u32Reg <= MSR_IA32_X2APIC_SELF_IPI);
1842 Assert(pu64Value);
1843 RT_NOREF_PV(pDevIns);
1844
1845#ifndef IN_RING3
1846 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1847 if (pApic->fRZEnabled)
1848 { /* likely */ }
1849 else
1850 return VINF_CPUM_R3_MSR_READ;
1851#endif
1852
1853 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMsrRead));
1854
1855 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1856 if (RT_LIKELY(XAPIC_IN_X2APIC_MODE(pVCpu)))
1857 {
1858 switch (u32Reg)
1859 {
1860 /* Special handling for x2APIC: */
1861 case MSR_IA32_X2APIC_ICR:
1862 {
1863 PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
1864 uint64_t const uHi = pX2ApicPage->icr_hi.u32IcrHi;
1865 uint64_t const uLo = pX2ApicPage->icr_lo.all.u32IcrLo;
1866 *pu64Value = RT_MAKE_U64(uLo, uHi);
1867 break;
1868 }
1869
1870 /* Special handling, compatible with xAPIC: */
1871 case MSR_IA32_X2APIC_TIMER_CCR:
1872 {
1873 uint32_t uValue;
1874 rcStrict = apicGetTimerCcr(pVCpu, VINF_CPUM_R3_MSR_READ, &uValue);
1875 *pu64Value = uValue;
1876 break;
1877 }
1878
1879 /* Special handling, compatible with xAPIC: */
1880 case MSR_IA32_X2APIC_PPR:
1881 {
1882 *pu64Value = apicGetPpr(pVCpu);
1883 break;
1884 }
1885
1886 /* Raw read, compatible with xAPIC: */
1887 case MSR_IA32_X2APIC_ID:
1888 case MSR_IA32_X2APIC_VERSION:
1889 case MSR_IA32_X2APIC_TPR:
1890 case MSR_IA32_X2APIC_LDR:
1891 case MSR_IA32_X2APIC_SVR:
1892 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
1893 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
1894 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
1895 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
1896 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
1897 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
1898 case MSR_IA32_X2APIC_ESR:
1899 case MSR_IA32_X2APIC_LVT_TIMER:
1900 case MSR_IA32_X2APIC_LVT_THERMAL:
1901 case MSR_IA32_X2APIC_LVT_PERF:
1902 case MSR_IA32_X2APIC_LVT_LINT0:
1903 case MSR_IA32_X2APIC_LVT_LINT1:
1904 case MSR_IA32_X2APIC_LVT_ERROR:
1905 case MSR_IA32_X2APIC_TIMER_ICR:
1906 case MSR_IA32_X2APIC_TIMER_DCR:
1907 {
1908 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1909 uint16_t const offReg = X2APIC_GET_XAPIC_OFF(u32Reg);
1910 *pu64Value = apicReadRaw32(pXApicPage, offReg);
1911 break;
1912 }
1913
1914 /* Write-only MSRs: */
1915 case MSR_IA32_X2APIC_SELF_IPI:
1916 case MSR_IA32_X2APIC_EOI:
1917 {
1918 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_WRITE_ONLY);
1919 break;
1920 }
1921
1922 /* Reserved MSRs: */
1923 case MSR_IA32_X2APIC_LVT_CMCI:
1924 default:
1925 {
1926 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1927 break;
1928 }
1929 }
1930 }
1931 else
1932 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_READ_MODE);
1933
1934 return rcStrict;
1935}
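
/* Editor's note (illustrative, not from the original source): the raw-read cases
 * above funnel x2APIC MSRs to their xAPIC counterparts via X2APIC_GET_XAPIC_OFF.
 * Assuming the conventional mapping of MSR 0x800+N to xAPIC offset N << 4,
 * MSR_IA32_X2APIC_TPR (0x808) would map to offset 0x80, i.e. XAPIC_OFF_TPR. */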
1936
1937
1938/**
1939 * @interface_method_impl{PDMAPICREG,pfnWriteMsrR3}
1940 */
1941APICBOTHCBDECL(VBOXSTRICTRC) apicWriteMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint32_t u32Reg, uint64_t u64Value)
1942{
1943 /*
1944 * Validate.
1945 */
1946 VMCPU_ASSERT_EMT(pVCpu);
1947 Assert(u32Reg >= MSR_IA32_X2APIC_ID && u32Reg <= MSR_IA32_X2APIC_SELF_IPI);
1948 RT_NOREF_PV(pDevIns);
1949
1950#ifndef IN_RING3
1951 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1952 if (pApic->fRZEnabled)
1953 { /* likely */ }
1954 else
1955 return VINF_CPUM_R3_MSR_WRITE;
1956#endif
1957
1958 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMsrWrite));
1959
1960 /*
1961 * In x2APIC mode, we need to raise #GP(0) for writes to reserved bits, unlike MMIO
1962 * accesses where they are ignored. Hence, we need to validate each register before
1963 * invoking the generic/xAPIC write functions.
1964 *
1965 * Bits 63:32 of all registers except the ICR are reserved; we'll handle this common
1966 * case first and validate the remaining bits on a per-register basis.
1967 * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
1968 */
1969 if ( u32Reg != MSR_IA32_X2APIC_ICR
1970 && RT_HI_U32(u64Value))
1971 return apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_BITS);
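    /* Illustrative example (editor's note, not from the original source): a guest
       WRMSR of 0x1'0000'0000 to MSR_IA32_X2APIC_TPR fails the check above (bits
       63:32 set) and raises #GP(0), whereas the same value written to
       MSR_IA32_X2APIC_ICR is legal since the ICR is a genuine 64-bit register. */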
1972
1973 uint32_t u32Value = RT_LO_U32(u64Value);
1974 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1975 if (RT_LIKELY(XAPIC_IN_X2APIC_MODE(pVCpu)))
1976 {
1977 switch (u32Reg)
1978 {
1979 case MSR_IA32_X2APIC_TPR:
1980 {
1981 rcStrict = apicSetTpr(pVCpu, u32Value);
1982 break;
1983 }
1984
1985 case MSR_IA32_X2APIC_ICR:
1986 {
1987 rcStrict = apicSetIcr(pVCpu, u64Value, VINF_CPUM_R3_MSR_WRITE);
1988 break;
1989 }
1990
1991 case MSR_IA32_X2APIC_SVR:
1992 {
1993 rcStrict = apicSetSvr(pVCpu, u32Value);
1994 break;
1995 }
1996
1997 case MSR_IA32_X2APIC_ESR:
1998 {
1999 rcStrict = apicSetEsr(pVCpu, u32Value);
2000 break;
2001 }
2002
2003 case MSR_IA32_X2APIC_TIMER_DCR:
2004 {
2005 rcStrict = apicSetTimerDcr(pVCpu, u32Value);
2006 break;
2007 }
2008
2009 case MSR_IA32_X2APIC_LVT_TIMER:
2010 case MSR_IA32_X2APIC_LVT_THERMAL:
2011 case MSR_IA32_X2APIC_LVT_PERF:
2012 case MSR_IA32_X2APIC_LVT_LINT0:
2013 case MSR_IA32_X2APIC_LVT_LINT1:
2014 case MSR_IA32_X2APIC_LVT_ERROR:
2015 {
2016 rcStrict = apicSetLvtEntry(pVCpu, X2APIC_GET_XAPIC_OFF(u32Reg), u32Value);
2017 break;
2018 }
2019
2020 case MSR_IA32_X2APIC_TIMER_ICR:
2021 {
2022 rcStrict = apicSetTimerIcr(pVCpu, VINF_CPUM_R3_MSR_WRITE, u32Value);
2023 break;
2024 }
2025
2026 /* Write-only MSRs: */
2027 case MSR_IA32_X2APIC_SELF_IPI:
2028 {
2029 uint8_t const uVector = XAPIC_SELF_IPI_GET_VECTOR(u32Value);
2030 apicPostInterrupt(pVCpu, uVector, XAPICTRIGGERMODE_EDGE);
2031 rcStrict = VINF_SUCCESS;
2032 break;
2033 }
2034
2035 case MSR_IA32_X2APIC_EOI:
2036 {
2037 rcStrict = apicSetEoi(pVCpu, u32Value);
2038 break;
2039 }
2040
2041 /* Read-only MSRs: */
2042 case MSR_IA32_X2APIC_ID:
2043 case MSR_IA32_X2APIC_VERSION:
2044 case MSR_IA32_X2APIC_PPR:
2045 case MSR_IA32_X2APIC_LDR:
2046 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
2047 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
2048 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
2049 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
2050 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
2051 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
2052 case MSR_IA32_X2APIC_TIMER_CCR:
2053 {
2054 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_READ_ONLY);
2055 break;
2056 }
2057
2058 /* Reserved MSRs: */
2059 case MSR_IA32_X2APIC_LVT_CMCI:
2060 default:
2061 {
2062 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2063 break;
2064 }
2065 }
2066 }
2067 else
2068 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_WRITE_MODE);
2069
2070 return rcStrict;
2071}
2072
2073
2074/**
2075 * @interface_method_impl{PDMAPICREG,pfnSetBaseMsrR3}
2076 */
2077APICBOTHCBDECL(VBOXSTRICTRC) apicSetBaseMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint64_t u64BaseMsr)
2078{
2079 Assert(pVCpu);
2080 NOREF(pDevIns);
2081
2082#ifdef IN_RING3
2083 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2084 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2085 APICMODE enmOldMode = apicGetMode(pApicCpu->uApicBaseMsr);
2086 APICMODE enmNewMode = apicGetMode(u64BaseMsr);
2087 uint64_t uBaseMsr = pApicCpu->uApicBaseMsr;
2088
2089 Log2(("APIC%u: apicSetBaseMsr: u64BaseMsr=%#RX64 enmNewMode=%s enmOldMode=%s\n", pVCpu->idCpu, u64BaseMsr,
2090 apicGetModeName(enmNewMode), apicGetModeName(enmOldMode)));
2091
2092 /*
2093 * We do not support re-mapping the APIC base address because:
2094 * - We'll have to manage all the mappings ourselves in the APIC (reference counting based unmapping etc.)
2095 * i.e. we can only unmap the MMIO region if no other APIC is mapped on that location.
2096 * - It's unclear how/if IOM can fall back to handling regions as regular memory (if the MMIO
2097 * region remains mapped but doesn't belong to the called VCPU's APIC).
2098 */
2099 /** @todo Handle per-VCPU APIC base relocation. */
2100 if (MSR_IA32_APICBASE_GET_ADDR(uBaseMsr) != MSR_IA32_APICBASE_ADDR)
2101 {
2102 LogRelMax(5, ("APIC%u: Attempt to relocate base to %#RGp, unsupported -> #GP(0)\n", pVCpu->idCpu,
2103 MSR_IA32_APICBASE_GET_ADDR(uBaseMsr)));
2104 return VERR_CPUM_RAISE_GP_0;
2105 }
2106
2107 /* Don't allow enabling xAPIC/x2APIC if the VM is configured with the APIC disabled. */
2108 if (pApic->enmMaxMode == PDMAPICMODE_NONE)
2109 {
2110 LogRel(("APIC%u: Disallowing APIC base MSR write as the VM is configured with APIC disabled!\n",
2111 pVCpu->idCpu));
2112 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_DISALLOWED_CONFIG);
2113 }
2114
2115 /*
2116 * Act on state transition.
2117 */
2118 if (enmNewMode != enmOldMode)
2119 {
2120 switch (enmNewMode)
2121 {
2122 case APICMODE_DISABLED:
2123 {
2124 /*
2125 * The APIC state needs to be reset (especially the APIC ID, as the x2APIC APIC ID bit layout
2126 * is different). We can start with a clean slate identical to the state after a power-up/reset.
2127 *
2128 * See Intel spec. 10.4.3 "Enabling or Disabling the Local APIC".
2129 *
2130 * We'll also manually manage the APIC base MSR here. We want a single point of commit
2131 * at the end of this function rather than updating it in apicR3ResetCpu. This means we also
2132 * need to update the CPUID leaf ourselves.
2133 */
2134 apicR3ResetCpu(pVCpu, false /* fResetApicBaseMsr */);
2135 uBaseMsr &= ~(MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD);
2136 CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, false /*fVisible*/);
2137 LogRel(("APIC%u: Switched mode to disabled\n", pVCpu->idCpu));
2138 break;
2139 }
2140
2141 case APICMODE_XAPIC:
2142 {
2143 if (enmOldMode != APICMODE_DISABLED)
2144 {
2145 LogRel(("APIC%u: Can only transition to xAPIC state from disabled state\n", pVCpu->idCpu));
2146 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2147 }
2148
2149 uBaseMsr |= MSR_IA32_APICBASE_EN;
2150 CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, true /*fVisible*/);
2151 LogRel(("APIC%u: Switched mode to xAPIC\n", pVCpu->idCpu));
2152 break;
2153 }
2154
2155 case APICMODE_X2APIC:
2156 {
2157 if (pApic->enmMaxMode != PDMAPICMODE_X2APIC)
2158 {
2159 LogRel(("APIC%u: Disallowing transition to x2APIC mode as the VM is configured with the x2APIC disabled!\n",
2160 pVCpu->idCpu));
2161 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2162 }
2163
2164 if (enmOldMode != APICMODE_XAPIC)
2165 {
2166 LogRel(("APIC%u: Can only transition to x2APIC state from xAPIC state\n", pVCpu->idCpu));
2167 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2168 }
2169
2170 uBaseMsr |= MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD;
2171
2172 /*
2173 * The APIC ID needs updating when entering x2APIC mode.
2174 * A software-written APIC ID in xAPIC mode isn't preserved.
2175 * The APIC ID becomes read-only to software in x2APIC mode.
2176 *
2177 * See Intel spec. 10.12.5.1 "x2APIC States".
2178 */
2179 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
2180 ASMMemZero32(&pX2ApicPage->id, sizeof(pX2ApicPage->id));
2181 pX2ApicPage->id.u32ApicId = pVCpu->idCpu;
2182
2183 /*
2184 * LDR initialization occurs when entering x2APIC mode.
2185 * See Intel spec. 10.12.10.2 "Deriving Logical x2APIC ID from the Local x2APIC ID".
2186 */
2187 pX2ApicPage->ldr.u32LogicalApicId = ((pX2ApicPage->id.u32ApicId & UINT32_C(0xffff0)) << 16)
2188 | (UINT32_C(1) << (pX2ApicPage->id.u32ApicId & UINT32_C(0xf)));
2189
2190 LogRel(("APIC%u: Switched mode to x2APIC\n", pVCpu->idCpu));
2191 break;
2192 }
2193
2194 case APICMODE_INVALID:
2195 default:
2196 {
2197 Log(("APIC%u: Invalid state transition attempted\n", pVCpu->idCpu));
2198 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2199 }
2200 }
2201 }
2202
2203 ASMAtomicWriteU64(&pApicCpu->uApicBaseMsr, uBaseMsr);
2204 return VINF_SUCCESS;
2205
2206#else /* !IN_RING3 */
2207 RT_NOREF_PV(pDevIns);
2208 RT_NOREF_PV(pVCpu);
2209 RT_NOREF_PV(u64BaseMsr);
2210 return VINF_CPUM_R3_MSR_WRITE;
2211#endif /* IN_RING3 */
2212}
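
/* Editor's sketch (illustrative only, not part of the original source): the x2APIC
 * logical ID derivation used above, factored out as a hypothetical helper with a
 * worked example. Per Intel spec. 10.12.10.2, the low 4 bits of the APIC ID select
 * a bit within a 16-CPU cluster and the remaining bits form the cluster ID. */
#if 0 /* illustrative only */
DECLINLINE(uint32_t) apicHypotheticalDeriveX2LogicalId(uint32_t uApicId)
{
    /* E.g. uApicId = 5: cluster field 0, logical bit 1 << 5, so the LDR is 0x00000020. */
    return ((uApicId & UINT32_C(0xffff0)) << 16)
         | (UINT32_C(1) << (uApicId & UINT32_C(0xf)));
}
#endif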
2213
2214
2215/**
2216 * @interface_method_impl{PDMAPICREG,pfnGetBaseMsrR3}
2217 */
2218APICBOTHCBDECL(uint64_t) apicGetBaseMsr(PPDMDEVINS pDevIns, PVMCPU pVCpu)
2219{
2220 RT_NOREF_PV(pDevIns);
2221 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2222
2223 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2224 return pApicCpu->uApicBaseMsr;
2225}
2226
2227
2228/**
2229 * @interface_method_impl{PDMAPICREG,pfnSetTprR3}
2230 */
2231APICBOTHCBDECL(void) apicSetTpr(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t u8Tpr)
2232{
2233 RT_NOREF_PV(pDevIns);
2234 apicSetTpr(pVCpu, u8Tpr);
2235}
2236
2237
2238/**
2239 * Gets the highest priority pending interrupt.
2240 *
2241 * @returns true if any interrupt is pending, false otherwise.
2242 * @param pVCpu The cross context virtual CPU structure.
2243 * @param pu8PendingIntr Where to store the interrupt vector if the
2244 * interrupt is pending (optional, can be NULL).
2245 */
2246static bool apicGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr)
2247{
2248 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2249 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1);
2250 if (irrv >= 0)
2251 {
2252 Assert(irrv <= (int)UINT8_MAX);
2253 if (pu8PendingIntr)
2254 *pu8PendingIntr = (uint8_t)irrv;
2255 return true;
2256 }
2257 return false;
2258}
2259
2260
2261/**
2262 * @interface_method_impl{PDMAPICREG,pfnGetTprR3}
2263 */
2264APICBOTHCBDECL(uint8_t) apicGetTpr(PPDMDEVINS pDevIns, PVMCPU pVCpu, bool *pfPending, uint8_t *pu8PendingIntr)
2265{
2266 RT_NOREF_PV(pDevIns);
2267 VMCPU_ASSERT_EMT(pVCpu);
2268 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2269
2270 if (pfPending)
2271 {
2272 /*
2273 * Just return whatever the highest pending interrupt is in the IRR.
2274 * The caller is responsible for figuring out if it's masked by the TPR etc.
2275 */
2276 *pfPending = apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
2277 }
2278
2279 return pXApicPage->tpr.u8Tpr;
2280}
2281
2282
2283/**
2284 * @interface_method_impl{PDMAPICREG,pfnGetTimerFreqR3}
2285 */
2286APICBOTHCBDECL(uint64_t) apicGetTimerFreq(PPDMDEVINS pDevIns)
2287{
2288 PVM pVM = PDMDevHlpGetVM(pDevIns);
2289 PVMCPU pVCpu = &pVM->aCpus[0];
2290 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2291 uint64_t uTimer = TMTimerGetFreq(pApicCpu->CTX_SUFF(pTimer));
2292 return uTimer;
2293}
2294
2295
2296/**
2297 * @interface_method_impl{PDMAPICREG,pfnBusDeliverR3}
2298 * @remarks This is a private interface between the IOAPIC and the APIC.
2299 */
2300APICBOTHCBDECL(int) apicBusDeliver(PPDMDEVINS pDevIns, uint8_t uDest, uint8_t uDestMode, uint8_t uDeliveryMode, uint8_t uVector,
2301 uint8_t uPolarity, uint8_t uTriggerMode, uint32_t uTagSrc)
2302{
2303 NOREF(uPolarity);
2304 NOREF(uTagSrc);
2305 PVM pVM = PDMDevHlpGetVM(pDevIns);
2306
2307 /*
2308 * The destination field (mask) in the IO APIC redirection table entry is 8 bits wide.
2309 * Hence, the broadcast mask is 0xff.
2310 * See IO APIC spec. 3.2.4. "IOREDTBL[23:0] - I/O Redirectable Table Registers".
2311 */
2312 XAPICTRIGGERMODE enmTriggerMode = (XAPICTRIGGERMODE)uTriggerMode;
2313 XAPICDELIVERYMODE enmDeliveryMode = (XAPICDELIVERYMODE)uDeliveryMode;
2314 XAPICDESTMODE enmDestMode = (XAPICDESTMODE)uDestMode;
2315 uint32_t fDestMask = uDest;
2316 uint32_t fBroadcastMask = UINT32_C(0xff);
2317
2318 Log2(("APIC: apicBusDeliver: fDestMask=%#x enmDestMode=%s enmTriggerMode=%s enmDeliveryMode=%s uVector=%#x\n", fDestMask,
2319 apicGetDestModeName(enmDestMode), apicGetTriggerModeName(enmTriggerMode), apicGetDeliveryModeName(enmDeliveryMode),
2320 uVector));
2321
2322 bool fIntrAccepted;
2323 VMCPUSET DestCpuSet;
2324 apicGetDestCpuSet(pVM, fDestMask, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
2325 VBOXSTRICTRC rcStrict = apicSendIntr(pVM, NULL /* pVCpu */, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2326 &fIntrAccepted, VINF_SUCCESS /* rcRZ */);
2327 if (fIntrAccepted)
2328 return VBOXSTRICTRC_VAL(rcStrict);
2329 return VERR_APIC_INTR_DISCARDED;
2330}
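
/* Editor's note (illustrative, not from the original source): with the 8-bit
 * destination field, an IOAPIC entry carrying uDest = 0xff matches the broadcast
 * mask above and the interrupt is delivered to every VCPU in DestCpuSet; a uDest
 * of e.g. 0x01 in physical destination mode would target only APIC ID 1. */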
2331
2332
2333/**
2334 * @interface_method_impl{PDMAPICREG,pfnLocalInterruptR3}
2335 * @remarks This is a private interface between the PIC and the APIC.
2336 */
2337APICBOTHCBDECL(VBOXSTRICTRC) apicLocalInterrupt(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t u8Pin, uint8_t u8Level, int rcRZ)
2338{
2339 NOREF(pDevIns);
2340 AssertReturn(u8Pin <= 1, VERR_INVALID_PARAMETER);
2341 AssertReturn(u8Level <= 1, VERR_INVALID_PARAMETER);
2342
2343 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2344
2345 /* If the APIC is enabled, the interrupt is subject to LVT programming. */
2346 if (apicIsEnabled(pVCpu))
2347 {
2348 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2349
2350 /* Pick the LVT entry corresponding to the interrupt pin. */
2351 static const uint16_t s_au16LvtOffsets[] =
2352 {
2353 XAPIC_OFF_LVT_LINT0,
2354 XAPIC_OFF_LVT_LINT1
2355 };
2356 Assert(u8Pin < RT_ELEMENTS(s_au16LvtOffsets));
2357 uint16_t const offLvt = s_au16LvtOffsets[u8Pin];
2358 uint32_t const uLvt = apicReadRaw32(pXApicPage, offLvt);
2359
2360 /* If software hasn't masked the interrupt in the LVT entry, proceed with interrupt processing. */
2361 if (!XAPIC_LVT_IS_MASKED(uLvt))
2362 {
2363 XAPICDELIVERYMODE const enmDeliveryMode = XAPIC_LVT_GET_DELIVERY_MODE(uLvt);
2364 XAPICTRIGGERMODE enmTriggerMode = XAPIC_LVT_GET_TRIGGER_MODE(uLvt);
2365
2366 switch (enmDeliveryMode)
2367 {
2368 case XAPICDELIVERYMODE_INIT:
2369 {
2370 /** @todo won't work in R0/RC because callers don't care about rcRZ. */
2371 AssertMsgFailed(("INIT through LINT0/LINT1 is not yet supported\n"));
2372 /* fallthru */
2373 }
2374 case XAPICDELIVERYMODE_FIXED:
2375 {
2376 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2377 uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
2378 bool fActive = RT_BOOL(u8Level & 1);
2379 bool volatile *pfActiveLine = u8Pin == 0 ? &pApicCpu->fActiveLint0 : &pApicCpu->fActiveLint1;
2380 /** @todo Polarity is busted elsewhere, we need to fix that
2381 * first. See @bugref{8386#c7}. */
2382#if 0
2383 uint8_t const u8Polarity = XAPIC_LVT_GET_POLARITY(uLvt);
2384 fActive ^= u8Polarity;
2385#endif
2386 if (!fActive)
2387 {
2388 ASMAtomicCmpXchgBool(pfActiveLine, false, true);
2389 break;
2390 }
2391
2392 /* Level-sensitive interrupts are not supported for LINT1. See Intel spec. 10.5.1 "Local Vector Table". */
2393 if (offLvt == XAPIC_OFF_LVT_LINT1)
2394 enmTriggerMode = XAPICTRIGGERMODE_EDGE;
2395 /** @todo figure out what "If the local APIC is not used in conjunction with an I/O APIC and fixed
2396 delivery mode is selected; the Pentium 4, Intel Xeon, and P6 family processors will always
2397 use level-sensitive triggering, regardless if edge-sensitive triggering is selected."
2398 means. */
2399
2400 bool fSendIntr;
2401 if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
2402 {
2403 /* Recognize and send the interrupt only on an edge transition. */
2404 fSendIntr = ASMAtomicCmpXchgBool(pfActiveLine, true, false);
2405 }
2406 else
2407 {
2408 /* For level-triggered interrupts, redundant interrupts are not a problem. */
2409 Assert(enmTriggerMode == XAPICTRIGGERMODE_LEVEL);
2410 ASMAtomicCmpXchgBool(pfActiveLine, true, false);
2411
2412 /* Only when the remote IRR isn't set, set it and send the interrupt. */
2413 if (!(pXApicPage->lvt_lint0.all.u32LvtLint0 & XAPIC_LVT_REMOTE_IRR))
2414 {
2415 Assert(offLvt == XAPIC_OFF_LVT_LINT0);
2416 ASMAtomicOrU32((volatile uint32_t *)&pXApicPage->lvt_lint0.all.u32LvtLint0, XAPIC_LVT_REMOTE_IRR);
2417 fSendIntr = true;
2418 }
2419 else
2420 fSendIntr = false;
2421 }
2422
2423 if (fSendIntr)
2424 {
2425 VMCPUSET DestCpuSet;
2426 VMCPUSET_EMPTY(&DestCpuSet);
2427 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
2428 rcStrict = apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode,
2429 &DestCpuSet, NULL /* pfIntrAccepted */, rcRZ);
2430 }
2431 break;
2432 }
2433
2434 case XAPICDELIVERYMODE_SMI:
2435 case XAPICDELIVERYMODE_NMI:
2436 {
2437 VMCPUSET DestCpuSet;
2438 VMCPUSET_EMPTY(&DestCpuSet);
2439 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
2440 uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
2441 rcStrict = apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2442 NULL /* pfIntrAccepted */, rcRZ);
2443 break;
2444 }
2445
2446 case XAPICDELIVERYMODE_EXTINT:
2447 {
2448 Log2(("APIC%u: apicLocalInterrupt: %s ExtINT through LINT%u\n", pVCpu->idCpu,
2449 u8Level ? "Raising" : "Lowering", u8Pin));
2450 if (u8Level)
2451 apicSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2452 else
2453 apicClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2454 break;
2455 }
2456
2457 /* Reserved/unknown delivery modes: */
2458 case XAPICDELIVERYMODE_LOWEST_PRIO:
2459 case XAPICDELIVERYMODE_STARTUP:
2460 default:
2461 {
2462 rcStrict = VERR_INTERNAL_ERROR_3;
2463 AssertMsgFailed(("APIC%u: LocalInterrupt: Invalid delivery mode %#x (%s) on LINT%d\n", pVCpu->idCpu,
2464 enmDeliveryMode, apicGetDeliveryModeName(enmDeliveryMode), u8Pin));
2465 break;
2466 }
2467 }
2468 }
2469 }
2470 else
2471 {
2472 /* The APIC is hardware disabled. The CPU behaves as though there is no on-chip APIC. */
2473 if (u8Pin == 0)
2474 {
2475 /* LINT0 behaves as an external interrupt pin. */
2476 Log2(("APIC%u: apicLocalInterrupt: APIC hardware-disabled, %s INTR\n", pVCpu->idCpu,
2477 u8Level ? "raising" : "lowering"));
2478 if (u8Level)
2479 apicSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2480 else
2481 apicClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2482 }
2483 else
2484 {
2485 /* LINT1 behaves as an NMI pin. */
2486 Log2(("APIC%u: apicLocalInterrupt: APIC hardware-disabled, raising NMI\n", pVCpu->idCpu));
2487 apicSetInterruptFF(pVCpu, PDMAPICIRQ_NMI);
2488 }
2489 }
2490
2491 return rcStrict;
2492}
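
/* Editor's note (illustrative, not from the original source): the CmpXchg on
 * pfActiveLine above is what turns repeated assertions into a single edge. E.g.
 * two successive u8Level=1 calls on an edge-configured LINT0 deliver one interrupt:
 * the first flips the line false -> true and sends, the second sees the line
 * already true and is dropped until a u8Level=0 call lowers it again. */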
2493
2494
2495/**
2496 * @interface_method_impl{PDMAPICREG,pfnGetInterruptR3}
2497 */
2498APICBOTHCBDECL(int) apicGetInterrupt(PPDMDEVINS pDevIns, PVMCPU pVCpu, uint8_t *pu8Vector, uint32_t *pu32TagSrc)
2499{
2500 RT_NOREF_PV(pDevIns);
2501 VMCPU_ASSERT_EMT(pVCpu);
2502 Assert(pu8Vector);
2503 NOREF(pu32TagSrc);
2504
2505 LogFlow(("APIC%u: apicGetInterrupt:\n", pVCpu->idCpu));
2506
2507 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2508 bool const fApicHwEnabled = apicIsEnabled(pVCpu);
2509 if ( fApicHwEnabled
2510 && pXApicPage->svr.u.fApicSoftwareEnable)
2511 {
2512 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1);
2513 if (RT_LIKELY(irrv >= 0))
2514 {
2515 Assert(irrv <= (int)UINT8_MAX);
2516 uint8_t const uVector = irrv;
2517
2518 /*
2519 * This can happen if the APIC receives an interrupt when the CPU has interrupts
2520 * disabled but the TPR is raised by the guest before re-enabling interrupts.
2521 */
2522 uint8_t const uTpr = pXApicPage->tpr.u8Tpr;
2523 if ( uTpr > 0
2524 && XAPIC_TPR_GET_TP(uVector) <= XAPIC_TPR_GET_TP(uTpr))
2525 {
2526 Log2(("APIC%u: apicGetInterrupt: Interrupt masked. uVector=%#x uTpr=%#x SpuriousVector=%#x\n", pVCpu->idCpu,
2527 uVector, uTpr, pXApicPage->svr.u.u8SpuriousVector));
2528 *pu8Vector = uVector;
2529 STAM_COUNTER_INC(&pVCpu->apic.s.StatMaskedByTpr);
2530 return VERR_APIC_INTR_MASKED_BY_TPR;
2531 }
2532
2533 /*
2534 * The PPR should be up-to-date at this point through apicSetEoi().
2535 * We're on EMT so no parallel updates are possible.
2536 * Subject the pending vector to PPR prioritization.
2537 */
2538 uint8_t const uPpr = pXApicPage->ppr.u8Ppr;
2539 if ( !uPpr
2540 || XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
2541 {
2542 apicClearVectorInReg(&pXApicPage->irr, uVector);
2543 apicSetVectorInReg(&pXApicPage->isr, uVector);
2544 apicUpdatePpr(pVCpu);
2545 apicSignalNextPendingIntr(pVCpu);
2546
2547 Log2(("APIC%u: apicGetInterrupt: Valid Interrupt. uVector=%#x\n", pVCpu->idCpu, uVector));
2548 *pu8Vector = uVector;
2549 return VINF_SUCCESS;
2550 }
2551 else
2552 {
2553 STAM_COUNTER_INC(&pVCpu->apic.s.StatMaskedByPpr);
2554 Log2(("APIC%u: apicGetInterrupt: Interrupt's priority is not higher than the PPR. uVector=%#x PPR=%#x\n",
2555 pVCpu->idCpu, uVector, uPpr));
2556 }
2557 }
2558 else
2559 Log2(("APIC%u: apicGetInterrupt: No pending bits in IRR\n", pVCpu->idCpu));
2560 }
2561 else
2562 Log2(("APIC%u: apicGetInterrupt: APIC %s disabled\n", pVCpu->idCpu, !fApicHwEnabled ? "hardware" : "software"));
2563
2564 return VERR_APIC_INTR_NOT_PENDING;
2565}
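
/* Editor's note (worked example, values assumed, not from the original source):
 * priorities compare by class, i.e. bits 7:4 of the vector. With TPR = 0x30, a
 * pending vector 0x35 (class 3 <= class 3) is masked by the TPR check above;
 * vector 0x41 (class 4) passes it, but is delivered only if its class exceeds
 * the PPR's class (e.g. PPR = 0x40 would hold it back). */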
2566
2567
2568/**
2569 * @callback_method_impl{FNIOMMMIOREAD}
2570 */
2571APICBOTHCBDECL(int) apicReadMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
2572{
2573 NOREF(pvUser);
2574 Assert(!(GCPhysAddr & 0xf));
2575 Assert(cb == 4); RT_NOREF_PV(cb);
2576
2577 PAPICDEV pApicDev = PDMINS_2_DATA(pDevIns, PAPICDEV);
2578 PVMCPU pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2579 uint16_t offReg = GCPhysAddr & 0xff0;
2580 uint32_t uValue = 0;
2581
2582 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMmioRead));
2583
2584 int rc = VBOXSTRICTRC_VAL(apicReadRegister(pApicDev, pVCpu, offReg, &uValue));
2585 *(uint32_t *)pv = uValue;
2586
2587 Log2(("APIC%u: apicReadMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2588 return rc;
2589}
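
/* Editor's note (illustrative, not from the original source): the register offset
 * is the 16-byte-aligned offset within the 4K APIC page. E.g. a 4-byte read at
 * GCPhysAddr 0xFEE00080 (the default base) yields offReg = 0x80, i.e. the TPR. */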
2590
2591
2592/**
2593 * @callback_method_impl{FNIOMMMIOWRITE}
2594 */
2595APICBOTHCBDECL(int) apicWriteMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
2596{
2597 NOREF(pvUser);
2598 Assert(!(GCPhysAddr & 0xf));
2599 Assert(cb == 4); RT_NOREF_PV(cb);
2600
2601 PAPICDEV pApicDev = PDMINS_2_DATA(pDevIns, PAPICDEV);
2602 PVMCPU pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2603 uint16_t offReg = GCPhysAddr & 0xff0;
2604 uint32_t uValue = *(uint32_t *)pv;
2605
2606 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMmioWrite));
2607
2608 Log2(("APIC%u: apicWriteMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2609
2610 int rc = VBOXSTRICTRC_VAL(apicWriteRegister(pApicDev, pVCpu, offReg, uValue));
2611 return rc;
2612}
2613
2614
2615/**
2616 * Sets the interrupt pending force-flag and pokes the EMT if required.
2617 *
2618 * @param pVCpu The cross context virtual CPU structure.
2619 * @param enmType The IRQ type.
2620 */
2621VMM_INT_DECL(void) apicSetInterruptFF(PVMCPU pVCpu, PDMAPICIRQ enmType)
2622{
2623 PVM pVM = pVCpu->CTX_SUFF(pVM);
2624 PAPICDEV pApicDev = VM_TO_APICDEV(pVM);
2625 pApicDev->CTX_SUFF(pApicHlp)->pfnSetInterruptFF(pApicDev->CTX_SUFF(pDevIns), enmType, pVCpu->idCpu);
2626}
2627
2628
2629/**
2630 * Clears the interrupt pending force-flag.
2631 *
2632 * @param pVCpu The cross context virtual CPU structure.
2633 * @param enmType The IRQ type.
2634 */
2635VMM_INT_DECL(void) apicClearInterruptFF(PVMCPU pVCpu, PDMAPICIRQ enmType)
2636{
2637 PVM pVM = pVCpu->CTX_SUFF(pVM);
2638 PAPICDEV pApicDev = VM_TO_APICDEV(pVM);
2639 pApicDev->CTX_SUFF(pApicHlp)->pfnClearInterruptFF(pApicDev->CTX_SUFF(pDevIns), enmType, pVCpu->idCpu);
2640}
2641
2642
2643/**
2644 * Posts an interrupt to a target APIC.
2645 *
2646 * This function handles interrupts received from the system bus or
2647 * interrupts generated locally from the LVT or via a self IPI.
2648 *
2649 * Don't use this function to deliver ExtINT-style interrupts.
2650 *
2651 * @returns true if the interrupt was accepted, false otherwise.
2652 * @param pVCpu The cross context virtual CPU structure.
2653 * @param uVector The vector of the interrupt to be posted.
2654 * @param enmTriggerMode The trigger mode of the interrupt.
2655 *
2656 * @thread Any.
2657 */
2658VMM_INT_DECL(bool) apicPostInterrupt(PVMCPU pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode)
2659{
2660 Assert(pVCpu);
2661 Assert(uVector > XAPIC_ILLEGAL_VECTOR_END);
2662
2663 PVM pVM = pVCpu->CTX_SUFF(pVM);
2664 PCAPIC pApic = VM_TO_APIC(pVM);
2665 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2666 bool fAccepted = true;
2667
2668 STAM_PROFILE_START(&pApicCpu->StatPostIntr, a);
2669
2670 /*
2671 * Only post valid interrupt vectors.
2672 * See Intel spec. 10.5.2 "Valid Interrupt Vectors".
2673 */
2674 if (RT_LIKELY(uVector > XAPIC_ILLEGAL_VECTOR_END))
2675 {
2676 /*
2677 * If the interrupt is already pending in the IRR we can skip the
2678 * potentially expensive operation of poking the guest EMT out of execution.
2679 */
2680 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2681 if (!apicTestVectorInReg(&pXApicPage->irr, uVector)) /* PAV */
2682 {
2683 Log2(("APIC: apicPostInterrupt: SrcCpu=%u TargetCpu=%u uVector=%#x\n", VMMGetCpuId(pVM), pVCpu->idCpu, uVector));
2684 if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
2685 {
2686 if (pApic->fPostedIntrsEnabled)
2687 { /** @todo posted-interrupt call to hardware */ }
2688 else
2689 {
2690 apicSetVectorInPib(pApicCpu->CTX_SUFF(pvApicPib), uVector);
2691 uint32_t const fAlreadySet = apicSetNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
2692 if (!fAlreadySet)
2693 {
2694 Log2(("APIC: apicPostInterrupt: Setting UPDATE_APIC FF for edge-triggered intr. uVector=%#x\n", uVector));
2695 apicSetInterruptFF(pVCpu, PDMAPICIRQ_UPDATE_PENDING);
2696 }
2697 }
2698 }
2699 else
2700 {
2701 /*
2702 * Level-triggered interrupts require updating of the TMR and thus cannot be
2703 * delivered asynchronously.
2704 */
2705 apicSetVectorInPib(&pApicCpu->ApicPibLevel, uVector);
2706 uint32_t const fAlreadySet = apicSetNotificationBitInPib(&pApicCpu->ApicPibLevel);
2707 if (!fAlreadySet)
2708 {
2709 Log2(("APIC: apicPostInterrupt: Setting UPDATE_APIC FF for level-triggered intr. uVector=%#x\n", uVector));
2710 apicSetInterruptFF(pVCpu, PDMAPICIRQ_UPDATE_PENDING);
2711 }
2712 }
2713 }
2714 else
2715 {
2716 Log2(("APIC: apicPostInterrupt: SrcCpu=%u TargetCpu=%u. Vector %#x Already in IRR, skipping\n", VMMGetCpuId(pVM),
2717 pVCpu->idCpu, uVector));
2718 STAM_COUNTER_INC(&pApicCpu->StatPostIntrAlreadyPending);
2719 }
2720 }
2721 else
2722 {
2723 fAccepted = false;
2724 apicSetError(pVCpu, XAPIC_ESR_RECV_ILLEGAL_VECTOR);
2725 }
2726
2727 STAM_PROFILE_STOP(&pApicCpu->StatPostIntr, a);
2728 return fAccepted;
2729}
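
/* Editor's usage sketch (hypothetical, not part of the original source): posting a
 * fixed, edge-triggered interrupt to a target VCPU; pVCpuDst and the vector value
 * are assumptions for illustration. */
#if 0 /* illustrative only */
    if (!apicPostInterrupt(pVCpuDst, 0x41 /* uVector */, XAPICTRIGGERMODE_EDGE))
        Log(("Vector rejected as illegal (<= XAPIC_ILLEGAL_VECTOR_END)\n"));
#endif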
2730
2731
2732/**
2733 * Starts the APIC timer.
2734 *
2735 * @param pVCpu The cross context virtual CPU structure.
2736 * @param uInitialCount The timer's Initial-Count Register (ICR);
2737 * must be > 0.
2738 * @thread Any.
2739 */
2740VMM_INT_DECL(void) apicStartTimer(PVMCPU pVCpu, uint32_t uInitialCount)
2741{
2742 Assert(pVCpu);
2743 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2744 Assert(TMTimerIsLockOwner(pApicCpu->CTX_SUFF(pTimer)));
2745 Assert(uInitialCount > 0);
2746
2747 PCXAPICPAGE pXApicPage = APICCPU_TO_CXAPICPAGE(pApicCpu);
2748 uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
2749 uint64_t const cTicksToNext = (uint64_t)uInitialCount << uTimerShift;
2750
2751 Log2(("APIC%u: apicStartTimer: uInitialCount=%#RX32 uTimerShift=%u cTicksToNext=%RU64\n", pVCpu->idCpu, uInitialCount,
2752 uTimerShift, cTicksToNext));
2753
2754 /*
2755 * The assumption here is that the timer doesn't tick during this call
2756 * and thus setting a relative time to fire next is accurate. The advantage,
2757 * however, is that u64TimerInitial is updated 'atomically' while setting the next
2758 * tick.
2759 */
2760 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
2761 TMTimerSetRelative(pTimer, cTicksToNext, &pApicCpu->u64TimerInitial);
2762 apicHintTimerFreq(pApicCpu, uInitialCount, uTimerShift);
2763}
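
/* Editor's note (worked example, values assumed, not from the original source):
 * with uInitialCount = 0x10000 and a divide-by-4 DCR (uTimerShift = 2), the timer
 * is armed cTicksToNext = 0x10000 << 2 = 0x40000 ticks from now, and the same
 * values feed the frequency hint via apicHintTimerFreq(). */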
2764
2765
2766/**
2767 * Stops the APIC timer.
2768 *
2769 * @param pVCpu The cross context virtual CPU structure.
2770 * @thread Any.
2771 */
2772VMM_INT_DECL(void) apicStopTimer(PVMCPU pVCpu)
2773{
2774 Assert(pVCpu);
2775 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2776 Assert(TMTimerIsLockOwner(pApicCpu->CTX_SUFF(pTimer)));
2777
2778 Log2(("APIC%u: apicStopTimer\n", pVCpu->idCpu));
2779
2780 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
2781 TMTimerStop(pTimer); /* This will reset the hint, no need to explicitly call TMTimerSetFrequencyHint(). */
2782 pApicCpu->uHintedTimerInitialCount = 0;
2783 pApicCpu->uHintedTimerShift = 0;
2784}
2785
2786
2787/**
2788 * Queues a pending interrupt as in-service.
2789 *
2790 * This function should only be needed without virtualized APIC
2791 * registers. With virtualized APIC registers, it's sufficient to keep
2792 * the interrupts pending in the IRR as the hardware takes care of
2793 * virtual interrupt delivery.
2794 *
2795 * @returns true if the interrupt was queued to in-service interrupts,
2796 * false otherwise.
2797 * @param pVCpu The cross context virtual CPU structure.
2798 * @param u8PendingIntr The pending interrupt to queue as
2799 * in-service.
2800 *
2801 * @remarks This assumes the caller has done the necessary checks and
2802 * is ready to actually service the interrupt (TPR,
2803 * interrupt shadow, etc.).
2804 */
2805VMMDECL(bool) APICQueueInterruptToService(PVMCPU pVCpu, uint8_t u8PendingIntr)
2806{
2807 VMCPU_ASSERT_EMT(pVCpu);
2808
2809 PVM pVM = pVCpu->CTX_SUFF(pVM);
2810 PAPIC pApic = VM_TO_APIC(pVM);
2811 Assert(!pApic->fVirtApicRegsEnabled);
2812 NOREF(pApic);
2813
2814 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2815 bool const fIsPending = apicTestVectorInReg(&pXApicPage->irr, u8PendingIntr);
2816 if (fIsPending)
2817 {
2818 apicClearVectorInReg(&pXApicPage->irr, u8PendingIntr);
2819 apicSetVectorInReg(&pXApicPage->isr, u8PendingIntr);
2820 apicUpdatePpr(pVCpu);
2821 return true;
2822 }
2823 return false;
2824}
2825
2826
2827/**
2828 * De-queues a pending interrupt from in-service.
2829 *
2830 * This undoes APICQueueInterruptToService() for premature VM-exits before event
2831 * injection.
2832 *
2833 * @param pVCpu The cross context virtual CPU structure.
2834 * @param u8PendingIntr The pending interrupt to de-queue from
2835 * in-service.
2836 */
2837VMMDECL(void) APICDequeueInterruptFromService(PVMCPU pVCpu, uint8_t u8PendingIntr)
2838{
2839 VMCPU_ASSERT_EMT(pVCpu);
2840
2841 PVM pVM = pVCpu->CTX_SUFF(pVM);
2842 PAPIC pApic = VM_TO_APIC(pVM);
2843 Assert(!pApic->fVirtApicRegsEnabled);
2844 NOREF(pApic);
2845
2846 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2847 bool const fInService = apicTestVectorInReg(&pXApicPage->isr, u8PendingIntr);
2848 if (fInService)
2849 {
2850 apicClearVectorInReg(&pXApicPage->isr, u8PendingIntr);
2851 apicSetVectorInReg(&pXApicPage->irr, u8PendingIntr);
2852 apicUpdatePpr(pVCpu);
2853 }
2854}
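
/* Editor's usage sketch (hypothetical, not part of the original source): the
 * queue/de-queue pair is meant to bracket event injection without virtualized
 * APIC registers; hypotheticalInjectEvent is an assumed placeholder. */
#if 0 /* illustrative only */
    if (APICQueueInterruptToService(pVCpu, uVector))         /* moves vector IRR -> ISR */
    {
        bool const fInjected = hypotheticalInjectEvent(pVCpu, uVector);
        if (!fInjected) /* premature VM-exit before the event reached the guest */
            APICDequeueInterruptFromService(pVCpu, uVector); /* undo: ISR -> IRR */
    }
#endif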
2855
2856
2857/**
2858 * Updates pending interrupts from the pending-interrupt bitmaps to the IRR.
2859 *
2860 * @param pVCpu The cross context virtual CPU structure.
2861 */
2862VMMDECL(void) APICUpdatePendingInterrupts(PVMCPU pVCpu)
2863{
2864 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2865
2866 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2867 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2868 bool fHasPendingIntrs = false;
2869
2870 Log3(("APIC%u: APICUpdatePendingInterrupts:\n", pVCpu->idCpu));
2871 STAM_PROFILE_START(&pApicCpu->StatUpdatePendingIntrs, a);
2872
2873 /* Update edge-triggered pending interrupts. */
2874 PAPICPIB pPib = (PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib);
2875 for (;;)
2876 {
2877 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
2878 if (!fAlreadySet)
2879 break;
2880
2881 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->aVectorBitmap));
2882 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->aVectorBitmap); idxPib++, idxReg += 2)
2883 {
2884 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->aVectorBitmap[idxPib], 0);
2885 if (u64Fragment)
2886 {
2887 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
2888 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
2889
2890 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
2891 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
2892
2893 pXApicPage->tmr.u[idxReg].u32Reg &= ~u32FragmentLo;
2894 pXApicPage->tmr.u[idxReg + 1].u32Reg &= ~u32FragmentHi;
2895 fHasPendingIntrs = true;
2896 }
2897 }
2898 }
2899
2900 /* Update level-triggered pending interrupts. */
2901 pPib = (PAPICPIB)&pApicCpu->ApicPibLevel;
2902 for (;;)
2903 {
2904 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)&pApicCpu->ApicPibLevel);
2905 if (!fAlreadySet)
2906 break;
2907
2908 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->aVectorBitmap));
2909 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->aVectorBitmap); idxPib++, idxReg += 2)
2910 {
2911 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->aVectorBitmap[idxPib], 0);
2912 if (u64Fragment)
2913 {
2914 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
2915 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
2916
2917 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
2918 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
2919
2920 pXApicPage->tmr.u[idxReg].u32Reg |= u32FragmentLo;
2921 pXApicPage->tmr.u[idxReg + 1].u32Reg |= u32FragmentHi;
2922 fHasPendingIntrs = true;
2923 }
2924 }
2925 }
2926
2927 STAM_PROFILE_STOP(&pApicCpu->StatUpdatePendingIntrs, a);
2928 Log3(("APIC%u: APICUpdatePendingInterrupts: fHasPendingIntrs=%RTbool\n", pVCpu->idCpu, fHasPendingIntrs));
2929
2930 if ( fHasPendingIntrs
2931 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC))
2932 apicSignalNextPendingIntr(pVCpu);
2933}
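
/* Editor's note (worked example, not from the original source): each 64-bit PIB
 * fragment spans two 32-bit IRR/TMR registers. E.g. vector 0x41 (65) lives in
 * aVectorBitmap[1] bit 1; when that fragment is drained above, its low half is
 * OR'ed into irr.u[2] and its high half into irr.u[3] (vectors 64..95 and
 * 96..127 respectively), with the TMR cleared (edge) or set (level) to match. */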
2934
2935
2936/**
2937 * Gets the highest priority pending interrupt.
2938 *
2939 * @returns true if any interrupt is pending, false otherwise.
2940 * @param pVCpu The cross context virtual CPU structure.
2941 * @param pu8PendingIntr Where to store the interrupt vector if the
2942 * interrupt is pending.
2943 */
2944VMMDECL(bool) APICGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr)
2945{
2946 VMCPU_ASSERT_EMT(pVCpu);
2947 return apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
2948}
2949