/* $Id: APICAll.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
/** @file
 * APIC - Advanced Programmable Interrupt Controller - All Contexts.
 */

/*
 * Copyright (C) 2016-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_DEV_APIC
#include "APICInternal.h"
#include <VBox/vmm/pdmdev.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/rem.h>
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vmcpuset.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
/** An ordered array of valid LVT masks. */
static const uint32_t g_au32LvtValidMasks[] =
{
    XAPIC_LVT_TIMER_VALID,
    XAPIC_LVT_THERMAL_VALID,
    XAPIC_LVT_PERF_VALID,
    XAPIC_LVT_LINT_VALID,   /* LINT0 */
    XAPIC_LVT_LINT_VALID,   /* LINT1 */
    XAPIC_LVT_ERROR_VALID
};
#endif

#if 0
/** @todo CMCI */
static const uint32_t g_au32LvtExtValidMask[] =
{
    XAPIC_LVT_CMCI_VALID
};
#endif


/**
 * Checks if a vector is set in an APIC 256-bit sparse register.
 *
 * @returns true if the specified vector is set, false otherwise.
 * @param   pApicReg    The APIC 256-bit sparse register.
 * @param   uVector     The vector to check if set.
 */
DECLINLINE(bool) apicTestVectorInReg(const volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
{
    const volatile uint8_t *pbBitmap = (const volatile uint8_t *)&pApicReg->u[0];
    return ASMBitTest(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
}


/**
 * Sets the vector in an APIC 256-bit sparse register.
 *
 * @param   pApicReg    The APIC 256-bit sparse register.
 * @param   uVector     The vector to set.
 */
DECLINLINE(void) apicSetVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
{
    volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
    ASMAtomicBitSet(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
}


/**
 * Clears the vector in an APIC 256-bit sparse register.
 *
 * @param   pApicReg    The APIC 256-bit sparse register.
 * @param   uVector     The vector to clear.
 */
DECLINLINE(void) apicClearVectorInReg(volatile XAPIC256BITREG *pApicReg, uint8_t uVector)
{
    volatile uint8_t *pbBitmap = (volatile uint8_t *)&pApicReg->u[0];
    ASMAtomicBitClear(pbBitmap + XAPIC_REG256_VECTOR_OFF(uVector), XAPIC_REG256_VECTOR_BIT(uVector));
}

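/*
 * A worked example of the vector-to-bit mapping used above, assuming the
 * standard xAPIC page layout where each 32-bit fragment of a 256-bit register
 * occupies its own 16-byte-aligned slot: XAPIC_REG256_VECTOR_OFF(v) is then
 * (v / 32) * 16 bytes and XAPIC_REG256_VECTOR_BIT(v) is v % 32. E.g. vector
 * 0xA3 (163) lands at byte offset 0x50 (fragment 5), bit 3.
 */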

#if 0 /* unused */
/**
 * Checks if a vector is set in an APIC Pending-Interrupt Bitmap (PIB).
 *
 * @returns true if the specified vector is set, false otherwise.
 * @param   pvPib       Opaque pointer to the PIB.
 * @param   uVector     The vector to check if set.
 */
DECLINLINE(bool) apicTestVectorInPib(volatile void *pvPib, uint8_t uVector)
{
    return ASMBitTest(pvPib, uVector);
}
#endif /* unused */


/**
 * Atomically sets the PIB notification bit.
 *
 * @returns non-zero if the bit was already set, 0 otherwise.
 * @param   pApicPib    Pointer to the PIB.
 */
DECLINLINE(uint32_t) apicSetNotificationBitInPib(PAPICPIB pApicPib)
{
    return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, RT_BIT_32(31));
}


/**
 * Atomically tests and clears the PIB notification bit.
 *
 * @returns non-zero if the bit was already set, 0 otherwise.
 * @param   pApicPib    Pointer to the PIB.
 */
DECLINLINE(uint32_t) apicClearNotificationBitInPib(PAPICPIB pApicPib)
{
    return ASMAtomicXchgU32(&pApicPib->fOutstandingNotification, UINT32_C(0));
}

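/*
 * Note on the exchange-based helpers above (an assumption about the intended
 * usage): returning the previous word in the same atomic step lets a poster
 * that sees zero know it is the one responsible for notifying the target
 * VCPU, while a non-zero return means a notification is already outstanding
 * and re-notifying can be skipped.
 */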

/**
 * Sets the vector in an APIC Pending-Interrupt Bitmap (PIB).
 *
 * @param   pvPib       Opaque pointer to the PIB.
 * @param   uVector     The vector to set.
 */
DECLINLINE(void) apicSetVectorInPib(volatile void *pvPib, uint8_t uVector)
{
    ASMAtomicBitSet(pvPib, uVector);
}

#if 0 /* unused */
/**
 * Clears the vector in an APIC Pending-Interrupt Bitmap (PIB).
 *
 * @param   pvPib       Opaque pointer to the PIB.
 * @param   uVector     The vector to clear.
 */
DECLINLINE(void) apicClearVectorInPib(volatile void *pvPib, uint8_t uVector)
{
    ASMAtomicBitClear(pvPib, uVector);
}
#endif /* unused */

#if 0 /* unused */
/**
 * Atomically OR's a fragment (32 vectors) into an APIC 256-bit sparse
 * register.
 *
 * @param   pApicReg        The APIC 256-bit sparse register.
 * @param   idxFragment     The index of the 32-bit fragment in @a pApicReg.
 * @param   u32Fragment     The 32-bit vector fragment to OR.
 */
DECLINLINE(void) apicOrVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
{
    Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
    ASMAtomicOrU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
}
#endif /* unused */


#if 0 /* unused */
/**
 * Atomically AND's a fragment (32 vectors) into an APIC 256-bit sparse
 * register.
 *
 * @param   pApicReg        The APIC 256-bit sparse register.
 * @param   idxFragment     The index of the 32-bit fragment in @a pApicReg.
 * @param   u32Fragment     The 32-bit vector fragment to AND.
 */
DECLINLINE(void) apicAndVectorsToReg(volatile XAPIC256BITREG *pApicReg, size_t idxFragment, uint32_t u32Fragment)
{
    Assert(idxFragment < RT_ELEMENTS(pApicReg->u));
    ASMAtomicAndU32(&pApicReg->u[idxFragment].u32Reg, u32Fragment);
}
#endif /* unused */

/**
 * Reports and returns appropriate error code for invalid MSR accesses.
 *
 * @returns VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   u32Reg      The MSR being accessed.
 * @param   enmAccess   The invalid-access type.
 */
static int apicMsrAccessError(PVMCPU pVCpu, uint32_t u32Reg, APICMSRACCESS enmAccess)
{
    static struct
    {
        const char *pszBefore;  /* The error message before printing the MSR index */
        const char *pszAfter;   /* The error message after printing the MSR index */
    } const s_aAccess[] =
    {
        /* enmAccess  pszBefore                        pszAfter */
        /* 0 */     { "read MSR",                      " while not in x2APIC mode" },
        /* 1 */     { "write MSR",                     " while not in x2APIC mode" },
        /* 2 */     { "read reserved/unknown MSR",     "" },
        /* 3 */     { "write reserved/unknown MSR",    "" },
        /* 4 */     { "read write-only MSR",           "" },
        /* 5 */     { "write read-only MSR",           "" },
        /* 6 */     { "read reserved bits of MSR",     "" },
        /* 7 */     { "write reserved bits of MSR",    "" },
        /* 8 */     { "write an invalid value to MSR", "" },
        /* 9 */     { "write MSR",                     " disallowed by configuration" },
        /* 10 */    { "read MSR",                      " disallowed by configuration" },
    };
    AssertCompile(RT_ELEMENTS(s_aAccess) == APICMSRACCESS_COUNT);

    size_t const i = enmAccess;
    Assert(i < RT_ELEMENTS(s_aAccess));
    if (pVCpu->apic.s.cLogMaxAccessError++ < 5)
        LogRel(("APIC%u: Attempt to %s (%#x)%s -> #GP(0)\n", pVCpu->idCpu, s_aAccess[i].pszBefore, u32Reg, s_aAccess[i].pszAfter));
    return VERR_CPUM_RAISE_GP_0;
}


/**
 * Gets the descriptive APIC mode.
 *
 * @returns The name.
 * @param   enmMode     The xAPIC mode.
 */
const char *apicGetModeName(APICMODE enmMode)
{
    switch (enmMode)
    {
        case APICMODE_DISABLED:  return "Disabled";
        case APICMODE_XAPIC:     return "xAPIC";
        case APICMODE_X2APIC:    return "x2APIC";
        default:                 break;
    }
    return "Invalid";
}


/**
 * Gets the descriptive destination format name.
 *
 * @returns The destination format name.
 * @param   enmDestFormat   The destination format.
 */
const char *apicGetDestFormatName(XAPICDESTFORMAT enmDestFormat)
{
    switch (enmDestFormat)
    {
        case XAPICDESTFORMAT_FLAT:     return "Flat";
        case XAPICDESTFORMAT_CLUSTER:  return "Cluster";
        default:                       break;
    }
    return "Invalid";
}


/**
 * Gets the descriptive delivery mode name.
 *
 * @returns The delivery mode name.
 * @param   enmDeliveryMode     The delivery mode.
 */
const char *apicGetDeliveryModeName(XAPICDELIVERYMODE enmDeliveryMode)
{
    switch (enmDeliveryMode)
    {
        case XAPICDELIVERYMODE_FIXED:        return "Fixed";
        case XAPICDELIVERYMODE_LOWEST_PRIO:  return "Lowest-priority";
        case XAPICDELIVERYMODE_SMI:          return "SMI";
        case XAPICDELIVERYMODE_NMI:          return "NMI";
        case XAPICDELIVERYMODE_INIT:         return "INIT";
        case XAPICDELIVERYMODE_STARTUP:      return "SIPI";
        case XAPICDELIVERYMODE_EXTINT:       return "ExtINT";
        default:                             break;
    }
    return "Invalid";
}


/**
 * Gets the descriptive destination mode name.
 *
 * @returns The destination mode name.
 * @param   enmDestMode     The destination mode.
 */
const char *apicGetDestModeName(XAPICDESTMODE enmDestMode)
{
    switch (enmDestMode)
    {
        case XAPICDESTMODE_PHYSICAL:  return "Physical";
        case XAPICDESTMODE_LOGICAL:   return "Logical";
        default:                      break;
    }
    return "Invalid";
}


/**
 * Gets the descriptive trigger mode name.
 *
 * @returns The trigger mode name.
 * @param   enmTriggerMode  The trigger mode.
 */
const char *apicGetTriggerModeName(XAPICTRIGGERMODE enmTriggerMode)
{
    switch (enmTriggerMode)
    {
        case XAPICTRIGGERMODE_EDGE:   return "Edge";
        case XAPICTRIGGERMODE_LEVEL:  return "Level";
        default:                      break;
    }
    return "Invalid";
}


/**
 * Gets the destination shorthand name.
 *
 * @returns The destination shorthand name.
 * @param   enmDestShorthand    The destination shorthand.
 */
const char *apicGetDestShorthandName(XAPICDESTSHORTHAND enmDestShorthand)
{
    switch (enmDestShorthand)
    {
        case XAPICDESTSHORTHAND_NONE:           return "None";
        case XAPICDESTSHORTHAND_SELF:           return "Self";
        case XAPIDDESTSHORTHAND_ALL_INCL_SELF:  return "All including self";
        case XAPICDESTSHORTHAND_ALL_EXCL_SELF:  return "All excluding self";
        default:                                break;
    }
    return "Invalid";
}


/**
 * Gets the timer mode name.
 *
 * @returns The timer mode name.
 * @param   enmTimerMode    The timer mode.
 */
const char *apicGetTimerModeName(XAPICTIMERMODE enmTimerMode)
{
    switch (enmTimerMode)
    {
        case XAPICTIMERMODE_ONESHOT:       return "One-shot";
        case XAPICTIMERMODE_PERIODIC:      return "Periodic";
        case XAPICTIMERMODE_TSC_DEADLINE:  return "TSC deadline";
        default:                           break;
    }
    return "Invalid";
}


/**
 * Gets the APIC mode given the base MSR value.
 *
 * @returns The APIC mode.
 * @param   uApicBaseMsr    The APIC Base MSR value.
 */
APICMODE apicGetMode(uint64_t uApicBaseMsr)
{
    uint32_t const uMode   = (uApicBaseMsr >> 10) & UINT64_C(3);
    APICMODE const enmMode = (APICMODE)uMode;
#ifdef VBOX_STRICT
    /* Paranoia. */
    switch (uMode)
    {
        case APICMODE_DISABLED:
        case APICMODE_INVALID:
        case APICMODE_XAPIC:
        case APICMODE_X2APIC:
            break;
        default:
            AssertMsgFailed(("Invalid mode"));
    }
#endif
    return enmMode;
}

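/*
 * Worked example, assuming the standard IA32_APIC_BASE layout (bit 11 = EN,
 * the global enable; bit 10 = EXTD, the x2APIC enable): for
 * uApicBaseMsr = 0xFEE00D00, (uApicBaseMsr >> 10) & 3 = 3, i.e. EN and EXTD
 * both set, which decodes to APICMODE_X2APIC. The EN=0/EXTD=1 combination
 * (value 1) decodes to APICMODE_INVALID and is for the caller to reject.
 */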

/**
 * Returns whether the APIC is hardware enabled or not.
 *
 * @returns true if enabled, false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(bool) APICIsEnabled(PVMCPU pVCpu)
{
    PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    return RT_BOOL(pApicCpu->uApicBaseMsr & MSR_IA32_APICBASE_EN);
}


/**
 * Finds the most significant set bit in an APIC 256-bit sparse register.
 *
 * @returns @a rcNotFound if no bit was set, 0-255 otherwise.
 * @param   pReg            The APIC 256-bit sparse register.
 * @param   rcNotFound      What to return when no bit is set.
 */
static int apicGetHighestSetBitInReg(volatile const XAPIC256BITREG *pReg, int rcNotFound)
{
    ssize_t const  cFragments     = RT_ELEMENTS(pReg->u);
    unsigned const uFragmentShift = 5;
    AssertCompile(1 << uFragmentShift == sizeof(pReg->u[0].u32Reg) * 8);
    for (ssize_t i = cFragments - 1; i >= 0; i--)
    {
        uint32_t const uFragment = pReg->u[i].u32Reg;
        if (uFragment)
        {
            unsigned idxSetBit = ASMBitLastSetU32(uFragment);
            --idxSetBit;
            idxSetBit |= i << uFragmentShift;
            return idxSetBit;
        }
    }
    return rcNotFound;
}

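/*
 * Worked example for apicGetHighestSetBitInReg: if pReg->u[5].u32Reg is
 * 0x00000008 and all higher fragments are zero, the loop stops at i = 5,
 * ASMBitLastSetU32 returns 4 (1-based), the decrement yields bit index 3,
 * and (5 << 5) | 3 = 163 = 0xA3 is the highest pending vector.
 */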

/**
 * Reads a 32-bit register at a specified offset.
 *
 * @returns The value at the specified offset.
 * @param   pXApicPage      The xAPIC page.
 * @param   offReg          The offset of the register being read.
 */
DECLINLINE(uint32_t) apicReadRaw32(PCXAPICPAGE pXApicPage, uint16_t offReg)
{
    Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
    uint8_t const *pbXApic =  (const uint8_t *)pXApicPage;
    uint32_t const uValue  = *(const uint32_t *)(pbXApic + offReg);
    return uValue;
}


/**
 * Writes a 32-bit register at a specified offset.
 *
 * @param   pXApicPage      The xAPIC page.
 * @param   offReg          The offset of the register being written.
 * @param   uReg            The value of the register.
 */
DECLINLINE(void) apicWriteRaw32(PXAPICPAGE pXApicPage, uint16_t offReg, uint32_t uReg)
{
    Assert(offReg < sizeof(*pXApicPage) - sizeof(uint32_t));
    uint8_t *pbXApic = (uint8_t *)pXApicPage;
    *(uint32_t *)(pbXApic + offReg) = uReg;
}


/**
 * Sets an error in the internal ESR of the specified APIC.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uError      The error.
 * @thread  Any.
 */
DECLINLINE(void) apicSetError(PVMCPU pVCpu, uint32_t uError)
{
    PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    ASMAtomicOrU32(&pApicCpu->uEsrInternal, uError);
}


/**
 * Clears all errors in the internal ESR.
 *
 * @returns The value of the internal ESR before clearing.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
DECLINLINE(uint32_t) apicClearAllErrors(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
    PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    return ASMAtomicXchgU32(&pApicCpu->uEsrInternal, 0);
}


/**
 * Signals the guest if a pending interrupt is ready to be serviced.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static void apicSignalNextPendingIntr(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);

    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    if (pXApicPage->svr.u.fApicSoftwareEnable)
    {
        int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1 /* rcNotFound */);
        if (irrv >= 0)
        {
            Assert(irrv <= (int)UINT8_MAX);
            uint8_t const uVector = irrv;
            uint8_t const uPpr    = pXApicPage->ppr.u8Ppr;
            if (   !uPpr
                ||  XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
            {
                Log2(("APIC%u: apicSignalNextPendingIntr: Signaling pending interrupt. uVector=%#x\n", pVCpu->idCpu, uVector));
                apicSetInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
            }
            else
            {
                Log2(("APIC%u: apicSignalNextPendingIntr: Nothing to signal. uVector=%#x uPpr=%#x uTpr=%#x\n", pVCpu->idCpu,
                      uVector, uPpr, pXApicPage->tpr.u8Tpr));
            }
        }
    }
    else
    {
        Log2(("APIC%u: apicSignalNextPendingIntr: APIC software-disabled, clearing pending interrupt\n", pVCpu->idCpu));
        apicClearInterruptFF(pVCpu, PDMAPICIRQ_HARDWARE);
    }
}

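/*
 * Worked example of the priority gate in apicSignalNextPendingIntr, assuming
 * XAPIC_PPR_GET_PP masks the priority class (high nibble): with PPR = 0x58
 * (class 5), a pending vector 0x61 (class 6) is signalled, while vector 0x55
 * (class 5) is not; the latter stays in the IRR until the PPR drops below
 * its class, e.g. after a TPR write or an EOI.
 */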

/**
 * Sets the Spurious-Interrupt Vector Register (SVR).
 *
 * @returns VINF_SUCCESS or VERR_CPUM_RAISE_GP_0.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uSvr    The SVR value.
 */
static int apicSetSvr(PVMCPU pVCpu, uint32_t uSvr)
{
    VMCPU_ASSERT_EMT(pVCpu);

    uint32_t   uValidMask = XAPIC_SVR_VALID;
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    if (pXApicPage->version.u.fEoiBroadcastSupression)
        uValidMask |= XAPIC_SVR_SUPRESS_EOI_BROADCAST;

    if (   XAPIC_IN_X2APIC_MODE(pVCpu)
        && (uSvr & ~uValidMask))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_SVR, APICMSRACCESS_WRITE_RSVD_BITS);

    Log2(("APIC%u: apicSetSvr: uSvr=%#RX32\n", pVCpu->idCpu, uSvr));
    apicWriteRaw32(pXApicPage, XAPIC_OFF_SVR, uSvr);
    if (!pXApicPage->svr.u.fApicSoftwareEnable)
    {
        /** @todo CMCI. */
        pXApicPage->lvt_timer.u.u1Mask = 1;
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
        pXApicPage->lvt_thermal.u.u1Mask = 1;
#endif
        pXApicPage->lvt_perf.u.u1Mask  = 1;
        pXApicPage->lvt_lint0.u.u1Mask = 1;
        pXApicPage->lvt_lint1.u.u1Mask = 1;
        pXApicPage->lvt_error.u.u1Mask = 1;
    }

    apicSignalNextPendingIntr(pVCpu);
    return VINF_SUCCESS;
}

/**
 * Sends an interrupt to one or more APICs.
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure, can be
 *                          NULL if the source of the interrupt is not an
 *                          APIC (for e.g. a bus).
 * @param   uVector         The interrupt vector.
 * @param   enmTriggerMode  The trigger mode.
 * @param   enmDeliveryMode The delivery mode.
 * @param   pDestCpuSet     The destination CPU set.
 * @param   pfIntrAccepted  Where to store whether this interrupt was
 *                          accepted by the target APIC(s) or not.
 *                          Optional, can be NULL.
 * @param   uSrcTag         The interrupt source tag (debugging).
 * @param   rcRZ            The return code if the operation cannot be
 *                          performed in the current context.
 */
static VBOXSTRICTRC apicSendIntr(PVM pVM, PVMCPU pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode,
                                 XAPICDELIVERYMODE enmDeliveryMode, PCVMCPUSET pDestCpuSet, bool *pfIntrAccepted,
                                 uint32_t uSrcTag, int rcRZ)
{
    VBOXSTRICTRC  rcStrict  = VINF_SUCCESS;
    VMCPUID const cCpus     = pVM->cCpus;
    bool          fAccepted = false;
    switch (enmDeliveryMode)
    {
        case XAPICDELIVERYMODE_FIXED:
        {
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
            {
                if (   VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
                    && APICIsEnabled(&pVM->aCpus[idCpu]))
                    fAccepted = apicPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode, uSrcTag);
            }
            break;
        }

        case XAPICDELIVERYMODE_LOWEST_PRIO:
        {
            VMCPUID const idCpu = VMCPUSET_FIND_FIRST_PRESENT(pDestCpuSet);
            if (   idCpu < pVM->cCpus
                && APICIsEnabled(&pVM->aCpus[idCpu]))
                fAccepted = apicPostInterrupt(&pVM->aCpus[idCpu], uVector, enmTriggerMode, uSrcTag);
            else
                AssertMsgFailed(("APIC: apicSendIntr: No CPU found for lowest-priority delivery mode! idCpu=%u\n", idCpu));
            break;
        }

        case XAPICDELIVERYMODE_SMI:
        {
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
            {
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Raising SMI on VCPU%u\n", idCpu));
                    apicSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_SMI);
                    fAccepted = true;
                }
            }
            break;
        }

        case XAPICDELIVERYMODE_NMI:
        {
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
            {
                if (   VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu)
                    && APICIsEnabled(&pVM->aCpus[idCpu]))
                {
                    Log2(("APIC: apicSendIntr: Raising NMI on VCPU%u\n", idCpu));
                    apicSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_NMI);
                    fAccepted = true;
                }
            }
            break;
        }

        case XAPICDELIVERYMODE_INIT:
        {
#ifdef IN_RING3
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Issuing INIT to VCPU%u\n", idCpu));
                    VMMR3SendInitIpi(pVM, idCpu);
                    fAccepted = true;
                }
#else
            /* We need to return to ring-3 to deliver the INIT. */
            rcStrict = rcRZ;
            fAccepted = true;
#endif
            break;
        }

        case XAPICDELIVERYMODE_STARTUP:
        {
#ifdef IN_RING3
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Issuing SIPI to VCPU%u\n", idCpu));
                    VMMR3SendStartupIpi(pVM, idCpu, uVector);
                    fAccepted = true;
                }
#else
            /* We need to return to ring-3 to deliver the SIPI. */
            rcStrict = rcRZ;
            fAccepted = true;
            Log2(("APIC: apicSendIntr: SIPI issued, returning to RZ. rc=%Rrc\n", rcRZ));
#endif
            break;
        }

        case XAPICDELIVERYMODE_EXTINT:
        {
            for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
                if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
                {
                    Log2(("APIC: apicSendIntr: Raising EXTINT on VCPU%u\n", idCpu));
                    apicSetInterruptFF(&pVM->aCpus[idCpu], PDMAPICIRQ_EXTINT);
                    fAccepted = true;
                }
            break;
        }

        default:
        {
            AssertMsgFailed(("APIC: apicSendIntr: Unsupported delivery mode %#x (%s)\n", enmDeliveryMode,
                             apicGetDeliveryModeName(enmDeliveryMode)));
            break;
        }
    }

    /*
     * If an illegal vector is programmed, set the 'send illegal vector' error here if the
     * interrupt is being sent by an APIC.
     *
     * The 'receive illegal vector' will be set on the target APIC when the interrupt
     * gets generated, see apicPostInterrupt().
     *
     * See Intel spec. 10.5.3 "Error Handling".
     */
    if (   rcStrict != rcRZ
        && pVCpu)
    {
        /*
         * Only flag errors when the delivery mode is fixed, not for the other modes.
         *
         * Ubuntu 10.04-3 amd64 live CD with 2 VCPUs gets upset as it sends an SIPI to the
         * 2nd VCPU with vector 6 and checks the ESR for no errors, see @bugref{8245#c86}.
         */
        /** @todo The spec says this for LVT, but not explicitly for ICR-lo;
         *        it probably holds there too. */
        if (enmDeliveryMode == XAPICDELIVERYMODE_FIXED)
        {
            if (RT_UNLIKELY(uVector <= XAPIC_ILLEGAL_VECTOR_END))
                apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);
        }
    }

    if (pfIntrAccepted)
        *pfIntrAccepted = fAccepted;

    return rcStrict;
}

/**
 * Checks if this APIC belongs to a logical destination.
 *
 * @returns true if the APIC belongs to the logical
 *          destination, false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   fDest       The destination mask.
 *
 * @thread  Any.
 */
static bool apicIsLogicalDest(PVMCPU pVCpu, uint32_t fDest)
{
    if (XAPIC_IN_X2APIC_MODE(pVCpu))
    {
        /*
         * Flat logical mode is not supported in x2APIC mode.
         * In clustered logical mode, the 32-bit logical ID in the LDR is interpreted as follows:
         *    - High 16 bits is the cluster ID.
         *    - Low 16 bits: each bit represents a unique APIC within the cluster.
         */
        PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
        uint32_t const u32Ldr = pX2ApicPage->ldr.u32LogicalApicId;
        if (X2APIC_LDR_GET_CLUSTER_ID(u32Ldr) == (fDest & X2APIC_LDR_CLUSTER_ID))
            return RT_BOOL(u32Ldr & fDest & X2APIC_LDR_LOGICAL_ID);
        return false;
    }

#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
    /*
     * In both flat and clustered logical mode, a destination mask of all set bits indicates a broadcast.
     * See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
     */
    Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
    if ((fDest & XAPIC_LDR_FLAT_LOGICAL_ID) == XAPIC_LDR_FLAT_LOGICAL_ID)
        return true;

    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    XAPICDESTFORMAT enmDestFormat = (XAPICDESTFORMAT)pXApicPage->dfr.u.u4Model;
    if (enmDestFormat == XAPICDESTFORMAT_FLAT)
    {
        /* The destination mask is interpreted as a bitmap of 8 unique logical APIC IDs. */
        uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
        return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_FLAT_LOGICAL_ID);
    }

    /*
     * In clustered logical mode, the 8-bit logical ID in the LDR is interpreted as follows:
     *    - High 4 bits is the cluster ID.
     *    - Low 4 bits: each bit represents a unique APIC within the cluster.
     */
    Assert(enmDestFormat == XAPICDESTFORMAT_CLUSTER);
    uint8_t const u8Ldr = pXApicPage->ldr.u.u8LogicalApicId;
    if (XAPIC_LDR_CLUSTERED_GET_CLUSTER_ID(u8Ldr) == (fDest & XAPIC_LDR_CLUSTERED_CLUSTER_ID))
        return RT_BOOL(u8Ldr & fDest & XAPIC_LDR_CLUSTERED_LOGICAL_ID);
    return false;
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif
}

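/*
 * Worked example for the clustered xAPIC case above, assuming the cluster ID
 * occupies the high nibble and the logical ID the low nibble of the 8-bit
 * LDR: a CPU with u8Ldr = 0x21 (cluster 2, member bit 0x1) matches
 * fDest = 0x23 (cluster 2, member mask 0x3) since the cluster fields agree
 * and 0x1 & 0x3 is non-zero; fDest = 0x32 fails on the cluster field, and
 * fDest = 0x22 fails because the member bits are disjoint.
 */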

/**
 * Figures out the set of destination CPUs for a given destination mode, format
 * and delivery mode setting.
 *
 * @param   pVM             The cross context VM structure.
 * @param   fDestMask       The destination mask.
 * @param   fBroadcastMask  The broadcast mask.
 * @param   enmDestMode     The destination mode.
 * @param   enmDeliveryMode The delivery mode.
 * @param   pDestCpuSet     The destination CPU set to update.
 */
static void apicGetDestCpuSet(PVM pVM, uint32_t fDestMask, uint32_t fBroadcastMask, XAPICDESTMODE enmDestMode,
                              XAPICDELIVERYMODE enmDeliveryMode, PVMCPUSET pDestCpuSet)
{
    VMCPUSET_EMPTY(pDestCpuSet);

    /*
     * Physical destination mode only supports either a broadcast or a single target.
     *    - Broadcast with lowest-priority delivery mode is not supported[1], we deliver it
     *      as a regular broadcast like in fixed delivery mode.
     *    - For a single target, lowest-priority delivery mode makes no sense. We deliver
     *      to the target like in fixed delivery mode.
     *
     * [1] See Intel spec. 10.6.2.1 "Physical Destination Mode".
     */
    if (   enmDestMode == XAPICDESTMODE_PHYSICAL
        && enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
    {
        AssertMsgFailed(("APIC: Lowest-priority delivery using physical destination mode!"));
        enmDeliveryMode = XAPICDELIVERYMODE_FIXED;
    }

    uint32_t const cCpus = pVM->cCpus;
    if (enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO)
    {
        Assert(enmDestMode == XAPICDESTMODE_LOGICAL);
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
        VMCPUID idCpuLowestTpr = NIL_VMCPUID;
        uint8_t u8LowestTpr    = UINT8_C(0xff);
        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        {
            PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
            if (apicIsLogicalDest(pVCpuDest, fDestMask))
            {
                PCXAPICPAGE   pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDest);
                uint8_t const u8Tpr      = pXApicPage->tpr.u8Tpr;   /* PAV */

                /*
                 * If there is a tie for lowest priority, the local APIC with the highest ID is chosen.
                 * Hence the use of "<=" in the check below.
                 * See AMD spec. 16.6.2 "Lowest Priority Messages and Arbitration".
                 */
                if (u8Tpr <= u8LowestTpr)
                {
                    u8LowestTpr    = u8Tpr;
                    idCpuLowestTpr = idCpu;
                }
            }
        }
        if (idCpuLowestTpr != NIL_VMCPUID)
            VMCPUSET_ADD(pDestCpuSet, idCpuLowestTpr);
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif
        return;
    }

    /*
     * x2APIC:
     *    - In both physical and logical destination mode, a destination mask of 0xffffffff implies a broadcast[1].
     * xAPIC:
     *    - In physical destination mode, a destination mask of 0xff implies a broadcast[2].
     *    - In both flat and clustered logical mode, a destination mask of 0xff implies a broadcast[3].
     *
     * [1] See Intel spec. 10.12.9 "ICR Operation in x2APIC Mode".
     * [2] See Intel spec. 10.6.2.1 "Physical Destination Mode".
     * [3] See AMD spec. 16.6.1 "Receiving System and IPI Interrupts".
     */
    if ((fDestMask & fBroadcastMask) == fBroadcastMask)
    {
        VMCPUSET_FILL(pDestCpuSet);
        return;
    }

    if (enmDestMode == XAPICDESTMODE_PHYSICAL)
    {
        /* The destination mask is interpreted as the physical APIC ID of a single target. */
#if 1
        /* Since our physical APIC ID is read-only to software, set the corresponding bit in the CPU set. */
        if (RT_LIKELY(fDestMask < cCpus))
            VMCPUSET_ADD(pDestCpuSet, fDestMask);
#else
        /* The physical APIC ID may not match our VCPU ID, search through the list of targets. */
        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        {
            PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
            if (XAPIC_IN_X2APIC_MODE(pVCpuDest))
            {
                PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpuDest);
                if (pX2ApicPage->id.u32ApicId == fDestMask)
                    VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
            }
            else
            {
                PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpuDest);
                if (pXApicPage->id.u8ApicId == (uint8_t)fDestMask)
                    VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
            }
        }
#endif
    }
    else
    {
        Assert(enmDestMode == XAPICDESTMODE_LOGICAL);

        /* A destination mask of all 0's implies no target APICs (since it's interpreted as a bitmap or partial bitmap). */
        if (RT_UNLIKELY(!fDestMask))
            return;

        /* The destination mask is interpreted as a bitmap of software-programmable logical APIC ID of the target APICs. */
        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        {
            PVMCPU pVCpuDest = &pVM->aCpus[idCpu];
            if (apicIsLogicalDest(pVCpuDest, fDestMask))
                VMCPUSET_ADD(pDestCpuSet, pVCpuDest->idCpu);
        }
    }
}

/**
 * Sends an Interprocessor Interrupt (IPI) using values from the Interrupt
 * Command Register (ICR).
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rcRZ    The return code if the operation cannot be
 *                  performed in the current context.
 */
DECLINLINE(VBOXSTRICTRC) apicSendIpi(PVMCPU pVCpu, int rcRZ)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    XAPICDELIVERYMODE const  enmDeliveryMode  = (XAPICDELIVERYMODE)pXApicPage->icr_lo.u.u3DeliveryMode;
    XAPICDESTMODE const      enmDestMode      = (XAPICDESTMODE)pXApicPage->icr_lo.u.u1DestMode;
    XAPICINITLEVEL const     enmInitLevel     = (XAPICINITLEVEL)pXApicPage->icr_lo.u.u1Level;
    XAPICTRIGGERMODE const   enmTriggerMode   = (XAPICTRIGGERMODE)pXApicPage->icr_lo.u.u1TriggerMode;
    XAPICDESTSHORTHAND const enmDestShorthand = (XAPICDESTSHORTHAND)pXApicPage->icr_lo.u.u2DestShorthand;
    uint8_t const            uVector          = pXApicPage->icr_lo.u.u8Vector;

    PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
    uint32_t const fDest = XAPIC_IN_X2APIC_MODE(pVCpu) ? pX2ApicPage->icr_hi.u32IcrHi : pXApicPage->icr_hi.u.u8Dest;

#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
    /*
     * INIT Level De-assert is not supported on Pentium 4 and Xeon processors.
     * Apparently, this also applies to NMI, SMI, lowest-priority and fixed delivery modes,
     * see @bugref{8245#c116}.
     *
     * See AMD spec. 16.5 "Interprocessor Interrupts (IPI)" for a table of valid ICR combinations.
     */
    if (   enmTriggerMode  == XAPICTRIGGERMODE_LEVEL
        && enmInitLevel    == XAPICINITLEVEL_DEASSERT
        && (   enmDeliveryMode == XAPICDELIVERYMODE_FIXED
            || enmDeliveryMode == XAPICDELIVERYMODE_LOWEST_PRIO
            || enmDeliveryMode == XAPICDELIVERYMODE_SMI
            || enmDeliveryMode == XAPICDELIVERYMODE_NMI
            || enmDeliveryMode == XAPICDELIVERYMODE_INIT))
    {
        Log2(("APIC%u: %s level de-assert unsupported, ignoring!\n", pVCpu->idCpu, apicGetDeliveryModeName(enmDeliveryMode)));
        return VINF_SUCCESS;
    }
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif

    /*
     * The destination and delivery modes are ignored/by-passed when a destination shorthand is specified.
     * See Intel spec. 10.6.2.3 "Broadcast/Self Delivery Mode".
     */
    VMCPUSET DestCpuSet;
    switch (enmDestShorthand)
    {
        case XAPICDESTSHORTHAND_NONE:
        {
            PVM pVM = pVCpu->CTX_SUFF(pVM);
            uint32_t const fBroadcastMask = XAPIC_IN_X2APIC_MODE(pVCpu) ? X2APIC_ID_BROADCAST_MASK : XAPIC_ID_BROADCAST_MASK;
            apicGetDestCpuSet(pVM, fDest, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
            break;
        }

        case XAPICDESTSHORTHAND_SELF:
        {
            VMCPUSET_EMPTY(&DestCpuSet);
            VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
            break;
        }

        case XAPIDDESTSHORTHAND_ALL_INCL_SELF:
        {
            VMCPUSET_FILL(&DestCpuSet);
            break;
        }

        case XAPICDESTSHORTHAND_ALL_EXCL_SELF:
        {
            VMCPUSET_FILL(&DestCpuSet);
            VMCPUSET_DEL(&DestCpuSet, pVCpu->idCpu);
            break;
        }
    }

    return apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
                        NULL /* pfIntrAccepted */, 0 /* uSrcTag */, rcRZ);
}

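/*
 * Reference sketch of the ICR fields consumed by apicSendIpi (standard
 * layout): ICR low dword carries the vector [7:0], delivery mode [10:8],
 * destination mode [11], level [14], trigger mode [15] and destination
 * shorthand [19:18]; the ICR high dword carries the destination field,
 * bits [31:24] in xAPIC mode or all 32 bits in x2APIC mode, which is why
 * fDest above reads u32IcrHi vs. u8Dest.
 */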

/**
 * Sets the Interrupt Command Register (ICR) high dword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uIcrHi  The ICR high dword.
 */
static VBOXSTRICTRC apicSetIcrHi(PVMCPU pVCpu, uint32_t uIcrHi)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    pXApicPage->icr_hi.all.u32IcrHi = uIcrHi & XAPIC_ICR_HI_DEST;
    STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrHiWrite);
    Log2(("APIC%u: apicSetIcrHi: uIcrHi=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_hi.all.u32IcrHi));

    return VINF_SUCCESS;
}


/**
 * Sets the Interrupt Command Register (ICR) low dword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uIcrLo      The ICR low dword.
 * @param   rcRZ        The return code if the operation cannot be performed
 *                      in the current context.
 * @param   fUpdateStat Whether to update the ICR low write statistics
 *                      counter.
 */
static VBOXSTRICTRC apicSetIcrLo(PVMCPU pVCpu, uint32_t uIcrLo, int rcRZ, bool fUpdateStat)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    pXApicPage->icr_lo.all.u32IcrLo = uIcrLo & XAPIC_ICR_LO_WR_VALID;
    Log2(("APIC%u: apicSetIcrLo: uIcrLo=%#RX32\n", pVCpu->idCpu, pXApicPage->icr_lo.all.u32IcrLo));

    if (fUpdateStat)
        STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrLoWrite);
    RT_NOREF(fUpdateStat);

    return apicSendIpi(pVCpu, rcRZ);
}


/**
 * Sets the Interrupt Command Register (ICR).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   u64Icr  The ICR (High and Low combined).
 * @param   rcRZ    The return code if the operation cannot be performed
 *                  in the current context.
 *
 * @remarks This function is used by both x2APIC interface and the Hyper-V
 *          interface, see APICHvSetIcr. The Hyper-V spec isn't clear what
 *          happens when invalid bits are set. For the time being, it will
 *          \#GP like a regular x2APIC access.
 */
static VBOXSTRICTRC apicSetIcr(PVMCPU pVCpu, uint64_t u64Icr, int rcRZ)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /* Validate. */
    uint32_t const uLo = RT_LO_U32(u64Icr);
    if (RT_LIKELY(!(uLo & ~XAPIC_ICR_LO_WR_VALID)))
    {
        /* Update high dword first, then update the low dword which sends the IPI. */
        PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
        pX2ApicPage->icr_hi.u32IcrHi = RT_HI_U32(u64Icr);
        STAM_COUNTER_INC(&pVCpu->apic.s.StatIcrFullWrite);
        return apicSetIcrLo(pVCpu, uLo, rcRZ, false /* fUpdateStat */);
    }
    return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ICR, APICMSRACCESS_WRITE_RSVD_BITS);
}


/**
 * Sets the Error Status Register (ESR).
 *
 * @returns VINF_SUCCESS or VERR_CPUM_RAISE_GP_0.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uEsr    The ESR value.
 */
static int apicSetEsr(PVMCPU pVCpu, uint32_t uEsr)
{
    VMCPU_ASSERT_EMT(pVCpu);

    Log2(("APIC%u: apicSetEsr: uEsr=%#RX32\n", pVCpu->idCpu, uEsr));

    if (   XAPIC_IN_X2APIC_MODE(pVCpu)
        && (uEsr & ~XAPIC_ESR_WO_VALID))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_ESR, APICMSRACCESS_WRITE_RSVD_BITS);

    /*
     * Writing to the ESR latches the internally accumulated error state into the
     * register and clears that internal state. See AMD spec. 16.4.6 "APIC Error Interrupts".
     */
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    pXApicPage->esr.all.u32Errors = apicClearAllErrors(pVCpu);
    return VINF_SUCCESS;
}


/**
 * Updates the Processor Priority Register (PPR).
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static void apicUpdatePpr(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /* See Intel spec 10.8.3.1 "Task and Processor Priorities". */
    PXAPICPAGE    pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    uint8_t const uIsrv      = apicGetHighestSetBitInReg(&pXApicPage->isr, 0 /* rcNotFound */);
    uint8_t       uPpr;
    if (XAPIC_TPR_GET_TP(pXApicPage->tpr.u8Tpr) >= XAPIC_PPR_GET_PP(uIsrv))
        uPpr = pXApicPage->tpr.u8Tpr;
    else
        uPpr = XAPIC_PPR_GET_PP(uIsrv);
    pXApicPage->ppr.u8Ppr = uPpr;
}

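/*
 * Worked example for apicUpdatePpr, assuming XAPIC_TPR_GET_TP and
 * XAPIC_PPR_GET_PP extract the priority class (high nibble): with TPR = 0x40
 * and highest in-service vector 0x65, class 4 < class 6, so the PPR becomes
 * 0x60 (the ISR class with a zero sub-class); with TPR = 0x72 instead,
 * class 7 >= class 6 and the PPR takes the full TPR value 0x72.
 */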

/**
 * Gets the Processor Priority Register (PPR).
 *
 * @returns The PPR value.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static uint8_t apicGetPpr(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
    STAM_COUNTER_INC(&pVCpu->apic.s.StatTprRead);

    /*
     * With virtualized APIC registers or with TPR virtualization, the hardware may
     * update ISR/TPR transparently. We thus re-calculate the PPR which may be out of sync.
     * See Intel spec. 29.2.2 "Virtual-Interrupt Delivery".
     *
     * In all other instances, whenever the TPR or ISR changes, we need to update the PPR
     * as well (e.g. like we do manually in apicR3InitIpi and by calling apicUpdatePpr).
     */
    PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    if (pApic->fVirtApicRegsEnabled)        /** @todo re-think this */
        apicUpdatePpr(pVCpu);
    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    return pXApicPage->ppr.u8Ppr;
}


/**
 * Sets the Task Priority Register (TPR).
 *
 * @returns VINF_SUCCESS or VERR_CPUM_RAISE_GP_0.
 * @param   pVCpu                   The cross context virtual CPU structure.
 * @param   uTpr                    The TPR value.
 * @param   fForceX2ApicBehaviour   Pretend the APIC is in x2APIC mode during
 *                                  this write.
 */
static int apicSetTprEx(PVMCPU pVCpu, uint32_t uTpr, bool fForceX2ApicBehaviour)
{
    VMCPU_ASSERT_EMT(pVCpu);

    Log2(("APIC%u: apicSetTprEx: uTpr=%#RX32\n", pVCpu->idCpu, uTpr));
    STAM_COUNTER_INC(&pVCpu->apic.s.StatTprWrite);

    bool const fX2ApicMode = XAPIC_IN_X2APIC_MODE(pVCpu) || fForceX2ApicBehaviour;
    if (   fX2ApicMode
        && (uTpr & ~XAPIC_TPR_VALID))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TPR, APICMSRACCESS_WRITE_RSVD_BITS);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    pXApicPage->tpr.u8Tpr = uTpr;
    apicUpdatePpr(pVCpu);
    apicSignalNextPendingIntr(pVCpu);
    return VINF_SUCCESS;
}


/**
 * Sets the End-Of-Interrupt (EOI) register.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu                   The cross context virtual CPU structure.
 * @param   uEoi                    The EOI value.
 * @param   rcBusy                  The busy return code when the write cannot
 *                                  be completed successfully in this context.
 * @param   fForceX2ApicBehaviour   Pretend the APIC is in x2APIC mode during
 *                                  this write.
 */
static VBOXSTRICTRC apicSetEoi(PVMCPU pVCpu, uint32_t uEoi, int rcBusy, bool fForceX2ApicBehaviour)
{
    VMCPU_ASSERT_EMT(pVCpu);

    Log2(("APIC%u: apicSetEoi: uEoi=%#RX32\n", pVCpu->idCpu, uEoi));
    STAM_COUNTER_INC(&pVCpu->apic.s.StatEoiWrite);

    bool const fX2ApicMode = XAPIC_IN_X2APIC_MODE(pVCpu) || fForceX2ApicBehaviour;
    if (   fX2ApicMode
        && (uEoi & ~XAPIC_EOI_WO_VALID))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_EOI, APICMSRACCESS_WRITE_RSVD_BITS);

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    int isrv = apicGetHighestSetBitInReg(&pXApicPage->isr, -1 /* rcNotFound */);
    if (isrv >= 0)
    {
        /*
         * Broadcast the EOI to the I/O APIC(s).
         *
         * We'll handle the EOI broadcast first as there is a tiny chance we get rescheduled to
         * ring-3 due to contention on the I/O APIC lock. This way we don't mess with the rest
         * of the APIC state and simply restart the EOI write operation from ring-3.
         */
        Assert(isrv <= (int)UINT8_MAX);
        uint8_t const uVector         = isrv;
        bool const    fLevelTriggered = apicTestVectorInReg(&pXApicPage->tmr, uVector);
        if (fLevelTriggered)
        {
            int rc = PDMIoApicBroadcastEoi(pVCpu->CTX_SUFF(pVM), uVector);
            if (rc == VINF_SUCCESS)
            { /* likely */ }
            else
                return rcBusy;

            /*
             * Clear the vector from the TMR.
             *
             * The broadcast to I/O APIC can re-trigger new interrupts to arrive via the bus. However,
             * APICUpdatePendingInterrupts() which updates TMR can only be done from EMT which we
             * currently are on, so no possibility of concurrent updates.
             */
            apicClearVectorInReg(&pXApicPage->tmr, uVector);

            /*
             * Clear the remote IRR bit for level-triggered, fixed mode LINT0 interrupt.
             * The LINT1 pin does not support level-triggered interrupts.
             * See Intel spec. 10.5.1 "Local Vector Table".
             */
            uint32_t const uLvtLint0 = pXApicPage->lvt_lint0.all.u32LvtLint0;
            if (   XAPIC_LVT_GET_REMOTE_IRR(uLvtLint0)
                && XAPIC_LVT_GET_VECTOR(uLvtLint0) == uVector
                && XAPIC_LVT_GET_DELIVERY_MODE(uLvtLint0) == XAPICDELIVERYMODE_FIXED)
            {
                ASMAtomicAndU32((volatile uint32_t *)&pXApicPage->lvt_lint0.all.u32LvtLint0, ~XAPIC_LVT_REMOTE_IRR);
                Log2(("APIC%u: apicSetEoi: Cleared remote-IRR for LINT0. uVector=%#x\n", pVCpu->idCpu, uVector));
            }

            Log2(("APIC%u: apicSetEoi: Cleared level triggered interrupt from TMR. uVector=%#x\n", pVCpu->idCpu, uVector));
        }

        /*
         * Mark interrupt as serviced, update the PPR and signal pending interrupts.
         */
        Log2(("APIC%u: apicSetEoi: Clearing interrupt from ISR. uVector=%#x\n", pVCpu->idCpu, uVector));
        apicClearVectorInReg(&pXApicPage->isr, uVector);
        apicUpdatePpr(pVCpu);
        apicSignalNextPendingIntr(pVCpu);
    }
    else
    {
#ifdef DEBUG_ramshankar
        /** @todo Figure out if this is done intentionally by guests or is a bug
         *        in our emulation. Happened with Win10 SMP VM during reboot after
         *        installation of guest additions with 3D support. */
        AssertMsgFailed(("APIC%u: apicSetEoi: Failed to find any ISR bit\n", pVCpu->idCpu));
#endif
    }

    return VINF_SUCCESS;
}


/**
 * Sets the Logical Destination Register (LDR).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uLdr    The LDR value.
 *
 * @remarks LDR is read-only in x2APIC mode.
 */
static VBOXSTRICTRC apicSetLdr(PVMCPU pVCpu, uint32_t uLdr)
{
    VMCPU_ASSERT_EMT(pVCpu);
    PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    Assert(!XAPIC_IN_X2APIC_MODE(pVCpu) || pApic->fHyperVCompatMode); RT_NOREF_PV(pApic);

    Log2(("APIC%u: apicSetLdr: uLdr=%#RX32\n", pVCpu->idCpu, uLdr));

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    apicWriteRaw32(pXApicPage, XAPIC_OFF_LDR, uLdr & XAPIC_LDR_VALID);
    return VINF_SUCCESS;
}


/**
 * Sets the Destination Format Register (DFR).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uDfr    The DFR value.
 *
 * @remarks DFR is not available in x2APIC mode.
 */
static VBOXSTRICTRC apicSetDfr(PVMCPU pVCpu, uint32_t uDfr)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));

    uDfr &= XAPIC_DFR_VALID;
    uDfr |= XAPIC_DFR_RSVD_MB1;

    Log2(("APIC%u: apicSetDfr: uDfr=%#RX32\n", pVCpu->idCpu, uDfr));

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    apicWriteRaw32(pXApicPage, XAPIC_OFF_DFR, uDfr);
    return VINF_SUCCESS;
}


/**
 * Sets the Timer Divide Configuration Register (DCR).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uTimerDcr   The timer DCR value.
 */
static VBOXSTRICTRC apicSetTimerDcr(PVMCPU pVCpu, uint32_t uTimerDcr)
{
    VMCPU_ASSERT_EMT(pVCpu);
    if (   XAPIC_IN_X2APIC_MODE(pVCpu)
        && (uTimerDcr & ~XAPIC_TIMER_DCR_VALID))
        return apicMsrAccessError(pVCpu, MSR_IA32_X2APIC_TIMER_DCR, APICMSRACCESS_WRITE_RSVD_BITS);

    Log2(("APIC%u: apicSetTimerDcr: uTimerDcr=%#RX32\n", pVCpu->idCpu, uTimerDcr));

    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    apicWriteRaw32(pXApicPage, XAPIC_OFF_TIMER_DCR, uTimerDcr);
    return VINF_SUCCESS;
}


/**
 * Gets the timer's Current Count Register (CCR).
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   rcBusy      The busy return code for the timer critical section.
 * @param   puValue     Where to store the LVT timer CCR.
 */
static VBOXSTRICTRC apicGetTimerCcr(PVMCPU pVCpu, int rcBusy, uint32_t *puValue)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(puValue);

    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    *puValue = 0;

    /* In TSC-deadline mode, CCR returns 0, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
    if (pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
        return VINF_SUCCESS;

    /* If the initial-count register is 0, CCR returns 0 as it cannot exceed the ICR. */
    uint32_t const uInitialCount = pXApicPage->timer_icr.u32InitialCount;
    if (!uInitialCount)
        return VINF_SUCCESS;

    /*
     * Reading the virtual-sync clock requires locking its timer because it's not
     * a simple atomic operation, see tmVirtualSyncGetEx().
     *
     * We also need to lock before reading the timer CCR, see apicR3TimerCallback().
     */
    PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    PTMTIMER  pTimer   = pApicCpu->CTX_SUFF(pTimer);

    int rc = TMTimerLock(pTimer, rcBusy);
    if (rc == VINF_SUCCESS)
    {
        /* If the current-count register is 0, it implies the timer expired. */
        uint32_t const uCurrentCount = pXApicPage->timer_ccr.u32CurrentCount;
        if (uCurrentCount)
        {
            uint64_t const cTicksElapsed = TMTimerGet(pApicCpu->CTX_SUFF(pTimer)) - pApicCpu->u64TimerInitial;
            TMTimerUnlock(pTimer);
            uint8_t const  uTimerShift = apicGetTimerShift(pXApicPage);
            uint64_t const uDelta      = cTicksElapsed >> uTimerShift;
            if (uInitialCount > uDelta)
                *puValue = uInitialCount - uDelta;
        }
        else
            TMTimerUnlock(pTimer);
    }
    return rc;
}

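/*
 * Worked example for the CCR derivation above: with uInitialCount = 100000,
 * uTimerShift = 3 (i.e. a divide-by-8 DCR) and cTicksElapsed = 240000
 * virtual-sync ticks since the timer was armed, uDelta = 240000 >> 3 = 30000
 * and the CCR reads 100000 - 30000 = 70000; once uDelta reaches the initial
 * count, the read returns 0.
 */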

/**
 * Sets the timer's Initial-Count Register (ICR).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   rcBusy          The busy return code for the timer critical section.
 * @param   uInitialCount   The timer ICR.
 */
static VBOXSTRICTRC apicSetTimerIcr(PVMCPU pVCpu, int rcBusy, uint32_t uInitialCount)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PAPIC      pApic      = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    PAPICCPU   pApicCpu   = VMCPU_TO_APICCPU(pVCpu);
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    PTMTIMER   pTimer     = pApicCpu->CTX_SUFF(pTimer);

    Log2(("APIC%u: apicSetTimerIcr: uInitialCount=%#RX32\n", pVCpu->idCpu, uInitialCount));
    STAM_COUNTER_INC(&pApicCpu->StatTimerIcrWrite);

    /* In TSC-deadline mode, timer ICR writes are ignored, see Intel spec. 10.5.4.1 "TSC-Deadline Mode". */
    if (   pApic->fSupportsTscDeadline
        && pXApicPage->lvt_timer.u.u2TimerMode == XAPIC_TIMER_MODE_TSC_DEADLINE)
        return VINF_SUCCESS;

    /*
     * The timer CCR may be modified by apicR3TimerCallback() in parallel,
     * so obtain the lock -before- updating it here to be consistent with the
     * timer ICR. We rely on CCR being consistent in apicGetTimerCcr().
     */
    int rc = TMTimerLock(pTimer, rcBusy);
    if (rc == VINF_SUCCESS)
    {
        pXApicPage->timer_icr.u32InitialCount = uInitialCount;
        pXApicPage->timer_ccr.u32CurrentCount = uInitialCount;
        if (uInitialCount)
            apicStartTimer(pVCpu, uInitialCount);
        else
            apicStopTimer(pVCpu);
        TMTimerUnlock(pTimer);
    }
    return rc;
}


/**
 * Sets an LVT entry.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   offLvt  The LVT entry offset in the xAPIC page.
 * @param   uLvt    The LVT value to set.
 */
static VBOXSTRICTRC apicSetLvtEntry(PVMCPU pVCpu, uint16_t offLvt, uint32_t uLvt)
{
    VMCPU_ASSERT_EMT(pVCpu);

#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
    AssertMsg(   offLvt == XAPIC_OFF_LVT_TIMER
              || offLvt == XAPIC_OFF_LVT_THERMAL
              || offLvt == XAPIC_OFF_LVT_PERF
              || offLvt == XAPIC_OFF_LVT_LINT0
              || offLvt == XAPIC_OFF_LVT_LINT1
              || offLvt == XAPIC_OFF_LVT_ERROR,
              ("APIC%u: apicSetLvtEntry: invalid offset, offLvt=%#RX16, uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));

    /*
     * If TSC-deadline mode isn't supported, ignore the bit in xAPIC mode
     * and raise #GP(0) in x2APIC mode.
     */
    PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
    if (offLvt == XAPIC_OFF_LVT_TIMER)
    {
        if (   !pApic->fSupportsTscDeadline
            && (uLvt & XAPIC_LVT_TIMER_TSCDEADLINE))
        {
            if (XAPIC_IN_X2APIC_MODE(pVCpu))
                return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);
            uLvt &= ~XAPIC_LVT_TIMER_TSCDEADLINE;
            /** @todo TSC-deadline timer mode transition */
        }
    }

    /*
     * Validate rest of the LVT bits.
     */
    uint16_t const idxLvt = (offLvt - XAPIC_OFF_LVT_START) >> 4;
    AssertReturn(idxLvt < RT_ELEMENTS(g_au32LvtValidMasks), VERR_OUT_OF_RANGE);

    /*
     * For x2APIC, disallow setting of invalid/reserved bits.
     * For xAPIC, mask out invalid/reserved bits (i.e. ignore them).
     */
    if (   XAPIC_IN_X2APIC_MODE(pVCpu)
        && (uLvt & ~g_au32LvtValidMasks[idxLvt]))
        return apicMsrAccessError(pVCpu, XAPIC_GET_X2APIC_MSR(offLvt), APICMSRACCESS_WRITE_RSVD_BITS);

    uLvt &= g_au32LvtValidMasks[idxLvt];

    /*
     * In the software-disabled state, the LVT mask-bit must remain set and attempts to clear the
     * mask bit must be ignored. See Intel spec. 10.4.7.2 "Local APIC State After It Has Been Software Disabled".
     */
    PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    if (!pXApicPage->svr.u.fApicSoftwareEnable)
        uLvt |= XAPIC_LVT_MASK;

    /*
     * It is unclear whether we should signal a 'send illegal vector' error here and ignore
     * updating the LVT entry when the delivery mode is 'fixed'[1], update the entry in
     * addition to signaling the error, or not signal the error at all. For now, we'll allow
     * setting illegal vectors into the LVT but set the 'send illegal vector' error here.
     * The 'receive illegal vector' error will be set if the interrupt for the vector
     * happens to be generated, see apicPostInterrupt().
     *
     * [1] See Intel spec. 10.5.2 "Valid Interrupt Vectors".
     */
    if (RT_UNLIKELY(   XAPIC_LVT_GET_VECTOR(uLvt) <= XAPIC_ILLEGAL_VECTOR_END
                    && XAPIC_LVT_GET_DELIVERY_MODE(uLvt) == XAPICDELIVERYMODE_FIXED))
        apicSetError(pVCpu, XAPIC_ESR_SEND_ILLEGAL_VECTOR);

    Log2(("APIC%u: apicSetLvtEntry: offLvt=%#RX16 uLvt=%#RX32\n", pVCpu->idCpu, offLvt, uLvt));

    apicWriteRaw32(pXApicPage, offLvt, uLvt);
    return VINF_SUCCESS;
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif /* XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4 */
}

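/*
 * Note on the LVT index math above: the LVT registers sit 16 bytes apart in
 * the xAPIC page, so (offLvt - XAPIC_OFF_LVT_START) >> 4 maps the
 * timer/thermal/perf/LINT0/LINT1/error entries to indices 0..5, the exact
 * order of g_au32LvtValidMasks at the top of this file.
 */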

#if 0
/**
 * Sets an LVT entry in the extended LVT range.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   offLvt  The LVT entry offset in the xAPIC page.
 * @param   uLvt    The LVT value to set.
 */
static int apicSetLvtExtEntry(PVMCPU pVCpu, uint16_t offLvt, uint32_t uLvt)
{
    VMCPU_ASSERT_EMT(pVCpu);
    AssertMsg(offLvt == XAPIC_OFF_CMCI, ("APIC%u: apicSetLvtExtEntry: invalid offset %#RX16\n", pVCpu->idCpu, offLvt));

    /** @todo support CMCI. */
    return VERR_NOT_IMPLEMENTED;
}
#endif


/**
 * Hints TM about the APIC timer frequency.
 *
 * @param   pApicCpu        The APIC CPU state.
 * @param   uInitialCount   The new initial count.
 * @param   uTimerShift     The new timer shift.
 * @thread  Any.
 */
void apicHintTimerFreq(PAPICCPU pApicCpu, uint32_t uInitialCount, uint8_t uTimerShift)
{
    Assert(pApicCpu);

    if (   pApicCpu->uHintedTimerInitialCount != uInitialCount
        || pApicCpu->uHintedTimerShift        != uTimerShift)
    {
        uint32_t uHz;
        if (uInitialCount)
        {
            uint64_t cTicksPerPeriod = (uint64_t)uInitialCount << uTimerShift;
            uHz = TMTimerGetFreq(pApicCpu->CTX_SUFF(pTimer)) / cTicksPerPeriod;
        }
        else
            uHz = 0;

        TMTimerSetFrequencyHint(pApicCpu->CTX_SUFF(pTimer), uHz);
        pApicCpu->uHintedTimerInitialCount = uInitialCount;
        pApicCpu->uHintedTimerShift        = uTimerShift;
    }
}

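/*
 * Worked example for the hint above: with uInitialCount = 100000 and
 * uTimerShift = 3 (divide-by-8), one timer period spans
 * 100000 << 3 = 800000 virtual-sync ticks; if TMTimerGetFreq reports 1 GHz,
 * the hinted rate is 1000000000 / 800000 = 1250 Hz.
 */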

/**
 * Gets the Interrupt Command Register (ICR), without performing any interface
 * checks.
 *
 * @returns The ICR value.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(uint64_t) apicGetIcrNoCheck(PVMCPU pVCpu)
{
    PCX2APICPAGE pX2ApicPage = VMCPU_TO_CX2APICPAGE(pVCpu);
    uint64_t const uHi  = pX2ApicPage->icr_hi.u32IcrHi;
    uint64_t const uLo  = pX2ApicPage->icr_lo.all.u32IcrLo;
    uint64_t const uIcr = RT_MAKE_U64(uLo, uHi);
    return uIcr;
}


/**
 * Reads an APIC register.
 *
 * @returns VBox status code.
 * @param   pApicDev    The APIC device instance.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offReg      The offset of the register being read.
 * @param   puValue     Where to store the register value.
 */
DECLINLINE(VBOXSTRICTRC) apicReadRegister(PAPICDEV pApicDev, PVMCPU pVCpu, uint16_t offReg, uint32_t *puValue)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(offReg <= XAPIC_OFF_MAX_VALID);

    PXAPICPAGE   pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
    uint32_t     uValue = 0;
    VBOXSTRICTRC rc = VINF_SUCCESS;
    switch (offReg)
    {
        case XAPIC_OFF_ID:
        case XAPIC_OFF_VERSION:
        case XAPIC_OFF_TPR:
        case XAPIC_OFF_EOI:
        case XAPIC_OFF_RRD:
        case XAPIC_OFF_LDR:
        case XAPIC_OFF_DFR:
        case XAPIC_OFF_SVR:
        case XAPIC_OFF_ISR0:    case XAPIC_OFF_ISR1:    case XAPIC_OFF_ISR2:    case XAPIC_OFF_ISR3:
        case XAPIC_OFF_ISR4:    case XAPIC_OFF_ISR5:    case XAPIC_OFF_ISR6:    case XAPIC_OFF_ISR7:
        case XAPIC_OFF_TMR0:    case XAPIC_OFF_TMR1:    case XAPIC_OFF_TMR2:    case XAPIC_OFF_TMR3:
        case XAPIC_OFF_TMR4:    case XAPIC_OFF_TMR5:    case XAPIC_OFF_TMR6:    case XAPIC_OFF_TMR7:
        case XAPIC_OFF_IRR0:    case XAPIC_OFF_IRR1:    case XAPIC_OFF_IRR2:    case XAPIC_OFF_IRR3:
        case XAPIC_OFF_IRR4:    case XAPIC_OFF_IRR5:    case XAPIC_OFF_IRR6:    case XAPIC_OFF_IRR7:
        case XAPIC_OFF_ESR:
        case XAPIC_OFF_ICR_LO:
        case XAPIC_OFF_ICR_HI:
        case XAPIC_OFF_LVT_TIMER:
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
        case XAPIC_OFF_LVT_THERMAL:
#endif
        case XAPIC_OFF_LVT_PERF:
        case XAPIC_OFF_LVT_LINT0:
        case XAPIC_OFF_LVT_LINT1:
        case XAPIC_OFF_LVT_ERROR:
        case XAPIC_OFF_TIMER_ICR:
        case XAPIC_OFF_TIMER_DCR:
        {
            Assert(   !XAPIC_IN_X2APIC_MODE(pVCpu)
                   || (   offReg != XAPIC_OFF_DFR
                       && offReg != XAPIC_OFF_ICR_HI
                       && offReg != XAPIC_OFF_EOI));
            uValue = apicReadRaw32(pXApicPage, offReg);
            Log2(("APIC%u: apicReadRegister: offReg=%#x uValue=%#x\n", pVCpu->idCpu, offReg, uValue));
            break;
        }

        case XAPIC_OFF_PPR:
        {
            uValue = apicGetPpr(pVCpu);
            break;
        }

        case XAPIC_OFF_TIMER_CCR:
        {
            Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
            rc = apicGetTimerCcr(pVCpu, VINF_IOM_R3_MMIO_READ, &uValue);
            break;
        }

        case XAPIC_OFF_APR:
        {
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
            /* Unsupported on Pentium 4 and Xeon CPUs, invalid in x2APIC mode. */
            Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif
            break;
        }

        default:
        {
            Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));
            rc = PDMDevHlpDBGFStop(pApicDev->CTX_SUFF(pDevIns), RT_SRC_POS, "VCPU[%u]: offReg=%#RX16\n", pVCpu->idCpu,
                                   offReg);
            apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
            break;
        }
    }

    *puValue = uValue;
    return rc;
}


/**
 * Writes an APIC register.
 *
 * @returns Strict VBox status code.
 * @param   pApicDev    The APIC device instance.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offReg      The offset of the register being written.
 * @param   uValue      The register value.
 */
DECLINLINE(VBOXSTRICTRC) apicWriteRegister(PAPICDEV pApicDev, PVMCPU pVCpu, uint16_t offReg, uint32_t uValue)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(offReg <= XAPIC_OFF_MAX_VALID);
    Assert(!XAPIC_IN_X2APIC_MODE(pVCpu));

    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    switch (offReg)
    {
        case XAPIC_OFF_TPR:
        {
            rcStrict = apicSetTprEx(pVCpu, uValue, false /* fForceX2ApicBehaviour */);
            break;
        }

        case XAPIC_OFF_LVT_TIMER:
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
        case XAPIC_OFF_LVT_THERMAL:
#endif
        case XAPIC_OFF_LVT_PERF:
        case XAPIC_OFF_LVT_LINT0:
        case XAPIC_OFF_LVT_LINT1:
        case XAPIC_OFF_LVT_ERROR:
        {
            rcStrict = apicSetLvtEntry(pVCpu, offReg, uValue);
            break;
        }

        case XAPIC_OFF_TIMER_ICR:
        {
            rcStrict = apicSetTimerIcr(pVCpu, VINF_IOM_R3_MMIO_WRITE, uValue);
            break;
        }

        case XAPIC_OFF_EOI:
        {
            rcStrict = apicSetEoi(pVCpu, uValue, VINF_IOM_R3_MMIO_WRITE, false /* fForceX2ApicBehaviour */);
            break;
        }

        case XAPIC_OFF_LDR:
        {
            rcStrict = apicSetLdr(pVCpu, uValue);
            break;
        }

        case XAPIC_OFF_DFR:
        {
            rcStrict = apicSetDfr(pVCpu, uValue);
            break;
        }

        case XAPIC_OFF_SVR:
        {
            rcStrict = apicSetSvr(pVCpu, uValue);
            break;
        }

        case XAPIC_OFF_ICR_LO:
        {
            rcStrict = apicSetIcrLo(pVCpu, uValue, VINF_IOM_R3_MMIO_WRITE, true /* fUpdateStat */);
            break;
        }

        case XAPIC_OFF_ICR_HI:
        {
            rcStrict = apicSetIcrHi(pVCpu, uValue);
            break;
        }

        case XAPIC_OFF_TIMER_DCR:
        {
            rcStrict = apicSetTimerDcr(pVCpu, uValue);
            break;
        }

        case XAPIC_OFF_ESR:
        {
            rcStrict = apicSetEsr(pVCpu, uValue);
            break;
        }

        case XAPIC_OFF_APR:
        case XAPIC_OFF_RRD:
        {
#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
            /* Unsupported on Pentium 4 and Xeon CPUs but writes do -not- set an illegal register access error. */
#else
# error "Implement Pentium and P6 family APIC architectures"
#endif
            break;
        }

        /* Read-only, write ignored: */
1823 case XAPIC_OFF_VERSION:
1824 case XAPIC_OFF_ID:
1825 break;
1826
1827 /* Unavailable/reserved in xAPIC mode: */
1828 case X2APIC_OFF_SELF_IPI:
1829 /* Read-only registers: */
1830 case XAPIC_OFF_PPR:
1831 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
1832 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
1833 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
1834 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
1835 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
1836 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
1837 case XAPIC_OFF_TIMER_CCR:
1838 default:
1839 {
1840 rcStrict = PDMDevHlpDBGFStop(pApicDev->CTX_SUFF(pDevIns), RT_SRC_POS, "APIC%u: offReg=%#RX16\n", pVCpu->idCpu,
1841 offReg);
1842 apicSetError(pVCpu, XAPIC_ESR_ILLEGAL_REG_ADDRESS);
1843 break;
1844 }
1845 }
1846
1847 return rcStrict;
1848}
1849
1850
1851/**
1852 * Reads an APIC MSR.
1853 *
1854 * @returns Strict VBox status code.
1855 * @param pVCpu The cross context virtual CPU structure.
1856 * @param u32Reg The MSR being read.
1857 * @param pu64Value Where to store the read value.
1858 */
1859VMM_INT_DECL(VBOXSTRICTRC) APICReadMsr(PVMCPU pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
1860{
1861 /*
1862 * Validate.
1863 */
1864 VMCPU_ASSERT_EMT(pVCpu);
1865 Assert(u32Reg >= MSR_IA32_X2APIC_ID && u32Reg <= MSR_IA32_X2APIC_SELF_IPI);
1866 Assert(pu64Value);
1867
1868 /*
1869 * Is the APIC enabled?
1870 */
1871 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
1872 if (APICIsEnabled(pVCpu))
1873 { /* likely */ }
1874 else
1875 {
1876 return apicMsrAccessError(pVCpu, u32Reg, pApic->enmMaxMode == PDMAPICMODE_NONE ?
1877 APICMSRACCESS_READ_DISALLOWED_CONFIG : APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1878 }
1879
1880#ifndef IN_RING3
1881 if (pApic->fRZEnabled)
1882    { /* likely */ }
1883 else
1884 return VINF_CPUM_R3_MSR_READ;
1885#endif
1886
1887 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMsrRead));
1888
1889 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1890 if (RT_LIKELY( XAPIC_IN_X2APIC_MODE(pVCpu)
1891 || pApic->fHyperVCompatMode))
1892 {
1893 switch (u32Reg)
1894 {
1895 /* Special handling for x2APIC: */
1896 case MSR_IA32_X2APIC_ICR:
1897 {
1898 *pu64Value = apicGetIcrNoCheck(pVCpu);
1899 break;
1900 }
1901
1902 /* Special handling, compatible with xAPIC: */
1903 case MSR_IA32_X2APIC_TIMER_CCR:
1904 {
1905 uint32_t uValue;
1906 rcStrict = apicGetTimerCcr(pVCpu, VINF_CPUM_R3_MSR_READ, &uValue);
1907 *pu64Value = uValue;
1908 break;
1909 }
1910
1911 /* Special handling, compatible with xAPIC: */
1912 case MSR_IA32_X2APIC_PPR:
1913 {
1914 *pu64Value = apicGetPpr(pVCpu);
1915 break;
1916 }
1917
1918 /* Raw read, compatible with xAPIC: */
1919 case MSR_IA32_X2APIC_ID:
1920 case MSR_IA32_X2APIC_VERSION:
1921 case MSR_IA32_X2APIC_TPR:
1922 case MSR_IA32_X2APIC_LDR:
1923 case MSR_IA32_X2APIC_SVR:
1924 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
1925 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
1926 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
1927 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
1928 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
1929 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
1930 case MSR_IA32_X2APIC_ESR:
1931 case MSR_IA32_X2APIC_LVT_TIMER:
1932 case MSR_IA32_X2APIC_LVT_THERMAL:
1933 case MSR_IA32_X2APIC_LVT_PERF:
1934 case MSR_IA32_X2APIC_LVT_LINT0:
1935 case MSR_IA32_X2APIC_LVT_LINT1:
1936 case MSR_IA32_X2APIC_LVT_ERROR:
1937 case MSR_IA32_X2APIC_TIMER_ICR:
1938 case MSR_IA32_X2APIC_TIMER_DCR:
1939 {
1940 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
1941 uint16_t const offReg = X2APIC_GET_XAPIC_OFF(u32Reg);
1942 *pu64Value = apicReadRaw32(pXApicPage, offReg);
1943 break;
1944 }
1945
1946 /* Write-only MSRs: */
1947 case MSR_IA32_X2APIC_SELF_IPI:
1948 case MSR_IA32_X2APIC_EOI:
1949 {
1950 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_WRITE_ONLY);
1951 break;
1952 }
1953
1954 /*
1955             * A Windows guest using the Hyper-V x2APIC MSR compatibility mode tries to read the
1956             * "high" LDR bits using this invalid MSR index (0x80E), which is quite absurd as the
1957             * LDR is a 32-bit register; see @bugref{8382#c175}.
1958 */
1959 case MSR_IA32_X2APIC_LDR + 1:
1960 {
1961 if (pApic->fHyperVCompatMode)
1962 *pu64Value = 0;
1963 else
1964 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1965 break;
1966 }
1967
1968 /* Reserved MSRs: */
1969 case MSR_IA32_X2APIC_LVT_CMCI:
1970 default:
1971 {
1972 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_READ_RSVD_OR_UNKNOWN);
1973 break;
1974 }
1975 }
1976 }
1977 else
1978 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_READ_MODE);
1979
1980 return rcStrict;
1981}
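/*
 * A minimal caller sketch for the MSR read path (illustrative only, never
 * built; assumes some pVCpu on the EMT in x2APIC mode). Per the Intel spec,
 * the x2APIC MSRs 0x800..0x8ff map onto the xAPIC page at offset
 * (MSR - 0x800) << 4, e.g. MSR_IA32_X2APIC_TPR (0x808) -> XAPIC_OFF_TPR (0x80).
 */
#if 0
    uint64_t uTpr = 0;
    VBOXSTRICTRC rcStrict = APICReadMsr(pVCpu, MSR_IA32_X2APIC_TPR, &uTpr);
    if (rcStrict == VINF_SUCCESS)
        Log(("x2APIC TPR=%#RX64\n", uTpr));
#endif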
1982
1983
1984/**
1985 * Writes an APIC MSR.
1986 *
1987 * @returns Strict VBox status code.
1988 * @param pVCpu The cross context virtual CPU structure.
1989 * @param u32Reg The MSR being written.
1990 * @param u64Value The value to write.
1991 */
1992VMM_INT_DECL(VBOXSTRICTRC) APICWriteMsr(PVMCPU pVCpu, uint32_t u32Reg, uint64_t u64Value)
1993{
1994 /*
1995 * Validate.
1996 */
1997 VMCPU_ASSERT_EMT(pVCpu);
1998 Assert(u32Reg >= MSR_IA32_X2APIC_ID && u32Reg <= MSR_IA32_X2APIC_SELF_IPI);
1999
2000 /*
2001 * Is the APIC enabled?
2002 */
2003 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2004 if (APICIsEnabled(pVCpu))
2005 { /* likely */ }
2006 else
2007 {
2008 return apicMsrAccessError(pVCpu, u32Reg, pApic->enmMaxMode == PDMAPICMODE_NONE ?
2009 APICMSRACCESS_WRITE_DISALLOWED_CONFIG : APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2010 }
2011
2012#ifndef IN_RING3
2013 if (pApic->fRZEnabled)
2014 { /* likely */ }
2015 else
2016 return VINF_CPUM_R3_MSR_WRITE;
2017#endif
2018
2019 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMsrWrite));
2020
2021 /*
2022 * In x2APIC mode, we need to raise #GP(0) for writes to reserved bits, unlike MMIO
2023 * accesses where they are ignored. Hence, we need to validate each register before
2024 * invoking the generic/xAPIC write functions.
2025 *
2026     * Bits 63:32 of all registers except the ICR are reserved; we'll handle this common
2027     * case first and validate the remaining bits on a per-register basis.
2028 * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
2029 */
2030 if ( u32Reg != MSR_IA32_X2APIC_ICR
2031 && RT_HI_U32(u64Value))
2032 return apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_BITS);
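    /*
     * Example of the check above (hypothetical write): 0x100000000 written to
     * MSR_IA32_X2APIC_TPR has RT_HI_U32 non-zero and is rejected as a
     * reserved-bits access, whereas the same value written to MSR_IA32_X2APIC_ICR
     * passes, the ICR being the only genuinely 64-bit register here.
     */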
2033
2034 uint32_t u32Value = RT_LO_U32(u64Value);
2035 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2036 if (RT_LIKELY( XAPIC_IN_X2APIC_MODE(pVCpu)
2037 || pApic->fHyperVCompatMode))
2038 {
2039 switch (u32Reg)
2040 {
2041 case MSR_IA32_X2APIC_TPR:
2042 {
2043 rcStrict = apicSetTprEx(pVCpu, u32Value, false /* fForceX2ApicBehaviour */);
2044 break;
2045 }
2046
2047 case MSR_IA32_X2APIC_ICR:
2048 {
2049 rcStrict = apicSetIcr(pVCpu, u64Value, VINF_CPUM_R3_MSR_WRITE);
2050 break;
2051 }
2052
2053 case MSR_IA32_X2APIC_SVR:
2054 {
2055 rcStrict = apicSetSvr(pVCpu, u32Value);
2056 break;
2057 }
2058
2059 case MSR_IA32_X2APIC_ESR:
2060 {
2061 rcStrict = apicSetEsr(pVCpu, u32Value);
2062 break;
2063 }
2064
2065 case MSR_IA32_X2APIC_TIMER_DCR:
2066 {
2067 rcStrict = apicSetTimerDcr(pVCpu, u32Value);
2068 break;
2069 }
2070
2071 case MSR_IA32_X2APIC_LVT_TIMER:
2072 case MSR_IA32_X2APIC_LVT_THERMAL:
2073 case MSR_IA32_X2APIC_LVT_PERF:
2074 case MSR_IA32_X2APIC_LVT_LINT0:
2075 case MSR_IA32_X2APIC_LVT_LINT1:
2076 case MSR_IA32_X2APIC_LVT_ERROR:
2077 {
2078 rcStrict = apicSetLvtEntry(pVCpu, X2APIC_GET_XAPIC_OFF(u32Reg), u32Value);
2079 break;
2080 }
2081
2082 case MSR_IA32_X2APIC_TIMER_ICR:
2083 {
2084 rcStrict = apicSetTimerIcr(pVCpu, VINF_CPUM_R3_MSR_WRITE, u32Value);
2085 break;
2086 }
2087
2088 /* Write-only MSRs: */
2089 case MSR_IA32_X2APIC_SELF_IPI:
2090 {
2091 uint8_t const uVector = XAPIC_SELF_IPI_GET_VECTOR(u32Value);
2092 apicPostInterrupt(pVCpu, uVector, XAPICTRIGGERMODE_EDGE, 0 /* uSrcTag */);
2093 rcStrict = VINF_SUCCESS;
2094 break;
2095 }
2096
2097 case MSR_IA32_X2APIC_EOI:
2098 {
2099 rcStrict = apicSetEoi(pVCpu, u32Value, VINF_CPUM_R3_MSR_WRITE, false /* fForceX2ApicBehaviour */);
2100 break;
2101 }
2102
2103 /*
2104             * A Windows guest using the Hyper-V x2APIC MSR compatibility mode tries to write the
2105             * "high" LDR bits using this invalid MSR index (0x80E), which is quite absurd as the
2106             * LDR is a 32-bit register. The write value was 0xffffffff on a Windows 8.1 64-bit
2107             * guest. We can safely ignore this nonsense; see @bugref{8382#c7}.
2108 */
2109 case MSR_IA32_X2APIC_LDR + 1:
2110 {
2111 if (pApic->fHyperVCompatMode)
2112 rcStrict = VINF_SUCCESS;
2113 else
2114 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2115 break;
2116 }
2117
2118            /* Special treatment (read-only normally, but not with Hyper-V) */
2119 case MSR_IA32_X2APIC_LDR:
2120 {
2121 if (pApic->fHyperVCompatMode)
2122 {
2123 rcStrict = apicSetLdr(pVCpu, u32Value);
2124 break;
2125 }
2126 }
2127 RT_FALL_THRU();
2128 /* Read-only MSRs: */
2129 case MSR_IA32_X2APIC_ID:
2130 case MSR_IA32_X2APIC_VERSION:
2131 case MSR_IA32_X2APIC_PPR:
2132 case MSR_IA32_X2APIC_ISR0: case MSR_IA32_X2APIC_ISR1: case MSR_IA32_X2APIC_ISR2: case MSR_IA32_X2APIC_ISR3:
2133 case MSR_IA32_X2APIC_ISR4: case MSR_IA32_X2APIC_ISR5: case MSR_IA32_X2APIC_ISR6: case MSR_IA32_X2APIC_ISR7:
2134 case MSR_IA32_X2APIC_TMR0: case MSR_IA32_X2APIC_TMR1: case MSR_IA32_X2APIC_TMR2: case MSR_IA32_X2APIC_TMR3:
2135 case MSR_IA32_X2APIC_TMR4: case MSR_IA32_X2APIC_TMR5: case MSR_IA32_X2APIC_TMR6: case MSR_IA32_X2APIC_TMR7:
2136 case MSR_IA32_X2APIC_IRR0: case MSR_IA32_X2APIC_IRR1: case MSR_IA32_X2APIC_IRR2: case MSR_IA32_X2APIC_IRR3:
2137 case MSR_IA32_X2APIC_IRR4: case MSR_IA32_X2APIC_IRR5: case MSR_IA32_X2APIC_IRR6: case MSR_IA32_X2APIC_IRR7:
2138 case MSR_IA32_X2APIC_TIMER_CCR:
2139 {
2140 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_READ_ONLY);
2141 break;
2142 }
2143
2144 /* Reserved MSRs: */
2145 case MSR_IA32_X2APIC_LVT_CMCI:
2146 default:
2147 {
2148 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_WRITE_RSVD_OR_UNKNOWN);
2149 break;
2150 }
2151 }
2152 }
2153 else
2154 rcStrict = apicMsrAccessError(pVCpu, u32Reg, APICMSRACCESS_INVALID_WRITE_MODE);
2155
2156 return rcStrict;
2157}
2158
2159
2160/**
2161 * Resets the APIC base MSR.
2162 *
2163 * @param pVCpu The cross context virtual CPU structure.
2164 */
2165static void apicResetBaseMsr(PVMCPU pVCpu)
2166{
2167 /*
2168 * Initialize the APIC base MSR. The APIC enable-bit is set upon power-up or reset[1].
2169 *
2170 * A Reset (in xAPIC and x2APIC mode) brings up the local APIC in xAPIC mode.
2171 * An INIT IPI does -not- cause a transition between xAPIC and x2APIC mode[2].
2172 *
2173 * [1] See AMD spec. 14.1.3 "Processor Initialization State"
2174 * [2] See Intel spec. 10.12.5.1 "x2APIC States".
2175 */
2176 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2177
2178 /* Construct. */
2179 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2180 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2181 uint64_t uApicBaseMsr = MSR_IA32_APICBASE_ADDR;
2182 if (pVCpu->idCpu == 0)
2183 uApicBaseMsr |= MSR_IA32_APICBASE_BSP;
2184
2185 /* If the VM was configured with no APIC, don't enable xAPIC mode, obviously. */
2186 if (pApic->enmMaxMode != PDMAPICMODE_NONE)
2187 {
2188 uApicBaseMsr |= MSR_IA32_APICBASE_EN;
2189
2190 /*
2191         * When coming out of a reset, the APIC is enabled and in xAPIC mode. If software had previously
2192         * disabled the APIC (which results in the CPUID bit being cleared as well), we re-enable it here.
2193 * See Intel spec. 10.12.5.1 "x2APIC States".
2194 */
2195 if (CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, true /*fVisible*/) == false)
2196 LogRel(("APIC%u: Resetting mode to xAPIC\n", pVCpu->idCpu));
2197 }
2198
2199 /* Commit. */
2200 ASMAtomicWriteU64(&pApicCpu->uApicBaseMsr, uApicBaseMsr);
2201}
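/*
 * For example, on VCPU 0 of a VM configured with an APIC, the value committed
 * above is MSR_IA32_APICBASE_ADDR | MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_EN,
 * i.e. the familiar power-up value 0xfee00900.
 */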
2202
2203
2204/**
2205 * Initializes per-VCPU APIC to the state following an INIT reset
2206 * ("Wait-for-SIPI" state).
2207 *
2208 * @param pVCpu The cross context virtual CPU structure.
2209 */
2210void apicInitIpi(PVMCPU pVCpu)
2211{
2212 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2213 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2214
2215 /*
2216 * See Intel spec. 10.4.7.3 "Local APIC State After an INIT Reset (Wait-for-SIPI State)"
2217 * and AMD spec 16.3.2 "APIC Registers".
2218 *
2219 * The reason we don't simply zero out the entire APIC page and only set the non-zero members
2220     * is that some registers are not touched by the INIT IPI operation (e.g. the version
2221     * register) and this function is only a subset of the reset operation.
2222 */
2223    RT_ZERO(pXApicPage->irr);
2225 RT_ZERO(pXApicPage->isr);
2226 RT_ZERO(pXApicPage->tmr);
2227 RT_ZERO(pXApicPage->icr_hi);
2228 RT_ZERO(pXApicPage->icr_lo);
2229 RT_ZERO(pXApicPage->ldr);
2230 RT_ZERO(pXApicPage->tpr);
2231 RT_ZERO(pXApicPage->ppr);
2232 RT_ZERO(pXApicPage->timer_icr);
2233 RT_ZERO(pXApicPage->timer_ccr);
2234 RT_ZERO(pXApicPage->timer_dcr);
2235
2236 pXApicPage->dfr.u.u4Model = XAPICDESTFORMAT_FLAT;
2237 pXApicPage->dfr.u.u28ReservedMb1 = UINT32_C(0xfffffff);
2238
2239 /** @todo CMCI. */
2240
2241 RT_ZERO(pXApicPage->lvt_timer);
2242 pXApicPage->lvt_timer.u.u1Mask = 1;
2243
2244#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
2245 RT_ZERO(pXApicPage->lvt_thermal);
2246 pXApicPage->lvt_thermal.u.u1Mask = 1;
2247#endif
2248
2249 RT_ZERO(pXApicPage->lvt_perf);
2250 pXApicPage->lvt_perf.u.u1Mask = 1;
2251
2252 RT_ZERO(pXApicPage->lvt_lint0);
2253 pXApicPage->lvt_lint0.u.u1Mask = 1;
2254
2255 RT_ZERO(pXApicPage->lvt_lint1);
2256 pXApicPage->lvt_lint1.u.u1Mask = 1;
2257
2258 RT_ZERO(pXApicPage->lvt_error);
2259 pXApicPage->lvt_error.u.u1Mask = 1;
2260
2261 RT_ZERO(pXApicPage->svr);
2262 pXApicPage->svr.u.u8SpuriousVector = 0xff;
2263
2264 /* The self-IPI register is reset to 0. See Intel spec. 10.12.5.1 "x2APIC States" */
2265 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
2266 RT_ZERO(pX2ApicPage->self_ipi);
2267
2268 /* Clear the pending-interrupt bitmaps. */
2269 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2270 RT_BZERO(&pApicCpu->ApicPibLevel, sizeof(APICPIB));
2271 RT_BZERO(pApicCpu->CTX_SUFF(pvApicPib), sizeof(APICPIB));
2272
2273 /* Clear the interrupt line states for LINT0 and LINT1 pins. */
2274 pApicCpu->fActiveLint0 = false;
2275 pApicCpu->fActiveLint1 = false;
2276}
2277
2278
2279/**
2280 * Initializes per-VCPU APIC to the state following a power-up or hardware
2281 * reset.
2282 *
2283 * @param pVCpu The cross context virtual CPU structure.
2284 * @param fResetApicBaseMsr Whether to reset the APIC base MSR.
2285 */
2286void apicResetCpu(PVMCPU pVCpu, bool fResetApicBaseMsr)
2287{
2288 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2289
2290    LogFlow(("APIC%u: apicResetCpu: fResetApicBaseMsr=%RTbool\n", pVCpu->idCpu, fResetApicBaseMsr));
2291
2292#ifdef VBOX_STRICT
2293 /* Verify that the initial APIC ID reported via CPUID matches our VMCPU ID assumption. */
2294 uint32_t uEax, uEbx, uEcx, uEdx;
2295 uEax = uEbx = uEcx = uEdx = UINT32_MAX;
2296 CPUMGetGuestCpuId(pVCpu, 1, 0, &uEax, &uEbx, &uEcx, &uEdx);
2297 Assert(((uEbx >> 24) & 0xff) == pVCpu->idCpu);
2298#endif
2299
2300 /*
2301 * The state following a power-up or reset is a superset of the INIT state.
2302 * See Intel spec. 10.4.7.3 "Local APIC State After an INIT Reset ('Wait-for-SIPI' State)"
2303 */
2304 apicInitIpi(pVCpu);
2305
2306 /*
2307 * The APIC version register is read-only, so just initialize it here.
2308     * It is not clear from the specs where exactly it is initialized.
2309 * The version determines the number of LVT entries and size of the APIC ID (8 bits for P4).
2310 */
2311 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2312#if XAPIC_HARDWARE_VERSION == XAPIC_HARDWARE_VERSION_P4
2313 pXApicPage->version.u.u8MaxLvtEntry = XAPIC_MAX_LVT_ENTRIES_P4 - 1;
2314 pXApicPage->version.u.u8Version = XAPIC_HARDWARE_VERSION_P4;
2315 AssertCompile(sizeof(pXApicPage->id.u8ApicId) >= XAPIC_APIC_ID_BIT_COUNT_P4 / 8);
2316#else
2317# error "Implement Pentium and P6 family APIC architectures"
2318#endif
2319
2320    /** @todo It isn't clear in the spec where exactly the default base address
2321     * is (re)initialized; atm we do it here in Reset. */
2322 if (fResetApicBaseMsr)
2323 apicResetBaseMsr(pVCpu);
2324
2325 /*
2326 * Initialize the APIC ID register to xAPIC format.
2327 */
2328 ASMMemZero32(&pXApicPage->id, sizeof(pXApicPage->id));
2329 pXApicPage->id.u8ApicId = pVCpu->idCpu;
2330}
2331
2332
2333/**
2334 * Sets the APIC base MSR.
2335 *
2336 * @returns VBox status code - no informational ones, esp. not
2337 * VINF_CPUM_R3_MSR_WRITE. Only the following two:
2338 * @retval VINF_SUCCESS
2339 * @retval VERR_CPUM_RAISE_GP_0
2340 *
2341 * @param pVCpu The cross context virtual CPU structure.
2342 * @param u64BaseMsr The value to set.
2343 */
2344VMM_INT_DECL(int) APICSetBaseMsr(PVMCPU pVCpu, uint64_t u64BaseMsr)
2345{
2346 Assert(pVCpu);
2347
2348 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2349 PAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2350 APICMODE enmOldMode = apicGetMode(pApicCpu->uApicBaseMsr);
2351 APICMODE enmNewMode = apicGetMode(u64BaseMsr);
2352 uint64_t uBaseMsr = pApicCpu->uApicBaseMsr;
2353
2354 Log2(("APIC%u: ApicSetBaseMsr: u64BaseMsr=%#RX64 enmNewMode=%s enmOldMode=%s\n", pVCpu->idCpu, u64BaseMsr,
2355 apicGetModeName(enmNewMode), apicGetModeName(enmOldMode)));
2356
2357 /*
2358 * We do not support re-mapping the APIC base address because:
2359 * - We'll have to manage all the mappings ourselves in the APIC (reference counting based unmapping etc.)
2360 * i.e. we can only unmap the MMIO region if no other APIC is mapped on that location.
2361     * - It's unclear how/if IOM can fall back to handling regions as regular memory (if the MMIO
2362 * region remains mapped but doesn't belong to the called VCPU's APIC).
2363 */
2364 /** @todo Handle per-VCPU APIC base relocation. */
2365 if (MSR_IA32_APICBASE_GET_ADDR(uBaseMsr) != MSR_IA32_APICBASE_ADDR)
2366 {
2367 if (pVCpu->apic.s.cLogMaxSetApicBaseAddr++ < 5)
2368 LogRel(("APIC%u: Attempt to relocate base to %#RGp, unsupported -> #GP(0)\n", pVCpu->idCpu,
2369 MSR_IA32_APICBASE_GET_ADDR(uBaseMsr)));
2370 return VERR_CPUM_RAISE_GP_0;
2371 }
2372
2373 /* Don't allow enabling xAPIC/x2APIC if the VM is configured with the APIC disabled. */
2374 if (pApic->enmMaxMode == PDMAPICMODE_NONE)
2375 {
2376 LogRel(("APIC%u: Disallowing APIC base MSR write as the VM is configured with APIC disabled!\n", pVCpu->idCpu));
2377 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_DISALLOWED_CONFIG);
2378 }
2379
2380 /*
2381 * Act on state transition.
2382 */
2383 if (enmNewMode != enmOldMode)
2384 {
2385 switch (enmNewMode)
2386 {
2387 case APICMODE_DISABLED:
2388 {
2389 /*
2390                 * The APIC state needs to be reset (especially the APIC ID, as the x2APIC ID bit layout
2391 * is different). We can start with a clean slate identical to the state after a power-up/reset.
2392 *
2393 * See Intel spec. 10.4.3 "Enabling or Disabling the Local APIC".
2394 *
2395                 * We'll also manually manage the APIC base MSR here. We want a single point of commit
2396                 * at the end of this function rather than updating it in apicResetCpu. This means we also
2397 * need to update the CPUID leaf ourselves.
2398 */
2399 apicResetCpu(pVCpu, false /* fResetApicBaseMsr */);
2400 uBaseMsr &= ~(MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD);
2401 CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, false /*fVisible*/);
2402 LogRel(("APIC%u: Switched mode to disabled\n", pVCpu->idCpu));
2403 break;
2404 }
2405
2406 case APICMODE_XAPIC:
2407 {
2408 if (enmOldMode != APICMODE_DISABLED)
2409 {
2410 LogRel(("APIC%u: Can only transition to xAPIC state from disabled state\n", pVCpu->idCpu));
2411 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2412 }
2413
2414 uBaseMsr |= MSR_IA32_APICBASE_EN;
2415 CPUMSetGuestCpuIdPerCpuApicFeature(pVCpu, true /*fVisible*/);
2416 LogRel(("APIC%u: Switched mode to xAPIC\n", pVCpu->idCpu));
2417 break;
2418 }
2419
2420 case APICMODE_X2APIC:
2421 {
2422 if (pApic->enmMaxMode != PDMAPICMODE_X2APIC)
2423 {
2424 LogRel(("APIC%u: Disallowing transition to x2APIC mode as the VM is configured with the x2APIC disabled!\n",
2425 pVCpu->idCpu));
2426 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2427 }
2428
2429 if (enmOldMode != APICMODE_XAPIC)
2430 {
2431 LogRel(("APIC%u: Can only transition to x2APIC state from xAPIC state\n", pVCpu->idCpu));
2432 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2433 }
2434
2435 uBaseMsr |= MSR_IA32_APICBASE_EN | MSR_IA32_APICBASE_EXTD;
2436
2437 /*
2438 * The APIC ID needs updating when entering x2APIC mode.
2439                 * An APIC ID written by software in xAPIC mode isn't preserved.
2440 * The APIC ID becomes read-only to software in x2APIC mode.
2441 *
2442 * See Intel spec. 10.12.5.1 "x2APIC States".
2443 */
2444 PX2APICPAGE pX2ApicPage = VMCPU_TO_X2APICPAGE(pVCpu);
2445 ASMMemZero32(&pX2ApicPage->id, sizeof(pX2ApicPage->id));
2446 pX2ApicPage->id.u32ApicId = pVCpu->idCpu;
2447
2448 /*
2449 * LDR initialization occurs when entering x2APIC mode.
2450 * See Intel spec. 10.12.10.2 "Deriving Logical x2APIC ID from the Local x2APIC ID".
2451 */
2452 pX2ApicPage->ldr.u32LogicalApicId = ((pX2ApicPage->id.u32ApicId & UINT32_C(0xffff0)) << 16)
2453                                              | (UINT32_C(1) << (pX2ApicPage->id.u32ApicId & UINT32_C(0xf)));
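                /*
                 * Worked example (illustrative): for x2APIC ID 5 the cluster bits are zero
                 * and the logical bit is 1 << 5, giving an LDR of 0x00000020; IDs 0..15 thus
                 * share cluster 0 with one unique logical bit each.
                 */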
2454
2455 LogRel(("APIC%u: Switched mode to x2APIC\n", pVCpu->idCpu));
2456 break;
2457 }
2458
2459 case APICMODE_INVALID:
2460 default:
2461 {
2462 Log(("APIC%u: Invalid state transition attempted\n", pVCpu->idCpu));
2463 return apicMsrAccessError(pVCpu, MSR_IA32_APICBASE, APICMSRACCESS_WRITE_INVALID);
2464 }
2465 }
2466 }
2467
2468 ASMAtomicWriteU64(&pApicCpu->uApicBaseMsr, uBaseMsr);
2469 return VINF_SUCCESS;
2470}
2471
2472
2473/**
2474 * Gets the APIC base MSR (no checks are performed wrt APIC hardware or its
2475 * state).
2476 *
2477 * @returns The base MSR value.
2478 * @param pVCpu The cross context virtual CPU structure.
2479 */
2480VMM_INT_DECL(uint64_t) APICGetBaseMsrNoCheck(PVMCPU pVCpu)
2481{
2482 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2483 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2484 return pApicCpu->uApicBaseMsr;
2485}
2486
2487
2488/**
2489 * Gets the APIC base MSR.
2490 *
2491 * @returns Strict VBox status code.
2492 * @param pVCpu The cross context virtual CPU structure.
2493 * @param pu64Value Where to store the MSR value.
2494 */
2495VMM_INT_DECL(VBOXSTRICTRC) APICGetBaseMsr(PVMCPU pVCpu, uint64_t *pu64Value)
2496{
2497 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2498
2499 PCAPIC pApic = VM_TO_APIC(pVCpu->CTX_SUFF(pVM));
2500 if (pApic->enmMaxMode != PDMAPICMODE_NONE)
2501 {
2502 *pu64Value = APICGetBaseMsrNoCheck(pVCpu);
2503 return VINF_SUCCESS;
2504 }
2505
2506 if (pVCpu->apic.s.cLogMaxGetApicBaseAddr++ < 5)
2507 LogRel(("APIC%u: Reading APIC base MSR (%#x) when there is no APIC -> #GP(0)\n", pVCpu->idCpu, MSR_IA32_APICBASE));
2508 return VERR_CPUM_RAISE_GP_0;
2509}
2510
2511
2512/**
2513 * Sets the TPR (Task Priority Register).
2514 *
2515 * @retval VINF_SUCCESS
2516 * @retval VERR_CPUM_RAISE_GP_0
2517 * @retval VERR_PDM_NO_APIC_INSTANCE
2518 *
2519 * @param pVCpu The cross context virtual CPU structure.
2520 * @param u8Tpr The TPR value to set.
2521 */
2522VMMDECL(int) APICSetTpr(PVMCPU pVCpu, uint8_t u8Tpr)
2523{
2524 if (APICIsEnabled(pVCpu))
2525 return apicSetTprEx(pVCpu, u8Tpr, false /* fForceX2ApicBehaviour */);
2526 return VERR_PDM_NO_APIC_INSTANCE;
2527}
2528
2529
2530/**
2531 * Gets the highest priority pending interrupt.
2532 *
2533 * @returns true if any interrupt is pending, false otherwise.
2534 * @param pVCpu The cross context virtual CPU structure.
2535 * @param pu8PendingIntr Where to store the interrupt vector if the
2536 * interrupt is pending (optional, can be NULL).
2537 */
2538static bool apicGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr)
2539{
2540 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2541 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1);
2542 if (irrv >= 0)
2543 {
2544 Assert(irrv <= (int)UINT8_MAX);
2545 if (pu8PendingIntr)
2546 *pu8PendingIntr = (uint8_t)irrv;
2547 return true;
2548 }
2549 return false;
2550}
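/*
 * E.g. (hypothetical IRR state): with vectors 0x30 and 0x41 both pending,
 * apicGetHighestSetBitInReg returns 0x41 and that is the vector reported,
 * the highest set bit being the highest-priority pending vector.
 */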
2551
2552
2553/**
2554 * Gets the APIC TPR (Task Priority Register).
2555 *
2556 * @returns VBox status code.
2557 * @param pVCpu The cross context virtual CPU structure.
2558 * @param pu8Tpr Where to store the TPR.
2559 * @param pfPending Where to store whether there is a pending interrupt
2560 * (optional, can be NULL).
2561 * @param pu8PendingIntr Where to store the highest-priority pending
2562 * interrupt (optional, can be NULL).
2563 */
2564VMMDECL(int) APICGetTpr(PVMCPU pVCpu, uint8_t *pu8Tpr, bool *pfPending, uint8_t *pu8PendingIntr)
2565{
2566 VMCPU_ASSERT_EMT(pVCpu);
2567 if (APICIsEnabled(pVCpu))
2568 {
2569 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2570 if (pfPending)
2571 {
2572 /*
2573 * Just return whatever the highest pending interrupt is in the IRR.
2574 * The caller is responsible for figuring out if it's masked by the TPR etc.
2575 */
2576 *pfPending = apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
2577 }
2578
2579 *pu8Tpr = pXApicPage->tpr.u8Tpr;
2580 return VINF_SUCCESS;
2581 }
2582
2583 *pu8Tpr = 0;
2584 return VERR_PDM_NO_APIC_INSTANCE;
2585}
2586
2587
2588/**
2589 * Gets the APIC timer frequency.
2590 *
2591 * @returns Strict VBox status code.
2592 * @param pVM The cross context VM structure.
2593 * @param pu64Value Where to store the timer frequency.
2594 */
2595VMM_INT_DECL(int) APICGetTimerFreq(PVM pVM, uint64_t *pu64Value)
2596{
2597 /*
2598 * Validate.
2599 */
2600 Assert(pVM);
2601 AssertPtrReturn(pu64Value, VERR_INVALID_PARAMETER);
2602
2603 PVMCPU pVCpu = &pVM->aCpus[0];
2604 if (APICIsEnabled(pVCpu))
2605 {
2606 PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2607 *pu64Value = TMTimerGetFreq(pApicCpu->CTX_SUFF(pTimer));
2608 return VINF_SUCCESS;
2609 }
2610 return VERR_PDM_NO_APIC_INSTANCE;
2611}
2612
2613
2614/**
2615 * Delivers an interrupt message via the system bus.
2616 *
2617 * @returns VBox status code.
2618 * @param pVM The cross context VM structure.
2619 * @param uDest The destination mask.
2620 * @param uDestMode The destination mode.
2621 * @param uDeliveryMode The delivery mode.
2622 * @param uVector The interrupt vector.
2623 * @param uPolarity The interrupt line polarity.
2624 * @param uTriggerMode The trigger mode.
2625 * @param uSrcTag The interrupt source tag (debugging).
2626 */
2627VMM_INT_DECL(int) APICBusDeliver(PVM pVM, uint8_t uDest, uint8_t uDestMode, uint8_t uDeliveryMode, uint8_t uVector,
2628 uint8_t uPolarity, uint8_t uTriggerMode, uint32_t uSrcTag)
2629{
2630 NOREF(uPolarity);
2631
2632 /*
2633 * If the APIC isn't enabled, do nothing and pretend success.
2634 */
2635 if (APICIsEnabled(&pVM->aCpus[0]))
2636 { /* likely */ }
2637 else
2638 return VINF_SUCCESS;
2639
2640 /*
2641     * The destination field (mask) in the IO APIC redirectable table entry is 8 bits wide.
2642 * Hence, the broadcast mask is 0xff.
2643 * See IO APIC spec. 3.2.4. "IOREDTBL[23:0] - I/O Redirectable Table Registers".
2644 */
2645 XAPICTRIGGERMODE enmTriggerMode = (XAPICTRIGGERMODE)uTriggerMode;
2646 XAPICDELIVERYMODE enmDeliveryMode = (XAPICDELIVERYMODE)uDeliveryMode;
2647 XAPICDESTMODE enmDestMode = (XAPICDESTMODE)uDestMode;
2648 uint32_t fDestMask = uDest;
2649 uint32_t fBroadcastMask = UINT32_C(0xff);
2650
2651 Log2(("APIC: apicBusDeliver: fDestMask=%#x enmDestMode=%s enmTriggerMode=%s enmDeliveryMode=%s uVector=%#x\n", fDestMask,
2652 apicGetDestModeName(enmDestMode), apicGetTriggerModeName(enmTriggerMode), apicGetDeliveryModeName(enmDeliveryMode),
2653 uVector));
2654
2655 bool fIntrAccepted;
2656 VMCPUSET DestCpuSet;
2657 apicGetDestCpuSet(pVM, fDestMask, fBroadcastMask, enmDestMode, enmDeliveryMode, &DestCpuSet);
2658 VBOXSTRICTRC rcStrict = apicSendIntr(pVM, NULL /* pVCpu */, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2659 &fIntrAccepted, uSrcTag, VINF_SUCCESS /* rcRZ */);
2660 if (fIntrAccepted)
2661 return VBOXSTRICTRC_VAL(rcStrict);
2662 return VERR_APIC_INTR_DISCARDED;
2663}
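/*
 * Delivery sketch (illustrative only, never built): a fixed, edge-triggered
 * vector 0x60 broadcast over the bus would use the 0xff broadcast mask noted
 * above; uDest=0x01 would instead address only the APIC with ID 1.
 */
#if 0
    int rc = APICBusDeliver(pVM, 0xff /* uDest: broadcast */, XAPICDESTMODE_PHYSICAL, XAPICDELIVERYMODE_FIXED,
                            0x60 /* uVector */, 0 /* uPolarity */, XAPICTRIGGERMODE_EDGE, 0 /* uSrcTag */);
#endif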
2664
2665
2666/**
2667 * Assert/de-assert the local APIC's LINT0/LINT1 interrupt pins.
2668 *
2669 * @returns Strict VBox status code.
2670 * @param pVCpu The cross context virtual CPU structure.
2671 * @param u8Pin The interrupt pin (0 for LINT0 or 1 for LINT1).
2672 * @param u8Level The level (0 for low or 1 for high).
2673 * @param rcRZ The return code if the operation cannot be performed in
2674 * the current context.
2675 */
2676VMM_INT_DECL(VBOXSTRICTRC) APICLocalInterrupt(PVMCPU pVCpu, uint8_t u8Pin, uint8_t u8Level, int rcRZ)
2677{
2678 AssertReturn(u8Pin <= 1, VERR_INVALID_PARAMETER);
2679 AssertReturn(u8Level <= 1, VERR_INVALID_PARAMETER);
2680
2681 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2682
2683 /* If the APIC is enabled, the interrupt is subject to LVT programming. */
2684 if (APICIsEnabled(pVCpu))
2685 {
2686 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
2687
2688 /* Pick the LVT entry corresponding to the interrupt pin. */
2689 static const uint16_t s_au16LvtOffsets[] =
2690 {
2691 XAPIC_OFF_LVT_LINT0,
2692 XAPIC_OFF_LVT_LINT1
2693 };
2694 Assert(u8Pin < RT_ELEMENTS(s_au16LvtOffsets));
2695 uint16_t const offLvt = s_au16LvtOffsets[u8Pin];
2696 uint32_t const uLvt = apicReadRaw32(pXApicPage, offLvt);
2697
2698        /* If software hasn't masked the interrupt in the LVT entry, proceed with interrupt processing. */
2699 if (!XAPIC_LVT_IS_MASKED(uLvt))
2700 {
2701 XAPICDELIVERYMODE const enmDeliveryMode = XAPIC_LVT_GET_DELIVERY_MODE(uLvt);
2702 XAPICTRIGGERMODE enmTriggerMode = XAPIC_LVT_GET_TRIGGER_MODE(uLvt);
2703
2704 switch (enmDeliveryMode)
2705 {
2706 case XAPICDELIVERYMODE_INIT:
2707 {
2708 /** @todo won't work in R0/RC because callers don't care about rcRZ. */
2709 AssertMsgFailed(("INIT through LINT0/LINT1 is not yet supported\n"));
2710 }
2711 RT_FALL_THRU();
2712 case XAPICDELIVERYMODE_FIXED:
2713 {
2714 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2715 uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
2716 bool fActive = RT_BOOL(u8Level & 1);
2717 bool volatile *pfActiveLine = u8Pin == 0 ? &pApicCpu->fActiveLint0 : &pApicCpu->fActiveLint1;
2718 /** @todo Polarity is busted elsewhere, we need to fix that
2719 * first. See @bugref{8386#c7}. */
2720#if 0
2721 uint8_t const u8Polarity = XAPIC_LVT_GET_POLARITY(uLvt);
2722                    fActive ^= u8Polarity;
2723#endif
2724 if (!fActive)
2725 {
2726 ASMAtomicCmpXchgBool(pfActiveLine, false, true);
2727 break;
2728 }
2729
2730 /* Level-sensitive interrupts are not supported for LINT1. See Intel spec. 10.5.1 "Local Vector Table". */
2731 if (offLvt == XAPIC_OFF_LVT_LINT1)
2732 enmTriggerMode = XAPICTRIGGERMODE_EDGE;
2733 /** @todo figure out what "If the local APIC is not used in conjunction with an I/O APIC and fixed
2734 delivery mode is selected; the Pentium 4, Intel Xeon, and P6 family processors will always
2735 use level-sensitive triggering, regardless if edge-sensitive triggering is selected."
2736 means. */
2737
2738 bool fSendIntr;
2739 if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
2740 {
2741 /* Recognize and send the interrupt only on an edge transition. */
2742 fSendIntr = ASMAtomicCmpXchgBool(pfActiveLine, true, false);
2743 }
2744 else
2745 {
2746 /* For level-triggered interrupts, redundant interrupts are not a problem. */
2747 Assert(enmTriggerMode == XAPICTRIGGERMODE_LEVEL);
2748 ASMAtomicCmpXchgBool(pfActiveLine, true, false);
2749
2750 /* Only when the remote IRR isn't set, set it and send the interrupt. */
2751 if (!(pXApicPage->lvt_lint0.all.u32LvtLint0 & XAPIC_LVT_REMOTE_IRR))
2752 {
2753 Assert(offLvt == XAPIC_OFF_LVT_LINT0);
2754 ASMAtomicOrU32((volatile uint32_t *)&pXApicPage->lvt_lint0.all.u32LvtLint0, XAPIC_LVT_REMOTE_IRR);
2755 fSendIntr = true;
2756 }
2757 else
2758 fSendIntr = false;
2759 }
2760
2761 if (fSendIntr)
2762 {
2763 VMCPUSET DestCpuSet;
2764 VMCPUSET_EMPTY(&DestCpuSet);
2765 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
2766 rcStrict = apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode,
2767 &DestCpuSet, NULL /* pfIntrAccepted */, 0 /* uSrcTag */, rcRZ);
2768 }
2769 break;
2770 }
2771
2772 case XAPICDELIVERYMODE_SMI:
2773 case XAPICDELIVERYMODE_NMI:
2774 {
2775 VMCPUSET DestCpuSet;
2776 VMCPUSET_EMPTY(&DestCpuSet);
2777 VMCPUSET_ADD(&DestCpuSet, pVCpu->idCpu);
2778 uint8_t const uVector = XAPIC_LVT_GET_VECTOR(uLvt);
2779 rcStrict = apicSendIntr(pVCpu->CTX_SUFF(pVM), pVCpu, uVector, enmTriggerMode, enmDeliveryMode, &DestCpuSet,
2780 NULL /* pfIntrAccepted */, 0 /* uSrcTag */, rcRZ);
2781 break;
2782 }
2783
2784 case XAPICDELIVERYMODE_EXTINT:
2785 {
2786 Log2(("APIC%u: apicLocalInterrupt: %s ExtINT through LINT%u\n", pVCpu->idCpu,
2787 u8Level ? "Raising" : "Lowering", u8Pin));
2788 if (u8Level)
2789 apicSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2790 else
2791 apicClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2792 break;
2793 }
2794
2795 /* Reserved/unknown delivery modes: */
2796 case XAPICDELIVERYMODE_LOWEST_PRIO:
2797 case XAPICDELIVERYMODE_STARTUP:
2798 default:
2799 {
2800 rcStrict = VERR_INTERNAL_ERROR_3;
2801 AssertMsgFailed(("APIC%u: LocalInterrupt: Invalid delivery mode %#x (%s) on LINT%d\n", pVCpu->idCpu,
2802 enmDeliveryMode, apicGetDeliveryModeName(enmDeliveryMode), u8Pin));
2803 break;
2804 }
2805 }
2806 }
2807 }
2808 else
2809 {
2810 /* The APIC is hardware disabled. The CPU behaves as though there is no on-chip APIC. */
2811 if (u8Pin == 0)
2812 {
2813 /* LINT0 behaves as an external interrupt pin. */
2814 Log2(("APIC%u: apicLocalInterrupt: APIC hardware-disabled, %s INTR\n", pVCpu->idCpu,
2815 u8Level ? "raising" : "lowering"));
2816 if (u8Level)
2817 apicSetInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2818 else
2819 apicClearInterruptFF(pVCpu, PDMAPICIRQ_EXTINT);
2820 }
2821 else
2822 {
2823 /* LINT1 behaves as NMI. */
2824 Log2(("APIC%u: apicLocalInterrupt: APIC hardware-disabled, raising NMI\n", pVCpu->idCpu));
2825 apicSetInterruptFF(pVCpu, PDMAPICIRQ_NMI);
2826 }
2827 }
2828
2829 return rcStrict;
2830}
2831
2832
2833/**
2834 * Gets the next highest-priority interrupt from the APIC, marking it as an
2835 * "in-service" interrupt.
2836 *
2837 * @returns VBox status code.
2838 * @param pVCpu The cross context virtual CPU structure.
2839 * @param pu8Vector Where to store the vector.
2840 * @param puSrcTag Where to store the interrupt source tag (debugging).
2841 */
2842VMM_INT_DECL(int) APICGetInterrupt(PVMCPU pVCpu, uint8_t *pu8Vector, uint32_t *puSrcTag)
2843{
2844 VMCPU_ASSERT_EMT(pVCpu);
2845 Assert(pu8Vector);
2846
2847 LogFlow(("APIC%u: apicGetInterrupt:\n", pVCpu->idCpu));
2848
2849 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
2850 bool const fApicHwEnabled = APICIsEnabled(pVCpu);
2851 if ( fApicHwEnabled
2852 && pXApicPage->svr.u.fApicSoftwareEnable)
2853 {
2854 int const irrv = apicGetHighestSetBitInReg(&pXApicPage->irr, -1);
2855 if (RT_LIKELY(irrv >= 0))
2856 {
2857 Assert(irrv <= (int)UINT8_MAX);
2858 uint8_t const uVector = irrv;
2859
2860 /*
2861             * A pending interrupt can be masked by the TPR here if the APIC received it while the
2862             * CPU had interrupts disabled and the guest then raised the TPR before re-enabling interrupts.
2863 */
2864 uint8_t const uTpr = pXApicPage->tpr.u8Tpr;
2865 if ( uTpr > 0
2866 && XAPIC_TPR_GET_TP(uVector) <= XAPIC_TPR_GET_TP(uTpr))
2867 {
2868 Log2(("APIC%u: apicGetInterrupt: Interrupt masked. uVector=%#x uTpr=%#x SpuriousVector=%#x\n", pVCpu->idCpu,
2869 uVector, uTpr, pXApicPage->svr.u.u8SpuriousVector));
2870 *pu8Vector = uVector;
2871 *puSrcTag = 0;
2872 STAM_COUNTER_INC(&pVCpu->apic.s.StatMaskedByTpr);
2873 return VERR_APIC_INTR_MASKED_BY_TPR;
2874 }
2875
2876 /*
2877 * The PPR should be up-to-date at this point through apicSetEoi().
2878 * We're on EMT so no parallel updates possible.
2879 * Subject the pending vector to PPR prioritization.
2880 */
2881 uint8_t const uPpr = pXApicPage->ppr.u8Ppr;
2882 if ( !uPpr
2883 || XAPIC_PPR_GET_PP(uVector) > XAPIC_PPR_GET_PP(uPpr))
2884 {
2885 apicClearVectorInReg(&pXApicPage->irr, uVector);
2886 apicSetVectorInReg(&pXApicPage->isr, uVector);
2887 apicUpdatePpr(pVCpu);
2888 apicSignalNextPendingIntr(pVCpu);
2889
2890 /* Retrieve the interrupt source tag associated with this interrupt. */
2891 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
2892 AssertCompile(RT_ELEMENTS(pApicCpu->auSrcTags) > UINT8_MAX);
2893 *puSrcTag = pApicCpu->auSrcTags[uVector];
2894 pApicCpu->auSrcTags[uVector] = 0;
2895
2896 Log2(("APIC%u: apicGetInterrupt: Valid Interrupt. uVector=%#x\n", pVCpu->idCpu, uVector));
2897 *pu8Vector = uVector;
2898 return VINF_SUCCESS;
2899 }
2900 else
2901 {
2902 STAM_COUNTER_INC(&pVCpu->apic.s.StatMaskedByPpr);
2903 Log2(("APIC%u: apicGetInterrupt: Interrupt's priority is not higher than the PPR. uVector=%#x PPR=%#x\n",
2904 pVCpu->idCpu, uVector, uPpr));
2905 }
2906 }
2907 else
2908 Log2(("APIC%u: apicGetInterrupt: No pending bits in IRR\n", pVCpu->idCpu));
2909 }
2910 else
2911 Log2(("APIC%u: apicGetInterrupt: APIC %s disabled\n", pVCpu->idCpu, !fApicHwEnabled ? "hardware" : "software"));
2912
2913 *pu8Vector = 0;
2914 *puSrcTag = 0;
2915 return VERR_APIC_INTR_NOT_PENDING;
2916}
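/*
 * Priority-class example for the TPR test above (hypothetical values): with
 * TPR=0x50 the task-priority class is 5, so a pending vector 0x4a (class 4)
 * is reported as masked by the TPR, while vector 0x61 (class 6) passes on to
 * the PPR check.
 */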
2917
2918
2919/**
2920 * @callback_method_impl{FNIOMMMIOREAD}
2921 */
2922APICBOTHCBDECL(int) apicReadMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
2923{
2924 NOREF(pvUser);
2925 Assert(!(GCPhysAddr & 0xf));
2926 Assert(cb == 4); RT_NOREF_PV(cb);
2927
2928 PAPICDEV pApicDev = PDMINS_2_DATA(pDevIns, PAPICDEV);
2929 PVMCPU pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2930 uint16_t offReg = GCPhysAddr & 0xff0;
2931 uint32_t uValue = 0;
2932
2933 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMmioRead));
2934
2935 int rc = VBOXSTRICTRC_VAL(apicReadRegister(pApicDev, pVCpu, offReg, &uValue));
2936 *(uint32_t *)pv = uValue;
2937
2938 Log2(("APIC%u: apicReadMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2939 return rc;
2940}
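/*
 * E.g. (illustrative access): a dword guest read at physical 0xfee00080 with
 * the default APIC base masks down to offReg=0x80, i.e. XAPIC_OFF_TPR; the
 * asserts above check for aligned, dword-sized accesses.
 */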
2941
2942
2943/**
2944 * @callback_method_impl{FNIOMMMIOWRITE}
2945 */
2946APICBOTHCBDECL(int) apicWriteMmio(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
2947{
2948 NOREF(pvUser);
2949 Assert(!(GCPhysAddr & 0xf));
2950 Assert(cb == 4); RT_NOREF_PV(cb);
2951
2952 PAPICDEV pApicDev = PDMINS_2_DATA(pDevIns, PAPICDEV);
2953 PVMCPU pVCpu = PDMDevHlpGetVMCPU(pDevIns);
2954 uint16_t offReg = GCPhysAddr & 0xff0;
2955 uint32_t uValue = *(uint32_t *)pv;
2956
2957 STAM_COUNTER_INC(&pVCpu->apic.s.CTX_SUFF_Z(StatMmioWrite));
2958
2959 Log2(("APIC%u: apicWriteMmio: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
2960
2961 int rc = VBOXSTRICTRC_VAL(apicWriteRegister(pApicDev, pVCpu, offReg, uValue));
2962 return rc;
2963}
2964
2965
2966/**
2967 * Sets the interrupt pending force-flag and pokes the EMT if required.
2968 *
2969 * @param pVCpu The cross context virtual CPU structure.
2970 * @param enmType The IRQ type.
2971 */
2972VMM_INT_DECL(void) apicSetInterruptFF(PVMCPU pVCpu, PDMAPICIRQ enmType)
2973{
2974 switch (enmType)
2975 {
2976 case PDMAPICIRQ_HARDWARE:
2977 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
2978 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
2979 break;
2980 case PDMAPICIRQ_UPDATE_PENDING: VMCPU_FF_SET(pVCpu, VMCPU_FF_UPDATE_APIC); break;
2981 case PDMAPICIRQ_NMI: VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI); break;
2982 case PDMAPICIRQ_SMI: VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI); break;
2983 case PDMAPICIRQ_EXTINT: VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC); break;
2984 default:
2985 AssertMsgFailed(("enmType=%d\n", enmType));
2986 break;
2987 }
2988
2989 /*
2990 * We need to wake up the target CPU if we're not on EMT.
2991 */
2992#if defined(IN_RING0)
2993 PVM pVM = pVCpu->CTX_SUFF(pVM);
2994 VMCPUID idCpu = pVCpu->idCpu;
2995 if ( enmType != PDMAPICIRQ_HARDWARE
2996 && VMMGetCpuId(pVM) != idCpu)
2997 {
2998 switch (VMCPU_GET_STATE(pVCpu))
2999 {
3000 case VMCPUSTATE_STARTED_EXEC:
3001 GVMMR0SchedPokeNoGVMNoLock(pVM, idCpu);
3002 break;
3003
3004 case VMCPUSTATE_STARTED_HALTED:
3005 GVMMR0SchedWakeUpNoGVMNoLock(pVM, idCpu);
3006 break;
3007
3008 default:
3009 break; /* nothing to do in other states. */
3010 }
3011 }
3012#elif defined(IN_RING3)
3013# ifdef VBOX_WITH_REM
3014 REMR3NotifyInterruptSet(pVCpu->CTX_SUFF(pVM), pVCpu);
3015# endif
3016 if (enmType != PDMAPICIRQ_HARDWARE)
3017 VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM | VMNOTIFYFF_FLAGS_POKE);
3018#endif
3019}
3020
3021
3022/**
3023 * Clears the interrupt pending force-flag.
3024 *
3025 * @param pVCpu The cross context virtual CPU structure.
3026 * @param enmType The IRQ type.
3027 */
3028VMM_INT_DECL(void) apicClearInterruptFF(PVMCPU pVCpu, PDMAPICIRQ enmType)
3029{
3030 /* NMI/SMI can't be cleared. */
3031 switch (enmType)
3032 {
3033 case PDMAPICIRQ_HARDWARE: VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC); break;
3034 case PDMAPICIRQ_EXTINT: VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC); break;
3035 default:
3036 AssertMsgFailed(("enmType=%d\n", enmType));
3037 break;
3038 }
3039
3040#if defined(IN_RING3) && defined(VBOX_WITH_REM)
3041 REMR3NotifyInterruptClear(pVCpu->CTX_SUFF(pVM), pVCpu);
3042#endif
3043}
3044
3045
3046/**
3047 * Posts an interrupt to a target APIC.
3048 *
3049 * This function handles interrupts received from the system bus or
3050 * interrupts generated locally from the LVT or via a self IPI.
3051 *
3052 * Don't use this function to try to deliver ExtINT-style interrupts.
3053 *
3054 * @returns true if the interrupt was accepted, false otherwise.
3055 * @param pVCpu The cross context virtual CPU structure.
3056 * @param uVector The vector of the interrupt to be posted.
3057 * @param enmTriggerMode The trigger mode of the interrupt.
3058 * @param uSrcTag The interrupt source tag (debugging).
3059 *
3060 * @thread Any.
3061 */
3062VMM_INT_DECL(bool) apicPostInterrupt(PVMCPU pVCpu, uint8_t uVector, XAPICTRIGGERMODE enmTriggerMode, uint32_t uSrcTag)
3063{
3064 Assert(pVCpu);
3065 Assert(uVector > XAPIC_ILLEGAL_VECTOR_END);
3066
3067 PVM pVM = pVCpu->CTX_SUFF(pVM);
3068 PCAPIC pApic = VM_TO_APIC(pVM);
3069 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3070 bool fAccepted = true;
3071
3072 STAM_PROFILE_START(&pApicCpu->StatPostIntr, a);
3073
3074 /*
3075 * Only post valid interrupt vectors.
3076 * See Intel spec. 10.5.2 "Valid Interrupt Vectors".
3077 */
3078 if (RT_LIKELY(uVector > XAPIC_ILLEGAL_VECTOR_END))
3079 {
3080 /*
3081 * If the interrupt is already pending in the IRR we can skip the
3082         * potentially expensive operation of poking the guest EMT out of execution.
3083 */
3084 PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
3085 if (!apicTestVectorInReg(&pXApicPage->irr, uVector)) /* PAV */
3086 {
3087 /* Update the interrupt source tag (debugging). */
3088 if (!pApicCpu->auSrcTags[uVector])
3089 pApicCpu->auSrcTags[uVector] = uSrcTag;
3090 else
3091 pApicCpu->auSrcTags[uVector] |= RT_BIT_32(31);
3092
3093 Log2(("APIC: apicPostInterrupt: SrcCpu=%u TargetCpu=%u uVector=%#x\n", VMMGetCpuId(pVM), pVCpu->idCpu, uVector));
3094 if (enmTriggerMode == XAPICTRIGGERMODE_EDGE)
3095 {
3096 if (pApic->fPostedIntrsEnabled)
3097 { /** @todo posted-interrupt call to hardware */ }
3098 else
3099 {
3100 apicSetVectorInPib(pApicCpu->CTX_SUFF(pvApicPib), uVector);
3101 uint32_t const fAlreadySet = apicSetNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
3102 if (!fAlreadySet)
3103 {
3104 Log2(("APIC: apicPostInterrupt: Setting UPDATE_APIC FF for edge-triggered intr. uVector=%#x\n", uVector));
3105 apicSetInterruptFF(pVCpu, PDMAPICIRQ_UPDATE_PENDING);
3106 }
3107 }
3108 }
3109 else
3110 {
3111 /*
3112                 * Level-triggered interrupts require updating the TMR and thus cannot be
3113 * delivered asynchronously.
3114 */
3115 apicSetVectorInPib(&pApicCpu->ApicPibLevel, uVector);
3116 uint32_t const fAlreadySet = apicSetNotificationBitInPib(&pApicCpu->ApicPibLevel);
3117 if (!fAlreadySet)
3118 {
3119 Log2(("APIC: apicPostInterrupt: Setting UPDATE_APIC FF for level-triggered intr. uVector=%#x\n", uVector));
3120 apicSetInterruptFF(pVCpu, PDMAPICIRQ_UPDATE_PENDING);
3121 }
3122 }
3123 }
3124 else
3125 {
3126 Log2(("APIC: apicPostInterrupt: SrcCpu=%u TargetCpu=%u. Vector %#x Already in IRR, skipping\n", VMMGetCpuId(pVM),
3127 pVCpu->idCpu, uVector));
3128 STAM_COUNTER_INC(&pApicCpu->StatPostIntrAlreadyPending);
3129 }
3130 }
3131 else
3132 {
3133 fAccepted = false;
3134 apicSetError(pVCpu, XAPIC_ESR_RECV_ILLEGAL_VECTOR);
3135 }
3136
3137 STAM_PROFILE_STOP(&pApicCpu->StatPostIntr, a);
3138 return fAccepted;
3139}
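/*
 * Rough flow sketch for the edge-triggered path above: posting vector 0x41
 * sets bit 65 of the 256-bit PIB, sets the PIB's notification bit, and raises
 * the UPDATE_PENDING force-flag; the target EMT later folds the PIB into the
 * IRR via APICUpdatePendingInterrupts().
 */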
3140
3141
3142/**
3143 * Starts the APIC timer.
3144 *
3145 * @param pVCpu The cross context virtual CPU structure.
3146 * @param uInitialCount The timer's Initial-Count Register (ICR), must be >
3147 * 0.
3148 * @thread Any.
3149 */
3150VMM_INT_DECL(void) apicStartTimer(PVMCPU pVCpu, uint32_t uInitialCount)
3151{
3152 Assert(pVCpu);
3153 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3154 Assert(TMTimerIsLockOwner(pApicCpu->CTX_SUFF(pTimer)));
3155 Assert(uInitialCount > 0);
3156
3157 PCXAPICPAGE pXApicPage = APICCPU_TO_CXAPICPAGE(pApicCpu);
3158 uint8_t const uTimerShift = apicGetTimerShift(pXApicPage);
3159 uint64_t const cTicksToNext = (uint64_t)uInitialCount << uTimerShift;
3160
3161 Log2(("APIC%u: apicStartTimer: uInitialCount=%#RX32 uTimerShift=%u cTicksToNext=%RU64\n", pVCpu->idCpu, uInitialCount,
3162 uTimerShift, cTicksToNext));
3163
3164 /*
3165 * The assumption here is that the timer doesn't tick during this call
3166     * and thus setting a relative time to fire next is accurate. The advantage,
3167     * however, is updating u64TimerInitial 'atomically' while setting the next
3168 * tick.
3169 */
3170 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
3171 TMTimerSetRelative(pTimer, cTicksToNext, &pApicCpu->u64TimerInitial);
3172 apicHintTimerFreq(pApicCpu, uInitialCount, uTimerShift);
3173}
3174
3175
3176/**
3177 * Stops the APIC timer.
3178 *
3179 * @param pVCpu The cross context virtual CPU structure.
3180 * @thread Any.
3181 */
3182VMM_INT_DECL(void) apicStopTimer(PVMCPU pVCpu)
3183{
3184 Assert(pVCpu);
3185 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3186 Assert(TMTimerIsLockOwner(pApicCpu->CTX_SUFF(pTimer)));
3187
3188 Log2(("APIC%u: apicStopTimer\n", pVCpu->idCpu));
3189
3190 PTMTIMER pTimer = pApicCpu->CTX_SUFF(pTimer);
3191 TMTimerStop(pTimer); /* This will reset the hint, no need to explicitly call TMTimerSetFrequencyHint(). */
3192 pApicCpu->uHintedTimerInitialCount = 0;
3193 pApicCpu->uHintedTimerShift = 0;
3194}
3195
3196
3197/**
3198 * Queues a pending interrupt as in-service.
3199 *
3200 * This function should only be needed without virtualized APIC
3201 * registers. With virtualized APIC registers, it's sufficient to keep
3202 * the interrupts pending in the IRR as the hardware takes care of
3203 * virtual interrupt delivery.
3204 *
3205 * @returns true if the interrupt was queued to in-service interrupts,
3206 * false otherwise.
3207 * @param pVCpu The cross context virtual CPU structure.
3208 * @param u8PendingIntr The pending interrupt to queue as
3209 * in-service.
3210 *
3211 * @remarks This assumes the caller has done the necessary checks and
3212 *          is ready to actually service the interrupt (TPR,
3213 *          interrupt shadow, etc.).
3214 */
3215VMM_INT_DECL(bool) APICQueueInterruptToService(PVMCPU pVCpu, uint8_t u8PendingIntr)
3216{
3217 VMCPU_ASSERT_EMT(pVCpu);
3218
3219 PVM pVM = pVCpu->CTX_SUFF(pVM);
3220 PAPIC pApic = VM_TO_APIC(pVM);
3221 Assert(!pApic->fVirtApicRegsEnabled);
3222 NOREF(pApic);
3223
3224 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
3225 bool const fIsPending = apicTestVectorInReg(&pXApicPage->irr, u8PendingIntr);
3226 if (fIsPending)
3227 {
3228 apicClearVectorInReg(&pXApicPage->irr, u8PendingIntr);
3229 apicSetVectorInReg(&pXApicPage->isr, u8PendingIntr);
3230 apicUpdatePpr(pVCpu);
3231 return true;
3232 }
3233 return false;
3234}
3235
3236
3237/**
3238 * De-queues a pending interrupt from in-service.
3239 *
3240 * This undoes APICQueueInterruptToService() for premature VM-exits before event
3241 * injection.
3242 *
3243 * @param pVCpu The cross context virtual CPU structure.
3244 * @param u8PendingIntr The pending interrupt to de-queue from
3245 * in-service.
3246 */
3247VMM_INT_DECL(void) APICDequeueInterruptFromService(PVMCPU pVCpu, uint8_t u8PendingIntr)
3248{
3249 VMCPU_ASSERT_EMT(pVCpu);
3250
3251 PVM pVM = pVCpu->CTX_SUFF(pVM);
3252 PAPIC pApic = VM_TO_APIC(pVM);
3253 Assert(!pApic->fVirtApicRegsEnabled);
3254 NOREF(pApic);
3255
3256 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
3257 bool const fInService = apicTestVectorInReg(&pXApicPage->isr, u8PendingIntr);
3258 if (fInService)
3259 {
3260 apicClearVectorInReg(&pXApicPage->isr, u8PendingIntr);
3261 apicSetVectorInReg(&pXApicPage->irr, u8PendingIntr);
3262 apicUpdatePpr(pVCpu);
3263 }
3264}
3265
3266
3267/**
3268 * Updates pending interrupts from the pending-interrupt bitmaps to the IRR.
3269 *
3270 * @param pVCpu The cross context virtual CPU structure.
3271 *
3272 * @note NEM/win is ASSUMING that an up-to-date TPR is not required here.
3273 */
3274VMMDECL(void) APICUpdatePendingInterrupts(PVMCPU pVCpu)
3275{
3276 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
3277
3278 PAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
3279 PXAPICPAGE pXApicPage = VMCPU_TO_XAPICPAGE(pVCpu);
3280 bool fHasPendingIntrs = false;
3281
3282 Log3(("APIC%u: APICUpdatePendingInterrupts:\n", pVCpu->idCpu));
3283 STAM_PROFILE_START(&pApicCpu->StatUpdatePendingIntrs, a);
3284
3285 /* Update edge-triggered pending interrupts. */
3286 PAPICPIB pPib = (PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib);
3287 for (;;)
3288 {
3289 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)pApicCpu->CTX_SUFF(pvApicPib));
3290 if (!fAlreadySet)
3291 break;
3292
3293 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->au64VectorBitmap));
3294 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->au64VectorBitmap); idxPib++, idxReg += 2)
3295 {
3296 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->au64VectorBitmap[idxPib], 0);
3297 if (u64Fragment)
3298 {
3299 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
3300 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
3301
3302 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
3303 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
3304
3305 pXApicPage->tmr.u[idxReg].u32Reg &= ~u32FragmentLo;
3306 pXApicPage->tmr.u[idxReg + 1].u32Reg &= ~u32FragmentHi;
3307 fHasPendingIntrs = true;
3308 }
3309 }
3310 }
3311
3312 /* Update level-triggered pending interrupts. */
3313 pPib = (PAPICPIB)&pApicCpu->ApicPibLevel;
3314 for (;;)
3315 {
3316 uint32_t const fAlreadySet = apicClearNotificationBitInPib((PAPICPIB)&pApicCpu->ApicPibLevel);
3317 if (!fAlreadySet)
3318 break;
3319
3320 AssertCompile(RT_ELEMENTS(pXApicPage->irr.u) == 2 * RT_ELEMENTS(pPib->au64VectorBitmap));
3321 for (size_t idxPib = 0, idxReg = 0; idxPib < RT_ELEMENTS(pPib->au64VectorBitmap); idxPib++, idxReg += 2)
3322 {
3323 uint64_t const u64Fragment = ASMAtomicXchgU64(&pPib->au64VectorBitmap[idxPib], 0);
3324 if (u64Fragment)
3325 {
3326 uint32_t const u32FragmentLo = RT_LO_U32(u64Fragment);
3327 uint32_t const u32FragmentHi = RT_HI_U32(u64Fragment);
3328
3329 pXApicPage->irr.u[idxReg].u32Reg |= u32FragmentLo;
3330 pXApicPage->irr.u[idxReg + 1].u32Reg |= u32FragmentHi;
3331
3332 pXApicPage->tmr.u[idxReg].u32Reg |= u32FragmentLo;
3333 pXApicPage->tmr.u[idxReg + 1].u32Reg |= u32FragmentHi;
3334 fHasPendingIntrs = true;
3335 }
3336 }
3337 }
3338
3339 STAM_PROFILE_STOP(&pApicCpu->StatUpdatePendingIntrs, a);
3340 Log3(("APIC%u: APICUpdatePendingInterrupts: fHasPendingIntrs=%RTbool\n", pVCpu->idCpu, fHasPendingIntrs));
3341
3342 if ( fHasPendingIntrs
3343 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC))
3344 apicSignalNextPendingIntr(pVCpu);
3345}
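/*
 * Mapping example (illustrative): pending vector 0x41 occupies bit 1 of
 * au64VectorBitmap[1]; the loops above split that 64-bit fragment so it lands
 * in bit 1 of irr.u[2], the 32-bit IRR register covering vectors 64..95.
 */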
3346
3347
3348/**
3349 * Gets the highest priority pending interrupt.
3350 *
3351 * @returns true if any interrupt is pending, false otherwise.
3352 * @param pVCpu The cross context virtual CPU structure.
3353 * @param pu8PendingIntr Where to store the interrupt vector if the
3354 * interrupt is pending.
3355 */
3356VMM_INT_DECL(bool) APICGetHighestPendingInterrupt(PVMCPU pVCpu, uint8_t *pu8PendingIntr)
3357{
3358 VMCPU_ASSERT_EMT(pVCpu);
3359 return apicGetHighestPendingInterrupt(pVCpu, pu8PendingIntr);
3360}
3361
3362
3363/**
3364 * Posts an interrupt to a target APIC, Hyper-V interface.
3365 *
3367 * @param pVCpu The cross context virtual CPU structure.
3368 * @param uVector The vector of the interrupt to be posted.
3369 * @param fAutoEoi Whether this interrupt has automatic EOI
3370 * treatment.
3371 * @param enmTriggerMode The trigger mode of the interrupt.
3372 *
3373 * @thread Any.
3374 */
3375VMM_INT_DECL(void) APICHvSendInterrupt(PVMCPU pVCpu, uint8_t uVector, bool fAutoEoi, XAPICTRIGGERMODE enmTriggerMode)
3376{
3377 Assert(pVCpu);
3378 Assert(!fAutoEoi); /** @todo AutoEOI. */
3379 RT_NOREF(fAutoEoi);
3380 apicPostInterrupt(pVCpu, uVector, enmTriggerMode, 0 /* uSrcTag */);
3381}
3382
3383
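/*
 * Hypothetical usage sketch (not part of the build): posting a fixed,
 * edge-triggered interrupt without automatic EOI, as a Hyper-V style
 * hypercall handler might.  The wrapper and the vector 0x40 are arbitrary
 * choices for illustration.
 */
#if 0
static void demoHvPostInterrupt(PVMCPU pVCpu)
{
    APICHvSendInterrupt(pVCpu, 0x40 /* uVector */, false /* fAutoEoi */, XAPICTRIGGERMODE_EDGE);
}
#endif
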
/**
 * Sets the Task Priority Register (TPR), Hyper-V interface.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uTpr    The TPR value to set.
 *
 * @remarks Validates like in x2APIC mode.
 */
VMM_INT_DECL(VBOXSTRICTRC) APICHvSetTpr(PVMCPU pVCpu, uint8_t uTpr)
{
    Assert(pVCpu);
    VMCPU_ASSERT_EMT(pVCpu);
    return apicSetTprEx(pVCpu, uTpr, true /* fForceX2ApicBehaviour */);
}

/**
 * Gets the Task Priority Register (TPR), Hyper-V interface.
 *
 * @returns The TPR value.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(uint8_t) APICHvGetTpr(PVMCPU pVCpu)
{
    Assert(pVCpu);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * The APIC could be operating in xAPIC mode, so we cannot use the apicReadMsr()
     * interface: it validates the APIC mode and raises #GP(0) when not in x2APIC mode.
     * We could use the apicReadRegister() MMIO interface instead, but that would
     * require fetching the PDMDEVINS pointer, so just read the APIC page directly.
     */
    PCXAPICPAGE pXApicPage = VMCPU_TO_CXAPICPAGE(pVCpu);
    return apicReadRaw32(pXApicPage, XAPIC_OFF_TPR);
}

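/*
 * Worked sketch (not part of the build): the TPR's upper nibble is the
 * task-priority class, and a pending vector's class is likewise its upper
 * nibble.  Ignoring the ISR/PPR interaction, a vector is only deliverable
 * when its class exceeds the TPR class: with TPR=0x20, vector 0x41 (class 4)
 * is deliverable while vector 0x21 (class 2) is held back.  The helper below
 * is hypothetical.
 */
#if 0
static bool demoIsVectorAboveTpr(uint8_t uTpr, uint8_t uVector)
{
    return (uVector >> 4) > (uTpr >> 4);   /* compare priority classes (upper nibbles) */
}
#endif
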
/**
 * Sets the Interrupt Command Register (ICR), Hyper-V interface.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uIcr    The ICR value to set.
 */
VMM_INT_DECL(VBOXSTRICTRC) APICHvSetIcr(PVMCPU pVCpu, uint64_t uIcr)
{
    Assert(pVCpu);
    VMCPU_ASSERT_EMT(pVCpu);
    return apicSetIcr(pVCpu, uIcr, VINF_CPUM_R3_MSR_WRITE);
}

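/*
 * Illustrative sketch (not part of the build): composing a 64-bit ICR value
 * in the x2APIC layout, i.e. the destination APIC ID in bits 63:32 and the
 * vector/delivery-mode/trigger-mode fields in the low dword.  The helper is
 * hypothetical; the real code uses the XAPIC_ICR_* definitions for these
 * fields.
 */
#if 0
static uint64_t demoMakeFixedEdgeIcr(uint32_t idDestApic, uint8_t uVector)
{
    uint64_t uIcr = uVector;              /* bits 7:0: vector. */
    /* Bits 10:8 delivery mode = 000b (fixed) and bit 15 trigger mode = 0 (edge) are left zero. */
    uIcr |= (uint64_t)idDestApic << 32;   /* bits 63:32: destination APIC ID. */
    return uIcr;                          /* e.g. pass to APICHvSetIcr(). */
}
#endif
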
/**
 * Gets the Interrupt Command Register (ICR), Hyper-V interface.
 *
 * @returns The ICR value.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(uint64_t) APICHvGetIcr(PVMCPU pVCpu)
{
    Assert(pVCpu);
    VMCPU_ASSERT_EMT(pVCpu);
    return apicGetIcrNoCheck(pVCpu);
}

/**
 * Sets the End-Of-Interrupt (EOI) register, Hyper-V interface.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uEoi    The EOI value.
 */
VMM_INT_DECL(VBOXSTRICTRC) APICHvSetEoi(PVMCPU pVCpu, uint32_t uEoi)
{
    Assert(pVCpu);
    VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
    return apicSetEoi(pVCpu, uEoi, VINF_CPUM_R3_MSR_WRITE, true /* fForceX2ApicBehaviour */);
}

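/*
 * Usage sketch (not part of the build): since this interface forces x2APIC
 * behaviour, and the x2APIC EOI register architecturally only accepts the
 * value 0 (anything else raises #GP(0)), a caller would normally just write
 * zero.  The wrapper below is hypothetical.
 */
#if 0
static VBOXSTRICTRC demoHvEoi(PVMCPU pVCpu)
{
    return APICHvSetEoi(pVCpu, 0 /* uEoi */);   /* x2APIC semantics: must write 0 */
}
#endif
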
/**
 * Gets the APIC page pointers for the specified VCPU.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pHCPhys Where to store the host-context physical address.
 * @param   pR0Ptr  Where to store the ring-0 address.
 * @param   pR3Ptr  Where to store the ring-3 address (optional).
 * @param   pRCPtr  Where to store the raw-mode context address (optional).
 */
VMM_INT_DECL(int) APICGetApicPageForCpu(PVMCPU pVCpu, PRTHCPHYS pHCPhys, PRTR0PTR pR0Ptr, PRTR3PTR pR3Ptr, PRTRCPTR pRCPtr)
{
    AssertReturn(pVCpu,   VERR_INVALID_PARAMETER);
    AssertReturn(pHCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pR0Ptr,  VERR_INVALID_PARAMETER);

    Assert(PDMHasApic(pVCpu->CTX_SUFF(pVM)));

    PCAPICCPU pApicCpu = VMCPU_TO_APICCPU(pVCpu);
    *pHCPhys = pApicCpu->HCPhysApicPage;
    *pR0Ptr  = pApicCpu->pvApicPageR0;
    if (pR3Ptr)
        *pR3Ptr = pApicCpu->pvApicPageR3;
    if (pRCPtr)
        *pRCPtr = pApicCpu->pvApicPageRC;
    return VINF_SUCCESS;
}

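/*
 * Hypothetical usage sketch (not part of the build): fetching the APIC page
 * addresses for VCPU 0, e.g. when setting up hardware-assisted APIC
 * virtualization.  The wrapper, error handling and log statement are
 * illustrative only.
 */
#if 0
static int demoGetApicPage(PVM pVM)
{
    RTHCPHYS HCPhysApicPage = NIL_RTHCPHYS;
    RTR0PTR  R0PtrApicPage  = NIL_RTR0PTR;
    RTR3PTR  R3PtrApicPage  = NIL_RTR3PTR;
    int rc = APICGetApicPageForCpu(&pVM->aCpus[0], &HCPhysApicPage, &R0PtrApicPage,
                                   &R3PtrApicPage, NULL /* pRCPtr */);
    AssertRCReturn(rc, rc);
    Log3(("APIC page for VCPU0 at %RHp\n", HCPhysApicPage));
    return VINF_SUCCESS;
}
#endif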