/* $Id: MsiCommon.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
/** @file
 * MSI support routines
 *
 * @todo Straighten up this file!!
 */

/*
 * Copyright (C) 2010-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#define LOG_GROUP LOG_GROUP_DEV_PCI
#define PDMPCIDEV_INCLUDE_PRIVATE /* Hack to get pdmpcidevint.h included at the right point. */
#include <VBox/pci.h>
#include <VBox/msi.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/log.h>

#include "MsiCommon.h"
#include "PciInline.h"


DECLINLINE(uint16_t) msiGetMessageControl(PPDMPCIDEV pDev)
{
    uint32_t idxMessageControl = pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_CONTROL;
#ifdef IN_RING3
    if (pciDevIsPassthrough(pDev))
        return pDev->Int.s.pfnConfigRead(pDev->Int.s.CTX_SUFF(pDevIns), pDev, idxMessageControl, 2);
#endif
    return PCIDevGetWord(pDev, idxMessageControl);
}

DECLINLINE(bool) msiIs64Bit(PPDMPCIDEV pDev)
{
    return pciDevIsMsi64Capable(pDev);
}

/** @todo r=klaus This design assumes that the config space cache is always
 * up to date, which is a wrong assumption for the "emulate passthrough" case
 * where only the callbacks give the correct data. */
DECLINLINE(uint32_t *) msiGetMaskBits(PPDMPCIDEV pDev)
{
    uint8_t iOff = msiIs64Bit(pDev) ? VBOX_MSI_CAP_MASK_BITS_64 : VBOX_MSI_CAP_MASK_BITS_32;
    /* devices may have no masked/pending support */
    if (iOff >= pDev->Int.s.u8MsiCapSize)
        return NULL;
    iOff += pDev->Int.s.u8MsiCapOffset;
    return (uint32_t*)(pDev->abConfig + iOff);
}

/** @todo r=klaus This design assumes that the config space cache is always
 * up to date, which is a wrong assumption for the "emulate passthrough" case
 * where only the callbacks give the correct data. */
DECLINLINE(uint32_t*) msiGetPendingBits(PPDMPCIDEV pDev)
{
    uint8_t iOff = msiIs64Bit(pDev) ? VBOX_MSI_CAP_PENDING_BITS_64 : VBOX_MSI_CAP_PENDING_BITS_32;
    /* devices may have no masked/pending support */
    if (iOff >= pDev->Int.s.u8MsiCapSize)
        return NULL;
    iOff += pDev->Int.s.u8MsiCapOffset;
    return (uint32_t*)(pDev->abConfig + iOff);
}

DECLINLINE(bool) msiIsEnabled(PPDMPCIDEV pDev)
{
    return (msiGetMessageControl(pDev) & VBOX_PCI_MSI_FLAGS_ENABLE) != 0;
}

DECLINLINE(uint8_t) msiGetMme(PPDMPCIDEV pDev)
{
    return (msiGetMessageControl(pDev) & VBOX_PCI_MSI_FLAGS_QSIZE) >> 4;
}

DECLINLINE(RTGCPHYS) msiGetMsiAddress(PPDMPCIDEV pDev)
{
    if (msiIs64Bit(pDev))
    {
        uint32_t lo = PCIDevGetDWord(pDev, pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_ADDRESS_LO);
        uint32_t hi = PCIDevGetDWord(pDev, pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_ADDRESS_HI);
        return RT_MAKE_U64(lo, hi);
    }
    return PCIDevGetDWord(pDev, pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_ADDRESS_32);
}

DECLINLINE(uint32_t) msiGetMsiData(PPDMPCIDEV pDev, int32_t iVector)
{
    int16_t iOff = msiIs64Bit(pDev) ? VBOX_MSI_CAP_MESSAGE_DATA_64 : VBOX_MSI_CAP_MESSAGE_DATA_32;
    uint16_t lo = PCIDevGetWord(pDev, pDev->Int.s.u8MsiCapOffset + iOff);

    // vector encoding into lower bits of message data
    uint8_t bits = msiGetMme(pDev);
    uint16_t uMask = ((1 << bits) - 1);
    lo &= ~uMask;
    lo |= iVector & uMask;

    return RT_MAKE_U32(lo, 0);
}
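
/*
 * Worked example for the vector encoding above: with a Multiple Message
 * Enable (MME) value of 3 the guest has enabled 2^3 = 8 vectors, so the
 * mask is (1 << 3) - 1 = 0x7.  If the guest programmed message data 0x0040
 * and the device signals vector 5, the data word handed to the interrupt
 * controller is (0x0040 & ~0x7) | 5 = 0x0045.
 */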

#ifdef IN_RING3

DECLINLINE(bool) msiR3BitJustCleared(uint32_t uOldValue, uint32_t uNewValue, uint32_t uMask)
{
    return !!(uOldValue & uMask) && !(uNewValue & uMask);
}

DECLINLINE(bool) msiR3BitJustSet(uint32_t uOldValue, uint32_t uNewValue, uint32_t uMask)
{
    return !(uOldValue & uMask) && !!(uNewValue & uMask);
}

/**
 * PCI config space accessors for MSI registers.
 */
void MsiR3PciConfigWrite(PPDMDEVINS pDevIns, PCPDMPCIHLP pPciHlp, PPDMPCIDEV pDev,
                         uint32_t u32Address, uint32_t val, unsigned len)
{
    int32_t iOff = u32Address - pDev->Int.s.u8MsiCapOffset;
    Assert(iOff >= 0 && (pciDevIsMsiCapable(pDev) && iOff < pDev->Int.s.u8MsiCapSize));

    Log2(("MsiR3PciConfigWrite: %d <- %x (%d)\n", iOff, val, len));

    uint32_t uAddr = u32Address;
    bool f64Bit = msiIs64Bit(pDev);

    for (uint32_t i = 0; i < len; i++)
    {
        uint32_t reg = i + iOff;
        uint8_t u8Val = (uint8_t)val;
        switch (reg)
        {
            case 0: /* Capability ID, ro */
            case 1: /* Next pointer, ro */
                break;
            case VBOX_MSI_CAP_MESSAGE_CONTROL:
                /* don't change read-only bits: 1-3,7 */
                u8Val &= UINT8_C(~0x8e);
                pDev->abConfig[uAddr] = u8Val | (pDev->abConfig[uAddr] & UINT8_C(0x8e));
                break;
            case VBOX_MSI_CAP_MESSAGE_CONTROL + 1:
                /* don't change read-only bit 8, and reserved 9-15 */
                break;
            default:
                if (pDev->abConfig[uAddr] != u8Val)
                {
                    int32_t maskUpdated = -1;

                    /* If the guest is unmasking a vector that has a pending
                       message, that message must be delivered now. */
                    if (   !f64Bit
                        && (reg >= VBOX_MSI_CAP_MASK_BITS_32)
                        && (reg < VBOX_MSI_CAP_MASK_BITS_32 + 4)
                       )
                    {
                        maskUpdated = reg - VBOX_MSI_CAP_MASK_BITS_32;
                    }
                    if (   f64Bit
                        && (reg >= VBOX_MSI_CAP_MASK_BITS_64)
                        && (reg < VBOX_MSI_CAP_MASK_BITS_64 + 4)
                       )
                    {
                        maskUpdated = reg - VBOX_MSI_CAP_MASK_BITS_64;
                    }

                    if (maskUpdated != -1 && msiIsEnabled(pDev))
                    {
                        uint32_t* puPending = msiGetPendingBits(pDev);
                        for (int iBitNum = 0; iBitNum < 8; iBitNum++)
                        {
                            int32_t iBit = 1 << iBitNum;
                            uint32_t uVector = maskUpdated*8 + iBitNum;

                            if (msiR3BitJustCleared(pDev->abConfig[uAddr], u8Val, iBit))
                            {
                                Log(("msi: mask updated bit %d@%x (%d)\n", iBitNum, uAddr, maskUpdated));

                                /* Clear the cached mask bit first so the vector is no
                                   longer seen as masked when we deliver below. */
                                pDev->abConfig[uAddr] &= ~iBit;
                                if ((*puPending & (1 << uVector)) != 0)
                                {
                                    Log(("msi: notify earlier masked pending vector: %d\n", uVector));
                                    MsiNotify(pDevIns, pPciHlp, pDev, uVector, PDM_IRQ_LEVEL_HIGH, 0 /*uTagSrc*/);
                                }
                            }
                            if (msiR3BitJustSet(pDev->abConfig[uAddr], u8Val, iBit))
                            {
                                Log(("msi: mask vector: %d\n", uVector));
                            }
                        }
                    }

                    pDev->abConfig[uAddr] = u8Val;
                }
        }
        uAddr++;
        val >>= 8;
    }
}
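
/*
 * Example of the unmask path above: assume a 32-bit MSI capability where
 * vector 2 is masked (mask register bit 2 set) and marked pending (pending
 * register bit 2 set).  When the guest writes the first mask byte with bit 2
 * cleared, maskUpdated identifies the changed mask byte, msiR3BitJustCleared()
 * fires for that bit, the cached mask bit is cleared and MsiNotify() delivers
 * the previously pending vector 2 right away.
 */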

/**
 * Initializes MSI support for the given PCI device.
 */
int MsiR3Init(PPDMPCIDEV pDev, PPDMMSIREG pMsiReg)
{
    if (pMsiReg->cMsiVectors == 0)
        return VINF_SUCCESS;

    /* XXX: done in pcirawAnalyzePciCaps() */
    if (pciDevIsPassthrough(pDev))
        return VINF_SUCCESS;

    uint16_t cVectors = pMsiReg->cMsiVectors;
    uint8_t iCapOffset = pMsiReg->iMsiCapOffset;
    uint8_t iNextOffset = pMsiReg->iMsiNextOffset;
    bool f64bit = pMsiReg->fMsi64bit;
    bool fNoMasking = pMsiReg->fMsiNoMasking;
    uint16_t iFlags = 0;

    Assert(iCapOffset != 0 && iCapOffset < 0xff && iNextOffset < 0xff);

    if (!fNoMasking)
    {
        int iMmc;

        /* Compute multiple-message capable bitfield */
        for (iMmc = 0; iMmc < 6; iMmc++)
        {
            if ((1 << iMmc) >= cVectors)
                break;
        }

        if ((cVectors > VBOX_MSI_MAX_ENTRIES) || (1 << iMmc) < cVectors)
            return VERR_TOO_MUCH_DATA;

        /* We support per-vector masking */
        iFlags |= VBOX_PCI_MSI_FLAGS_MASKBIT;
        /* How many vectors we're capable of */
        iFlags |= iMmc;
    }

    if (f64bit)
        iFlags |= VBOX_PCI_MSI_FLAGS_64BIT;

    pDev->Int.s.u8MsiCapOffset = iCapOffset;
    pDev->Int.s.u8MsiCapSize = f64bit ? VBOX_MSI_CAP_SIZE_64 : VBOX_MSI_CAP_SIZE_32;

    PCIDevSetByte(pDev, iCapOffset + 0, VBOX_PCI_CAP_ID_MSI);
    PCIDevSetByte(pDev, iCapOffset + 1, iNextOffset); /* next */
    PCIDevSetWord(pDev, iCapOffset + VBOX_MSI_CAP_MESSAGE_CONTROL, iFlags);

    if (!fNoMasking)
    {
        *msiGetMaskBits(pDev) = 0;
        *msiGetPendingBits(pDev) = 0;
    }

    pciDevSetMsiCapable(pDev);
    if (f64bit)
        pciDevSetMsi64Capable(pDev);

    return VINF_SUCCESS;
}
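
/*
 * Illustrative sketch (not part of the original file): one way a caller could
 * fill PDMMSIREG before handing it to MsiR3Init().  The capability offset and
 * vector count are made-up example values; the block is kept under '#if 0' so
 * it is never compiled in.
 */
#if 0
static int devSampleRegisterMsi(PPDMPCIDEV pPciDev)
{
    PDMMSIREG MsiReg;
    RT_ZERO(MsiReg);
    MsiReg.cMsiVectors    = 1;      /* one MSI vector */
    MsiReg.iMsiCapOffset  = 0x80;   /* hypothetical config space offset for the capability */
    MsiReg.iMsiNextOffset = 0x00;   /* last capability in the list */
    MsiReg.fMsi64bit      = true;   /* advertise 64-bit message addresses */
    MsiReg.fMsiNoMasking  = false;  /* expose per-vector masking/pending bits */
    return MsiR3Init(pPciDev, &MsiReg);
}
#endif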

#endif /* IN_RING3 */


/**
 * Checks if MSI is enabled for the given PCI device.
 *
 * (Must use MsiNotify() for notifications when true.)
 */
bool MsiIsEnabled(PPDMPCIDEV pDev)
{
    return pciDevIsMsiCapable(pDev) && msiIsEnabled(pDev);
}

/**
 * Device notification (aka interrupt).
 */
void MsiNotify(PPDMDEVINS pDevIns, PCPDMPCIHLP pPciHlp, PPDMPCIDEV pDev, int iVector, int iLevel, uint32_t uTagSrc)
{
    AssertMsg(msiIsEnabled(pDev), ("Must be enabled to use that"));

    uint32_t uMask;
    uint32_t *puPending = msiGetPendingBits(pDev);
    if (puPending)
    {
        uint32_t *puMask = msiGetMaskBits(pDev);
        AssertPtr(puMask);
        uMask = *puMask;
        LogFlow(("MsiNotify: %d pending=%x mask=%x\n", iVector, *puPending, uMask));
    }
    else
    {
        uMask = 0;
        LogFlow(("MsiNotify: %d\n", iVector));
    }

    /* We only trigger MSI on level up */
    if ((iLevel & PDM_IRQ_LEVEL_HIGH) == 0)
    {
        /** @todo maybe clear pending interrupts on level down? */
#if 0
        if (puPending)
        {
            *puPending &= ~(1<<iVector);
            LogFlow(("msi: clear pending %d, now %x\n", iVector, *puPending));
        }
#endif
        return;
    }

    if ((uMask & (1<<iVector)) != 0)
    {
        *puPending |= (1<<iVector);
        LogFlow(("msi: %d is masked, mark pending, now %x\n", iVector, *puPending));
        return;
    }

    RTGCPHYS GCAddr = msiGetMsiAddress(pDev);
    uint32_t u32Value = msiGetMsiData(pDev, iVector);

    if (puPending)
        *puPending &= ~(1<<iVector);

    Assert(pPciHlp->pfnIoApicSendMsi != NULL);
    pPciHlp->pfnIoApicSendMsi(pDevIns, GCAddr, u32Value, uTagSrc);
}
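
/*
 * Illustrative sketch (not part of the original file): how a PCI bus
 * implementation might route a device interrupt to the MSI path, following
 * the MsiIsEnabled() contract above.  The helper name and the choice of
 * vector 0 are assumptions for illustration; the block is kept under '#if 0'
 * so it is never compiled in.
 */
#if 0
static void pciBusSketchSetIrq(PPDMDEVINS pDevIns, PCPDMPCIHLP pPciHlp, PPDMPCIDEV pPciDev,
                               int iLevel, uint32_t uTagSrc)
{
    if (MsiIsEnabled(pPciDev))
    {
        /* With MSI enabled the legacy INTx pin is not used; deliver the first vector here. */
        MsiNotify(pDevIns, pPciHlp, pPciDev, 0 /*iVector*/, iLevel, uTagSrc);
        return;
    }
    /* Otherwise the bus would assert or deassert the legacy INTx line. */
}
#endif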