VirtualBox

source: vbox/trunk/src/VBox/Devices/Bus/MsiCommon.cpp @ 63009

Last change on this file since 63009 was 62885, checked in by vboxsync, 8 years ago

Devices: gcc warnings

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 10.7 KB
/* $Id: MsiCommon.cpp 62885 2016-08-02 16:21:40Z vboxsync $ */
/** @file
 * MSI support routines
 */

/*
 * Copyright (C) 2010-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */
#define LOG_GROUP LOG_GROUP_DEV_PCI
/* Hack to get PCIDEVICEINT declared at the right point - include "PCIInternal.h". */
#define PCI_INCLUDE_PRIVATE
#include <VBox/pci.h>
#include <VBox/msi.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/log.h>

#include "MsiCommon.h"

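/**
 * Reads the 16-bit Message Control word of the MSI capability.
 *
 * For passthrough devices (ring-3 only) the value is fetched through the
 * device's own config-read callback so that real hardware state is returned;
 * otherwise it is taken from the emulated config space.
 */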
DECLINLINE(uint16_t) msiGetMessageControl(PPCIDEVICE pDev)
{
    uint32_t idxMessageControl = pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_CONTROL;
#ifdef IN_RING3
    if (pciDevIsPassthrough(pDev)) {
        return pDev->Int.s.pfnConfigRead(pDev, idxMessageControl, 2);
    }
#endif
    return PCIDevGetWord(pDev, idxMessageControl);
}

DECLINLINE(bool) msiIs64Bit(PPCIDEVICE pDev)
{
    return pciDevIsMsi64Capable(pDev);
}

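/**
 * Returns a pointer to the per-vector mask bits in config space, or NULL if
 * the capability is too small to contain them (e.g. some passthrough devices).
 */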
DECLINLINE(uint32_t*) msiGetMaskBits(PPCIDEVICE pDev)
{
    uint8_t iOff = msiIs64Bit(pDev) ? VBOX_MSI_CAP_MASK_BITS_64 : VBOX_MSI_CAP_MASK_BITS_32;
    /* passthrough devices may have no masked/pending support */
    if (iOff >= pDev->Int.s.u8MsiCapSize)
        return NULL;
    iOff += pDev->Int.s.u8MsiCapOffset;
    return (uint32_t*)(pDev->config + iOff);
}

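/**
 * Returns a pointer to the per-vector pending bits in config space, or NULL
 * if the capability does not include them.
 */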
DECLINLINE(uint32_t*) msiGetPendingBits(PPCIDEVICE pDev)
{
    uint8_t iOff = msiIs64Bit(pDev) ? VBOX_MSI_CAP_PENDING_BITS_64 : VBOX_MSI_CAP_PENDING_BITS_32;
    /* passthrough devices may have no masked/pending support */
    if (iOff >= pDev->Int.s.u8MsiCapSize)
        return NULL;
    iOff += pDev->Int.s.u8MsiCapOffset;
    return (uint32_t*)(pDev->config + iOff);
}

DECLINLINE(bool) msiIsEnabled(PPCIDEVICE pDev)
{
    return (msiGetMessageControl(pDev) & VBOX_PCI_MSI_FLAGS_ENABLE) != 0;
}

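/**
 * Returns the Multiple Message Enable (MME) field, bits 4-6 of Message
 * Control: the guest-programmed log2 of the number of vectors it uses.
 */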
DECLINLINE(uint8_t) msiGetMme(PPCIDEVICE pDev)
{
    return (msiGetMessageControl(pDev) & VBOX_PCI_MSI_FLAGS_QSIZE) >> 4;
}

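/**
 * Reads the guest-programmed MSI message address; for 64-bit capable devices
 * the low and high dwords are combined into a single guest-physical address.
 */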
DECLINLINE(RTGCPHYS) msiGetMsiAddress(PPCIDEVICE pDev)
{
    if (msiIs64Bit(pDev))
    {
        uint32_t lo = PCIDevGetDWord(pDev, pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_ADDRESS_LO);
        uint32_t hi = PCIDevGetDWord(pDev, pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_ADDRESS_HI);
        return RT_MAKE_U64(lo, hi);
    }
    else
    {
        return PCIDevGetDWord(pDev, pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_ADDRESS_32);
    }
}

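/**
 * Builds the 32-bit message data for a vector: the low MME bits of the
 * guest-programmed data word are replaced by the vector number, as multi-
 * message MSI requires.
 *
 * Illustrative example (hypothetical values): with 8 vectors granted
 * (msiGetMme() == 3) and a programmed data word of 0x4060, vector 5 yields
 * (0x4060 & ~0x7) | 5 = 0x4065.
 */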
DECLINLINE(uint32_t) msiGetMsiData(PPCIDEVICE pDev, int32_t iVector)
{
    int16_t iOff = msiIs64Bit(pDev) ? VBOX_MSI_CAP_MESSAGE_DATA_64 : VBOX_MSI_CAP_MESSAGE_DATA_32;
    uint16_t lo = PCIDevGetWord(pDev, pDev->Int.s.u8MsiCapOffset + iOff);

    // vector encoding into lower bits of message data
    uint8_t bits = msiGetMme(pDev);
    uint16_t uMask = ((1 << bits) - 1);
    lo &= ~uMask;
    lo |= iVector & uMask;

    return RT_MAKE_U32(lo, 0);
}

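/**
 * Edge helpers for mask register updates: detect a bit going 1 -> 0
 * (msiBitJustCleared) or 0 -> 1 (msiBitJustSet) between the old config byte
 * and the newly written value.
 */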
DECLINLINE(bool) msiBitJustCleared(uint32_t uOldValue,
                                   uint32_t uNewValue,
                                   uint32_t uMask)
{
    return (!!(uOldValue & uMask) && !(uNewValue & uMask));
}

DECLINLINE(bool) msiBitJustSet(uint32_t uOldValue,
                               uint32_t uNewValue,
                               uint32_t uMask)
{
    return (!(uOldValue & uMask) && !!(uNewValue & uMask));
}

#ifdef IN_RING3
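/**
 * Handles guest writes to the MSI capability region, one byte at a time.
 *
 * Read-only fields (capability ID, next pointer, the read-only bits of
 * Message Control) are preserved.  When a write to the mask register unmasks
 * a vector whose pending bit is set, the stored message is delivered
 * immediately via MsiNotify().
 */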
void MsiPciConfigWrite(PPDMDEVINS pDevIns, PCPDMPCIHLP pPciHlp, PPCIDEVICE pDev,
                       uint32_t u32Address, uint32_t val, unsigned len)
{
    int32_t iOff = u32Address - pDev->Int.s.u8MsiCapOffset;
    Assert(iOff >= 0 && (pciDevIsMsiCapable(pDev) && iOff < pDev->Int.s.u8MsiCapSize));

    Log2(("MsiPciConfigWrite: %d <- %x (%d)\n", iOff, val, len));

    uint32_t uAddr = u32Address;
    bool f64Bit = msiIs64Bit(pDev);

    for (uint32_t i = 0; i < len; i++)
    {
        uint32_t reg = i + iOff;
        uint8_t u8Val = (uint8_t)val;
        switch (reg)
        {
            case 0: /* Capability ID, ro */
            case 1: /* Next pointer, ro */
                break;
            case VBOX_MSI_CAP_MESSAGE_CONTROL:
                /* don't change read-only bits: 1-3,7 */
                u8Val &= UINT8_C(~0x8e);
                pDev->config[uAddr] = u8Val | (pDev->config[uAddr] & UINT8_C(0x8e));
                break;
            case VBOX_MSI_CAP_MESSAGE_CONTROL + 1:
                /* don't change read-only bit 8, and reserved 9-15 */
                break;
            default:
                if (pDev->config[uAddr] != u8Val)
                {
                    int32_t maskUpdated = -1;

                    /* If we're enabling masked vector, and have pending messages
                       for this vector, we have to send this message now */
                    if (    !f64Bit
                         && (reg >= VBOX_MSI_CAP_MASK_BITS_32)
                         && (reg <  VBOX_MSI_CAP_MASK_BITS_32 + 4)
                       )
                    {
                        maskUpdated = reg - VBOX_MSI_CAP_MASK_BITS_32;
                    }
                    if (    f64Bit
                         && (reg >= VBOX_MSI_CAP_MASK_BITS_64)
                         && (reg <  VBOX_MSI_CAP_MASK_BITS_64 + 4)
                       )
                    {
                        maskUpdated = reg - VBOX_MSI_CAP_MASK_BITS_64;
                    }

                    if (maskUpdated != -1 && msiIsEnabled(pDev))
                    {
                        uint32_t* puPending = msiGetPendingBits(pDev);
                        for (int iBitNum = 0; iBitNum < 8; iBitNum++)
                        {
                            int32_t iBit = 1 << iBitNum;
                            uint32_t uVector = maskUpdated*8 + iBitNum;

                            if (msiBitJustCleared(pDev->config[uAddr], u8Val, iBit))
                            {
                                Log(("msi: mask updated bit %d@%x (%d)\n", iBitNum, uAddr, maskUpdated));

                                /* To ensure that we're no longer masked */
                                pDev->config[uAddr] &= ~iBit;
                                if ((*puPending & (1 << uVector)) != 0)
                                {
                                    Log(("msi: notify earlier masked pending vector: %d\n", uVector));
                                    MsiNotify(pDevIns, pPciHlp, pDev, uVector, PDM_IRQ_LEVEL_HIGH, 0 /*uTagSrc*/);
                                }
                            }
                            if (msiBitJustSet(pDev->config[uAddr], u8Val, iBit))
                            {
                                Log(("msi: mask vector: %d\n", uVector));
                            }
                        }
                    }

                    pDev->config[uAddr] = u8Val;
                }
        }
        uAddr++;
        val >>= 8;
    }
}

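/**
 * Handles guest reads from the MSI capability region; simply returns the
 * byte/word/dword from the emulated config space.
 */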
uint32_t MsiPciConfigRead (PPDMDEVINS pDevIns, PPCIDEVICE pDev, uint32_t u32Address, unsigned len)
{
    RT_NOREF1(pDevIns);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
    int32_t off = u32Address - pDev->Int.s.u8MsiCapOffset;
    Assert(off >= 0 && (pciDevIsMsiCapable(pDev) && off < pDev->Int.s.u8MsiCapSize));
#endif
    uint32_t rv = 0;

    switch (len)
    {
        case 1:
            rv = PCIDevGetByte(pDev, u32Address);
            break;
        case 2:
            rv = PCIDevGetWord(pDev, u32Address);
            break;
        case 4:
            rv = PCIDevGetDWord(pDev, u32Address);
            break;
        default:
            Assert(false);
    }

    Log2(("MsiPciConfigRead: %d (%d) -> %x\n", off, len, rv));

    return rv;
}

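/**
 * Sets up the MSI capability in the device's config space.
 *
 * The multiple-message-capable field is the smallest power of two covering
 * the requested vector count (VERR_TOO_MUCH_DATA if more than
 * VBOX_MSI_MAX_ENTRIES are asked for), the capability is linked into the
 * capability list at iMsiCapOffset/iMsiNextOffset, and the mask and pending
 * bits are cleared.
 *
 * Minimal caller sketch with hypothetical values; in practice the PCI bus
 * device passes along the PDMMSIREG it received from the device:
 *
 *     PDMMSIREG MsiReg;
 *     RT_ZERO(MsiReg);
 *     MsiReg.cMsiVectors    = 1;
 *     MsiReg.iMsiCapOffset  = 0x80;
 *     MsiReg.iMsiNextOffset = 0x00;
 *     MsiReg.fMsi64bit      = true;
 *     int rc = MsiInit(pPciDev, &MsiReg);
 */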
int MsiInit(PPCIDEVICE pDev, PPDMMSIREG pMsiReg)
{
    if (pMsiReg->cMsiVectors == 0)
        return VINF_SUCCESS;

    /* XXX: done in pcirawAnalyzePciCaps() */
    if (pciDevIsPassthrough(pDev))
        return VINF_SUCCESS;

    uint16_t cVectors    = pMsiReg->cMsiVectors;
    uint8_t  iCapOffset  = pMsiReg->iMsiCapOffset;
    uint8_t  iNextOffset = pMsiReg->iMsiNextOffset;
    bool     f64bit      = pMsiReg->fMsi64bit;
    uint16_t iFlags      = 0;
    int      iMmc;

    /* Compute multiple-message capable bitfield */
    for (iMmc = 0; iMmc < 6; iMmc++)
    {
        if ((1 << iMmc) >= cVectors)
            break;
    }

    if ((cVectors > VBOX_MSI_MAX_ENTRIES) || (1 << iMmc) < cVectors)
        return VERR_TOO_MUCH_DATA;

    Assert(iCapOffset != 0 && iCapOffset < 0xff && iNextOffset < 0xff);

    /* We always support per-vector masking */
    iFlags |= VBOX_PCI_MSI_FLAGS_MASKBIT | iMmc;
    if (f64bit)
        iFlags |= VBOX_PCI_MSI_FLAGS_64BIT;
    /* How many vectors we're capable of */
    iFlags |= iMmc;

    pDev->Int.s.u8MsiCapOffset = iCapOffset;
    pDev->Int.s.u8MsiCapSize   = f64bit ? VBOX_MSI_CAP_SIZE_64 : VBOX_MSI_CAP_SIZE_32;

    PCIDevSetByte(pDev, iCapOffset + 0, VBOX_PCI_CAP_ID_MSI);
    PCIDevSetByte(pDev, iCapOffset + 1, iNextOffset); /* next */
    PCIDevSetWord(pDev, iCapOffset + VBOX_MSI_CAP_MESSAGE_CONTROL, iFlags);

    *msiGetMaskBits(pDev)    = 0;
    *msiGetPendingBits(pDev) = 0;

    pciDevSetMsiCapable(pDev);

    return VINF_SUCCESS;
}

#endif /* IN_RING3 */


bool MsiIsEnabled(PPCIDEVICE pDev)
{
    return pciDevIsMsiCapable(pDev) && msiIsEnabled(pDev);
}

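/**
 * Delivers (or defers) an MSI for the given vector.
 *
 * Only a level-up transition triggers a message.  If the vector is currently
 * masked, its pending bit is set and delivery happens later when the guest
 * unmasks it (see MsiPciConfigWrite); otherwise the address/data pair is read
 * from the capability and handed to the I/O APIC via pfnIoApicSendMsi.
 */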
void MsiNotify(PPDMDEVINS pDevIns, PCPDMPCIHLP pPciHlp, PPCIDEVICE pDev, int iVector, int iLevel, uint32_t uTagSrc)
{
    AssertMsg(msiIsEnabled(pDev), ("Must be enabled to use that"));

    uint32_t uMask;
    uint32_t *puPending = msiGetPendingBits(pDev);
    if (puPending)
    {
        uint32_t *puMask = msiGetMaskBits(pDev);
        AssertPtr(puMask);
        uMask = *puMask;
        LogFlow(("MsiNotify: %d pending=%x mask=%x\n", iVector, *puPending, uMask));
    }
    else
    {
        uMask = 0;
        LogFlow(("MsiNotify: %d\n", iVector));
    }

    /* We only trigger MSI on level up */
    if ((iLevel & PDM_IRQ_LEVEL_HIGH) == 0)
    {
        /* @todo: maybe clear pending interrupts on level down? */
#if 0
        if (puPending)
        {
            *puPending &= ~(1<<iVector);
            LogFlow(("msi: clear pending %d, now %x\n", iVector, *puPending));
        }
#endif
        return;
    }

    if ((uMask & (1<<iVector)) != 0)
    {
        *puPending |= (1<<iVector);
        LogFlow(("msi: %d is masked, mark pending, now %x\n", iVector, *puPending));
        return;
    }

    RTGCPHYS GCAddr = msiGetMsiAddress(pDev);
    uint32_t u32Value = msiGetMsiData(pDev, iVector);

    if (puPending)
        *puPending &= ~(1<<iVector);

    Assert(pPciHlp->pfnIoApicSendMsi != NULL);
    pPciHlp->pfnIoApicSendMsi(pDevIns, GCAddr, u32Value, uTagSrc);
}