VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/VirtioCore.cpp@ 91627

Last change on this file since 91627 was 88828, checked in by vboxsync, 4 years ago

Fix small burn

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 82.0 KB
Line 
1/* $Id: VirtioCore.cpp 88828 2021-05-03 12:04:56Z vboxsync $ */
2
3/** @file
4 * VirtioCore - Virtio Core (PCI, feature & config mgt, queue mgt & proxy, notification mgt)
5 */
6
7/*
8 * Copyright (C) 2009-2021 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18
19
20/*********************************************************************************************************************************
21* Header Files *
22*********************************************************************************************************************************/
23#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
24
25#include <iprt/assert.h>
26#include <iprt/uuid.h>
27#include <iprt/mem.h>
28#include <iprt/sg.h>
29#include <iprt/assert.h>
30#include <iprt/string.h>
31#include <iprt/param.h>
32#include <iprt/types.h>
33#include <VBox/log.h>
34#include <VBox/msi.h>
35#include <iprt/types.h>
36#include <VBox/AssertGuest.h>
37#include <VBox/vmm/pdmdev.h>
38#include "VirtioCore.h"
39
40
41/*********************************************************************************************************************************
42* Defined Constants And Macros *
43*********************************************************************************************************************************/
44#define INSTANCE(a_pVirtio) ((a_pVirtio)->szInstance)
45#define VIRTQNAME(a_pVirtio, a_uVirtq) ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName)
46
47#define IS_DRIVER_OK(a_pVirtio) ((a_pVirtio)->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
48#define IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq) \
49 (virtioCoreVirtqAvailBufCount_inline(pDevIns, pVirtio, pVirtq) == 0)
50
51/**
52 * This macro returns true if the @a a_offAccess and access length (@a
53 * a_cbAccess) are within the range of the mapped capability struct described by
54 * @a a_LocCapData.
55 *
56 * @param[in] a_offAccess Input: The offset into the MMIO bar of the access.
57 * @param[in] a_cbAccess Input: The access size.
58 * @param[out] a_offsetIntoCap Output: uint32_t variable to return the intra-capability offset into.
59 * @param[in] a_LocCapData Input: The capability location info.
60 */
61#define MATCHES_VIRTIO_CAP_STRUCT(a_offAccess, a_cbAccess, a_offsetIntoCap, a_LocCapData) \
62 ( ((a_offsetIntoCap) = (uint32_t)((a_offAccess) - (a_LocCapData).offMmio)) < (uint32_t)(a_LocCapData).cbMmio \
63 && (a_offsetIntoCap) + (uint32_t)(a_cbAccess) <= (uint32_t)(a_LocCapData).cbMmio )
64
65
66/** Marks the start of the virtio saved state (just for sanity). */
67#define VIRTIO_SAVEDSTATE_MARKER UINT64_C(0x1133557799bbddff)
68/** The current saved state version for the virtio core. */
69#define VIRTIO_SAVEDSTATE_VERSION UINT32_C(1)
70
71
72/*********************************************************************************************************************************
73* Structures and Typedefs *
74*********************************************************************************************************************************/
75
76
77/** @name virtq related flags
78 * @{ */
79#define VIRTQ_DESC_F_NEXT 1 /**< Indicates this descriptor chains to next */
80#define VIRTQ_DESC_F_WRITE 2 /**< Marks buffer as write-only (default ro) */
81#define VIRTQ_DESC_F_INDIRECT 4 /**< Buffer is list of buffer descriptors */
82
83#define VIRTQ_USED_F_NO_NOTIFY 1 /**< Dev to Drv: Don't notify when buf added */
84#define VIRTQ_AVAIL_F_NO_INTERRUPT 1 /**< Drv to Dev: Don't notify when buf eaten */
85/** @} */
86
87/**
88 * virtq related structs
89 * (struct names follow VirtIO 1.0 spec, typedef use VBox style)
90 */
/**
 * Descriptor table entry (struct virtq_desc, VirtIO 1.0 spec 2.4.5).
 * This mirrors guest physical memory layout byte-for-byte (see virtioReadDesc,
 * which copies sizeof(VIRTQ_DESC_T) straight out of the guest's table):
 * do not reorder or resize fields.
 */
typedef struct virtq_desc
{
    uint64_t  GCPhysBuf;                                         /**< addr        GC Phys. address of buffer    */
    uint32_t  cb;                                                /**< len         Buffer length                 */
    uint16_t  fFlags;                                            /**< flags       Buffer specific flags         */
    uint16_t  uDescIdxNext;                                      /**< next        Idx set if VIRTIO_DESC_F_NEXT */
} VIRTQ_DESC_T, *PVIRTQ_DESC_T;
98
/**
 * Avail (driver-to-device) ring header (struct virtq_avail, VirtIO 1.0 spec 2.4.6).
 * Maps guest memory; auRing[] is sized by the negotiated queue size at runtime.
 * The optional used_event (le16) field lives immediately after auRing[uSize] --
 * it is read via virtioReadAvailUsedEvent() rather than declared here, hence
 * the commented-out member below.
 */
typedef struct virtq_avail
{
    uint16_t  fFlags;                                            /**< flags       avail ring guest-to-host flags */
    uint16_t  uIdx;                                              /**< idx         Index of next free ring slot   */
    RT_FLEXIBLE_ARRAY_EXTENSION
    uint16_t  auRing[RT_FLEXIBLE_ARRAY];                         /**< ring        Ring: avail drv to dev bufs    */
    //uint16_t  uUsedEventIdx;                                   /**< used_event (if VIRTQ_USED_F_EVENT_IDX)     */
} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;
107
/**
 * One used-ring element (struct virtq_used_elem, VirtIO 1.0 spec 2.4.8):
 * identifies a returned descriptor chain by its head index plus the number of
 * bytes the device wrote into it.  Written to guest memory as a unit by
 * virtioWriteUsedElem().
 */
typedef struct virtq_used_elem
{
    uint32_t  uDescIdx;                                          /**< idx         Start of used desc chain       */
    uint32_t  cbElem;                                            /**< len         Total len of used desc chain   */
} VIRTQ_USED_ELEM_T;
113
/**
 * Used (device-to-driver) ring header (struct virtq_used, VirtIO 1.0 spec 2.4.8).
 * Maps guest memory; aRing[] is sized by the negotiated queue size at runtime.
 * The optional avail_event field follows aRing[uSize] and is accessed via
 * virtioWriteUsedAvailEvent() rather than declared here.
 * NOTE: the struct tag "virt_used" (sic, missing 'q') is kept as-is since only
 * the typedef names are used elsewhere.
 */
typedef struct virt_used
{
    uint16_t  fFlags;                                            /**< flags       used ring host-to-guest flags  */
    uint16_t  uIdx;                                              /**< idx         Index of next ring slot        */
    RT_FLEXIBLE_ARRAY_EXTENSION
    VIRTQ_USED_ELEM_T aRing[RT_FLEXIBLE_ARRAY];                  /**< ring        Ring: used dev to drv bufs     */
    //uint16_t  uAvailEventIdx;                                  /**< avail_event if (VIRTQ_USED_F_EVENT_IDX)    */
} VIRTQ_USED_T, *PVIRTQ_USED_T;
122
123
124const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState)
125{
126 switch (enmState)
127 {
128 case kvirtIoVmStateChangedReset: return "VM RESET";
129 case kvirtIoVmStateChangedSuspend: return "VM SUSPEND";
130 case kvirtIoVmStateChangedPowerOff: return "VM POWER OFF";
131 case kvirtIoVmStateChangedResume: return "VM RESUME";
132 default: return "<BAD ENUM>";
133 }
134}
135
136/* Internal Functions */
137
138static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq);
139static int virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uVec);
140
141/** @name Internal queue operations
142 * @{ */
143
144/**
145 * Accessor for virtq descriptor
146 */
147#ifdef IN_RING3
DECLINLINE(void) virtioReadDesc(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq,
                                uint32_t idxDesc, PVIRTQ_DESC_T pDesc)
{
    /* Copy one descriptor table entry out of guest physical memory into *pDesc. */
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    uint16_t const cVirtqItems = RT_MAX(pVirtq->uSize, 1); /* Make sure to avoid div-by-zero. */
    /* The index is wrapped to the ring size so a bogus guest-supplied index
       cannot make us read past the descriptor table. */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * (idxDesc % cVirtqItems),
                         pDesc, sizeof(VIRTQ_DESC_T));
}
158#endif
159
160/**
161 * Accessors for virtq avail ring
162 */
163#ifdef IN_RING3
DECLINLINE(uint16_t) virtioReadAvailDescIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint32_t availIdx)
{
    /* Read the descriptor-chain head index published by the guest at avail-ring
       slot availIdx (wrapped to the ring size). */
    uint16_t uDescIdx;
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    uint16_t const cVirtqItems = RT_MAX(pVirtq->uSize, 1); /* Make sure to avoid div-by-zero. */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % cVirtqItems]),
                         &uDescIdx, sizeof(uDescIdx));
    return uDescIdx;
}
175
DECLINLINE(uint16_t) virtioReadAvailUsedEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    /* Read the guest's used_event threshold (event-index notification suppression). */
    uint16_t uUsedEventIdx;
    /* VirtIO 1.0 uUsedEventIdx (used_event) immediately follows ring */
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uSize]),
                         &uUsedEventIdx, sizeof(uUsedEventIdx));
    return uUsedEventIdx;
}
187#endif
188
DECLINLINE(uint16_t) virtioReadAvailRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    /* Read the guest's free-running avail-ring producer index (avail->idx). */
    uint16_t uIdx = 0;
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, uIdx),
                         &uIdx, sizeof(uIdx));
    return uIdx;
}
199
DECLINLINE(uint16_t) virtioReadAvailRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    /* Read the guest's avail-ring flags (e.g. VIRTQ_AVAIL_F_NO_INTERRUPT). */
    uint16_t fFlags = 0;
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
                         &fFlags, sizeof(fFlags));
    return fFlags;
}
210
211/** @} */
212
213/** @name Accessors for virtq used ring
214 * @{
215 */
216
217#ifdef IN_RING3
DECLINLINE(void) virtioWriteUsedElem(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq,
                                     uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
{
    /* Publish one used-ring element (head descriptor index + bytes written) to
       guest memory at used-ring slot usedIdx (wrapped to the ring size). */
    VIRTQ_USED_ELEM_T elem = { uDescIdx,  uLen };
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    uint16_t const cVirtqItems = RT_MAX(pVirtq->uSize, 1); /* Make sure to avoid div-by-zero. */
    PDMDevHlpPCIPhysWrite(pDevIns,
                          pVirtq->GCPhysVirtqUsed
                        + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[usedIdx % cVirtqItems]),
                          &elem, sizeof(elem));
}
230
DECLINLINE(void) virtioWriteUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t fFlags)
{
    /* Write the device's used-ring flags (e.g. VIRTQ_USED_F_NO_NOTIFY) to guest memory. */
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
    PDMDevHlpPCIPhysWrite(pDevIns,
                          pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                          &fFlags, sizeof(fFlags));
}
240#endif
241
DECLINLINE(void) virtioWriteUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t uIdx)
{
    /* Publish the device's used-ring producer index (used->idx) to guest memory. */
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    PDMDevHlpPCIPhysWrite(pDevIns,
                          pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                          &uIdx, sizeof(uIdx));
}
250
251
252#ifdef IN_RING3
DECLINLINE(uint16_t) virtioReadUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    /* Read back the used-ring producer index (used->idx) from guest memory. */
    uint16_t uIdx = 0;
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                         &uIdx, sizeof(uIdx));
    return uIdx;
}
263
DECLINLINE(uint16_t) virtioReadUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    /* Read back the device's used-ring flags from guest memory. */
    uint16_t fFlags = 0;
    AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
    RT_NOREF(pVirtio); /* only referenced by the assertion in strict builds */
    PDMDevHlpPCIPhysRead(pDevIns,
                         pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                         &fFlags, sizeof(fFlags));
    return fFlags;
}
274
275DECLINLINE(void) virtioWriteUsedAvailEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint32_t uAvailEventIdx)
276{
277 /** VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
278 AssertMsg(pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK, ("Called with guest driver not ready\n"));
279 RT_NOREF(pVirtio);
280 PDMDevHlpPCIPhysWrite(pDevIns,
281 pVirtq->GCPhysVirtqUsed
282 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[pVirtq->uSize]),
283 &uAvailEventIdx, sizeof(uAvailEventIdx));
284}
285#endif
286
287DECLINLINE(uint16_t) virtioCoreVirtqAvailBufCount_inline(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
288{
289 uint16_t uIdxActual = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq);
290 uint16_t uIdxShadow = pVirtq->uAvailIdxShadow;
291 uint16_t uIdxDelta;
292
293 if (uIdxActual < uIdxShadow)
294 uIdxDelta = (uIdxActual + VIRTQ_MAX_ENTRIES) - uIdxShadow;
295 else
296 uIdxDelta = uIdxActual - uIdxShadow;
297
298 LogFunc(("%s has %u %s (idx=%u shadow=%u)\n",
299 pVirtq->szName, uIdxDelta, uIdxDelta == 1 ? "entry" : "entries",
300 uIdxActual, uIdxShadow));
301
302 return uIdxDelta;
303}
304/**
305 * Get count of new (e.g. pending) elements in available ring.
306 *
307 * @param pDevIns The device instance.
308 * @param pVirtio Pointer to the shared virtio state.
309 * @param uVirtq Virtq number
310 *
311 * @returns how many entries have been added to ring as a delta of the consumer's
312 * avail index and the queue's guest-side current avail index.
313 */
314uint16_t virtioCoreVirtqAvailBufCount(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
315{
316 AssertMsgReturn(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues), ("uVirtq out of range"), 0);
317 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
318 if (!IS_DRIVER_OK(pVirtio) || !pVirtq->uEnable)
319 {
320 LogRelFunc(("Driver not ready or queue not enabled\n"));
321 return 0;
322 }
323
324 return virtioCoreVirtqAvailBufCount_inline(pDevIns, pVirtio, pVirtq);
325}
326
327#ifdef IN_RING3
328
329/** API Function: See header file*/
330void virtioCorePrintFeatures(VIRTIOCORE *pVirtio, PCDBGFINFOHLP pHlp)
331{
332 static struct
333 {
334 uint64_t fFeatureBit;
335 const char *pcszDesc;
336 } const s_aFeatures[] =
337 {
338 { VIRTIO_F_RING_INDIRECT_DESC, " RING_INDIRECT_DESC Driver can use descriptors with VIRTQ_DESC_F_INDIRECT flag set\n" },
339 { VIRTIO_F_RING_EVENT_IDX, " RING_EVENT_IDX Enables use_event and avail_event fields described in 2.4.7, 2.4.8\n" },
340 { VIRTIO_F_VERSION_1, " VERSION Used to detect legacy drivers.\n" },
341 };
342
343#define MAXLINE 80
344 /* Display as a single buf to prevent interceding log messages */
345 uint16_t cbBuf = RT_ELEMENTS(s_aFeatures) * 132;
346 char *pszBuf = (char *)RTMemAllocZ(cbBuf);
347 Assert(pszBuf);
348 char *cp = pszBuf;
349 for (unsigned i = 0; i < RT_ELEMENTS(s_aFeatures); ++i)
350 {
351 bool isOffered = RT_BOOL(pVirtio->uDeviceFeatures & s_aFeatures[i].fFeatureBit);
352 bool isNegotiated = RT_BOOL(pVirtio->uDriverFeatures & s_aFeatures[i].fFeatureBit);
353 cp += RTStrPrintf(cp, cbBuf - (cp - pszBuf), " %s %s %s",
354 isOffered ? "+" : "-", isNegotiated ? "x" : " ", s_aFeatures[i].pcszDesc);
355 }
356 if (pHlp)
357 pHlp->pfnPrintf(pHlp, "VirtIO Core Features Configuration\n\n"
358 " Offered Accepted Feature Description\n"
359 " ------- -------- ------- -----------\n"
360 "%s\n", pszBuf);
361#ifdef LOG_ENABLED
362 else
363 Log3(("VirtIO Core Features Configuration\n\n"
364 " Offered Accepted Feature Description\n"
365 " ------- -------- ------- -----------\n"
366 "%s\n", pszBuf));
367#endif
368 RTMemFree(pszBuf);
369}
370#endif
371
372#ifdef LOG_ENABLED
373
374/** API Function: See header file */
void virtioCoreHexDump(uint8_t *pv, uint32_t cb, uint32_t uBase, const char *pszTitle)
{
    /* Renders a 16-bytes-per-row hex + ASCII dump of pv into one heap buffer,
     * then emits it with a single Log() call so concurrent log traffic can't
     * interleave with the dump.  uBase only offsets the printed line addresses. */
#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
    size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80;   /* 80 chars per row is a safe upper bound */
    char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
    AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
    if (pszTitle)
    {
        cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
        ADJCURSOR(cbPrint);
    }
    for (uint32_t row = 0; row < RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
    {
        cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
        ADJCURSOR(cbPrint);
        /* Hex columns; cells past cb print "--", extra gap after every 8 columns. */
        for (uint8_t col = 0; col < 16; col++)
        {
            uint32_t idx = row * 16 + col;
            if (idx >= cb)
                cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " ");
            else
                cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", pv[idx], (col + 1) % 8 ? "" : " ");
            ADJCURSOR(cbPrint);
        }
        /* ASCII gutter: printable bytes as-is, anything else as '.'. */
        for (uint32_t idx = row * 16; idx < row * 16 + 16; idx++)
        {
            cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (pv[idx] >= 0x20 && pv[idx] <= 0x7e ? pv[idx] : '.'));
            ADJCURSOR(cbPrint);
        }
        *pszOut++ = '\n';
        --cbRemain;
    }
    Log(("%s\n", pszBuf));
    RTMemFree(pszBuf);
    RT_NOREF2(uBase, pv); /* only used via Log macros in some build configs */
#undef ADJCURSOR
}
412
413/* API FUnction: See header file */
414void virtioCoreGCPhysHexDump(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint16_t cb, uint32_t uBase, const char *pszTitle)
415{
416#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
417 size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80;
418 char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
419 AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
420 if (pszTitle)
421 {
422 cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
423 ADJCURSOR(cbPrint);
424 }
425 for (uint16_t row = 0; row < (uint16_t)RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
426 {
427 uint8_t c;
428 cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
429 ADJCURSOR(cbPrint);
430 for (uint8_t col = 0; col < 16; col++)
431 {
432 uint32_t idx = row * 16 + col;
433 PDMDevHlpPCIPhysRead(pDevIns, GCPhys + idx, &c, 1);
434 if (idx >= cb)
435 cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " ");
436 else
437 cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", c, (col + 1) % 8 ? "" : " ");
438 ADJCURSOR(cbPrint);
439 }
440 for (uint16_t idx = row * 16; idx < row * 16 + 16; idx++)
441 {
442 PDMDevHlpPCIPhysRead(pDevIns, GCPhys + idx, &c, 1);
443 cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (c >= 0x20 && c <= 0x7e ? c : '.'));
444 ADJCURSOR(cbPrint);
445 }
446 *pszOut++ = '\n';
447 --cbRemain;
448 }
449 Log(("%s\n", pszBuf));
450 RTMemFree(pszBuf);
451 RT_NOREF(uBase);
452#undef ADJCURSOR
453}
454#endif /* LOG_ENABLED */
455
456/** API function: See header file */
void virtioCoreLogMappedIoValue(const char *pszFunc, const char *pszMember, uint32_t uMemberSize,
                                const void *pv, uint32_t cb, uint32_t uOffset, int fWrite,
                                int fHasIndex, uint32_t idx)
{
    /* Log (level 6) a guest MMIO access to a named config member: member name,
     * optional array index, optional byte sub-range, and the accessed value. */
    if (!LogIs6Enabled())
        return;

    /* Optional "[idx]" suffix for array-typed members. */
    char szIdx[16];
    if (fHasIndex)
        RTStrPrintf(szIdx, sizeof(szIdx), "[%d]", idx);
    else
        szIdx[0] = '\0';

    if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
    {
        /* Natural-width access: render the value as one right-sized hex number. */
        char szDepiction[64];
        size_t cchDepiction;
        if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */
            cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s[%d:%d]",
                                       pszMember, szIdx, uOffset, uOffset + cb - 1);
        else
            cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s", pszMember, szIdx);

        /* Pad the depiction out to 30 columns with dots so values line up. */
        if (cchDepiction < 30)
            szDepiction[cchDepiction++] = ' ';
        while (cchDepiction < 30)
            szDepiction[cchDepiction++] = '.';
        szDepiction[cchDepiction] = '\0';

        /* Widen to 64 bits via memcpy (avoids misaligned/aliasing loads). */
        RTUINT64U uValue;
        uValue.u = 0;
        memcpy(uValue.au8, pv, cb);
        Log6(("%-23s: Guest %s %s %#0*RX64\n",
              pszFunc, fWrite ? "wrote" : "read ", szDepiction, 2 + cb * 2, uValue.u));
    }
    else /* odd number or oversized access, ... log inline hex-dump style */
    {
        Log6(("%-23s: Guest %s %s%s[%d:%d]: %.*Rhxs\n",
              pszFunc, fWrite ? "wrote" : "read ", pszMember,
              szIdx, uOffset, uOffset + cb, cb, pv));
    }
    RT_NOREF2(fWrite, pszFunc); /* only referenced inside Log macros in some builds */
}
501
502/**
503 * Makes the MMIO-mapped Virtio fDeviceStatus registers non-cryptic (buffers to
504 * keep the output clean during multi-threaded activity)
505 */
506DECLINLINE(void) virtioCoreFormatDeviceStatus(uint8_t bStatus, char *pszBuf, size_t uSize)
507{
508
509#define ADJCURSOR(len) cp += len; uSize -= len; sep = (char *)" | ";
510
511 memset(pszBuf, 0, uSize);
512 size_t len;
513 char *cp = pszBuf;
514 char *sep = (char *)"";
515
516 if (bStatus == 0) {
517 RTStrPrintf(cp, uSize, "RESET");
518 return;
519 }
520 if (bStatus & VIRTIO_STATUS_ACKNOWLEDGE)
521 {
522 len = RTStrPrintf(cp, uSize, "ACKNOWLEDGE");
523 ADJCURSOR(len);
524 }
525 if (bStatus & VIRTIO_STATUS_DRIVER)
526 {
527 len = RTStrPrintf(cp, uSize, "%sDRIVER", sep);
528 ADJCURSOR(len);
529 }
530 if (bStatus & VIRTIO_STATUS_FEATURES_OK)
531 {
532 len = RTStrPrintf(cp, uSize, "%sFEATURES_OK", sep);
533 ADJCURSOR(len);
534 }
535 if (bStatus & VIRTIO_STATUS_DRIVER_OK)
536 {
537 len = RTStrPrintf(cp, uSize, "%sDRIVER_OK", sep);
538 ADJCURSOR(len);
539 }
540 if (bStatus & VIRTIO_STATUS_FAILED)
541 {
542 len = RTStrPrintf(cp, uSize, "%sFAILED", sep);
543 ADJCURSOR(len);
544 }
545 if (bStatus & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
546 RTStrPrintf(cp, uSize, "%sNEEDS_RESET", sep);
547
548#undef ADJCURSOR
549}
550
551#ifdef IN_RING3
552
553int virtioCoreR3VirtqAttach(PVIRTIOCORE pVirtio, uint16_t uVirtq, const char *pcszName)
554{
555 LogFunc(("%s\n", pcszName));
556 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
557 pVirtq->uVirtq = uVirtq;
558 pVirtq->uAvailIdxShadow = 0;
559 pVirtq->uUsedIdxShadow = 0;
560 pVirtq->fUsedRingEvent = false;
561 RTStrCopy(pVirtq->szName, sizeof(pVirtq->szName), pcszName);
562 return VINF_SUCCESS;
563}
564
565/** API Fuunction: See header file */
566void virtioCoreR3VirtqInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs, int uVirtq)
567{
568 RT_NOREF(pszArgs);
569 PVIRTIOCORE pVirtio = PDMDEVINS_2_DATA(pDevIns, PVIRTIOCORE);
570 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
571
572 /** @todo add ability to dump physical contents described by any descriptor (using existing VirtIO core API function) */
573// bool fDump = pszArgs && (*pszArgs == 'd' || *pszArgs == 'D'); /* "dump" (avail phys descriptor)"
574
575 uint16_t uAvailIdx = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq);
576 uint16_t uAvailIdxShadow = pVirtq->uAvailIdxShadow;
577
578 uint16_t uUsedIdx = virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq);
579 uint16_t uUsedIdxShadow = pVirtq->uUsedIdxShadow;
580
581 PVIRTQBUF pVirtqBuf = NULL;
582
583 bool fEmpty = IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq);
584
585 LogFunc(("%s, empty = %s\n", pVirtq->szName, fEmpty ? "true" : "false"));
586
587 int cSendSegs = 0, cReturnSegs = 0;
588 if (!fEmpty)
589 {
590 virtioCoreR3VirtqAvailBufPeek(pDevIns, pVirtio, uVirtq, &pVirtqBuf);
591 cSendSegs = pVirtqBuf->pSgPhysSend ? pVirtqBuf->pSgPhysSend->cSegs : 0;
592 cReturnSegs = pVirtqBuf->pSgPhysReturn ? pVirtqBuf->pSgPhysReturn->cSegs : 0;
593 }
594
595 bool fAvailNoInterrupt = virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT;
596 bool fUsedNoNotify = virtioReadUsedRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_USED_F_NO_NOTIFY;
597
598
599 pHlp->pfnPrintf(pHlp, " queue enabled: ........... %s\n", pVirtq->uEnable ? "true" : "false");
600 pHlp->pfnPrintf(pHlp, " size: .................... %d\n", pVirtq->uSize);
601 pHlp->pfnPrintf(pHlp, " notify offset: ........... %d\n", pVirtq->uNotifyOffset);
602 if (pVirtio->fMsiSupport)
603 pHlp->pfnPrintf(pHlp, " MSIX vector: ....... %4.4x\n", pVirtq->uMsix);
604 pHlp->pfnPrintf(pHlp, "\n");
605 pHlp->pfnPrintf(pHlp, " avail ring (%d entries):\n", uAvailIdx - uAvailIdxShadow);
606 pHlp->pfnPrintf(pHlp, " index: ................ %d\n", uAvailIdx);
607 pHlp->pfnPrintf(pHlp, " shadow: ............... %d\n", uAvailIdxShadow);
608 pHlp->pfnPrintf(pHlp, " flags: ................ %s\n", fAvailNoInterrupt ? "NO_INTERRUPT" : "");
609 pHlp->pfnPrintf(pHlp, "\n");
610 pHlp->pfnPrintf(pHlp, " used ring (%d entries):\n", uUsedIdx - uUsedIdxShadow);
611 pHlp->pfnPrintf(pHlp, " index: ................ %d\n", uUsedIdx);
612 pHlp->pfnPrintf(pHlp, " shadow: ............... %d\n", uUsedIdxShadow);
613 pHlp->pfnPrintf(pHlp, " flags: ................ %s\n", fUsedNoNotify ? "NO_NOTIFY" : "");
614 pHlp->pfnPrintf(pHlp, "\n");
615 if (!fEmpty)
616 {
617 pHlp->pfnPrintf(pHlp, " desc chain:\n");
618 pHlp->pfnPrintf(pHlp, " head idx: ............. %d\n", uUsedIdx);
619 pHlp->pfnPrintf(pHlp, " segs: ................. %d\n", cSendSegs + cReturnSegs);
620 pHlp->pfnPrintf(pHlp, " refCnt ................ %d\n", pVirtqBuf->cRefs);
621 pHlp->pfnPrintf(pHlp, "\n");
622 pHlp->pfnPrintf(pHlp, " host-to-guest (%d bytes):\n", pVirtqBuf->cbPhysSend);
623 pHlp->pfnPrintf(pHlp, " segs: .............. %d\n", cSendSegs);
624 if (cSendSegs)
625 {
626 pHlp->pfnPrintf(pHlp, " index: ............. %d\n", pVirtqBuf->pSgPhysSend->idxSeg);
627 pHlp->pfnPrintf(pHlp, " unsent ............. %d\n", pVirtqBuf->pSgPhysSend->cbSegLeft);
628 }
629 pHlp->pfnPrintf(pHlp, "\n");
630 pHlp->pfnPrintf(pHlp, " guest-to-host (%d bytes)\n", pVirtqBuf->cbPhysReturn);
631 pHlp->pfnPrintf(pHlp, " segs: .............. %d\n", cReturnSegs);
632 if (cReturnSegs)
633 {
634 pHlp->pfnPrintf(pHlp, " index: ............. %d\n", pVirtqBuf->pSgPhysReturn->idxSeg);
635 pHlp->pfnPrintf(pHlp, " unsent ............. %d\n", pVirtqBuf->pSgPhysReturn->cbSegLeft);
636 }
637 } else
638 pHlp->pfnPrintf(pHlp, " No desc chains available\n");
639 pHlp->pfnPrintf(pHlp, "\n");
640
641}
642
643/** API Function: See header file */
uint32_t virtioCoreR3VirtqBufRetain(PVIRTQBUF pVirtqBuf)
{
    /* Add a reference to a descriptor-chain buffer; returns the new ref count,
     * or UINT32_MAX if the pointer is NULL or the magic check fails. */
    AssertReturn(pVirtqBuf, UINT32_MAX);
    AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, UINT32_MAX); /* catches stale/freed buffers */
    uint32_t cRefs = ASMAtomicIncU32(&pVirtqBuf->cRefs);
    Assert(cRefs > 1);  /* caller must already hold a reference */
    Assert(cRefs < 16); /* arbitrary sanity cap on the ref count */
    return cRefs;
}
653
654
655/** API Function: See header file */
656uint32_t virtioCoreR3VirtqBufRelease(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf)
657{
658 if (!pVirtqBuf)
659 return 0;
660 AssertReturn(pVirtqBuf, 0);
661 AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, 0);
662 uint32_t cRefs = ASMAtomicDecU32(&pVirtqBuf->cRefs);
663 Assert(cRefs < 16);
664 if (cRefs == 0)
665 {
666 pVirtqBuf->u32Magic = ~VIRTQBUF_MAGIC;
667 RTMemFree(pVirtqBuf);
668 STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsFreed);
669 }
670 return cRefs;
671}
672
673/** API Function: See header file */
void virtioCoreNotifyConfigChanged(PVIRTIOCORE pVirtio)
{
    /* Raise a device-configuration-change interrupt toward the guest, using the
     * configured MSI-X config vector (uMsixConfig) when applicable. */
    virtioKick(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
}
678
679/** API Function: See header file */
680void virtioCoreVirtqEnableNotify(PVIRTIOCORE pVirtio, uint16_t uVirtq, bool fEnable)
681{
682
683 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
684 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
685
686 if (pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
687 {
688 uint16_t fFlags = virtioReadUsedRingFlags(pVirtio->pDevInsR3, pVirtio, pVirtq);
689
690 if (fEnable)
691 fFlags &= ~VIRTQ_USED_F_NO_NOTIFY;
692 else
693 fFlags |= VIRTQ_USED_F_NO_NOTIFY;
694
695 virtioWriteUsedRingFlags(pVirtio->pDevInsR3, pVirtio, pVirtq, fFlags);
696 }
697}
698
699/** API function: See Header file */
/** API function: See Header file */
void virtioCoreResetAll(PVIRTIOCORE pVirtio)
{
    LogFunc(("\n"));
    /* Flag the device as needing a reset; if the guest driver is up, also mark a
     * pending config-generation update and raise a device-config interrupt so the
     * guest notices. */
    pVirtio->fDeviceStatus |= VIRTIO_STATUS_DEVICE_NEEDS_RESET;
    if (pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
    {
        pVirtio->fGenUpdatePending = true;
        virtioKick(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
    }
}
710
711/** API function: See Header file */
int virtioCoreR3VirtqAvailBufPeek(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                  PPVIRTQBUF ppVirtqBuf)
{
    /* Non-destructive read: fetch the next available buffer without advancing
     * the shadow avail index (fRemove = false). */
    return virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, ppVirtqBuf, false);
}
717
718/** API function: See Header file */
719int virtioCoreR3VirtqAvailBufNext(PVIRTIOCORE pVirtio, uint16_t uVirtq)
720{
721 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
722 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
723
724 AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtq->uEnable,
725 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
726
727 if (IS_VIRTQ_EMPTY(pVirtio->pDevInsR3, pVirtio, pVirtq))
728 return VERR_NOT_AVAILABLE;
729
730 Log6Func(("%s avail shadow idx: %u\n", pVirtq->szName, pVirtq->uAvailIdxShadow));
731 pVirtq->uAvailIdxShadow++;
732
733 return VINF_SUCCESS;
734}
735
736
737/** API Function: See header file */
int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                 uint16_t uHeadIdx, PPVIRTQBUF ppVirtqBuf)
{
    /* Walks the guest's descriptor chain starting at uHeadIdx and materializes it
     * as a freshly allocated VIRTQBUF_T (cRefs = 1; caller owns the reference and
     * must release it via virtioCoreR3VirtqBufRelease).  Device-writable (IN)
     * descriptors go to aSegsIn/SgBufIn, device-readable (OUT) to aSegsOut/SgBufOut. */
    AssertReturn(ppVirtqBuf, VERR_INVALID_POINTER);
    *ppVirtqBuf = NULL;

    AssertMsgReturn(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues),
                    ("uVirtq out of range"), VERR_INVALID_PARAMETER);

    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtq->uEnable,
                    ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    uint16_t uDescIdx = uHeadIdx;

    Log6Func(("%s DESC CHAIN: (head) desc_idx=%u\n", pVirtio->aVirtqueues[uVirtq].szName, uHeadIdx));

    /*
     * Allocate and initialize the descriptor chain structure.
     */
    PVIRTQBUF pVirtqBuf = (PVIRTQBUF)RTMemAllocZ(sizeof(VIRTQBUF_T));
    AssertReturn(pVirtqBuf, VERR_NO_MEMORY);
    pVirtqBuf->u32Magic  = VIRTQBUF_MAGIC;
    pVirtqBuf->cRefs     = 1;               /* caller's reference */
    pVirtqBuf->uHeadIdx  = uHeadIdx;
    pVirtqBuf->uVirtq    = uVirtq;
    *ppVirtqBuf = pVirtqBuf;

    /*
     * Gather segments.
     */
    VIRTQ_DESC_T desc;

    uint32_t cbIn = 0;      /* total bytes of device-writable (IN) segments */
    uint32_t cbOut = 0;     /* total bytes of device-readable (OUT) segments */
    uint32_t cSegsIn = 0;
    uint32_t cSegsOut = 0;

    PVIRTIOSGSEG paSegsIn = pVirtqBuf->aSegsIn;
    PVIRTIOSGSEG paSegsOut = pVirtqBuf->aSegsOut;

    do
    {
        PVIRTIOSGSEG pSeg;

        /*
         * Malicious guests may go beyond paSegsIn or paSegsOut boundaries by linking
         * several descriptors into a loop. Since there is no legitimate way to get a sequences of
         * linked descriptors exceeding the total number of descriptors in the ring (see @bugref{8620}),
         * the following aborts I/O if breach and employs a simple log throttling algorithm to notify.
         */
        if (cSegsIn + cSegsOut >= VIRTQ_MAX_ENTRIES)
        {
            /* Log the first occurrence, then every 10th, 100th, ... occurrence. */
            static volatile uint32_t s_cMessages  = 0;
            static volatile uint32_t s_cThreshold = 1;
            if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
            {
                LogRelMax(64, ("Too many linked descriptors; check if the guest arranges descriptors in a loop.\n"));
                if (ASMAtomicReadU32(&s_cMessages) != 1)
                    LogRelMax(64, ("(the above error has occured %u times so far)\n", ASMAtomicReadU32(&s_cMessages)));
                ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
            }
            break;
        }
        RT_UNTRUSTED_VALIDATED_FENCE();

        virtioReadDesc(pDevIns, pVirtio, pVirtq, uDescIdx, &desc);

        if (desc.fFlags & VIRTQ_DESC_F_WRITE)
        {
            /* Device-writable (IN) segment. */
            Log6Func(("%s IN  desc_idx=%u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb));
            cbIn += desc.cb;
            pSeg = &paSegsIn[cSegsIn++];
        }
        else
        {
            /* Device-readable (OUT) segment. */
            Log6Func(("%s OUT desc_idx=%u seg=%u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsOut, desc.GCPhysBuf, desc.cb));
            cbOut += desc.cb;
            pSeg = &paSegsOut[cSegsOut++];
#ifdef DEEP_DEBUG
            if (LogIs11Enabled())
            {
                virtioCoreGCPhysHexDump(pDevIns, desc.GCPhysBuf, desc.cb, 0, NULL);
                Log(("\n"));
            }
#endif
        }

        pSeg->GCPhys = desc.GCPhysBuf;
        pSeg->cbSeg = desc.cb;

        uDescIdx = desc.uDescIdxNext;
    } while (desc.fFlags & VIRTQ_DESC_F_NEXT); /* follow the chain link until a terminal descriptor */

    /*
     * Add segments to the descriptor chain structure.
     */
    if (cSegsIn)
    {
        virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufIn, paSegsIn, cSegsIn);
        pVirtqBuf->pSgPhysReturn = &pVirtqBuf->SgBufIn;
        pVirtqBuf->cbPhysReturn = cbIn;
        STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsIn, cSegsIn);
    }

    if (cSegsOut)
    {
        virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufOut, paSegsOut, cSegsOut);
        pVirtqBuf->pSgPhysSend = &pVirtqBuf->SgBufOut;
        pVirtqBuf->cbPhysSend = cbOut;
        STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsOut, cSegsOut);
    }

    STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsAllocated);
    Log6Func(("%s -- segs OUT: %u (%u bytes)   IN: %u (%u bytes) --\n",
              pVirtq->szName, cSegsOut, cbOut, cSegsIn, cbIn));

    return VINF_SUCCESS;
}
858
859/** API function: See Header file */
860int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
861 PPVIRTQBUF ppVirtqBuf, bool fRemove)
862{
863 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
864 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
865
866 if (IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq))
867 return VERR_NOT_AVAILABLE;
868
869 uint16_t uHeadIdx = virtioReadAvailDescIdx(pDevIns, pVirtio, pVirtq, pVirtq->uAvailIdxShadow);
870
871 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
872 virtioWriteUsedAvailEvent(pDevIns,pVirtio, pVirtq, pVirtq->uAvailIdxShadow + 1);
873
874 if (fRemove)
875 pVirtq->uAvailIdxShadow++;
876
877 int rc = virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, uHeadIdx, ppVirtqBuf);
878 return rc;
879}
880
/** API function: See Header file
 *
 * Copies the client's virtual-memory scatter/gather data (if any) into the
 * guest-physical "IN" buffers of the supplied descriptor chain, then records
 * the chain in the used ring at the shadow used index.  The guest-visible
 * used index is deliberately NOT bumped here; that happens later via
 * virtioCoreVirtqUsedRingSync().
 */
int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, PRTSGBUF pSgVirtReturn,
                                PVIRTQBUF pVirtqBuf, bool fFence)
{
    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn;

    /* Sanity: the buffer must be a live, still-referenced chain from this core. */
    Assert(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC);
    Assert(pVirtqBuf->cRefs > 0);

    AssertMsgReturn(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    Log6Func(("Copying client data to %s, desc chain (head desc_idx %d)\n",
              VIRTQNAME(pVirtio, uVirtq), virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq)));

    /* Copy s/g buf (virtual memory) to guest phys mem (IN direction). */

    size_t cbCopy = 0, cbTotal = 0, cbRemain = 0;

    if (pSgVirtReturn)
    {
        size_t cbTarget = virtioCoreGCPhysChainCalcBufSize(pSgPhysReturn);
        cbRemain = cbTotal = RTSgBufCalcTotalLength(pSgVirtReturn);
        /* Refuse to write more than the guest made room for in the chain. */
        AssertMsgReturn(cbTarget >= cbRemain, ("No space to write data to phys memory"), VERR_BUFFER_OVERFLOW);
        virtioCoreGCPhysChainReset(pSgPhysReturn);
        /* Walk both chains in lock-step, each iteration copying the overlap of
         * the current virtual segment and the current guest-physical segment. */
        while (cbRemain)
        {
            cbCopy = RT_MIN(pSgVirtReturn->cbSegLeft, pSgPhysReturn->cbSegLeft);
            Assert(cbCopy > 0);
            PDMDevHlpPhysWrite(pDevIns, (RTGCPHYS)pSgPhysReturn->GCPhysCur, pSgVirtReturn->pvSegCur, cbCopy);
            RTSgBufAdvance(pSgVirtReturn, cbCopy);
            virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbCopy);
            cbRemain -= cbCopy;
        }

        if (fFence)
            RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); /* needed? */

        Assert(!(cbCopy >> 32));
    }

    /* If this write-ahead crosses threshold where the driver wants to get an event flag it */
    if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
        if (pVirtq->uUsedIdxShadow == virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq))
            pVirtq->fUsedRingEvent = true;

    /*
     * Place used buffer's descriptor in used ring but don't update used ring's slot index.
     * That will be done with a subsequent client call to virtioCoreVirtqUsedRingSync() */
    virtioWriteUsedElem(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow++, pVirtqBuf->uHeadIdx, (uint32_t)cbTotal);

    if (pSgVirtReturn)
        Log6Func((".... Copied %zu bytes in %d segs to %u byte buffer, residual=%zu\n",
                  cbTotal - cbRemain, pSgVirtReturn->cSegs, pVirtqBuf->cbPhysReturn, pVirtqBuf->cbPhysReturn - cbTotal));

    Log6Func(("Write ahead used_idx=%u, %s used_idx=%u\n",
              pVirtq->uUsedIdxShadow, VIRTQNAME(pVirtio, uVirtq), virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq)));

    return VINF_SUCCESS;
}
943
944
945#endif /* IN_RING3 */
946
947/** API function: See Header file */
948int virtioCoreVirtqUsedRingSync(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
949{
950 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
951 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
952
953 AssertMsgReturn(IS_DRIVER_OK(pVirtio) && pVirtq->uEnable,
954 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
955
956 Log6Func(("Updating %s used_idx to %u\n", pVirtq->szName, pVirtq->uUsedIdxShadow));
957
958 virtioWriteUsedRingIdx(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow);
959 virtioCoreNotifyGuestDriver(pDevIns, pVirtio, uVirtq);
960
961 return VINF_SUCCESS;
962}
963
964/**
965 * This is called from the MMIO callback code when the guest does an MMIO access to the
966 * mapped queue notification capability area corresponding to a particular queue, to notify
967 * the queue handler of available data in the avail ring of the queue (VirtIO 1.0, 4.1.4.4.1)
968 *
969 * @param pDevIns The device instance.
970 * @param pVirtio Pointer to the shared virtio state.
971 * @param uVirtq Virtq to check for guest interrupt handling preference
972 * @param uNotifyIdx Notification index
973 */
974static void virtioCoreVirtqNotified(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, uint16_t uNotifyIdx)
975{
976 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
977
978
979 /* See VirtIO 1.0, section 4.1.5.2 It implies that uVirtq and uNotifyIdx should match.
980 * Disregarding this notification may cause throughput to stop, however there's no way to know
981 * which was queue was intended for wake-up if the two parameters disagree. */
982
983 AssertMsg(uNotifyIdx == uVirtq,
984 ("Guest kicked virtq %d's notify addr w/non-corresponding virtq idx %d\n",
985 uVirtq, uNotifyIdx));
986 RT_NOREF(uNotifyIdx);
987
988 AssertReturnVoid(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
989 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
990
991 Log6Func(("%s (desc chains: %u)\n", pVirtq->szName,
992 virtioCoreVirtqAvailBufCount_inline(pDevIns, pVirtio, pVirtq)));
993
994 /* Inform client */
995 pVirtioCC->pfnVirtqNotified(pDevIns, pVirtio, uVirtq);
996 RT_NOREF2(pVirtio, pVirtq);
997}
998
999/**
1000 * Trigger MSI-X or INT# interrupt to notify guest of data added to used ring of
1001 * the specified virtq, depending on the interrupt configuration of the device
1002 * and depending on negotiated and realtime constraints flagged by the guest driver.
1003 *
1004 * See VirtIO 1.0 specification (section 2.4.7).
1005 *
1006 * @param pDevIns The device instance.
1007 * @param pVirtio Pointer to the shared virtio state.
1008 * @param uVirtq Virtq to check for guest interrupt handling preference
1009 */
1010static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
1011{
1012 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1013 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1014
1015 if (!IS_DRIVER_OK(pVirtio))
1016 {
1017 LogFunc(("Guest driver not in ready state.\n"));
1018 return;
1019 }
1020
1021 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
1022 {
1023 if (pVirtq->fUsedRingEvent)
1024 {
1025#ifdef IN_RING3
1026 Log6Func(("...kicking guest %s, VIRTIO_F_EVENT_IDX set and threshold (%d) reached\n",
1027 pVirtq->szName, (uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq)));
1028#endif
1029 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsix);
1030 pVirtq->fUsedRingEvent = false;
1031 return;
1032 }
1033#ifdef IN_RING3
1034 Log6Func(("...skip interrupt %s, VIRTIO_F_EVENT_IDX set but threshold (%d) not reached (%d)\n",
1035 pVirtq->szName,(uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq), pVirtq->uUsedIdxShadow));
1036#endif
1037 }
1038 else
1039 {
1040 /** If guest driver hasn't suppressed interrupts, interrupt */
1041 if (!(virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT))
1042 {
1043 virtioKick(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsix);
1044 return;
1045 }
1046 Log6Func(("...skipping interrupt for %s (guest set VIRTQ_AVAIL_F_NO_INTERRUPT)\n", pVirtq->szName));
1047 }
1048}
1049
1050/**
1051 * Raise interrupt or MSI-X
1052 *
1053 * @param pDevIns The device instance.
1054 * @param pVirtio Pointer to the shared virtio state.
1055 * @param uCause Interrupt cause bit mask to set in PCI ISR port.
1056 * @param uVec MSI-X vector, if enabled
1057 */
1058static int virtioKick(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uMsixtor)
1059{
1060 if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT)
1061 Log6Func(("reason: buffer added to 'used' ring.\n"));
1062 else
1063 if (uCause == VIRTIO_ISR_DEVICE_CONFIG)
1064 Log6Func(("reason: device config change\n"));
1065
1066 if (!pVirtio->fMsiSupport)
1067 {
1068 pVirtio->uISR |= uCause;
1069 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_HIGH);
1070 }
1071 else if (uMsixtor != VIRTIO_MSI_NO_VECTOR)
1072 PDMDevHlpPCISetIrq(pDevIns, uMsixtor, 1);
1073 return VINF_SUCCESS;
1074}
1075
1076/**
1077 * Lower interrupt (Called when guest reads ISR and when resetting)
1078 *
1079 * @param pDevIns The device instance.
1080 */
1081static void virtioLowerInterrupt(PPDMDEVINS pDevIns, uint16_t uMsixtor)
1082{
1083 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1084 if (!pVirtio->fMsiSupport)
1085 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_LOW);
1086 else if (uMsixtor != VIRTIO_MSI_NO_VECTOR)
1087 PDMDevHlpPCISetIrq(pDevIns, pVirtio->uMsixConfig, PDM_IRQ_LEVEL_LOW);
1088}
1089
1090#ifdef IN_RING3
1091static void virtioResetVirtq(PVIRTIOCORE pVirtio, uint16_t uVirtq)
1092{
1093 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1094 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1095
1096 pVirtq->uAvailIdxShadow = 0;
1097 pVirtq->uUsedIdxShadow = 0;
1098 pVirtq->uEnable = false;
1099 pVirtq->uSize = VIRTQ_MAX_ENTRIES;
1100 pVirtq->uNotifyOffset = uVirtq;
1101 pVirtq->uMsix = uVirtq + 2;
1102 pVirtq->fUsedRingEvent = false;
1103
1104 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
1105 pVirtq->uMsix = VIRTIO_MSI_NO_VECTOR;
1106
1107 virtioLowerInterrupt(pVirtio->pDevInsR3, pVirtq->uMsix);
1108}
1109
1110static void virtioResetDevice(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
1111{
1112 Log2Func(("\n"));
1113 pVirtio->uDeviceFeaturesSelect = 0;
1114 pVirtio->uDriverFeaturesSelect = 0;
1115 pVirtio->uConfigGeneration = 0;
1116 pVirtio->fDeviceStatus = 0;
1117 pVirtio->uISR = 0;
1118
1119 if (!pVirtio->fMsiSupport)
1120 virtioLowerInterrupt(pDevIns, 0);
1121 else
1122 {
1123 virtioLowerInterrupt(pDevIns, pVirtio->uMsixConfig);
1124 for (int i = 0; i < VIRTQ_MAX_COUNT; i++)
1125 virtioLowerInterrupt(pDevIns, pVirtio->aVirtqueues[i].uMsix);
1126 }
1127
1128 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
1129 pVirtio->uMsixConfig = VIRTIO_MSI_NO_VECTOR;
1130
1131 for (uint16_t uVirtq = 0; uVirtq < VIRTQ_MAX_COUNT; uVirtq++)
1132 virtioResetVirtq(pVirtio, uVirtq);
1133}
1134
1135/**
1136 * Invoked by this implementation when guest driver resets the device.
1137 * The driver itself will not until the device has read the status change.
1138 */
1139static void virtioGuestR3WasReset(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
1140{
1141 LogFunc(("Guest reset the device\n"));
1142
1143 /* Let the client know */
1144 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, 0);
1145 virtioResetDevice(pDevIns, pVirtio);
1146}
1147#endif /* IN_RING3 */
1148
1149/**
1150 * Handle accesses to Common Configuration capability
1151 *
1152 * @returns VBox status code
1153 *
1154 * @param pDevIns The device instance.
1155 * @param pVirtio Pointer to the shared virtio state.
1156 * @param pVirtioCC Pointer to the current context virtio state.
1157 * @param fWrite Set if write access, clear if read access.
1158 * @param uOffsetOfAccess The common configuration capability offset.
1159 * @param cb Number of bytes to read or write
1160 * @param pv Pointer to location to write to or read from
1161 */
1162static int virtioCommonCfgAccessed(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
1163 int fWrite, uint32_t uOffsetOfAccess, unsigned cb, void *pv)
1164{
1165 uint16_t uVirtq = pVirtio->uVirtqSelect;
1166 int rc = VINF_SUCCESS;
1167 uint64_t val;
1168 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1169 {
1170 if (fWrite) /* Guest WRITE pCommonCfg>uDeviceFeatures */
1171 {
1172 /* VirtIO 1.0, 4.1.4.3 states device_feature is a (guest) driver readonly field,
1173 * yet the linux driver attempts to write/read it back twice */
1174 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1175 LogFunc(("... WARNING: Guest attempted to write readonly virtio_pci_common_cfg.device_feature (ignoring)\n"));
1176 return VINF_IOM_MMIO_UNUSED_00;
1177 }
1178 else /* Guest READ pCommonCfg->uDeviceFeatures */
1179 {
1180 switch (pVirtio->uDeviceFeaturesSelect)
1181 {
1182 case 0:
1183 val = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff);
1184 memcpy(pv, &val, cb);
1185 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1186 break;
1187 case 1:
1188 val = pVirtio->uDeviceFeatures >> 32;
1189 memcpy(pv, &val, cb);
1190 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
1191 break;
1192 default:
1193 LogFunc(("Guest read uDeviceFeatures with out of range selector (%#x), returning 0\n",
1194 pVirtio->uDeviceFeaturesSelect));
1195 return VINF_IOM_MMIO_UNUSED_00;
1196 }
1197 }
1198 }
1199 else
1200 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1201 {
1202 if (fWrite) /* Guest WRITE pCommonCfg->udriverFeatures */
1203 {
1204 switch (pVirtio->uDriverFeaturesSelect)
1205 {
1206 case 0:
1207 memcpy(&pVirtio->uDriverFeatures, pv, cb);
1208 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1209 break;
1210 case 1:
1211 memcpy((char *)&pVirtio->uDriverFeatures + sizeof(uint32_t), pv, cb);
1212 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
1213 break;
1214 default:
1215 LogFunc(("Guest wrote uDriverFeatures with out of range selector (%#x), returning 0\n",
1216 pVirtio->uDriverFeaturesSelect));
1217 return VINF_SUCCESS;
1218 }
1219 }
1220 /* Guest READ pCommonCfg->udriverFeatures */
1221 {
1222 switch (pVirtio->uDriverFeaturesSelect)
1223 {
1224 case 0:
1225 val = pVirtio->uDriverFeatures & 0xffffffff;
1226 memcpy(pv, &val, cb);
1227 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1228 break;
1229 case 1:
1230 val = (pVirtio->uDriverFeatures >> 32) & 0xffffffff;
1231 memcpy(pv, &val, cb);
1232 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + 4);
1233 break;
1234 default:
1235 LogFunc(("Guest read uDriverFeatures with out of range selector (%#x), returning 0\n",
1236 pVirtio->uDriverFeaturesSelect));
1237 return VINF_IOM_MMIO_UNUSED_00;
1238 }
1239 }
1240 }
1241 else
1242 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1243 {
1244 if (fWrite)
1245 {
1246 Log2Func(("Guest attempted to write readonly virtio_pci_common_cfg.num_queues\n"));
1247 return VINF_SUCCESS;
1248 }
1249 *(uint16_t *)pv = VIRTQ_MAX_COUNT;
1250 VIRTIO_DEV_CONFIG_LOG_ACCESS(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1251 }
1252 else
1253 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1254 {
1255 if (fWrite) /* Guest WRITE pCommonCfg->fDeviceStatus */
1256 {
1257 pVirtio->fDeviceStatus = *(uint8_t *)pv;
1258 bool fDeviceReset = pVirtio->fDeviceStatus == 0;
1259
1260 if (LogIs7Enabled())
1261 {
1262 char szOut[80] = { 0 };
1263 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1264 LogFunc(("Guest wrote fDeviceStatus ................ (%s)\n", szOut));
1265 }
1266 bool const fStatusChanged =
1267 (pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) != (pVirtio->uPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK);
1268
1269 if (fDeviceReset || fStatusChanged)
1270 {
1271#ifdef IN_RING0
1272 /* Since VirtIO status changes are cumbersome by nature, e.g. not a benchmark priority,
1273 * handle the rest in R3 to facilitate logging or whatever dev-specific client needs to do */
1274 Log6Func(("RING0 => RING3 (demote)\n"));
1275 return VINF_IOM_R3_MMIO_WRITE;
1276#endif
1277 }
1278
1279#ifdef IN_RING3
1280 /*
1281 * Notify client only if status actually changed from last time and when we're reset.
1282 */
1283 if (fDeviceReset)
1284 virtioGuestR3WasReset(pDevIns, pVirtio, pVirtioCC);
1285
1286 if (fStatusChanged)
1287 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK);
1288#endif
1289 /*
1290 * Save the current status for the next write so we can see what changed.
1291 */
1292 pVirtio->uPrevDeviceStatus = pVirtio->fDeviceStatus;
1293 }
1294 else /* Guest READ pCommonCfg->fDeviceStatus */
1295 {
1296 *(uint8_t *)pv = pVirtio->fDeviceStatus;
1297
1298 if (LogIs7Enabled())
1299 {
1300 char szOut[80] = { 0 };
1301 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1302 LogFunc(("Guest read fDeviceStatus ................ (%s)\n", szOut));
1303 }
1304 }
1305 }
1306 else
1307 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1308 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1309 else
1310 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDeviceFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1311 VIRTIO_DEV_CONFIG_ACCESS( uDeviceFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1312 else
1313 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDriverFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1314 VIRTIO_DEV_CONFIG_ACCESS( uDriverFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1315 else
1316 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uConfigGeneration, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1317 VIRTIO_DEV_CONFIG_ACCESS( uConfigGeneration, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1318 else
1319 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1320 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1321 else
1322 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqDesc, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1323 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqDesc, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1324 else
1325 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqAvail, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1326 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqAvail, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1327 else
1328 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqUsed, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1329 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqUsed, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1330 else
1331 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uSize, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1332 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uSize, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1333 else
1334 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uEnable, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1335 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uEnable, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1336 else
1337 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uNotifyOffset, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1338 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uNotifyOffset, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1339 else
1340 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsix, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1341 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsix, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1342 else
1343 {
1344 Log2Func(("Bad guest %s access to virtio_pci_common_cfg: uOffsetOfAccess=%#x (%d), cb=%d\n",
1345 fWrite ? "write" : "read ", uOffsetOfAccess, uOffsetOfAccess, cb));
1346 return fWrite ? VINF_SUCCESS : VINF_IOM_MMIO_UNUSED_00;
1347 }
1348
1349#ifndef IN_RING3
1350 RT_NOREF(pDevIns, pVirtioCC);
1351#endif
1352 return rc;
1353}
1354
1355/**
1356 * @callback_method_impl{FNIOMMMIONEWREAD,
1357 * Memory mapped I/O Handler for PCI Capabilities read operations.}
1358 *
1359 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
1360 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to reads
1361 * of 1, 2 or 4 bytes, only.
1362 *
1363 */
1364static DECLCALLBACK(VBOXSTRICTRC) virtioMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
1365{
1366 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1367 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1368 AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER);
1369 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
1370
1371 uint32_t uOffset;
1372 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
1373 {
1374#ifdef IN_RING3
1375 /*
1376 * Callback to client to manage device-specific configuration.
1377 */
1378 VBOXSTRICTRC rcStrict = pVirtioCC->pfnDevCapRead(pDevIns, uOffset, pv, cb);
1379
1380 /*
1381 * Additionally, anytime any part of the device-specific configuration (which our client maintains)
1382 * is READ it needs to be checked to see if it changed since the last time any part was read, in
1383 * order to maintain the config generation (see VirtIO 1.0 spec, section 4.1.4.3.1)
1384 */
1385 bool fDevSpecificFieldChanged = RT_BOOL(memcmp(pVirtioCC->pbDevSpecificCfg + uOffset,
1386 pVirtioCC->pbPrevDevSpecificCfg + uOffset,
1387 RT_MIN(cb, pVirtioCC->cbDevSpecificCfg - uOffset)));
1388
1389 memcpy(pVirtioCC->pbPrevDevSpecificCfg, pVirtioCC->pbDevSpecificCfg, pVirtioCC->cbDevSpecificCfg);
1390
1391 if (pVirtio->fGenUpdatePending || fDevSpecificFieldChanged)
1392 {
1393 ++pVirtio->uConfigGeneration;
1394 Log6Func(("Bumped cfg. generation to %d because %s%s\n",
1395 pVirtio->uConfigGeneration,
1396 fDevSpecificFieldChanged ? "<dev cfg changed> " : "",
1397 pVirtio->fGenUpdatePending ? "<update was pending>" : ""));
1398 pVirtio->fGenUpdatePending = false;
1399 }
1400
1401 virtioLowerInterrupt(pDevIns, 0);
1402 return rcStrict;
1403#else
1404 return VINF_IOM_R3_MMIO_READ;
1405#endif
1406 }
1407
1408 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
1409 return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, false /* fWrite */, uOffset, cb, pv);
1410
1411 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap))
1412 {
1413 *(uint8_t *)pv = pVirtio->uISR;
1414 Log6Func(("Read and clear ISR\n"));
1415 pVirtio->uISR = 0; /* VirtIO spec requires reads of ISR to clear it */
1416 virtioLowerInterrupt(pDevIns, 0);
1417 return VINF_SUCCESS;
1418 }
1419
1420 ASSERT_GUEST_MSG_FAILED(("Bad read access to mapped capabilities region: off=%RGp cb=%u\n",
1421 off, cb));
1422 return VINF_IOM_MMIO_UNUSED_00;
1423}
1424
1425/**
1426 * @callback_method_impl{FNIOMMMIONEWREAD,
1427 * Memory mapped I/O Handler for PCI Capabilities write operations.}
1428 *
1429 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
1430 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to writes
1431 * of 1, 2 or 4 bytes, only.
1432 */
1433static DECLCALLBACK(VBOXSTRICTRC) virtioMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
1434{
1435 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1436 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1437
1438 AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER);
1439
1440 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
1441 uint32_t uOffset;
1442 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
1443 {
1444#ifdef IN_RING3
1445 /*
1446 * Foreward this MMIO write access for client to deal with.
1447 */
1448 return pVirtioCC->pfnDevCapWrite(pDevIns, uOffset, pv, cb);
1449#else
1450 return VINF_IOM_R3_MMIO_WRITE;
1451#endif
1452 }
1453
1454 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
1455 return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, true /* fWrite */, uOffset, cb, (void *)pv);
1456
1457 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap) && cb == sizeof(uint8_t))
1458 {
1459 pVirtio->uISR = *(uint8_t *)pv;
1460 Log6Func(("Setting uISR = 0x%02x (virtq interrupt: %d, dev confg interrupt: %d)\n",
1461 pVirtio->uISR & 0xff,
1462 pVirtio->uISR & VIRTIO_ISR_VIRTQ_INTERRUPT,
1463 RT_BOOL(pVirtio->uISR & VIRTIO_ISR_DEVICE_CONFIG)));
1464 return VINF_SUCCESS;
1465 }
1466
1467 /* This *should* be guest driver dropping index of a new descriptor in avail ring */
1468 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocNotifyCap) && cb == sizeof(uint16_t))
1469 {
1470 virtioCoreVirtqNotified(pDevIns, pVirtio, uOffset / VIRTIO_NOTIFY_OFFSET_MULTIPLIER, *(uint16_t *)pv);
1471 return VINF_SUCCESS;
1472 }
1473
1474 ASSERT_GUEST_MSG_FAILED(("Bad write access to mapped capabilities region: off=%RGp pv=%#p{%.*Rhxs} cb=%u\n", off, pv, cb, pv, cb));
1475 return VINF_SUCCESS;
1476}
1477
1478#ifdef IN_RING3
1479
1480/**
1481 * @callback_method_impl{FNPCICONFIGREAD}
1482 */
1483static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
1484 uint32_t uAddress, unsigned cb, uint32_t *pu32Value)
1485{
1486 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1487 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1488 RT_NOREF(pPciDev);
1489
1490 if (uAddress == pVirtio->uPciCfgDataOff)
1491 {
1492 /*
1493 * VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
1494 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
1495 * (the virtio_pci_cfg_cap capability), and access data items.
1496 */
1497 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
1498 uint32_t uLength = pPciCap->uLength;
1499
1500 Log7Func((" pDevIns=%p pPciDev=%p uAddress=%#x%s cb=%u uLength=%d, bar=%d\n",
1501 pDevIns, pPciDev, uAddress, uAddress < 0x10 ? " " : "", cb, uLength, pPciCap->uBar));
1502
1503 if ( (uLength != 1 && uLength != 2 && uLength != 4)
1504 || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
1505 {
1506 ASSERT_GUEST_MSG_FAILED(("Guest read virtio_pci_cfg_cap.pci_cfg_data using mismatching config. "
1507 "Ignoring\n"));
1508 *pu32Value = UINT32_MAX;
1509 return VINF_SUCCESS;
1510 }
1511
1512 VBOXSTRICTRC rcStrict = virtioMmioRead(pDevIns, pVirtio, pPciCap->uOffset, pu32Value, cb);
1513 Log7Func((" Guest read virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%d, length=%d, result=0x%x -> %Rrc\n",
1514 pPciCap->uBar, pPciCap->uOffset, uLength, *pu32Value, VBOXSTRICTRC_VAL(rcStrict)));
1515 return rcStrict;
1516 }
1517 Log7Func((" pDevIns=%p pPciDev=%p uAddress=%#x%s cb=%u pu32Value=%p\n",
1518 pDevIns, pPciDev, uAddress, uAddress < 0x10 ? " " : "", cb, pu32Value));
1519 return VINF_PDM_PCI_DO_DEFAULT;
1520}
1521
1522/**
1523 * @callback_method_impl{FNPCICONFIGWRITE}
1524 */
1525static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
1526 uint32_t uAddress, unsigned cb, uint32_t u32Value)
1527{
1528 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1529 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1530 RT_NOREF(pPciDev);
1531
1532 Log7Func(("pDevIns=%p pPciDev=%p uAddress=%#x %scb=%u u32Value=%#x\n", pDevIns, pPciDev, uAddress, uAddress < 0xf ? " " : "", cb, u32Value));
1533 if (uAddress == pVirtio->uPciCfgDataOff)
1534 {
1535 /* VirtIO 1.0 spec section 4.1.4.7 describes a required alternative access capability
1536 * whereby the guest driver can specify a bar, offset, and length via the PCI configuration space
1537 * (the virtio_pci_cfg_cap capability), and access data items. */
1538
1539 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
1540 uint32_t uLength = pPciCap->uLength;
1541
1542 if ( (uLength != 1 && uLength != 2 && uLength != 4)
1543 || cb != uLength
1544 || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
1545 {
1546 ASSERT_GUEST_MSG_FAILED(("Guest write virtio_pci_cfg_cap.pci_cfg_data using mismatching config. Ignoring\n"));
1547 return VINF_SUCCESS;
1548 }
1549
1550 VBOXSTRICTRC rcStrict = virtioMmioWrite(pDevIns, pVirtio, pPciCap->uOffset, &u32Value, cb);
1551 Log2Func(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%x, length=%x, value=%d -> %Rrc\n",
1552 pPciCap->uBar, pPciCap->uOffset, uLength, u32Value, VBOXSTRICTRC_VAL(rcStrict)));
1553 return rcStrict;
1554 }
1555 return VINF_PDM_PCI_DO_DEFAULT;
1556}
1557
1558
1559/*********************************************************************************************************************************
1560* Saved state. *
1561*********************************************************************************************************************************/
1562
1563/**
1564 * Called from the FNSSMDEVSAVEEXEC function of the device.
1565 *
1566 * @param pVirtio Pointer to the shared virtio state.
1567 * @param pHlp The ring-3 device helpers.
1568 * @param pSSM The saved state handle.
1569 * @returns VBox status code.
1570 */
1571int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
1572{
1573 LogFunc(("\n"));
1574 pHlp->pfnSSMPutU64(pSSM, VIRTIO_SAVEDSTATE_MARKER);
1575 pHlp->pfnSSMPutU32(pSSM, VIRTIO_SAVEDSTATE_VERSION);
1576
1577 pHlp->pfnSSMPutBool(pSSM, pVirtio->fGenUpdatePending);
1578 pHlp->pfnSSMPutU8( pSSM, pVirtio->fDeviceStatus);
1579 pHlp->pfnSSMPutU8( pSSM, pVirtio->uConfigGeneration);
1580 pHlp->pfnSSMPutU8( pSSM, pVirtio->uPciCfgDataOff);
1581 pHlp->pfnSSMPutU8( pSSM, pVirtio->uISR);
1582 pHlp->pfnSSMPutU16( pSSM, pVirtio->uVirtqSelect);
1583 pHlp->pfnSSMPutU32( pSSM, pVirtio->uDeviceFeaturesSelect);
1584 pHlp->pfnSSMPutU32( pSSM, pVirtio->uDriverFeaturesSelect);
1585 pHlp->pfnSSMPutU64( pSSM, pVirtio->uDriverFeatures);
1586
1587 for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
1588 {
1589 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];
1590
1591 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqDesc);
1592 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqAvail);
1593 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqUsed);
1594 pHlp->pfnSSMPutU16( pSSM, pVirtq->uNotifyOffset);
1595 pHlp->pfnSSMPutU16( pSSM, pVirtq->uMsix);
1596 pHlp->pfnSSMPutU16( pSSM, pVirtq->uEnable);
1597 pHlp->pfnSSMPutU16( pSSM, pVirtq->uSize);
1598 pHlp->pfnSSMPutU16( pSSM, pVirtq->uAvailIdxShadow);
1599 pHlp->pfnSSMPutU16( pSSM, pVirtq->uUsedIdxShadow);
1600 int rc = pHlp->pfnSSMPutMem(pSSM, pVirtq->szName, 32);
1601 AssertRCReturn(rc, rc);
1602 }
1603
1604 return VINF_SUCCESS;
1605}
1606
1607/**
1608 * Called from the FNSSMDEVLOADEXEC function of the device.
1609 *
1610 * @param pVirtio Pointer to the shared virtio state.
1611 * @param pHlp The ring-3 device helpers.
1612 * @param pSSM The saved state handle.
1613 * @returns VBox status code.
1614 */
1615int virtioCoreR3LoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
1616{
1617 LogFunc(("\n"));
1618 /*
1619 * Check the marker and (embedded) version number.
1620 */
1621 uint64_t uMarker = 0;
1622 int rc = pHlp->pfnSSMGetU64(pSSM, &uMarker);
1623 AssertRCReturn(rc, rc);
1624 if (uMarker != VIRTIO_SAVEDSTATE_MARKER)
1625 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
1626 N_("Expected marker value %#RX64 found %#RX64 instead"),
1627 VIRTIO_SAVEDSTATE_MARKER, uMarker);
1628 uint32_t uVersion = 0;
1629 rc = pHlp->pfnSSMGetU32(pSSM, &uVersion);
1630 AssertRCReturn(rc, rc);
1631 if (uVersion != VIRTIO_SAVEDSTATE_VERSION)
1632 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
1633 N_("Unsupported virtio version: %u"), uVersion);
1634 /*
1635 * Load the state.
1636 */
1637 pHlp->pfnSSMGetBool( pSSM, &pVirtio->fGenUpdatePending);
1638 pHlp->pfnSSMGetU8( pSSM, &pVirtio->fDeviceStatus);
1639 pHlp->pfnSSMGetU8( pSSM, &pVirtio->uConfigGeneration);
1640 pHlp->pfnSSMGetU8( pSSM, &pVirtio->uPciCfgDataOff);
1641 pHlp->pfnSSMGetU8( pSSM, &pVirtio->uISR);
1642 pHlp->pfnSSMGetU16( pSSM, &pVirtio->uVirtqSelect);
1643 pHlp->pfnSSMGetU32( pSSM, &pVirtio->uDeviceFeaturesSelect);
1644 pHlp->pfnSSMGetU32( pSSM, &pVirtio->uDriverFeaturesSelect);
1645 pHlp->pfnSSMGetU64( pSSM, &pVirtio->uDriverFeatures);
1646
1647 for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
1648 {
1649 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];
1650
1651 pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqDesc);
1652 pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqAvail);
1653 pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqUsed);
1654 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uNotifyOffset);
1655 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uMsix);
1656 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uEnable);
1657 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uSize);
1658 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uAvailIdxShadow);
1659 pHlp->pfnSSMGetU16( pSSM, &pVirtq->uUsedIdxShadow);
1660 rc = pHlp->pfnSSMGetMem( pSSM, pVirtq->szName, sizeof(pVirtq->szName));
1661 AssertRCReturn(rc, rc);
1662 }
1663
1664 return VINF_SUCCESS;
1665}
1666
1667
1668/*********************************************************************************************************************************
1669* Device Level *
1670*********************************************************************************************************************************/
1671
1672/**
1673 * This must be called by the client to handle VM state changes
1674 * after the client takes care of its device-specific tasks for the state change.
1675 * (i.e. Reset, suspend, power-off, resume)
1676 *
1677 * @param pDevIns The device instance.
1678 * @param pVirtio Pointer to the shared virtio state.
1679 */
1680void virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState)
1681{
1682 LogFunc(("State changing to %s\n",
1683 virtioCoreGetStateChangeText(enmState)));
1684
1685 switch(enmState)
1686 {
1687 case kvirtIoVmStateChangedReset:
1688 virtioCoreResetAll(pVirtio);
1689 break;
1690 case kvirtIoVmStateChangedSuspend:
1691 break;
1692 case kvirtIoVmStateChangedPowerOff:
1693 break;
1694 case kvirtIoVmStateChangedResume:
1695 for (int uVirtq = 0; uVirtq < VIRTQ_MAX_COUNT; uVirtq++)
1696 {
1697 if (pVirtio->aVirtqueues[uVirtq].uEnable)
1698 virtioCoreNotifyGuestDriver(pVirtio->pDevInsR3, pVirtio, uVirtq);
1699 }
1700 break;
1701 default:
1702 LogRelFunc(("Bad enum value"));
1703 return;
1704 }
1705}
1706
1707/**
1708 * This should be called from PDMDEVREGR3::pfnDestruct.
1709 *
1710 * @param pDevIns The device instance.
1711 * @param pVirtio Pointer to the shared virtio state.
1712 * @param pVirtioCC Pointer to the ring-3 virtio state.
1713 */
1714void virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
1715{
1716 if (pVirtioCC->pbPrevDevSpecificCfg)
1717 {
1718 RTMemFree(pVirtioCC->pbPrevDevSpecificCfg);
1719 pVirtioCC->pbPrevDevSpecificCfg = NULL;
1720 }
1721 RT_NOREF(pDevIns, pVirtio);
1722}
1723
1724/** API Function: See header file */
1725int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, PVIRTIOPCIPARAMS pPciParams,
1726 const char *pcszInstance, uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg)
1727{
1728 /*
1729 * The pVirtio state must be the first member of the shared device instance
1730 * data, otherwise we cannot get our bearings in the PCI configuration callbacks.
1731 */
1732 AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
1733 AssertLogRelReturn(pVirtioCC == PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC), VERR_STATE_CHANGED);
1734
1735 pVirtio->pDevInsR3 = pDevIns;
1736
1737 /*
1738 * Caller must initialize these.
1739 */
1740 AssertReturn(pVirtioCC->pfnStatusChanged, VERR_INVALID_POINTER);
1741 AssertReturn(pVirtioCC->pfnVirtqNotified, VERR_INVALID_POINTER);
1742
1743#if 0 /* Until pdmR3DvHlp_PCISetIrq() impl is fixed and Assert that limits vec to 0 is removed */
1744# ifdef VBOX_WITH_MSI_DEVICES
1745 pVirtio->fMsiSupport = true;
1746# endif
1747#endif
1748
1749 /*
1750 * The host features offered include both device-specific features
1751 * and reserved feature bits (device independent)
1752 */
1753 pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1
1754 | VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED
1755 | fDevSpecificFeatures;
1756
1757 RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance);
1758
1759 pVirtio->fDeviceStatus = 0;
1760 pVirtioCC->cbDevSpecificCfg = cbDevSpecificCfg;
1761 pVirtioCC->pbDevSpecificCfg = (uint8_t *)pvDevSpecificCfg;
1762 pVirtioCC->pbPrevDevSpecificCfg = (uint8_t *)RTMemDup(pvDevSpecificCfg, cbDevSpecificCfg);
1763 AssertLogRelReturn(pVirtioCC->pbPrevDevSpecificCfg, VERR_NO_MEMORY);
1764
1765 /* Set PCI config registers (assume 32-bit mode) */
1766 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
1767 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
1768
1769 PDMPciDevSetRevisionId(pPciDev, DEVICE_PCI_REVISION_ID_VIRTIO);
1770 PDMPciDevSetVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1771 PDMPciDevSetSubSystemVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
1772 PDMPciDevSetDeviceId(pPciDev, pPciParams->uDeviceId);
1773 PDMPciDevSetClassBase(pPciDev, pPciParams->uClassBase);
1774 PDMPciDevSetClassSub(pPciDev, pPciParams->uClassSub);
1775 PDMPciDevSetClassProg(pPciDev, pPciParams->uClassProg);
1776 PDMPciDevSetSubSystemId(pPciDev, pPciParams->uSubsystemId);
1777 PDMPciDevSetInterruptLine(pPciDev, pPciParams->uInterruptLine);
1778 PDMPciDevSetInterruptPin(pPciDev, pPciParams->uInterruptPin);
1779
1780 /* Register PCI device */
1781 int rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
1782 if (RT_FAILURE(rc))
1783 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Device")); /* can we put params in this error? */
1784
1785 rc = PDMDevHlpPCIInterceptConfigAccesses(pDevIns, pPciDev, virtioR3PciConfigRead, virtioR3PciConfigWrite);
1786 AssertRCReturn(rc, rc);
1787
1788
1789 /* Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */
1790
1791#define CFG_ADDR_2_IDX(addr) ((uint8_t)(((uintptr_t)(addr) - (uintptr_t)&pPciDev->abConfig[0])))
1792#define SET_PCI_CAP_LOC(a_pPciDev, a_pCfg, a_LocCap, a_uMmioLengthAlign) \
1793 do { \
1794 (a_LocCap).offMmio = (a_pCfg)->uOffset; \
1795 (a_LocCap).cbMmio = RT_ALIGN_T((a_pCfg)->uLength, a_uMmioLengthAlign, uint16_t); \
1796 (a_LocCap).offPci = (uint16_t)(uintptr_t)((uint8_t *)(a_pCfg) - &(a_pPciDev)->abConfig[0]); \
1797 (a_LocCap).cbPci = (a_pCfg)->uCapLen; \
1798 } while (0)
1799
1800 PVIRTIO_PCI_CAP_T pCfg;
1801 uint32_t cbRegion = 0;
1802
1803 /* Common capability (VirtIO 1.0 spec, section 4.1.4.3) */
1804 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[0x40];
1805 pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
1806 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1807 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1808 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
1809 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1810 pCfg->uOffset = RT_ALIGN_32(0, 4); /* Currently 0, but reminder to 32-bit align if changing this */
1811 pCfg->uLength = sizeof(VIRTIO_PCI_COMMON_CFG_T);
1812 cbRegion += pCfg->uLength;
1813 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocCommonCfgCap, 2);
1814 pVirtioCC->pCommonCfgCap = pCfg;
1815
1816 /*
1817 * Notify capability (VirtIO 1.0 spec, section 4.1.4.4). Note: uLength is based on the choice
1818 * of this implementation to make each queue's uNotifyOffset equal to (VirtqSelect) ordinal
1819 * value of the queue (different strategies are possible according to spec).
1820 */
1821 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1822 pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
1823 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1824 pCfg->uCapLen = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
1825 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
1826 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1827 pCfg->uOffset = pVirtioCC->pCommonCfgCap->uOffset + pVirtioCC->pCommonCfgCap->uLength;
1828 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
1829 pCfg->uLength = VIRTQ_MAX_COUNT * VIRTIO_NOTIFY_OFFSET_MULTIPLIER + 2; /* will change in VirtIO 1.1 */
1830 cbRegion += pCfg->uLength;
1831 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocNotifyCap, 1);
1832 pVirtioCC->pNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)pCfg;
1833 pVirtioCC->pNotifyCap->uNotifyOffMultiplier = VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
1834
1835 /* ISR capability (VirtIO 1.0 spec, section 4.1.4.5)
1836 *
1837 * VirtIO 1.0 spec says 8-bit, unaligned in MMIO space. Example/diagram
1838 * of spec shows it as a 32-bit field with upper bits 'reserved'
1839 * Will take spec's words more literally than the diagram for now.
1840 */
1841 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1842 pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
1843 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1844 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1845 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
1846 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1847 pCfg->uOffset = pVirtioCC->pNotifyCap->pciCap.uOffset + pVirtioCC->pNotifyCap->pciCap.uLength;
1848 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
1849 pCfg->uLength = sizeof(uint8_t);
1850 cbRegion += pCfg->uLength;
1851 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocIsrCap, 4);
1852 pVirtioCC->pIsrCap = pCfg;
1853
1854 /* PCI Cfg capability (VirtIO 1.0 spec, section 4.1.4.7)
1855 * This capability doesn't get page-MMIO mapped. Instead uBar, uOffset and uLength are intercepted
1856 * by trapping PCI configuration I/O and get modulated by consumers to locate fetch and read/write
1857 * values from any region. NOTE: The linux driver not only doesn't use this feature, it will not
1858 * even list it as present if uLength isn't non-zero and also 4-byte-aligned as the linux driver is
1859 * initializing.
1860 */
1861 pVirtio->uPciCfgDataOff = pCfg->uCapNext + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, uPciCfgData);
1862 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1863 pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
1864 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1865 pCfg->uCapLen = sizeof(VIRTIO_PCI_CFG_CAP_T);
1866 pCfg->uCapNext = (pVirtio->fMsiSupport || pVirtioCC->pbDevSpecificCfg) ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
1867 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1868 pCfg->uOffset = 0;
1869 pCfg->uLength = 4;
1870 cbRegion += pCfg->uLength;
1871 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocPciCfgCap, 1);
1872 pVirtioCC->pPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)pCfg;
1873
1874 if (pVirtioCC->pbDevSpecificCfg)
1875 {
1876 /* Device specific config capability (via VirtIO 1.0, section 4.1.4.6).
1877 * Client defines the device-specific config struct and passes size to virtioCoreR3Init()
1878 * to inform this. */
1879 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
1880 pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
1881 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
1882 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
1883 pCfg->uCapNext = pVirtio->fMsiSupport ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
1884 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
1885 pCfg->uOffset = pVirtioCC->pIsrCap->uOffset + pVirtioCC->pIsrCap->uLength;
1886 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
1887 pCfg->uLength = cbDevSpecificCfg;
1888 cbRegion += pCfg->uLength;
1889 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocDeviceCap, 4);
1890 pVirtioCC->pDeviceCap = pCfg;
1891 }
1892 else
1893 Assert(pVirtio->LocDeviceCap.cbMmio == 0 && pVirtio->LocDeviceCap.cbPci == 0);
1894
1895 if (pVirtio->fMsiSupport)
1896 {
1897 PDMMSIREG aMsiReg;
1898 RT_ZERO(aMsiReg);
1899 aMsiReg.iMsixCapOffset = pCfg->uCapNext;
1900 aMsiReg.iMsixNextOffset = 0;
1901 aMsiReg.iMsixBar = VIRTIO_REGION_MSIX_CAP;
1902 aMsiReg.cMsixVectors = VBOX_MSIX_MAX_ENTRIES;
1903 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
1904 if (RT_FAILURE(rc))
1905 {
1906 /* See PDMDevHlp.cpp:pdmR3DevHlp_PCIRegisterMsi */
1907 LogFunc(("Failed to configure MSI-X (%Rrc). Reverting to INTx\n", rc));
1908 pVirtio->fMsiSupport = false;
1909 }
1910 else
1911 Log2Func(("Using MSI-X for guest driver notification\n"));
1912 }
1913 else
1914 LogFunc(("MSI-X not available for VBox, using INTx notification\n"));
1915
1916 /* Set offset to first capability and enable PCI dev capabilities */
1917 PDMPciDevSetCapabilityList(pPciDev, 0x40);
1918 PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST);
1919
1920 size_t cbSize = RTStrPrintf(pVirtioCC->pcszMmioName, sizeof(pVirtioCC->pcszMmioName), "%s MMIO", pcszInstance);
1921 if (cbSize <= 0)
1922 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: out of memory allocating string")); /* can we put params in this error? */
1923
1924 /* Note: The Linux driver at drivers/virtio/virtio_pci_modern.c tries to map at least a page for the
1925 * 'unknown' device-specific capability without querying the capability to figure
1926 * out size, so pad with an extra page
1927 */
1928 rc = PDMDevHlpPCIIORegionCreateMmio(pDevIns, VIRTIO_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + PAGE_SIZE, PAGE_SIZE),
1929 PCI_ADDRESS_SPACE_MEM, virtioMmioWrite, virtioMmioRead, pVirtio,
1930 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
1931 pVirtioCC->pcszMmioName,
1932 &pVirtio->hMmioPciCap);
1933 AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Capabilities address space")));
1934 /*
1935 * Statistics.
1936 */
1937 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsAllocated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1938 "Total number of allocated descriptor chains", "DescChainsAllocated");
1939 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsFreed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1940 "Total number of freed descriptor chains", "DescChainsFreed");
1941 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsIn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1942 "Total number of inbound segments", "DescChainsSegsIn");
1943 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsOut, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
1944 "Total number of outbound segments", "DescChainsSegsOut");
1945
1946 return VINF_SUCCESS;
1947}
1948
1949#else /* !IN_RING3 */
1950
1951/**
1952 * Sets up the core ring-0/raw-mode virtio bits.
1953 *
1954 * @returns VBox status code.
1955 * @param pDevIns The device instance.
1956 * @param pVirtio Pointer to the shared virtio state. This must be the first
1957 * member in the shared device instance data!
1958 */
1959int virtioCoreRZInit(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
1960{
1961 AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
1962
1963#ifdef FUTURE_OPTIMIZATION
1964 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
1965 AssertRCReturn(rc, rc);
1966#endif
1967 int rc = PDMDevHlpMmioSetUpContext(pDevIns, pVirtio->hMmioPciCap, virtioMmioWrite, virtioMmioRead, pVirtio);
1968 AssertRCReturn(rc, rc);
1969 return rc;
1970}
1971
1972#endif /* !IN_RING3 */
1973
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette