/* $Id: VirtioCore.cpp 95609 2022-07-12 20:28:41Z vboxsync $ */

/** @file
 * VirtioCore - Virtio Core (PCI, feature & config mgt, queue mgt & proxy, notification mgt)
 */

/*
 * Copyright (C) 2009-2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_DEV_VIRTIO

#include <iprt/assert.h>
#include <iprt/uuid.h>
#include <iprt/mem.h>
#include <iprt/sg.h>
#include <iprt/string.h>
#include <iprt/param.h>
#include <iprt/types.h>
#include <VBox/log.h>
#include <VBox/msi.h>
#include <VBox/AssertGuest.h>
#include <VBox/vmm/pdmdev.h>
#include "VirtioCore.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/

#define INSTANCE(a_pVirtio)                 ((a_pVirtio)->szInstance)
#define VIRTQNAME(a_pVirtio, a_uVirtq)      ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName)

#define IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq) \
            (virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq) == 0)

#define IS_DRIVER_OK(a_pVirtio)             ((a_pVirtio)->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
#define WAS_DRIVER_OK(a_pVirtio)            ((a_pVirtio)->fPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK)

/**
 * These defines track the guest virtio-net driver writing its accepted-features flags
 * in two 32-bit operations (in arbitrary order), with one bit dedicated to ensuring the
 * 'features complete' logic is handled only once.
 */
#define DRIVER_FEATURES_0_WRITTEN           1   /**< fDriverFeatures[0] written by guest virtio-net */
#define DRIVER_FEATURES_1_WRITTEN           2   /**< fDriverFeatures[1] written by guest virtio-net */
#define DRIVER_FEATURES_0_AND_1_WRITTEN     3   /**< Both 32-bit parts of fDriverFeatures[] written */
#define DRIVER_FEATURES_COMPLETE_HANDLED    4   /**< Features negotiation complete handler called   */

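/*
 * Illustrative sketch (not additional logic in this file) of how these bits combine.
 * The guest may write the two 32-bit halves of the feature word in either order, so
 * completion is detected only when both "written" bits are set and the "handled" bit
 * is still clear, exactly as done in virtioCommonCfgAccessed() below:
 *
 * @code
 *     if (    (fDriverFeaturesWritten & DRIVER_FEATURES_0_AND_1_WRITTEN) == DRIVER_FEATURES_0_AND_1_WRITTEN
 *         && !(fDriverFeaturesWritten & DRIVER_FEATURES_COMPLETE_HANDLED))
 *         // ... run the features-complete handler exactly once ...
 * @endcode
 */
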
/**
 * This macro returns true if the @a a_offAccess and access length (@a
 * a_cbAccess) are within the range of the mapped capability struct described by
 * @a a_LocCapData.
 *
 * @param[in]  a_offAccess      Input:  The offset into the MMIO bar of the access.
 * @param[in]  a_cbAccess       Input:  The access size.
 * @param[out] a_offsetIntoCap  Output: uint32_t variable to return the intra-capability offset into.
 * @param[in]  a_LocCapData     Input:  The capability location info.
 */
#define MATCHES_VIRTIO_CAP_STRUCT(a_offAccess, a_cbAccess, a_offsetIntoCap, a_LocCapData) \
    (   ((a_offsetIntoCap) = (uint32_t)((a_offAccess) - (a_LocCapData).offMmio)) < (uint32_t)(a_LocCapData).cbMmio \
     && (a_offsetIntoCap) + (uint32_t)(a_cbAccess) <= (uint32_t)(a_LocCapData).cbMmio )


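/*
 * Usage sketch for the macro above (hypothetical MMIO-dispatch code; the member
 * name 'LocCommonCfgCap' is an assumed capability-location field for illustration
 * only, not confirmed against the header):
 *
 * @code
 *     uint32_t offIntraCap;
 *     if (MATCHES_VIRTIO_CAP_STRUCT(offAccess, cb, offIntraCap, pVirtio->LocCommonCfgCap))
 *         return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, fWrite, offIntraCap, cb, pv);
 * @endcode
 */
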
/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/

/** @name virtq related flags
 * @{ */
#define VIRTQ_DESC_F_NEXT                   1   /**< Indicates this descriptor chains to next  */
#define VIRTQ_DESC_F_WRITE                  2   /**< Marks buffer as write-only (default ro)   */
#define VIRTQ_DESC_F_INDIRECT               4   /**< Buffer is list of buffer descriptors      */

#define VIRTQ_USED_F_NO_NOTIFY              1   /**< Dev to Drv: Don't notify when buf added   */
#define VIRTQ_AVAIL_F_NO_INTERRUPT          1   /**< Drv to Dev: Don't notify when buf eaten   */
/** @} */

/**
 * virtq-related structs
 * (struct names follow the VirtIO 1.0 spec; field names use VBox-styled naming, with the respective spec'd name in comments)
 */
typedef struct virtq_desc
{
    uint64_t  GCPhysBuf;                        /**< addr       GC Phys. address of buffer     */
    uint32_t  cb;                               /**< len        Buffer length                  */
    uint16_t  fFlags;                           /**< flags      Buffer specific flags          */
    uint16_t  uDescIdxNext;                     /**< next       Idx set if VIRTIO_DESC_F_NEXT  */
} VIRTQ_DESC_T, *PVIRTQ_DESC_T;

typedef struct virtq_avail
{
    uint16_t  fFlags;                           /**< flags      avail ring guest-to-host flags */
    uint16_t  uIdx;                             /**< idx        Index of next free ring slot   */
    RT_FLEXIBLE_ARRAY_EXTENSION
    uint16_t  auRing[RT_FLEXIBLE_ARRAY];        /**< ring       Ring: avail drv to dev bufs    */
    //uint16_t  uUsedEventIdx;                  /**< used_event (if VIRTQ_USED_F_EVENT_IDX)    */
} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;

typedef struct virtq_used_elem
{
    uint32_t  uDescIdx;                         /**< idx        Start of used desc chain       */
    uint32_t  cbElem;                           /**< len        Total len of used desc chain   */
} VIRTQ_USED_ELEM_T;

typedef struct virtq_used
{
    uint16_t  fFlags;                           /**< flags      used ring host-to-guest flags  */
    uint16_t  uIdx;                             /**< idx        Index of next ring slot        */
    RT_FLEXIBLE_ARRAY_EXTENSION
    VIRTQ_USED_ELEM_T aRing[RT_FLEXIBLE_ARRAY]; /**< ring       Ring: used dev to drv bufs     */
    //uint16_t  uAvailEventIdx;                 /**< avail_event if (VIRTQ_USED_F_EVENT_IDX)   */
} VIRTQ_USED_T, *PVIRTQ_USED_T;

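/*
 * Guest-physical layout sketch implied by the structs above (VirtIO 1.0 ring layout):
 * for a queue of size N, used_event sits immediately after the avail ring and
 * avail_event immediately after the used ring, which is why the accessors below
 * index auRing[N] / aRing[N] to reach them. E.g. for N = 256:
 *
 * @code
 *     RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[256])  // offset of used_event  = 4 + 256 * 2 = 516
 *     RT_UOFFSETOF_DYN(VIRTQ_USED_T,  aRing[256])   // offset of avail_event = 4 + 256 * 8 = 2052
 * @endcode
 */
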
const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState)
{
    switch (enmState)
    {
        case kvirtIoVmStateChangedReset:    return "VM RESET";
        case kvirtIoVmStateChangedSuspend:  return "VM SUSPEND";
        case kvirtIoVmStateChangedPowerOff: return "VM POWER OFF";
        case kvirtIoVmStateChangedResume:   return "VM RESUME";
        default:                            return "<BAD ENUM>";
    }
}

/* Internal Functions */

static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq);
static int  virtioNudgeGuest(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uVec);

#ifdef IN_RING3
# ifdef LOG_ENABLED
DECLINLINE(uint16_t) virtioCoreR3CountPendingBufs(uint16_t uRingIdx, uint16_t uShadowIdx, uint16_t uQueueSize)
{
    if (uShadowIdx == uRingIdx)
        return 0;
    if (uShadowIdx > uRingIdx)
        return uShadowIdx - uRingIdx;
    return uQueueSize - (uRingIdx - uShadowIdx);
}
# endif
#endif
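
/*
 * Worked example for the wrap-around case above (queue size 256): with
 * uRingIdx = 255 and uShadowIdx = 1, the shadow index has wrapped past the
 * ring index, so pending = 256 - (255 - 1) = 2 buffers.
 */
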
/** @name Internal queue operations
 * @{ */

/**
 * Accessor for virtq descriptor
 */
#ifdef IN_RING3
DECLINLINE(void) virtioReadDesc(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq,
                                uint32_t idxDesc, PVIRTQ_DESC_T pDesc)
{
    AssertMsg(IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    uint16_t const cVirtqItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. */

    virtioCoreGCPhysRead(pVirtio, pDevIns,
                         pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * (idxDesc % cVirtqItems),
                         pDesc, sizeof(VIRTQ_DESC_T));
}
#endif

/**
 * Accessors for virtq avail ring
 */
#ifdef IN_RING3
DECLINLINE(uint16_t) virtioReadAvailDescIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint32_t availIdx)
{
    uint16_t uDescIdx;

    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    uint16_t const cVirtqItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. */
    virtioCoreGCPhysRead(pVirtio, pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % cVirtqItems]),
                         &uDescIdx, sizeof(uDescIdx));
    return uDescIdx;
}

DECLINLINE(uint16_t) virtioReadAvailUsedEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    uint16_t uUsedEventIdx;
    /* VirtIO 1.0 uUsedEventIdx (used_event) immediately follows ring */
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    virtioCoreGCPhysRead(pVirtio, pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uQueueSize]),
                         &uUsedEventIdx, sizeof(uUsedEventIdx));
    return uUsedEventIdx;
}
#endif

DECLINLINE(uint16_t) virtioReadAvailRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    uint16_t uIdx = 0;
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    virtioCoreGCPhysRead(pVirtio, pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, uIdx),
                         &uIdx, sizeof(uIdx));
    return uIdx;
}

DECLINLINE(uint16_t) virtioReadAvailRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    uint16_t fFlags = 0;
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    virtioCoreGCPhysRead(pVirtio, pDevIns,
                         pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
                         &fFlags, sizeof(fFlags));
    return fFlags;
}

/** @} */

/** @name Accessors for virtq used ring
 * @{
 */

#ifdef IN_RING3
DECLINLINE(void) virtioWriteUsedElem(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq,
                                     uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
{
    VIRTQ_USED_ELEM_T elem = { uDescIdx, uLen };
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    uint16_t const cVirtqItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. */
    virtioCoreGCPhysWrite(pVirtio, pDevIns,
                          pVirtq->GCPhysVirtqUsed
                        + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[usedIdx % cVirtqItems]),
                          &elem, sizeof(elem));
}

DECLINLINE(void) virtioWriteUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t fFlags)
{
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
    virtioCoreGCPhysWrite(pVirtio, pDevIns,
                          pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                          &fFlags, sizeof(fFlags));
}
#endif

DECLINLINE(void) virtioWriteUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t uIdx)
{
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
    virtioCoreGCPhysWrite(pVirtio, pDevIns,
                          pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                          &uIdx, sizeof(uIdx));
}

#ifdef IN_RING3
DECLINLINE(uint16_t) virtioReadUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    uint16_t uIdx = 0;
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    virtioCoreGCPhysRead(pVirtio, pDevIns,
                         pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
                         &uIdx, sizeof(uIdx));
    return uIdx;
}

DECLINLINE(uint16_t) virtioReadUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    uint16_t fFlags = 0;
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    virtioCoreGCPhysRead(pVirtio, pDevIns,
                         pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
                         &fFlags, sizeof(fFlags));
    return fFlags;
}

DECLINLINE(void) virtioWriteUsedAvailEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint32_t uAvailEventIdx)
{
    /* VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
    AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
    virtioCoreGCPhysWrite(pVirtio, pDevIns,
                          pVirtq->GCPhysVirtqUsed
                        + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[pVirtq->uQueueSize]),
                          &uAvailEventIdx, sizeof(uAvailEventIdx));
}
#endif
/** @} */


DECLINLINE(uint16_t) virtioCoreVirtqAvailCnt(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
{
    uint16_t uIdxActual = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq);
    uint16_t uIdxShadow = pVirtq->uAvailIdxShadow;
    uint16_t uIdxDelta;

    if (uIdxActual < uIdxShadow)
        uIdxDelta = (uIdxActual + pVirtq->uQueueSize) - uIdxShadow;
    else
        uIdxDelta = uIdxActual - uIdxShadow;

    return uIdxDelta;
}
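
/*
 * Example of the wrap handling above: with uQueueSize = 256, uIdxShadow = 254
 * and uIdxActual = 2, the delta is (2 + 256) - 254 = 4 buffers pending.
 */
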
/**
 * Get count of new (i.e. pending) elements in available ring.
 *
 * @param   pDevIns     The device instance.
 * @param   pVirtio     Pointer to the shared virtio state.
 * @param   uVirtq      Virtq number
 *
 * @returns The number of entries pending in the ring, computed as the delta between
 *          the queue's guest-side avail index and the device's shadow avail index.
 */
uint16_t virtioCoreVirtqAvailBufCount(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
{
    AssertMsgReturn(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues), ("uVirtq out of range"), 0);
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    if (!IS_DRIVER_OK(pVirtio))
    {
        LogRelFunc(("Driver not ready\n"));
        return 0;
    }
    if (!pVirtio->fLegacyDriver && !pVirtq->uEnable)
    {
        LogRelFunc(("virtq: %s not enabled\n", VIRTQNAME(pVirtio, uVirtq)));
        return 0;
    }
    return virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq);
}

#ifdef IN_RING3

void virtioCoreR3FeatureDump(VIRTIOCORE *pVirtio, PCDBGFINFOHLP pHlp, const VIRTIO_FEATURES_LIST *s_aFeatures, int cFeatures, int fBanner)
{
#define MAXLINE 80
    /* Render into a single buffer to prevent interleaved log messages */
    uint16_t cbBuf = cFeatures * 132;
    char *pszBuf = (char *)RTMemAllocZ(cbBuf);
    Assert(pszBuf);
    char *cp = pszBuf;
    for (int i = 0; i < cFeatures; ++i)
    {
        bool isOffered    = RT_BOOL(pVirtio->uDeviceFeatures & s_aFeatures[i].fFeatureBit);
        bool isNegotiated = RT_BOOL(pVirtio->uDriverFeatures & s_aFeatures[i].fFeatureBit);
        cp += RTStrPrintf(cp, cbBuf - (cp - pszBuf), "        %s       %s   %s",
                          isOffered ? "+" : "-", isNegotiated ? "x" : " ", s_aFeatures[i].pcszDesc);
    }
    if (pHlp)
    {
        if (fBanner)
            pHlp->pfnPrintf(pHlp, "VirtIO Features Configuration\n\n"
                                  "    Offered  Accepted  Feature              Description\n"
                                  "    -------  --------  -------              -----------\n");
        pHlp->pfnPrintf(pHlp, "%s\n", pszBuf);
    }
#ifdef LOG_ENABLED
    else
    {
        if (fBanner)
            Log(("VirtIO Features Configuration\n\n"
                 "    Offered  Accepted  Feature              Description\n"
                 "    -------  --------  -------              -----------\n"));
        Log(("%s\n", pszBuf));
    }
#endif
    RTMemFree(pszBuf);
}

/** API Function: See header file */
void virtioCorePrintDeviceFeatures(VIRTIOCORE *pVirtio, PCDBGFINFOHLP pHlp,
                                   const VIRTIO_FEATURES_LIST *s_aDevSpecificFeatures, int cFeatures)
{
    virtioCoreR3FeatureDump(pVirtio, pHlp, s_aCoreFeatures, RT_ELEMENTS(s_aCoreFeatures), 1 /*fBanner */);
    virtioCoreR3FeatureDump(pVirtio, pHlp, s_aDevSpecificFeatures, cFeatures, 0 /*fBanner */);
}

#endif
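
/*
 * Usage sketch for the two functions above (hypothetical device-specific code;
 * the table contents are invented for illustration, but the fFeatureBit/pcszDesc
 * fields match how VIRTIO_FEATURES_LIST is read above):
 *
 * @code
 *     static const VIRTIO_FEATURES_LIST s_aDevFeatures[] =
 *     {
 *         { VIRTIO_F_VERSION_1, "   VERSION_1   Modern (non-legacy) device interface\n" },
 *     };
 *     virtioCorePrintDeviceFeatures(pVirtio, pHlp, s_aDevFeatures, RT_ELEMENTS(s_aDevFeatures));
 * @endcode
 */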

#ifdef LOG_ENABLED

/** API Function: See header file */
void virtioCoreHexDump(uint8_t *pv, uint32_t cb, uint32_t uBase, const char *pszTitle)
{
#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
    size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80;
    char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
    AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
    if (pszTitle)
    {
        cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
        ADJCURSOR(cbPrint);
    }
    for (uint32_t row = 0; row < RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
    {
        cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
        ADJCURSOR(cbPrint);
        for (uint8_t col = 0; col < 16; col++)
        {
            uint32_t idx = row * 16 + col;
            if (idx >= cb)
                cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " ");
            else
                cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", pv[idx], (col + 1) % 8 ? "" : " ");
            ADJCURSOR(cbPrint);
        }
        for (uint32_t idx = row * 16; idx < row * 16 + 16; idx++)
        {
            cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (pv[idx] >= 0x20 && pv[idx] <= 0x7e ? pv[idx] : '.'));
            ADJCURSOR(cbPrint);
        }
        *pszOut++ = '\n';
        --cbRemain;
    }
    Log(("%s\n", pszBuf));
    RTMemFree(pszBuf);
    RT_NOREF2(uBase, pv);
#undef ADJCURSOR
}

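/*
 * Example of the output format produced above, for a 20-byte buffer at uBase 0
 * (illustrative; exact column spacing depends on the format strings):
 *
 *     0000: 00 01 02 03 04 05 06 07  08 09 0a 0b 0c 0d 0e 0f ................
 *     0010: 10 11 12 13 -- -- -- --  -- -- -- -- -- -- -- -- ....
 */
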
/* API Function: See header file */
void virtioCoreGCPhysHexDump(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint16_t cb, uint32_t uBase, const char *pszTitle)
{
    PVIRTIOCORE pVirtio = PDMDEVINS_2_DATA(pDevIns, PVIRTIOCORE);
#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
    size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80;
    char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
    AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
    if (pszTitle)
    {
        cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
        ADJCURSOR(cbPrint);
    }
    for (uint16_t row = 0; row < (uint16_t)RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
    {
        uint8_t c;
        cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
        ADJCURSOR(cbPrint);
        for (uint8_t col = 0; col < 16; col++)
        {
            uint32_t idx = row * 16 + col;
            virtioCoreGCPhysRead(pVirtio, pDevIns, GCPhys + idx, &c, 1);
            if (idx >= cb)
                cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " ");
            else
                cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", c, (col + 1) % 8 ? "" : " ");
            ADJCURSOR(cbPrint);
        }
        for (uint16_t idx = row * 16; idx < row * 16 + 16; idx++)
        {
            virtioCoreGCPhysRead(pVirtio, pDevIns, GCPhys + idx, &c, 1);
            cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (c >= 0x20 && c <= 0x7e ? c : '.'));
            ADJCURSOR(cbPrint);
        }
        *pszOut++ = '\n';
        --cbRemain;
    }
    Log(("%s\n", pszBuf));
    RTMemFree(pszBuf);
    RT_NOREF(uBase);
#undef ADJCURSOR
}


/** API function: See header file */
void virtioCoreLogMappedIoValue(const char *pszFunc, const char *pszMember, uint32_t uMemberSize,
                                const void *pv, uint32_t cb, uint32_t uOffset, int fWrite,
                                int fHasIndex, uint32_t idx)
{
    if (LogIs6Enabled())
    {
        char szIdx[16];
        if (fHasIndex)
            RTStrPrintf(szIdx, sizeof(szIdx), "[%d]", idx);
        else
            szIdx[0] = '\0';

        if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
        {
            char szDepiction[64];
            size_t cchDepiction;
            if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */
                cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s[%d:%d]",
                                           pszMember, szIdx, uOffset, uOffset + cb - 1);
            else
                cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s", pszMember, szIdx);

            /* padding */
            if (cchDepiction < 30)
                szDepiction[cchDepiction++] = ' ';
            while (cchDepiction < 30)
                szDepiction[cchDepiction++] = '.';
            szDepiction[cchDepiction] = '\0';

            RTUINT64U uValue;
            uValue.u = 0;
            memcpy(uValue.au8, pv, cb);
            Log6(("%-23s: Guest %s %s %#0*RX64\n",
                  pszFunc, fWrite ? "wrote" : "read ", szDepiction, 2 + cb * 2, uValue.u));
        }
        else /* odd number or oversized access, ... log inline hex-dump style */
        {
            Log6(("%-23s: Guest %s %s%s[%d:%d]: %.*Rhxs\n",
                  pszFunc, fWrite ? "wrote" : "read ", pszMember,
                  szIdx, uOffset, uOffset + cb, cb, pv));
        }
    }
    RT_NOREF2(fWrite, pszFunc);
}

/**
 * Log MMIO-mapped Virtio fDeviceStatus register bitmask, naming the bits
 */
DECLINLINE(void) virtioCoreFormatDeviceStatus(uint8_t bStatus, char *pszBuf, size_t uSize)
{
# define ADJCURSOR(len) { cp += len; uSize -= len; sep = (char *)" | "; }
    memset(pszBuf, 0, uSize);
    char *cp = pszBuf, *sep = (char *)"";
    size_t len;
    if (bStatus == 0)
        RTStrPrintf(cp, uSize, "RESET");
    else
    {
        if (bStatus & VIRTIO_STATUS_ACKNOWLEDGE)
        {
            len = RTStrPrintf(cp, uSize, "ACKNOWLEDGE");
            ADJCURSOR(len);
        }
        if (bStatus & VIRTIO_STATUS_DRIVER)
        {
            len = RTStrPrintf(cp, uSize, "%sDRIVER", sep);
            ADJCURSOR(len);
        }
        if (bStatus & VIRTIO_STATUS_FEATURES_OK)
        {
            len = RTStrPrintf(cp, uSize, "%sFEATURES_OK", sep);
            ADJCURSOR(len);
        }
        if (bStatus & VIRTIO_STATUS_DRIVER_OK)
        {
            len = RTStrPrintf(cp, uSize, "%sDRIVER_OK", sep);
            ADJCURSOR(len);
        }
        if (bStatus & VIRTIO_STATUS_FAILED)
        {
            len = RTStrPrintf(cp, uSize, "%sFAILED", sep);
            ADJCURSOR(len);
        }
        if (bStatus & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
            RTStrPrintf(cp, uSize, "%sNEEDS_RESET", sep);
    }
# undef ADJCURSOR
}
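
/*
 * Example: a fully initialized driver status (ACKNOWLEDGE | DRIVER | FEATURES_OK
 * | DRIVER_OK) is rendered by the function above as the string
 * "ACKNOWLEDGE | DRIVER | FEATURES_OK | DRIVER_OK"; a zero status renders as "RESET".
 */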

#endif /* LOG_ENABLED */

/** API function: See header file */
int virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio)
{
    return pVirtio->fLegacyDriver;
}

#ifdef IN_RING3

int virtioCoreR3VirtqAttach(PVIRTIOCORE pVirtio, uint16_t uVirtq, const char *pcszName)
{
    LogFunc(("Attaching %s to VirtIO core\n", pcszName));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
    pVirtq->uVirtq = uVirtq;
    pVirtq->uAvailIdxShadow = 0;
    pVirtq->uUsedIdxShadow = 0;
    pVirtq->fUsedRingEvent = false;
    pVirtq->fAttached = true;
    RTStrCopy(pVirtq->szName, sizeof(pVirtq->szName), pcszName);
    return VINF_SUCCESS;
}

int virtioCoreR3VirtqDetach(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
{
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtqNbr];
    pVirtq->uVirtq = 0;
    pVirtq->uAvailIdxShadow = 0;
    pVirtq->uUsedIdxShadow = 0;
    pVirtq->fUsedRingEvent = false;
    pVirtq->fAttached = false;
    memset(pVirtq->szName, 0, sizeof(pVirtq->szName));
    return VINF_SUCCESS;
}
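
/*
 * Typical usage sketch for the attach/detach pair above (hypothetical device
 * construction code; queue numbers and names are invented for illustration):
 *
 * @code
 *     virtioCoreR3VirtqAttach(&pThis->Virtio, 0, "receiveq");
 *     virtioCoreR3VirtqAttach(&pThis->Virtio, 1, "transmitq");
 *     ...
 *     virtioCoreR3VirtqDetach(&pThis->Virtio, 1);   // e.g. on teardown
 * @endcode
 */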

bool virtioCoreR3VirtqIsAttached(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
{
    return pVirtio->aVirtqueues[uVirtqNbr].fAttached;
}

bool virtioCoreR3VirtqIsEnabled(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
{
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtqNbr];
    return (bool)pVirtq->uEnable && pVirtq->GCPhysVirtqDesc;
}

/** API Function: See header file */
void virtioCoreR3VirtqInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs, int uVirtq)
{
    RT_NOREF(pszArgs);
    PVIRTIOCORE pVirtio = PDMDEVINS_2_DATA(pDevIns, PVIRTIOCORE);
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    /** @todo add ability to dump physical contents described by any descriptor (using existing VirtIO core API function) */
//  bool fDump = pszArgs && (*pszArgs == 'd' || *pszArgs == 'D'); /* "dump" (avail phys descriptor) */

    uint16_t uAvailIdx = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq);
    uint16_t uAvailIdxShadow = pVirtq->uAvailIdxShadow;

    uint16_t uUsedIdx = virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq);
    uint16_t uUsedIdxShadow = pVirtq->uUsedIdxShadow;

#ifdef VIRTIO_VBUF_ON_STACK
    VIRTQBUF_T VirtqBuf;
    PVIRTQBUF pVirtqBuf = &VirtqBuf;
#else /* !VIRTIO_VBUF_ON_STACK */
    PVIRTQBUF pVirtqBuf = NULL;
#endif /* !VIRTIO_VBUF_ON_STACK */

    bool fEmpty = IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq);

    LogFunc(("%s, empty = %s\n", pVirtq->szName, fEmpty ? "true" : "false"));

    int cSendSegs = 0, cReturnSegs = 0;
    if (!fEmpty)
    {
#ifdef VIRTIO_VBUF_ON_STACK
        virtioCoreR3VirtqAvailBufPeek(pDevIns, pVirtio, uVirtq, pVirtqBuf);
#else /* !VIRTIO_VBUF_ON_STACK */
        virtioCoreR3VirtqAvailBufPeek(pDevIns, pVirtio, uVirtq, &pVirtqBuf);
#endif /* !VIRTIO_VBUF_ON_STACK */
        cSendSegs   = pVirtqBuf->pSgPhysSend   ? pVirtqBuf->pSgPhysSend->cSegs   : 0;
        cReturnSegs = pVirtqBuf->pSgPhysReturn ? pVirtqBuf->pSgPhysReturn->cSegs : 0;
    }

    bool fAvailNoInterrupt = virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT;
    bool fUsedNoNotify     = virtioReadUsedRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_USED_F_NO_NOTIFY;

    pHlp->pfnPrintf(pHlp, "       queue enabled: ........... %s\n", pVirtq->uEnable ? "true" : "false");
    pHlp->pfnPrintf(pHlp, "       size: .................... %d\n", pVirtq->uQueueSize);
    pHlp->pfnPrintf(pHlp, "       notify offset: ........... %d\n", pVirtq->uNotifyOffset);
    if (pVirtio->fMsiSupport)
        pHlp->pfnPrintf(pHlp, "       MSIX vector: ....... %4.4x\n", pVirtq->uMsixVector);
    pHlp->pfnPrintf(pHlp, "\n");
    pHlp->pfnPrintf(pHlp, "       avail ring (%d entries):\n", uAvailIdx - uAvailIdxShadow);
    pHlp->pfnPrintf(pHlp, "          index: ................ %d\n", uAvailIdx);
    pHlp->pfnPrintf(pHlp, "          shadow: ............... %d\n", uAvailIdxShadow);
    pHlp->pfnPrintf(pHlp, "          flags: ................ %s\n", fAvailNoInterrupt ? "NO_INTERRUPT" : "");
    pHlp->pfnPrintf(pHlp, "\n");
    pHlp->pfnPrintf(pHlp, "       used ring (%d entries):\n", uUsedIdx - uUsedIdxShadow);
    pHlp->pfnPrintf(pHlp, "          index: ................ %d\n", uUsedIdx);
    pHlp->pfnPrintf(pHlp, "          shadow: ............... %d\n", uUsedIdxShadow);
    pHlp->pfnPrintf(pHlp, "          flags: ................ %s\n", fUsedNoNotify ? "NO_NOTIFY" : "");
    pHlp->pfnPrintf(pHlp, "\n");
    if (!fEmpty)
    {
        pHlp->pfnPrintf(pHlp, "       desc chain:\n");
        pHlp->pfnPrintf(pHlp, "          head idx: ............. %d\n", uUsedIdx);
        pHlp->pfnPrintf(pHlp, "          segs: ................. %d\n", cSendSegs + cReturnSegs);
        pHlp->pfnPrintf(pHlp, "          refCnt ................ %d\n", pVirtqBuf->cRefs);
        pHlp->pfnPrintf(pHlp, "\n");
        pHlp->pfnPrintf(pHlp, "          host-to-guest (%d bytes):\n", pVirtqBuf->cbPhysSend);
        pHlp->pfnPrintf(pHlp, "             segs: .............. %d\n", cSendSegs);
        if (cSendSegs)
        {
            pHlp->pfnPrintf(pHlp, "             index: ............. %d\n", pVirtqBuf->pSgPhysSend->idxSeg);
            pHlp->pfnPrintf(pHlp, "             unsent ............. %d\n", pVirtqBuf->pSgPhysSend->cbSegLeft);
        }
        pHlp->pfnPrintf(pHlp, "\n");
        pHlp->pfnPrintf(pHlp, "          guest-to-host (%d bytes):\n", pVirtqBuf->cbPhysReturn);
        pHlp->pfnPrintf(pHlp, "             segs: .............. %d\n", cReturnSegs);
        if (cReturnSegs)
        {
            pHlp->pfnPrintf(pHlp, "             index: ............. %d\n", pVirtqBuf->pSgPhysReturn->idxSeg);
            pHlp->pfnPrintf(pHlp, "             unsent ............. %d\n", pVirtqBuf->pSgPhysReturn->cbSegLeft);
        }
    }
    else
        pHlp->pfnPrintf(pHlp, "       No desc chains available\n");
    pHlp->pfnPrintf(pHlp, "\n");
}

#ifdef VIRTIO_VBUF_ON_STACK
/** API Function: See header file */
PVIRTQBUF virtioCoreR3VirtqBufAlloc(void)
{
    PVIRTQBUF pVirtqBuf = (PVIRTQBUF)RTMemAllocZ(sizeof(VIRTQBUF_T));
    AssertReturn(pVirtqBuf, NULL);
    pVirtqBuf->u32Magic = VIRTQBUF_MAGIC;
    pVirtqBuf->cRefs = 1;
    return pVirtqBuf;
}
#endif /* VIRTIO_VBUF_ON_STACK */

/** API Function: See header file */
uint32_t virtioCoreR3VirtqBufRetain(PVIRTQBUF pVirtqBuf)
{
    AssertReturn(pVirtqBuf, UINT32_MAX);
    AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, UINT32_MAX);
    uint32_t cRefs = ASMAtomicIncU32(&pVirtqBuf->cRefs);
    Assert(cRefs > 1);
    Assert(cRefs < 16);
    return cRefs;
}

/** API Function: See header file */
uint32_t virtioCoreR3VirtqBufRelease(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf)
{
    if (!pVirtqBuf)
        return 0;
    AssertReturn(pVirtqBuf, 0);
    AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, 0);
    uint32_t cRefs = ASMAtomicDecU32(&pVirtqBuf->cRefs);
    Assert(cRefs < 16);
    if (cRefs == 0)
    {
        pVirtqBuf->u32Magic = ~VIRTQBUF_MAGIC;
        RTMemFree(pVirtqBuf);
#ifdef VBOX_WITH_STATISTICS
        STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsFreed);
#endif
    }
    RT_NOREF(pVirtio);
    return cRefs;
}
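
/*
 * Reference-count lifecycle sketch for the heap-allocated (non-VIRTIO_VBUF_ON_STACK)
 * case, based on the three functions above (the surrounding worker code is hypothetical):
 *
 * @code
 *     virtioCoreR3VirtqBufRetain(pVirtqBuf);            // keep buf alive across async I/O
 *     // ... hand pVirtqBuf to a worker ...
 *     virtioCoreR3VirtqBufRelease(pVirtio, pVirtqBuf);  // worker's reference
 *     virtioCoreR3VirtqBufRelease(pVirtio, pVirtqBuf);  // original reference; freed when count hits 0
 * @endcode
 */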

/** API Function: See header file */
void virtioCoreNotifyConfigChanged(PVIRTIOCORE pVirtio)
{
    virtioNudgeGuest(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
}


/** API Function: See header file */
void virtioCoreVirtqEnableNotify(PVIRTIOCORE pVirtio, uint16_t uVirtq, bool fEnable)
{
    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    if (IS_DRIVER_OK(pVirtio))
    {
        uint16_t fFlags = virtioReadUsedRingFlags(pVirtio->pDevInsR3, pVirtio, pVirtq);

        if (fEnable)
            fFlags &= ~VIRTQ_USED_F_NO_NOTIFY;
        else
            fFlags |= VIRTQ_USED_F_NO_NOTIFY;

        virtioWriteUsedRingFlags(pVirtio->pDevInsR3, pVirtio, pVirtq, fFlags);
    }
}

/** API function: See header file */
void virtioCoreResetAll(PVIRTIOCORE pVirtio)
{
    LogFunc(("\n"));
    pVirtio->fDeviceStatus |= VIRTIO_STATUS_DEVICE_NEEDS_RESET;
    if (IS_DRIVER_OK(pVirtio))
    {
        if (!pVirtio->fLegacyDriver)
            pVirtio->fGenUpdatePending = true;
        virtioNudgeGuest(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
    }
}

/** API function: See header file */
#ifdef VIRTIO_VBUF_ON_STACK
int virtioCoreR3VirtqAvailBufPeek(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, PVIRTQBUF pVirtqBuf)
{
    return virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, pVirtqBuf, false);
}
#else /* !VIRTIO_VBUF_ON_STACK */
int virtioCoreR3VirtqAvailBufPeek(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                  PPVIRTQBUF ppVirtqBuf)
{
    return virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, ppVirtqBuf, false);
}
#endif /* !VIRTIO_VBUF_ON_STACK */

/** API function: See header file */
int virtioCoreR3VirtqAvailBufNext(PVIRTIOCORE pVirtio, uint16_t uVirtq)
{
    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    if (!pVirtio->fLegacyDriver)
        AssertMsgReturn((pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) && pVirtq->uEnable,
                        ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    if (IS_VIRTQ_EMPTY(pVirtio->pDevInsR3, pVirtio, pVirtq))
        return VERR_NOT_AVAILABLE;

    Log6Func(("%s avail shadow idx: %u\n", pVirtq->szName, pVirtq->uAvailIdxShadow));
    pVirtq->uAvailIdxShadow++;

    return VINF_SUCCESS;
}

/** API Function: See header file */
#ifdef VIRTIO_VBUF_ON_STACK
int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                 uint16_t uHeadIdx, PVIRTQBUF pVirtqBuf)
#else /* !VIRTIO_VBUF_ON_STACK */
int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                 uint16_t uHeadIdx, PPVIRTQBUF ppVirtqBuf)
#endif /* !VIRTIO_VBUF_ON_STACK */
{
#ifndef VIRTIO_VBUF_ON_STACK
    AssertReturn(ppVirtqBuf, VERR_INVALID_POINTER);
    *ppVirtqBuf = NULL;
#endif /* !VIRTIO_VBUF_ON_STACK */

    AssertMsgReturn(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues),
                    ("uVirtq out of range"), VERR_INVALID_PARAMETER);

    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    if (!pVirtio->fLegacyDriver)
        AssertMsgReturn((pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) && pVirtq->uEnable,
                        ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    uint16_t uDescIdx = uHeadIdx;

    Log6Func(("%s DESC CHAIN: (head idx = %u)\n", pVirtio->aVirtqueues[uVirtq].szName, uHeadIdx));

    /*
     * Allocate and initialize the descriptor chain structure.
     */
#ifndef VIRTIO_VBUF_ON_STACK
    PVIRTQBUF pVirtqBuf = (PVIRTQBUF)RTMemAllocZ(sizeof(VIRTQBUF_T));
    AssertReturn(pVirtqBuf, VERR_NO_MEMORY);
    pVirtqBuf->u32Magic = VIRTQBUF_MAGIC;
    pVirtqBuf->cRefs    = 1;
#endif /* !VIRTIO_VBUF_ON_STACK */
    pVirtqBuf->uHeadIdx = uHeadIdx;
    pVirtqBuf->uVirtq   = uVirtq;
#ifndef VIRTIO_VBUF_ON_STACK
    *ppVirtqBuf = pVirtqBuf;
#endif /* !VIRTIO_VBUF_ON_STACK */

    /*
     * Gather segments.
     */
    VIRTQ_DESC_T desc;

    uint32_t cbIn = 0;
    uint32_t cbOut = 0;
    uint32_t cSegsIn = 0;
    uint32_t cSegsOut = 0;

    PVIRTIOSGSEG paSegsIn  = pVirtqBuf->aSegsIn;
    PVIRTIOSGSEG paSegsOut = pVirtqBuf->aSegsOut;

    do
    {
        PVIRTIOSGSEG pSeg;
        /*
         * Malicious guests may try to go beyond the paSegsIn or paSegsOut boundaries by linking
         * several descriptors into a loop. Since there is no legitimate way to get a sequence of
         * linked descriptors exceeding the total number of descriptors in the ring (see @bugref{8620}),
         * the following aborts I/O on such a breach and employs a simple log-throttling algorithm to notify.
         */
        if (cSegsIn + cSegsOut >= pVirtq->uQueueSize)
        {
            static volatile uint32_t s_cMessages  = 0;
            static volatile uint32_t s_cThreshold = 1;
            if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
            {
                LogRelMax(64, ("Too many linked descriptors; check if the guest arranges descriptors in a loop.\n"));
                if (ASMAtomicReadU32(&s_cMessages) != 1)
                    LogRelMax(64, ("(the above error has occurred %u times so far)\n", ASMAtomicReadU32(&s_cMessages)));
                ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
            }
            break;
        }
        RT_UNTRUSTED_VALIDATED_FENCE();

        virtioReadDesc(pDevIns, pVirtio, pVirtq, uDescIdx, &desc);

        if (desc.fFlags & VIRTQ_DESC_F_WRITE)
        {
            Log6Func(("%s IN  idx=%-4u seg=%-3u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb));
            cbIn += desc.cb;
            pSeg = &paSegsIn[cSegsIn++];
        }
        else
        {
            Log6Func(("%s OUT desc_idx=%-4u seg=%-3u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsOut, desc.GCPhysBuf, desc.cb));
            cbOut += desc.cb;
            pSeg = &paSegsOut[cSegsOut++];
#ifdef DEEP_DEBUG
            if (LogIs11Enabled())
            {
                virtioCoreGCPhysHexDump(pDevIns, desc.GCPhysBuf, desc.cb, 0, NULL);
                Log(("\n"));
            }
#endif
        }
        pSeg->GCPhys = desc.GCPhysBuf;
        pSeg->cbSeg = desc.cb;
        uDescIdx = desc.uDescIdxNext;
    } while (desc.fFlags & VIRTQ_DESC_F_NEXT);

    /*
     * Add segments to the descriptor chain structure.
     */
    if (cSegsIn)
    {
        virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufIn, paSegsIn, cSegsIn);
        pVirtqBuf->pSgPhysReturn = &pVirtqBuf->SgBufIn;
        pVirtqBuf->cbPhysReturn  = cbIn;
#ifdef VBOX_WITH_STATISTICS
        STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsIn, cSegsIn);
#endif
    }

    if (cSegsOut)
    {
        virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufOut, paSegsOut, cSegsOut);
        pVirtqBuf->pSgPhysSend = &pVirtqBuf->SgBufOut;
        pVirtqBuf->cbPhysSend  = cbOut;
#ifdef VBOX_WITH_STATISTICS
        STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsOut, cSegsOut);
#endif
    }

#ifdef VBOX_WITH_STATISTICS
    STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsAllocated);
#endif
    Log6Func(("%s -- segs OUT: %u (%u bytes)   IN: %u (%u bytes) --\n",
              pVirtq->szName, cSegsOut, cbOut, cSegsIn, cbIn));

    return VINF_SUCCESS;
}

/** API function: See header file */
#ifdef VIRTIO_VBUF_ON_STACK
int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                 PVIRTQBUF pVirtqBuf, bool fRemove)
#else /* !VIRTIO_VBUF_ON_STACK */
int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                 PPVIRTQBUF ppVirtqBuf, bool fRemove)
#endif /* !VIRTIO_VBUF_ON_STACK */
{
    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    if (IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq))
        return VERR_NOT_AVAILABLE;

    uint16_t uHeadIdx = virtioReadAvailDescIdx(pDevIns, pVirtio, pVirtq, pVirtq->uAvailIdxShadow);

    if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
        virtioWriteUsedAvailEvent(pDevIns, pVirtio, pVirtq, pVirtq->uAvailIdxShadow + 1);

    if (fRemove)
        pVirtq->uAvailIdxShadow++;

#ifdef VIRTIO_VBUF_ON_STACK
    int rc = virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, uHeadIdx, pVirtqBuf);
#else /* !VIRTIO_VBUF_ON_STACK */
    int rc = virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, uHeadIdx, ppVirtqBuf);
#endif /* !VIRTIO_VBUF_ON_STACK */
    return rc;
}

/** API function: See header file */
int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, PRTSGBUF pSgVirtReturn,
                                PVIRTQBUF pVirtqBuf, bool fFence)
{
    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn;

    Assert(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC);
    Assert(pVirtqBuf->cRefs > 0);

    AssertMsgReturn(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    Log6Func(("    Copying device data to %s, [desc:%u → used ring:%u]\n",
              VIRTQNAME(pVirtio, uVirtq), pVirtqBuf->uHeadIdx, pVirtq->uUsedIdxShadow));

    /* Copy s/g buf (virtual memory) to guest phys mem (VirtIO "IN" direction). */

    size_t cbCopy = 0, cbTotal = 0, cbRemain = 0;

    if (pSgVirtReturn)
    {
        size_t cbTarget = virtioCoreGCPhysChainCalcBufSize(pSgPhysReturn);
        cbRemain = cbTotal = RTSgBufCalcTotalLength(pSgVirtReturn);
        AssertMsgReturn(cbTarget >= cbRemain, ("No space to write data to phys memory"), VERR_BUFFER_OVERFLOW);
        virtioCoreGCPhysChainReset(pSgPhysReturn);
        while (cbRemain)
        {
            cbCopy = RT_MIN(pSgVirtReturn->cbSegLeft, pSgPhysReturn->cbSegLeft);
            Assert(cbCopy > 0);
            virtioCoreGCPhysWrite(pVirtio, pDevIns, (RTGCPHYS)pSgPhysReturn->GCPhysCur, pSgVirtReturn->pvSegCur, cbCopy);
            RTSgBufAdvance(pSgVirtReturn, cbCopy);
            virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbCopy);
            cbRemain -= cbCopy;
        }

        if (fFence)
            RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); /* needed? */

        Assert(!(cbCopy >> 32));
    }

    /* Flag if write-ahead crosses threshold where guest driver indicated it wants event notification */
    if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
        if (pVirtq->uUsedIdxShadow == virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq))
            pVirtq->fUsedRingEvent = true;

    /*
     * Place used buffer's descriptor in used ring but don't update used ring's slot index.
     * That will be done with a subsequent client call to virtioCoreVirtqUsedRingSync()
     */
    virtioWriteUsedElem(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow++, pVirtqBuf->uHeadIdx, (uint32_t)cbTotal);

#ifdef LOG_ENABLED
    if (LogIs6Enabled() && pSgVirtReturn)
    {
        LogFunc(("     ... %d segs, %zu bytes, copied to %u byte buf@offset=%u. Residual: %zu bytes\n",
                 pSgVirtReturn->cSegs, cbTotal - cbRemain, pVirtqBuf->cbPhysReturn,
                 ((virtioCoreGCPhysChainCalcBufSize(pVirtqBuf->pSgPhysReturn) -
                   virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)) - (cbTotal - cbRemain)),
                 virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)));

        uint16_t uPending = virtioCoreR3CountPendingBufs(
                                virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq),
                                pVirtq->uUsedIdxShadow, pVirtq->uQueueSize);

        LogFunc(("    %u used buf%s not synced in %s\n", uPending, uPending == 1 ? "" : "s ",
                 VIRTQNAME(pVirtio, uVirtq)));
    }
#endif
    return VINF_SUCCESS;
}

/** API function: See header file */
int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
                                size_t cb, void const *pv, PVIRTQBUF pVirtqBuf, size_t cbEnqueue, bool fFence)
{
    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    Assert(pv);

    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
    PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn;

    Assert(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC);
    Assert(pVirtqBuf->cRefs > 0);

    AssertMsgReturn(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    Log6Func(("    Copying device data to %s, [desc chain head idx:%u]\n",
              VIRTQNAME(pVirtio, uVirtq), pVirtqBuf->uHeadIdx));
    /*
     * Convert virtual memory simple buffer to guest physical memory (VirtIO descriptor chain)
     */
    uint8_t *pvBuf = (uint8_t *)pv;
    size_t cbRemain = cb, cbCopy = 0;
    while (cbRemain)
    {
        cbCopy = RT_MIN(pSgPhysReturn->cbSegLeft, cbRemain);
        Assert(cbCopy > 0);
        virtioCoreGCPhysWrite(pVirtio, pDevIns, (RTGCPHYS)pSgPhysReturn->GCPhysCur, pvBuf, cbCopy);
        virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbCopy);
        pvBuf += cbCopy;
        cbRemain -= cbCopy;
    }
    LogFunc(("     ...%zu bytes, copied to %u byte buf@offset=%u. Residual: %zu bytes\n",
             cb, pVirtqBuf->cbPhysReturn,
             ((virtioCoreGCPhysChainCalcBufSize(pVirtqBuf->pSgPhysReturn) -
               virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)) - cb),
             virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)));

    if (cbEnqueue)
    {
        if (fFence)
        {
            RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); /* needed? */
            Assert(!(cbCopy >> 32));
        }
        /* Flag if write-ahead crosses threshold where guest driver indicated it wants event notification */
        if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
            if (pVirtq->uUsedIdxShadow == virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq))
                pVirtq->fUsedRingEvent = true;
        /*
         * Place used buffer's descriptor in used ring but don't update used ring's slot index.
         * That will be done with a subsequent client call to virtioCoreVirtqUsedRingSync()
         */
        Log6Func(("    Enqueue desc chain head idx %u to %s used ring @ %u\n", pVirtqBuf->uHeadIdx,
                  VIRTQNAME(pVirtio, uVirtq), pVirtq->uUsedIdxShadow));

        virtioWriteUsedElem(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow++, pVirtqBuf->uHeadIdx, (uint32_t)cbEnqueue);

#ifdef LOG_ENABLED
        if (LogIs6Enabled())
        {
            uint16_t uPending = virtioCoreR3CountPendingBufs(
                                    virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq),
                                    pVirtq->uUsedIdxShadow, pVirtq->uQueueSize);

            LogFunc(("    %u used buf%s not synced in %s\n",
                     uPending, uPending == 1 ? "" : "s ", VIRTQNAME(pVirtio, uVirtq)));
        }
#endif
    } /* cbEnqueue */

    return VINF_SUCCESS;
}


#endif /* IN_RING3 */

/** API function: See header file */
int virtioCoreVirtqUsedRingSync(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
{
    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    if (!pVirtio->fLegacyDriver)
        AssertMsgReturn((pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) && pVirtq->uEnable,
                        ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);

    Log6Func(("    Sync %s used ring (%u → idx)\n",
              pVirtq->szName, pVirtq->uUsedIdxShadow));

    virtioWriteUsedRingIdx(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow);
    virtioCoreNotifyGuestDriver(pDevIns, pVirtio, uVirtq);

    return VINF_SUCCESS;
}
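
/*
 * Typical device-side sequence implied by the comments above (surrounding client
 * code is hypothetical): return one or more buffers to the used ring, then publish
 * them to the guest in a single step.
 *
 * @code
 *     virtioCoreR3VirtqUsedBufPut(pDevIns, pVirtio, uVirtq, pSgVirtReturn, pVirtqBuf, true /*fFence*/);
 *     virtioCoreVirtqUsedRingSync(pDevIns, pVirtio, uVirtq);  // writes used idx + notifies guest
 * @endcode
 */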

/**
 * This is called from the MMIO callback code when the guest does an MMIO access to the
 * mapped queue notification capability area corresponding to a particular queue, to notify
 * the queue handler of available data in the avail ring of the queue (VirtIO 1.0, 4.1.4.4.1)
 *
 * @param   pDevIns     The device instance.
 * @param   pVirtio     Pointer to the shared virtio state.
 * @param   uVirtq      Virtq to check for guest interrupt handling preference
 * @param   uNotifyIdx  Notification index
 */
static void virtioCoreVirtqNotified(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, uint16_t uNotifyIdx)
{
    PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);

    /* VirtIO 1.0, section 4.1.5.2 implies uVirtq and uNotifyIdx should match. Disregarding any of
     * these notifications (if those indices disagree) may break device/driver synchronization,
     * causing eternal throughput starvation, yet there's no specified way to disambiguate
     * which queue to wake up in any awkward situation where the two parameters differ.
     */
    AssertMsg(uNotifyIdx == uVirtq,
              ("Guest kicked virtq %d's notify addr w/non-corresponding virtq idx %d\n",
               uVirtq, uNotifyIdx));
    RT_NOREF(uNotifyIdx);

    AssertReturnVoid(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    Log6Func(("%s: (desc chains: %u)\n", *pVirtq->szName ? pVirtq->szName : "?UNNAMED QUEUE?",
              virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq)));

    /* Inform client */
    pVirtioCC->pfnVirtqNotified(pDevIns, pVirtio, uVirtq);
    RT_NOREF2(pVirtio, pVirtq);
}

/**
 * Trigger MSI-X or INT# interrupt to notify guest of data added to used ring of
 * the specified virtq, depending on the interrupt configuration of the device
 * and on negotiated and realtime constraints flagged by the guest driver.
 *
 * See VirtIO 1.0 specification (section 2.4.7).
 *
 * @param   pDevIns     The device instance.
 * @param   pVirtio     Pointer to the shared virtio state.
 * @param   uVirtq      Virtq to check for guest interrupt handling preference
 */
static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
{
    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    if (!IS_DRIVER_OK(pVirtio))
    {
        LogFunc(("Guest driver not in ready state.\n"));
        return;
    }

    if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
    {
        if (pVirtq->fUsedRingEvent)
        {
#ifdef IN_RING3
            Log6Func(("...kicking guest %s, VIRTIO_F_EVENT_IDX set and threshold (%d) reached\n",
                      pVirtq->szName, (uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq)));
#endif
            virtioNudgeGuest(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector);
            pVirtq->fUsedRingEvent = false;
            return;
        }
#ifdef IN_RING3
        Log6Func(("...skip interrupt %s, VIRTIO_F_EVENT_IDX set but threshold (%d) not reached (%d)\n",
                  pVirtq->szName, (uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq), pVirtq->uUsedIdxShadow));
#endif
    }
    else
    {
        /* If guest driver hasn't suppressed interrupts, interrupt */
        if (!(virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT))
        {
            virtioNudgeGuest(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector);
            return;
        }
        Log6Func(("...skipping interrupt for %s (guest set VIRTQ_AVAIL_F_NO_INTERRUPT)\n", pVirtq->szName));
    }
}

/**
 * Raise interrupt or MSI-X
 *
 * @param   pDevIns     The device instance.
 * @param   pVirtio     Pointer to the shared virtio state.
 * @param   uCause      Interrupt cause bit mask to set in PCI ISR port.
 * @param   uMsixVector MSI-X vector, if enabled
 */
static int virtioNudgeGuest(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uMsixVector)
{
    if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT)
        Log6Func(("Reason for interrupt - buffer added to 'used' ring.\n"));
    else
    if (uCause == VIRTIO_ISR_DEVICE_CONFIG)
        Log6Func(("Reason for interrupt - device config change\n"));

    if (!pVirtio->fMsiSupport)
    {
        pVirtio->uISR |= uCause;
        PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_HIGH);
    }
    else if (uMsixVector != VIRTIO_MSI_NO_VECTOR)
        PDMDevHlpPCISetIrq(pDevIns, uMsixVector, 1);
    return VINF_SUCCESS;
}

/**
 * Lower interrupt (Called when guest reads ISR and when resetting)
 *
 * @param   pDevIns     The device instance.
 * @param   uMsixVector The MSI-X vector to lower, if MSI-X is enabled.
 */
static void virtioLowerInterrupt(PPDMDEVINS pDevIns, uint16_t uMsixVector)
{
    PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
    if (!pVirtio->fMsiSupport)
        PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_LOW);
    else if (uMsixVector != VIRTIO_MSI_NO_VECTOR)
        PDMDevHlpPCISetIrq(pDevIns, pVirtio->uMsixConfig, PDM_IRQ_LEVEL_LOW);
}

#ifdef IN_RING3
static void virtioResetVirtq(PVIRTIOCORE pVirtio, uint16_t uVirtq)
{
    Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
    PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

    pVirtq->uQueueSize = VIRTQ_SIZE;
    pVirtq->uEnable = false;
    pVirtq->uNotifyOffset = uVirtq;
    pVirtq->fUsedRingEvent = false;
    pVirtq->uAvailIdxShadow = 0;
    pVirtq->uUsedIdxShadow = 0;
    pVirtq->uMsixVector = uVirtq + 2;

    if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
        pVirtq->uMsixVector = VIRTIO_MSI_NO_VECTOR;

    virtioLowerInterrupt(pVirtio->pDevInsR3, pVirtq->uMsixVector);
}

static void virtioResetDevice(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
{
    LogFunc(("Resetting device VirtIO state\n"));
    pVirtio->fLegacyDriver = pVirtio->fOfferLegacy; /* Cleared if VIRTIO_F_VERSION_1 feature ack'd */
    pVirtio->uDeviceFeaturesSelect = 0;
    pVirtio->uDriverFeaturesSelect = 0;
    pVirtio->uConfigGeneration = 0;
    pVirtio->fDeviceStatus = 0;
    pVirtio->uISR = 0;

    if (!pVirtio->fMsiSupport)
        virtioLowerInterrupt(pDevIns, 0);
    else
    {
        virtioLowerInterrupt(pDevIns, pVirtio->uMsixConfig);
        for (int i = 0; i < VIRTQ_MAX_COUNT; i++)
            virtioLowerInterrupt(pDevIns, pVirtio->aVirtqueues[i].uMsixVector);
    }

    if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
        pVirtio->uMsixConfig = VIRTIO_MSI_NO_VECTOR;

    for (uint16_t uVirtq = 0; uVirtq < VIRTQ_MAX_COUNT; uVirtq++)
        virtioResetVirtq(pVirtio, uVirtq);
}

/**
 * Invoked by this implementation when guest driver resets the device.
 * The guest driver itself will not consider the reset complete until it
 * reads the device status back and observes the change.
 */
static void virtioGuestR3WasReset(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
{
    Log(("%-23s: Guest reset the device\n", __FUNCTION__));

    /* Let the client know */
    pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, 0 /* fDriverOk */);
    virtioResetDevice(pDevIns, pVirtio);
}
#endif /* IN_RING3 */

/*
 * Determines whether the guest virtio driver is modern or legacy and does the callback
 * informing device-specific code that feature negotiation is complete.
 * Should be called only once (coordinated via the 'toggle' flag).
 */
#ifdef IN_RING3
DECLINLINE(void) virtioR3DoFeaturesCompleteOnceOnly(PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
{
    if (pVirtio->uDriverFeatures & VIRTIO_F_VERSION_1)
    {
        LogFunc(("VIRTIO_F_VERSION_1 feature ack'd by guest\n"));
        pVirtio->fLegacyDriver = 0;
    }
    else
    {
        if (pVirtio->fOfferLegacy)
        {
            pVirtio->fLegacyDriver = 1;
            LogFunc(("VIRTIO_F_VERSION_1 feature was NOT set by guest\n"));
        }
        else
            AssertMsgFailed(("Guest didn't accept VIRTIO_F_VERSION_1, but fLegacyOffered flag not set.\n"));
    }
    if (pVirtioCC->pfnFeatureNegotiationComplete)
        pVirtioCC->pfnFeatureNegotiationComplete(pVirtio, pVirtio->uDriverFeatures, pVirtio->fLegacyDriver);
    pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_COMPLETE_HANDLED;
}
#endif
1359
1360/**
1361 * Handle accesses to Common Configuration capability
1362 *
1363 * @returns VBox status code
1364 *
1365 * @param pDevIns The device instance.
1366 * @param pVirtio Pointer to the shared virtio state.
1367 * @param pVirtioCC Pointer to the current context virtio state.
1368 * @param fWrite Set if write access, clear if read access.
1369 * @param uOffsetOfAccess The common configuration capability offset.
1370 * @param cb Number of bytes to read or write
1371 * @param pv Pointer to location to write to or read from
1372 */
1373static int virtioCommonCfgAccessed(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
1374 int fWrite, uint32_t uOffsetOfAccess, unsigned cb, void *pv)
1375{
1376 uint16_t uVirtq = pVirtio->uVirtqSelect;
1377 int rc = VINF_SUCCESS;
1378 uint64_t val;
1379 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1380 {
1381 if (fWrite) /* Guest WRITE pCommonCfg>uDeviceFeatures */
1382 {
1383 /* VirtIO 1.0, 4.1.4.3 states device_feature is a (guest) driver readonly field,
1384 * yet the linux driver attempts to write/read it back twice */
1385 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1386 LogFunc(("... WARNING: Guest attempted to write readonly virtio_pci_common_cfg.device_feature (ignoring)\n"));
1387 return VINF_IOM_MMIO_UNUSED_00;
1388 }
1389 else /* Guest READ pCommonCfg->uDeviceFeatures */
1390 {
1391 switch (pVirtio->uDeviceFeaturesSelect)
1392 {
1393 case 0:
1394 val = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff);
1395 memcpy(pv, &val, cb);
1396 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1397 break;
1398 case 1:
1399 val = pVirtio->uDeviceFeatures >> 32;
1400 memcpy(pv, &val, cb);
1401 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
1402 break;
1403 default:
1404 LogFunc(("Guest read uDeviceFeatures with out of range selector (%#x), returning 0\n",
1405 pVirtio->uDeviceFeaturesSelect));
1406 return VINF_IOM_MMIO_UNUSED_00;
1407 }
1408 }
1409 }
1410 else
1411 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1412 {
1413 if (fWrite) /* Guest WRITE pCommonCfg->udriverFeatures */
1414 {
1415 switch (pVirtio->uDriverFeaturesSelect)
1416 {
1417 case 0:
1418 memcpy(&pVirtio->uDriverFeatures, pv, cb);
1419 pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_0_WRITTEN;
1420 LogFunc(("Set DRIVER_FEATURES_0_WRITTEN. pVirtio->fDriverFeaturesWritten=%d\n", pVirtio->fDriverFeaturesWritten));
1421 if ( (pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_0_AND_1_WRITTEN) == DRIVER_FEATURES_0_AND_1_WRITTEN
1422 && !(pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_COMPLETE_HANDLED))
1423#ifdef IN_RING0
1424 return VINF_IOM_R3_MMIO_WRITE;
1425#endif
1426#ifdef IN_RING3
1427 virtioR3DoFeaturesCompleteOnceOnly(pVirtio, pVirtioCC);
1428#endif
1429 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1430 break;
1431 case 1:
1432 memcpy((char *)&pVirtio->uDriverFeatures + sizeof(uint32_t), pv, cb);
1433 pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_1_WRITTEN;
1434 LogFunc(("Set DRIVER_FEATURES_1_WRITTEN. pVirtio->fDriverFeaturesWritten=%d\n", pVirtio->fDriverFeaturesWritten));
1435 if ( (pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_0_AND_1_WRITTEN) == DRIVER_FEATURES_0_AND_1_WRITTEN
1436 && !(pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_COMPLETE_HANDLED))
1437#ifdef IN_RING0
1438 return VINF_IOM_R3_MMIO_WRITE;
1439#endif
1440#ifdef IN_RING3
1441 virtioR3DoFeaturesCompleteOnceOnly(pVirtio, pVirtioCC);
1442#endif
1443 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
1444 break;
1445 default:
1446 LogFunc(("Guest wrote uDriverFeatures with out of range selector (%#x), returning 0\n",
1447 pVirtio->uDriverFeaturesSelect));
1448 return VINF_SUCCESS;
1449 }
1450 }
1451 else /* Guest READ pCommonCfg->udriverFeatures */
1452 {
1453 switch (pVirtio->uDriverFeaturesSelect)
1454 {
1455 case 0:
1456 val = pVirtio->uDriverFeatures & 0xffffffff;
1457 memcpy(pv, &val, cb);
1458 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1459 break;
1460 case 1:
1461 val = (pVirtio->uDriverFeatures >> 32) & 0xffffffff;
1462 memcpy(pv, &val, cb);
1463 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + 4);
1464 break;
1465 default:
1466 LogFunc(("Guest read uDriverFeatures with out of range selector (%#x), returning 0\n",
1467 pVirtio->uDriverFeaturesSelect));
1468 return VINF_IOM_MMIO_UNUSED_00;
1469 }
1470 }
1471 }
1472 else
1473 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1474 {
1475 if (fWrite)
1476 {
1477 Log2Func(("Guest attempted to write readonly virtio_pci_common_cfg.num_queues\n"));
1478 return VINF_SUCCESS;
1479 }
1480 *(uint16_t *)pv = VIRTQ_MAX_COUNT;
1481 VIRTIO_DEV_CONFIG_LOG_ACCESS(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1482 }
1483 else
1484 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1485 {
1486 if (fWrite) /* Guest WRITE pCommonCfg->fDeviceStatus */
1487 {
1488 pVirtio->fDeviceStatus = *(uint8_t *)pv;
1489 bool fDeviceReset = pVirtio->fDeviceStatus == 0;
1490#ifdef LOG_ENABLED
1491 if (LogIs7Enabled())
1492 {
1493 char szOut[80] = { 0 };
1494 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1495 Log(("%-23s: Guest wrote fDeviceStatus ................ (%s)\n", __FUNCTION__, szOut));
1496 }
1497#endif
1498 bool const fStatusChanged = IS_DRIVER_OK(pVirtio) != WAS_DRIVER_OK(pVirtio);
1499
1500 if (fDeviceReset || fStatusChanged)
1501 {
1502#ifdef IN_RING0
1503 /* Since VirtIO status changes are cumbersome by nature, i.e. not a benchmark priority,
1504 * handle the rest in R3 to facilitate logging or whatever else the dev-specific client needs to do */
1505 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__));
1506 return VINF_IOM_R3_MMIO_WRITE;
1507#endif
1508 }
1509
1510#ifdef IN_RING3
1511 /*
1512 * Notify the client only if the status actually changed from last time, or when the guest resets the device.
1513 */
1514 if (fDeviceReset)
1515 virtioGuestR3WasReset(pDevIns, pVirtio, pVirtioCC);
1516
1517 if (fStatusChanged)
1518 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, IS_DRIVER_OK(pVirtio));
1519#endif
1520 /*
1521 * Save the current status for the next write so we can see what changed.
1522 */
1523 pVirtio->fPrevDeviceStatus = pVirtio->fDeviceStatus;
1524 }
1525 else /* Guest READ pCommonCfg->fDeviceStatus */
1526 {
1527 *(uint8_t *)pv = pVirtio->fDeviceStatus;
1528#ifdef LOG_ENABLED
1529 if (LogIs7Enabled())
1530 {
1531 char szOut[80] = { 0 };
1532 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1533 LogFunc(("Guest read fDeviceStatus ................ (%s)\n", szOut));
1534 }
1535#endif
1536 }
1537 }
1538 else
1539 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1540 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1541 else
1542 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDeviceFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1543 VIRTIO_DEV_CONFIG_ACCESS( uDeviceFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1544 else
1545 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDriverFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1546 VIRTIO_DEV_CONFIG_ACCESS( uDriverFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1547 else
1548 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uConfigGeneration, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1549 VIRTIO_DEV_CONFIG_ACCESS( uConfigGeneration, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1550 else
1551 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1552 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1553 else
1554 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqDesc, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1555 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqDesc, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1556 else
1557 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqAvail, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1558 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqAvail, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1559 else
1560 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqUsed, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1561 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqUsed, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1562 else
1563 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uQueueSize, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1564 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uQueueSize, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1565 else
1566 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uEnable, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1567 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uEnable, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1568 else
1569 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uNotifyOffset, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1570 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uNotifyOffset, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1571 else
1572 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixVector, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1573 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsixVector, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1574 else
1575 {
1576 Log2Func(("Bad guest %s access to virtio_pci_common_cfg: uOffsetOfAccess=%#x (%d), cb=%d\n",
1577 fWrite ? "write" : "read ", uOffsetOfAccess, uOffsetOfAccess, cb));
1578 return fWrite ? VINF_SUCCESS : VINF_IOM_MMIO_UNUSED_00;
1579 }
1580
1581#ifndef IN_RING3
1582 RT_NOREF(pDevIns, pVirtioCC);
1583#endif
1584 return rc;
1585}
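
/*
 * Editor's note: a minimal sketch (not part of the device code) of how the two
 * selector-gated 32-bit feature writes handled above assemble the 64-bit
 * fDriverFeatures word. The helper name is hypothetical; the merge mirrors the
 * memcpy offsets used in virtioCommonCfgAccessed():
 *
 * @code
 *  static void exampleMergeDriverFeatures(uint64_t *pfFeatures, uint32_t uSelect, uint32_t uVal)
 *  {
 *      if (uSelect == 0)       // low half, selected via uDriverFeaturesSelect = 0
 *          *pfFeatures = (*pfFeatures & UINT64_C(0xffffffff00000000)) | uVal;
 *      else if (uSelect == 1)  // high half, selected via uDriverFeaturesSelect = 1
 *          *pfFeatures = (*pfFeatures & UINT64_C(0x00000000ffffffff)) | ((uint64_t)uVal << 32);
 *      // out-of-range selectors are ignored, matching the handler above
 *  }
 * @endcode
 */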
1586
1587/**
1588 * @callback_method_impl{FNIOMIOPORTNEWIN}
1589 *
1590 * This I/O handler exists only to handle access from legacy drivers.
1591 */
1592static DECLCALLBACK(VBOXSTRICTRC) virtioLegacyIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
1593{
1594 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1595 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatRead), a);
1596
1597 RT_NOREF(pvUser);
1598 Log(("%-23s: Port read at offset=%RTiop, cb=%#x%s",
1599 __FUNCTION__, offPort, cb,
1600 VIRTIO_DEV_CONFIG_MATCH_MEMBER(fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort) ? "" : "\n"));
1601
1602 void *pv = pu32; /* To use existing macros */
1603 int fWrite = 0; /* To use existing macros */
1604
1605 uint16_t uVirtq = pVirtio->uVirtqSelect;
1606
1607 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1608 {
1609 uint32_t val = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff);
1610 memcpy(pu32, &val, cb);
1611 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1612 }
1613 else
1614 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1615 {
1616 uint32_t val = pVirtio->uDriverFeatures & UINT32_C(0xffffffff);
1617 memcpy(pu32, &val, cb);
1618 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1619 }
1620 else
1621 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1622 {
1623 *(uint8_t *)pu32 = pVirtio->fDeviceStatus;
1624#ifdef LOG_ENABLED
1625 if (LogIs7Enabled())
1626 {
1627 char szOut[80] = { 0 };
1628 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1629 Log(("%-23s: Guest read fDeviceStatus ................ (%s)\n", __FUNCTION__, szOut));
1630 }
1631#endif
1632 }
1633 else
1634 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1635 {
1636 ASSERT_GUEST_MSG(cb == 1, ("%d\n", cb));
1637 *(uint8_t *)pu32 = pVirtio->uISR;
1638 pVirtio->uISR = 0;
1639 virtioLowerInterrupt( pDevIns, 0);
1640 Log((" (ISR read and cleared)\n"));
1641 }
1642 else
1643 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1644 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
1645 else
1646 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqPfn, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1647 {
1648 PVIRTQUEUE pVirtQueue = &pVirtio->aVirtqueues[uVirtq];
1649 *pu32 = pVirtQueue->GCPhysVirtqDesc >> GUEST_PAGE_SHIFT;
1650 Log(("%-23s: Guest read uVirtqPfn .................... %#x\n", __FUNCTION__, *pu32));
1651 }
1652 else
1653 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uQueueSize, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1654 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uQueueSize, uVirtq, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio->aVirtqueues);
1655 else
1656 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uQueueNotify, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1657 VIRTIO_DEV_CONFIG_ACCESS( uQueueNotify, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
1658#ifdef LEGACY_MSIX_SUPPORTED
1659 else
1660 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1661 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
1662 else
1663 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixVector, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1664 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsixVector, uVirtq, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio->aVirtqueues);
1665#endif
1666 else if (offPort >= sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T))
1667 {
1668 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
1669#ifdef IN_RING3
1670 /* Access device-specific configuration */
1671 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1672 int rc = pVirtioCC->pfnDevCapRead(pDevIns, offPort - sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T), pv, cb);
1673 return rc;
1674#else
1675 return VINF_IOM_R3_IOPORT_READ;
1676#endif
1677 }
1678 else
1679 {
1680 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
1681 Log2Func(("Bad guest read access to virtio_legacy_pci_common_cfg: offset=%#x, cb=%x\n",
1682 offPort, cb));
1683 int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
1684 "virtioLegacyIOPortIn: no valid port at offset offset=%RTiop cb=%#x\n", offPort, cb);
1685 return rc;
1686 }
1687 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
1688 return VINF_SUCCESS;
1689}
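
/*
 * Editor's note: the VIRTIO_DEV_CONFIG_MATCH_MEMBER dispatch above keys each port
 * access off the member's position within the layout struct. A minimal sketch,
 * assuming the macro tests the offset against the member's extent; this is a
 * hypothetical stand-in, shown only to clarify how offPort selects a register:
 *
 * @code
 *  #define EXAMPLE_MATCH_MEMBER(member, tCfgStruct, off) \
 *      (   (uint32_t)(off) >= RT_UOFFSETOF(tCfgStruct, member) \
 *       && (uint32_t)(off) <  RT_UOFFSETOF(tCfgStruct, member) + RT_SIZEOFMEMB(tCfgStruct, member))
 * @endcode
 */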
1690
1691/**
1692 * @callback_method_impl{FNIOMIOPORTNEWOUT}
1693 *
1694 * This I/O Port interface exists only to handle access from legacy drivers.
1695 */
1696static DECLCALLBACK(VBOXSTRICTRC) virtioLegacyIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
1697{
1698 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1699 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatWrite), a);
1700 RT_NOREF(pvUser);
1701
1702 uint16_t uVirtq = pVirtio->uVirtqSelect;
1703 uint32_t u32OnStack = u32; /* allows us to use this impl's MMIO parsing macros */
1704 void *pv = &u32OnStack; /* To use existing macros */
1705 int fWrite = 1; /* To use existing macros */
1706
1707 Log(("%-23s: Port written at offset=%RTiop, cb=%#x, u32=%#x\n", __FUNCTION__, offPort, cb, u32));
1708
1709 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1710 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
1711 else
1712#ifdef LEGACY_MSIX_SUPPORTED
1713 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1714 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
1715 else
1716 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixVector, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1717 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsixVector, uVirtq, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio->aVirtqueues);
1718 else
1719#endif
1720 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1721 {
1722 /* Check to see if guest acknowledged unsupported features */
1723 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1724 LogFunc(("... WARNING: Guest attempted to write readonly virtio_pci_common_cfg.device_feature (ignoring)\n"));
1725 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1726 return VINF_SUCCESS;
1727 }
1728 else
1729 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1730 {
1731 memcpy(&pVirtio->uDriverFeatures, pv, cb);
1732 if ((pVirtio->uDriverFeatures & ~VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED) != 0) /* accepted bits we never offered? */
1733 {
1734 Log(("Guest asked for features host does not support! (host=%x guest=%RX64)\n",
1735 VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED, pVirtio->uDriverFeatures));
1736 pVirtio->uDriverFeatures &= VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED;
1737 }
1738 if (!(pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_COMPLETE_HANDLED))
1739 {
1740#ifdef IN_RING0
1741 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__));
1742 return VINF_IOM_R3_IOPORT_WRITE;
1743#endif
1744#ifdef IN_RING3
1745 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1746 virtioR3DoFeaturesCompleteOnceOnly(pVirtio, pVirtioCC);
1747#endif
1748 }
1749 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1750 }
1751 else
1752 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uQueueSize, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1753 {
1754 VIRTIO_DEV_CONFIG_LOG_ACCESS(uQueueSize, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1755 LogFunc(("... WARNING: Guest attempted to write readonly device_feature (queue size) (ignoring)\n"));
1756 return VINF_SUCCESS;
1757 }
1758 else
1759 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1760 {
1761 bool const fDriverInitiatedReset = (pVirtio->fDeviceStatus = (uint8_t)u32) == 0;
1762 bool const fDriverStateImproved = IS_DRIVER_OK(pVirtio) && !WAS_DRIVER_OK(pVirtio);
1763#ifdef LOG_ENABLED
1764 if (LogIs7Enabled())
1765 {
1766 char szOut[80] = { 0 };
1767 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1768 Log(("%-23s: Guest wrote fDeviceStatus ................ (%s)\n", __FUNCTION__, szOut));
1769 }
1770#endif
1771 if (fDriverStateImproved || fDriverInitiatedReset)
1772 {
1773#ifdef IN_RING0
1774 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__));
1775 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1776 return VINF_IOM_R3_IOPORT_WRITE;
1777#endif
1778 }
1779
1780#ifdef IN_RING3
1781 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1782 if (fDriverInitiatedReset)
1783 virtioGuestR3WasReset(pDevIns, pVirtio, pVirtioCC);
1784
1785 else if (fDriverStateImproved)
1786 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, 1 /* fDriverOk */);
1787
1788#endif
1789 pVirtio->fPrevDeviceStatus = pVirtio->fDeviceStatus;
1790 }
1791 else
1792 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uVirtqPfn, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1793 {
1794 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1795 uint64_t uVirtqPfn = (uint64_t)u32;
1796
1797 if (uVirtqPfn)
1798 {
1799 /* Transitional devices calculate ring physical addresses using rigid spec-defined formulae,
1800 * instead of guest conveying respective address of each ring, as "modern" VirtIO drivers do,
1801 * thus there is no virtq PFN or single base queue address stored in instance data for
1802 * this transitional device, but rather it is derived, when read back, from GCPhysVirtqDesc */
1803
1804 pVirtq->GCPhysVirtqDesc = uVirtqPfn * VIRTIO_PAGE_SIZE;
1805 pVirtq->GCPhysVirtqAvail = pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * pVirtq->uQueueSize;
1806 pVirtq->GCPhysVirtqUsed =
1807 RT_ALIGN(pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uQueueSize]), VIRTIO_PAGE_SIZE);
1808 }
1809 else
1810 {
1811 /* Clear the ring addresses rather than leave meaningless values when the guest resets the virtq's PFN */
1812 pVirtq->GCPhysVirtqDesc = 0;
1813 pVirtq->GCPhysVirtqAvail = 0;
1814 pVirtq->GCPhysVirtqUsed = 0;
1815 }
1816 Log(("%-23s: Guest wrote uVirtqPfn .................... %#x:\n"
1817 "%68s... %p -> GCPhysVirtqDesc\n%68s... %p -> GCPhysVirtqAvail\n%68s... %p -> GCPhysVirtqUsed\n",
1818 __FUNCTION__, u32, " ", pVirtq->GCPhysVirtqDesc, " ", pVirtq->GCPhysVirtqAvail, " ", pVirtq->GCPhysVirtqUsed));
1819 }
1820 else
1821 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uQueueNotify, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1822 {
1823#ifdef IN_RING3
1824 ASSERT_GUEST_MSG(cb == 2, ("cb=%u\n", cb));
1825 pVirtio->uQueueNotify = u32 & 0xFFFF;
1826 if (uVirtq < VIRTQ_MAX_COUNT)
1827 {
1828 RT_UNTRUSTED_VALIDATED_FENCE();
1829
1830 /* Need to check that queue is configured. Legacy spec didn't have a queue enabled flag */
1831 if (pVirtio->aVirtqueues[pVirtio->uQueueNotify].GCPhysVirtqDesc)
1832 virtioCoreVirtqNotified(pDevIns, pVirtio, pVirtio->uQueueNotify, pVirtio->uQueueNotify /* uNotifyIdx */);
1833 else
1834 Log(("The queue (#%d) being notified has not been initialized.\n", pVirtio->uQueueNotify));
1835 }
1836 else
1837 Log(("Invalid queue number (%d)\n", pVirtio->uQueueNotify));
1838#else
1839 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1840 return VINF_IOM_R3_IOPORT_WRITE;
1841#endif
1842 }
1843 else
1844 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1845 {
1846 VIRTIO_DEV_CONFIG_LOG_ACCESS( fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1847 LogFunc(("... WARNING: Guest attempted to write readonly device_feature (ISR status) (ignoring)\n"));
1848 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1849 return VINF_SUCCESS;
1850 }
1851 else if (offPort >= sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T))
1852 {
1853#ifdef IN_RING3
1854
1855 /* Access device-specific configuration */
1856 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1857 return pVirtioCC->pfnDevCapWrite(pDevIns, offPort - sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T), pv, cb);
1858#else
1859 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1860 return VINF_IOM_R3_IOPORT_WRITE;
1861#endif
1862 }
1863 else
1864 {
1865 Log2Func(("Bad guest write access to virtio_legacy_pci_common_cfg: offset=%#x, cb=0x%x\n",
1866 offPort, cb));
1867 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1868 int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
1869 "virtioLegacyIOPortOut: no valid port at offset offset=%RTiop cb=0x%#x\n", offPort, cb);
1870 return rc;
1871 }
1872
1873 RT_NOREF(uVirtq);
1874 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1875 return VINF_SUCCESS;
1876}
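
/*
 * Editor's note: a worked example (not normative) of the legacy ring layout computed
 * when the guest writes uVirtqPfn above. Assuming VIRTIO_PAGE_SIZE is 4096, a 16-byte
 * VIRTQ_DESC_T (per the split-ring spec), a queue size of 256 and a guest PFN of 0x12345:
 *
 * @code
 *  GCPhysVirtqDesc  = 0x12345 * 4096                            -> 0x12345000
 *  GCPhysVirtqAvail = 0x12345000 + 16 * 256                     -> 0x12346000
 *  GCPhysVirtqUsed  = RT_ALIGN(0x12346000 + 4 + 2 * 256, 4096)  -> 0x12347000
 * @endcode
 *
 * The 4 + 2 * 256 term is the avail ring's flags/idx header plus its 256 16-bit ring
 * entries, i.e. RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[256]).
 */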
1877
1878
1879/**
1880 * @callback_method_impl{FNIOMMMIONEWREAD,
1881 * Memory mapped I/O Handler for PCI Capabilities read operations.}
1882 *
1883 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
1884 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to reads
1885 * of 1, 2 or 4 bytes, only.
1886 *
1887 */
1888static DECLCALLBACK(VBOXSTRICTRC) virtioMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
1889{
1890 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1891 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1892 AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER);
1893 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
1894 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatRead), a);
1895
1896
1897 uint32_t uOffset;
1898 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
1899 {
1900#ifdef IN_RING3
1901 /*
1902 * Callback to client to manage device-specific configuration.
1903 */
1904 VBOXSTRICTRC rcStrict = pVirtioCC->pfnDevCapRead(pDevIns, uOffset, pv, cb);
1905
1906 /*
1907 * Anytime any part of the dev-specific dev config (which this virtio core implementation sees
1908 * as a blob, and virtio dev-specific code separates into fields) is READ, it must be compared
1909 * for deltas from previous read to maintain a config gen. seq. counter (VirtIO 1.0, section 4.1.4.3.1)
1910 */
1911 bool fDevSpecificFieldChanged = RT_BOOL(memcmp(pVirtioCC->pbDevSpecificCfg + uOffset,
1912 pVirtioCC->pbPrevDevSpecificCfg + uOffset,
1913 RT_MIN(cb, pVirtioCC->cbDevSpecificCfg - uOffset)));
1914
1915 memcpy(pVirtioCC->pbPrevDevSpecificCfg, pVirtioCC->pbDevSpecificCfg, pVirtioCC->cbDevSpecificCfg);
1916
1917 if (pVirtio->fGenUpdatePending || fDevSpecificFieldChanged)
1918 {
1919 ++pVirtio->uConfigGeneration;
1920 Log6Func(("Bumped cfg. generation to %d because %s%s\n", pVirtio->uConfigGeneration,
1921 fDevSpecificFieldChanged ? "<dev cfg changed> " : "",
1922 pVirtio->fGenUpdatePending ? "<update was pending>" : ""));
1923 pVirtio->fGenUpdatePending = false;
1924 }
1925
1926 virtioLowerInterrupt(pDevIns, 0);
1927 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
1928 return rcStrict;
1929#else
1930 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
1931 return VINF_IOM_R3_MMIO_READ;
1932#endif
1933 }
1934
1935 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
1936 return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, false /* fWrite */, uOffset, cb, pv);
1937
1938 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap))
1939 {
1940 *(uint8_t *)pv = pVirtio->uISR;
1941 Log6Func(("Read and clear ISR\n"));
1942 pVirtio->uISR = 0; /* VirtIO spec requires reads of ISR to clear it */
1943 virtioLowerInterrupt(pDevIns, 0);
1944 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
1945 return VINF_SUCCESS;
1946 }
1947
1948 ASSERT_GUEST_MSG_FAILED(("Bad read access to mapped capabilities region: off=%RGp cb=%u\n", off, cb));
1949 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
1950 int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
1951 "virtioMmioRead: Bad MMIO access to capabilities, offset=%RTiop cb=%08x\n", off, cb);
1952 return rc;
1953}
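
/*
 * Editor's note: a sketch of the driver-side protocol that the generation bump above
 * supports (VirtIO 1.0, section 4.1.4.3.1). Guest-side pseudo-driver code with
 * hypothetical names; not part of this device:
 *
 * @code
 *  uint8_t uGen;
 *  do
 *  {
 *      uGen = pCommonCfg->config_generation;
 *      readDeviceSpecificConfig();   // multi-access read that could otherwise tear
 *  } while (uGen != pCommonCfg->config_generation);
 * @endcode
 */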
1954
1955/**
1956 * @callback_method_impl{FNIOMMMIONEWWRITE,
1957 * Memory mapped I/O Handler for PCI Capabilities write operations.}
1958 *
1959 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
1960 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to writes
1961 * of 1, 2 or 4 bytes, only.
1962 */
1963static DECLCALLBACK(VBOXSTRICTRC) virtioMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
1964{
1965 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1966 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1967 AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER);
1968 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
1969 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatWrite), a);
1970
1971 uint32_t uOffset;
1972 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
1973 {
1974#ifdef IN_RING3
1975 /*
1976 * Forward this MMIO write access to the client to deal with.
1977 */
1978 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1979 return pVirtioCC->pfnDevCapWrite(pDevIns, uOffset, pv, cb);
1980#else
1981 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1982 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__));
1983 return VINF_IOM_R3_MMIO_WRITE;
1984#endif
1985 }
1986
1987 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
1988 {
1989 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
1990 return virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, true /* fWrite */, uOffset, cb, (void *)pv);
1991 }
1992
1993 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap) && cb == sizeof(uint8_t))
1994 {
1995 pVirtio->uISR = *(uint8_t *)pv;
1996 Log6Func(("Setting uISR = 0x%02x (virtq interrupt: %d, dev config interrupt: %d)\n",
1997 pVirtio->uISR & 0xff,
1998 pVirtio->uISR & VIRTIO_ISR_VIRTQ_INTERRUPT,
1999 RT_BOOL(pVirtio->uISR & VIRTIO_ISR_DEVICE_CONFIG)));
2000 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
2001 return VINF_SUCCESS;
2002 }
2003
2004 /* This *should* be the guest driver notifying us that it has dropped the index of a new descriptor into the avail ring */
2005 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocNotifyCap) && cb == sizeof(uint16_t))
2006 {
2007 virtioCoreVirtqNotified(pDevIns, pVirtio, uOffset / VIRTIO_NOTIFY_OFFSET_MULTIPLIER, *(uint16_t *)pv);
2008 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
2009 return VINF_SUCCESS;
2010 }
2011
2012 ASSERT_GUEST_MSG_FAILED(("Bad write access to mapped capabilities region: off=%RGp pv=%#p{%.*Rhxs} cb=%u\n", off, pv, cb, pv, cb));
2013 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
2014 int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
2015 "virtioMmioRead: Bad MMIO access to capabilities, offset=%RTiop cb=%08x\n", off, cb);
2016 return rc;
2017}
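
/*
 * Editor's note: a minimal sketch of the guest-side write that lands in the
 * LocNotifyCap branch above. With this implementation's layout each queue's notify
 * offset equals its index, so a (hypothetical) driver computes:
 *
 * @code
 *  volatile uint16_t *pNotify = (volatile uint16_t *)(  pbNotifyBase
 *                                                     + uQueueNotifyOff * VIRTIO_NOTIFY_OFFSET_MULTIPLIER);
 *  *pNotify = uVirtq;  // decoded by the handler as uOffset / VIRTIO_NOTIFY_OFFSET_MULTIPLIER
 * @endcode
 */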
2018
2019#ifdef IN_RING3
2020
2021/**
2022 * @callback_method_impl{FNPCICONFIGREAD}
2023 */
2024static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
2025 uint32_t uAddress, unsigned cb, uint32_t *pu32Value)
2026{
2027 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
2028 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
2029 RT_NOREF(pPciDev);
2030
2031 if (uAddress == pVirtio->uPciCfgDataOff)
2032 {
2033 /* See comments in PCI Cfg capability initialization (in capabilities setup section of this code) */
2034 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
2035 uint32_t uLength = pPciCap->uLength;
2036
2037 Log7Func((" pDevIns=%p pPciDev=%p uAddress=%#x%s cb=%u uLength=%d, bar=%d\n",
2038 pDevIns, pPciDev, uAddress, uAddress < 0x10 ? " " : "", cb, uLength, pPciCap->uBar));
2039
2040 if ( (uLength != 1 && uLength != 2 && uLength != 4)
2041 || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
2042 {
2043 ASSERT_GUEST_MSG_FAILED(("Guest read virtio_pci_cfg_cap.pci_cfg_data using mismatching config. "
2044 "Ignoring\n"));
2045 *pu32Value = UINT32_MAX;
2046 return VINF_SUCCESS;
2047 }
2048
2049 VBOXSTRICTRC rcStrict = virtioMmioRead(pDevIns, pVirtio, pPciCap->uOffset, pu32Value, cb);
2050 Log7Func((" Guest read virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%d, length=%d, result=0x%x -> %Rrc\n",
2051 pPciCap->uBar, pPciCap->uOffset, uLength, *pu32Value, VBOXSTRICTRC_VAL(rcStrict)));
2052 return rcStrict;
2053 }
2054 Log7Func((" pDevIns=%p pPciDev=%p uAddress=%#x%s cb=%u pu32Value=%p\n",
2055 pDevIns, pPciDev, uAddress, uAddress < 0x10 ? " " : "", cb, pu32Value));
2056 return VINF_PDM_PCI_DO_DEFAULT;
2057}
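
/*
 * Editor's note: a sketch of how early-boot firmware might drive the
 * VIRTIO_PCI_CAP_PCI_CFG window handled above (VirtIO 1.0, section 4.1.4.7).
 * pciCfgWrite8/pciCfgWrite32/pciCfgRead32 are hypothetical config-space accessors;
 * the struct and field names are the ones this file uses:
 *
 * @code
 *  pciCfgWrite8 (uCapOff + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, pciCap.uBar),    VIRTIO_REGION_PCI_CAP);
 *  pciCfgWrite32(uCapOff + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, pciCap.uOffset), offIntoBar);
 *  pciCfgWrite32(uCapOff + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, pciCap.uLength), 4);
 *  uint32_t uValue = pciCfgRead32(uCapOff + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, uPciCfgData));
 * @endcode
 */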
2058
2059/**
2060 * @callback_method_impl{FNPCICONFIGWRITE}
2061 */
2062static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
2063 uint32_t uAddress, unsigned cb, uint32_t u32Value)
2064{
2065 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
2066 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
2067 RT_NOREF(pPciDev);
2068
2069 Log7Func(("pDevIns=%p pPciDev=%p uAddress=%#x %scb=%u u32Value=%#x\n", pDevIns, pPciDev, uAddress, uAddress < 0xf ? " " : "", cb, u32Value));
2070 if (uAddress == pVirtio->uPciCfgDataOff)
2071 {
2072 /* See comments in PCI Cfg capability initialization (in capabilities setup section of this code) */
2073 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
2074 uint32_t uLength = pPciCap->uLength;
2075
2076 if ( (uLength != 1 && uLength != 2 && uLength != 4)
2077 || cb != uLength
2078 || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
2079 {
2080 ASSERT_GUEST_MSG_FAILED(("Guest write virtio_pci_cfg_cap.pci_cfg_data using mismatching config. Ignoring\n"));
2081 return VINF_SUCCESS;
2082 }
2083
2084 VBOXSTRICTRC rcStrict = virtioMmioWrite(pDevIns, pVirtio, pPciCap->uOffset, &u32Value, cb);
2085 Log2Func(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%x, length=%x, value=%d -> %Rrc\n",
2086 pPciCap->uBar, pPciCap->uOffset, uLength, u32Value, VBOXSTRICTRC_VAL(rcStrict)));
2087 return rcStrict;
2088 }
2089 return VINF_PDM_PCI_DO_DEFAULT;
2090}
2091
2092
2093/*********************************************************************************************************************************
2094* Saved state (SSM) *
2095*********************************************************************************************************************************/
2096
2097
2098/**
2099 * Loads a saved device state (called from device-specific code on SSM final pass)
2100 *
2101 * @param pVirtio Pointer to the shared virtio state.
2102 * @param pHlp The ring-3 device helpers.
2103 * @param pSSM The saved state handle.
2104 * @returns VBox status code.
2105 */
2106int virtioCoreR3LegacyDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp,
2107 PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uVirtioLegacy_3_1_Beta)
2108{
2109 int rc;
2110 uint32_t uDriverFeaturesLegacy32bit;
2111
2112 rc = pHlp->pfnSSMGetU32( pSSM, &uDriverFeaturesLegacy32bit);
2113 AssertRCReturn(rc, rc);
2114 pVirtio->uDriverFeatures = (uint64_t)uDriverFeaturesLegacy32bit;
2115
2116 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtio->uVirtqSelect);
2117 AssertRCReturn(rc, rc);
2118
2119 rc = pHlp->pfnSSMGetU8( pSSM, &pVirtio->fDeviceStatus);
2120 AssertRCReturn(rc, rc);
2121
2122#ifdef LOG_ENABLED
2123 char szOut[80] = { 0 };
2124 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
2125 Log(("Loaded legacy device status = (%s)\n", szOut));
2126#endif
2127
2128 rc = pHlp->pfnSSMGetU8( pSSM, &pVirtio->uISR);
2129 AssertRCReturn(rc, rc);
2130
2131 uint32_t cQueues = 3; /* This constant default value copied from earliest v0.9 code */
2132 if (uVersion > uVirtioLegacy_3_1_Beta)
2133 {
2134 rc = pHlp->pfnSSMGetU32(pSSM, &cQueues);
2135 AssertRCReturn(rc, rc);
2136 }
2137
2138 AssertLogRelMsgReturn(cQueues <= VIRTQ_MAX_COUNT, ("%#x\n", cQueues), VERR_SSM_LOAD_CONFIG_MISMATCH);
2139 AssertLogRelMsgReturn(pVirtio->uVirtqSelect < cQueues || (cQueues == 0 && pVirtio->uVirtqSelect == 0),
2140 ("uVirtqSelect=%u cQueues=%u\n", pVirtio->uVirtqSelect, cQueues),
2141 VERR_SSM_LOAD_CONFIG_MISMATCH);
2142
2143 Log(("\nRestoring %d legacy-only virtio-net device queues from saved state:\n", cQueues));
2144 for (unsigned uVirtq = 0; uVirtq < cQueues; uVirtq++)
2145 {
2146 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
2147
2148 if (uVirtq == cQueues - 1)
2149 RTStrPrintf(pVirtq->szName, sizeof(pVirtq->szName), "legacy-ctrlq");
2150 else if (uVirtq % 2)
2151 RTStrPrintf(pVirtq->szName, sizeof(pVirtq->szName), "legacy-xmitq<%d>", uVirtq / 2);
2152 else
2153 RTStrPrintf(pVirtq->szName, sizeof(pVirtq->szName), "legacy-recvq<%d>", uVirtq / 2);
2154
2155 rc = pHlp->pfnSSMGetU16(pSSM, &pVirtq->uQueueSize);
2156 AssertRCReturn(rc, rc);
2157
2158 uint32_t uVirtqPfn;
2159 rc = pHlp->pfnSSMGetU32(pSSM, &uVirtqPfn);
2160 AssertRCReturn(rc, rc);
2161
2162 rc = pHlp->pfnSSMGetU16(pSSM, &pVirtq->uAvailIdxShadow);
2163 AssertRCReturn(rc, rc);
2164
2165 rc = pHlp->pfnSSMGetU16(pSSM, &pVirtq->uUsedIdxShadow);
2166 AssertRCReturn(rc, rc);
2167
2168 if (uVirtqPfn)
2169 {
2170 pVirtq->GCPhysVirtqDesc = (uint64_t)uVirtqPfn * VIRTIO_PAGE_SIZE;
2171 pVirtq->GCPhysVirtqAvail = pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * pVirtq->uQueueSize;
2172 pVirtq->GCPhysVirtqUsed =
2173 RT_ALIGN(pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uQueueSize]), VIRTIO_PAGE_SIZE);
2174 pVirtq->uEnable = 1;
2175 }
2176 else
2177 {
2178 LogFunc(("WARNING: QUEUE \"%s\" PAGE NUMBER ZERO IN SAVED STATE\n", pVirtq->szName));
2179 pVirtq->uEnable = 0;
2180 }
2181 pVirtq->uNotifyOffset = 0; /* unused in legacy mode */
2182 pVirtq->uMsixVector = 0; /* unused in legacy mode */
2183 }
2184 pVirtio->fGenUpdatePending = 0; /* unused in legacy mode */
2185 pVirtio->uConfigGeneration = 0; /* unused in legacy mode */
2186 pVirtio->uPciCfgDataOff = 0; /* unused in legacy mode (port I/O used instead) */
2187
2188 return VINF_SUCCESS;
2189}
2190
2191/**
2192 * Loads a saved device state (called from device-specific code on SSM final pass)
2193 *
2194 * Note: This loads state saved by a Modern (VirtIO 1.0+) device, of which this transitional device is one,
2195 * and thus supports both legacy and modern guest virtio drivers.
2196 *
2197 * @param pVirtio Pointer to the shared virtio state.
2198 * @param pHlp The ring-3 device helpers.
2199 * @param pSSM The saved state handle.
2200 * @returns VBox status code.
2201 */
2202int virtioCoreR3ModernDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uTestVersion, uint32_t cQueues)
2203{
2204 RT_NOREF2(cQueues, uVersion);
2205 LogFunc(("\n"));
2206 /*
2207 * Check the marker and (embedded) version number.
2208 */
2209 uint64_t uMarker = 0;
2210 int rc;
2211
2212 rc = pHlp->pfnSSMGetU64(pSSM, &uMarker);
2213 AssertRCReturn(rc, rc);
2214 if (uMarker != VIRTIO_SAVEDSTATE_MARKER)
2215 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
2216 N_("Expected marker value %#RX64 found %#RX64 instead"),
2217 VIRTIO_SAVEDSTATE_MARKER, uMarker);
2218 uint32_t uVersionSaved = 0;
2219 rc = pHlp->pfnSSMGetU32(pSSM, &uVersionSaved);
2220 AssertRCReturn(rc, rc);
2221 if (uVersionSaved != uTestVersion)
2222 return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
2223 N_("Unsupported virtio version: %u"), uVersionSaved);
2224 /*
2225 * Load the state.
2226 */
2227 rc = pHlp->pfnSSMGetU32( pSSM, &pVirtio->fLegacyDriver);
2228 AssertRCReturn(rc, rc);
2229 rc = pHlp->pfnSSMGetBool( pSSM, &pVirtio->fGenUpdatePending);
2230 AssertRCReturn(rc, rc);
2231 rc = pHlp->pfnSSMGetU8( pSSM, &pVirtio->fDeviceStatus);
2232 AssertRCReturn(rc, rc);
2233 rc = pHlp->pfnSSMGetU8( pSSM, &pVirtio->uConfigGeneration);
2234 AssertRCReturn(rc, rc);
2235 rc = pHlp->pfnSSMGetU8( pSSM, &pVirtio->uPciCfgDataOff);
2236 AssertRCReturn(rc, rc);
2237 rc = pHlp->pfnSSMGetU8( pSSM, &pVirtio->uISR);
2238 AssertRCReturn(rc, rc);
2239 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtio->uVirtqSelect);
2240 AssertRCReturn(rc, rc);
2241 rc = pHlp->pfnSSMGetU32( pSSM, &pVirtio->uDeviceFeaturesSelect);
2242 AssertRCReturn(rc, rc);
2243 rc = pHlp->pfnSSMGetU32( pSSM, &pVirtio->uDriverFeaturesSelect);
2244 AssertRCReturn(rc, rc);
2245 rc = pHlp->pfnSSMGetU64( pSSM, &pVirtio->uDriverFeatures);
2246 AssertRCReturn(rc, rc);
2247
2248 /** @todo Adapt this loop to use the cQueues argument instead of the static queue count (safely, with SSM versioning) */
2249 for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
2250 {
2251 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];
2252 rc = pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqDesc);
2253 AssertRCReturn(rc, rc);
2254 rc = pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqAvail);
2255 AssertRCReturn(rc, rc);
2256 rc = pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqUsed);
2257 AssertRCReturn(rc, rc);
2258 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtq->uNotifyOffset);
2259 AssertRCReturn(rc, rc);
2260 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtq->uMsixVector);
2261 AssertRCReturn(rc, rc);
2262 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtq->uEnable);
2263 AssertRCReturn(rc, rc);
2264 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtq->uQueueSize);
2265 AssertRCReturn(rc, rc);
2266 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtq->uAvailIdxShadow);
2267 AssertRCReturn(rc, rc);
2268 rc = pHlp->pfnSSMGetU16( pSSM, &pVirtq->uUsedIdxShadow);
2269 AssertRCReturn(rc, rc);
2270 rc = pHlp->pfnSSMGetMem( pSSM, pVirtq->szName, sizeof(pVirtq->szName));
2271 AssertRCReturn(rc, rc);
2272 }
2273 return VINF_SUCCESS;
2274}
2275
2276/**
2277 * Called from the FNSSMDEVSAVEEXEC function of the device.
2278 *
2279 * @param pVirtio Pointer to the shared virtio state.
2280 * @param pHlp The ring-3 device helpers.
2281 * @param pSSM The saved state handle.
2282 * @returns VBox status code.
2283 */
2284int virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t cQueues)
2285{
2286 RT_NOREF(cQueues);
2287 /** @todo figure out a way to save cQueues (with SSM versioning) */
2288
2289 LogFunc(("\n"));
2290 pHlp->pfnSSMPutU64(pSSM, VIRTIO_SAVEDSTATE_MARKER);
2291 pHlp->pfnSSMPutU32(pSSM, uVersion);
2292
2293 pHlp->pfnSSMPutU32( pSSM, pVirtio->fLegacyDriver);
2294 pHlp->pfnSSMPutBool(pSSM, pVirtio->fGenUpdatePending);
2295 pHlp->pfnSSMPutU8( pSSM, pVirtio->fDeviceStatus);
2296 pHlp->pfnSSMPutU8( pSSM, pVirtio->uConfigGeneration);
2297 pHlp->pfnSSMPutU8( pSSM, pVirtio->uPciCfgDataOff);
2298 pHlp->pfnSSMPutU8( pSSM, pVirtio->uISR);
2299 pHlp->pfnSSMPutU16( pSSM, pVirtio->uVirtqSelect);
2300 pHlp->pfnSSMPutU32( pSSM, pVirtio->uDeviceFeaturesSelect);
2301 pHlp->pfnSSMPutU32( pSSM, pVirtio->uDriverFeaturesSelect);
2302 pHlp->pfnSSMPutU64( pSSM, pVirtio->uDriverFeatures);
2303
2304 for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
2305 {
2306 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];
2307
2308 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqDesc);
2309 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqAvail);
2310 pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqUsed);
2311 pHlp->pfnSSMPutU16( pSSM, pVirtq->uNotifyOffset);
2312 pHlp->pfnSSMPutU16( pSSM, pVirtq->uMsixVector);
2313 pHlp->pfnSSMPutU16( pSSM, pVirtq->uEnable);
2314 pHlp->pfnSSMPutU16( pSSM, pVirtq->uQueueSize);
2315 pHlp->pfnSSMPutU16( pSSM, pVirtq->uAvailIdxShadow);
2316 pHlp->pfnSSMPutU16( pSSM, pVirtq->uUsedIdxShadow);
2317 int rc = pHlp->pfnSSMPutMem(pSSM, pVirtq->szName, sizeof(pVirtq->szName)); /* keep in sync with the load path */
2318 AssertRCReturn(rc, rc);
2319 }
2320 return VINF_SUCCESS;
2321}
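
/*
 * Editor's note: a minimal usage sketch (hypothetical device code) showing where
 * virtioCoreR3SaveExec() is expected to be called from a client's FNSSMDEVSAVEEXEC
 * callback, after the device saves its own state. EXAMPLE_SAVED_STATE_VERSION is a
 * placeholder for the client's own version constant:
 *
 * @code
 *  static DECLCALLBACK(int) exampleDevR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
 *  {
 *      PVIRTIOCORE   pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
 *      PCPDMDEVHLPR3 pHlp    = pDevIns->pHlpR3;
 *      // ... put device-specific fields first ...
 *      return virtioCoreR3SaveExec(pVirtio, pHlp, pSSM, EXAMPLE_SAVED_STATE_VERSION, VIRTQ_MAX_COUNT);
 *  }
 * @endcode
 */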
2322
2323
2324/*********************************************************************************************************************************
2325* Device Level *
2326*********************************************************************************************************************************/
2327
2328/**
2329 * This must be called by the client to handle VM state changes after the client takes care of its device-specific
2330 * tasks for the state change (i.e. reset, suspend, power-off, resume)
2331 *
2332 * @param pVirtio Pointer to the shared virtio state.
2333 * @param enmState The VM state change being signalled.
2334 */
2335void virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState)
2336{
2337 LogFunc(("State changing to %s\n",
2338 virtioCoreGetStateChangeText(enmState)));
2339
2340 switch(enmState)
2341 {
2342 case kvirtIoVmStateChangedReset:
2343 virtioCoreResetAll(pVirtio);
2344 break;
2345 case kvirtIoVmStateChangedSuspend:
2346 break;
2347 case kvirtIoVmStateChangedPowerOff:
2348 break;
2349 case kvirtIoVmStateChangedResume:
2350 for (int uVirtq = 0; uVirtq < VIRTQ_MAX_COUNT; uVirtq++)
2351 {
2352 if ((!pVirtio->fLegacyDriver && pVirtio->aVirtqueues[uVirtq].uEnable)
2353 || pVirtio->aVirtqueues[uVirtq].GCPhysVirtqDesc)
2354 virtioCoreNotifyGuestDriver(pVirtio->pDevInsR3, pVirtio, uVirtq);
2355 }
2356 break;
2357 default:
2358 LogRelFunc(("Bad enum value"));
2359 return;
2360 }
2361}
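
/*
 * Editor's note: a sketch of the intended call pattern (hypothetical device code);
 * the client finishes its own resume work, then hands the state change to the core:
 *
 * @code
 *  static DECLCALLBACK(void) exampleDevR3Resume(PPDMDEVINS pDevIns)
 *  {
 *      PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
 *      // ... device-specific resume work first ...
 *      virtioCoreR3VmStateChanged(pVirtio, kvirtIoVmStateChangedResume);
 *  }
 * @endcode
 */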
2362
2363/**
2364 * This should be called from PDMDEVREGR3::pfnDestruct.
2365 *
2366 * @param pDevIns The device instance.
2367 * @param pVirtio Pointer to the shared virtio state.
2368 * @param pVirtioCC Pointer to the ring-3 virtio state.
2369 */
2370void virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
2371{
2372 if (pVirtioCC->pbPrevDevSpecificCfg)
2373 {
2374 RTMemFree(pVirtioCC->pbPrevDevSpecificCfg);
2375 pVirtioCC->pbPrevDevSpecificCfg = NULL;
2376 }
2377
2378 RT_NOREF(pDevIns, pVirtio);
2379}
2380
2381/** API Function: See header file */
2382int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, PVIRTIOPCIPARAMS pPciParams,
2383 const char *pcszInstance, uint64_t fDevSpecificFeatures, uint32_t fOfferLegacy,
2384 void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg)
2385{
2386 /*
2387 * Virtio state must be the first member of shared device instance data,
2388 * otherwise can't get our bearings in PCI config callbacks.
2389 */
2390 AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
2391 AssertLogRelReturn(pVirtioCC == PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC), VERR_STATE_CHANGED);
2392
2393 pVirtio->pDevInsR3 = pDevIns;
2394
2395 /*
2396 * Caller must initialize these.
2397 */
2398 AssertReturn(pVirtioCC->pfnStatusChanged, VERR_INVALID_POINTER);
2399 AssertReturn(pVirtioCC->pfnVirtqNotified, VERR_INVALID_POINTER);
2400 AssertReturn(VIRTQ_SIZE > 0 && VIRTQ_SIZE <= 32768, VERR_OUT_OF_RANGE); /* VirtIO specification-defined limit */
2401
2402#if 0 /* VBox legacy MSI support has not been implemented yet; enable this once the
2403 * pdmR3DevHlp_PCISetIrq() impl is fixed and the Assert limiting the vector to 0 is removed
2404 */
2405# ifdef VBOX_WITH_MSI_DEVICES
2406 pVirtio->fMsiSupport = true;
2407# endif
2408#endif
2409
2410 /*
2411 * Host features (presented as a smörgasbord for guest to select from)
2412 * include both dev-specific features & reserved dev-independent features (bitmask).
2413 */
2414 pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1
2415 | VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED
2416 | fDevSpecificFeatures;
2417
2418 pVirtio->fLegacyDriver = pVirtio->fOfferLegacy = fOfferLegacy;
2419
2420 RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance);
2421 pVirtioCC->cbDevSpecificCfg = cbDevSpecificCfg;
2422 pVirtioCC->pbDevSpecificCfg = (uint8_t *)pvDevSpecificCfg;
2423 pVirtioCC->pbPrevDevSpecificCfg = (uint8_t *)RTMemDup(pvDevSpecificCfg, cbDevSpecificCfg);
2424 AssertLogRelReturn(pVirtioCC->pbPrevDevSpecificCfg, VERR_NO_MEMORY);
2425
2426 /* Set PCI config registers (assume 32-bit mode) */
2427 PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
2428 PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);
2429
2430 PDMPciDevSetVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
2431 PDMPciDevSetDeviceId(pPciDev, pPciParams->uDeviceId);
2432
2433 if (pPciParams->uDeviceId < DEVICE_PCI_DEVICE_ID_VIRTIO_BASE)
2434 /* Transitional devices MUST have a PCI Revision ID of 0. */
2435 PDMPciDevSetRevisionId(pPciDev, DEVICE_PCI_REVISION_ID_VIRTIO_TRANS);
2436 else
2437 /* Non-transitional devices SHOULD have a PCI Revision ID of 1 or higher. */
2438 PDMPciDevSetRevisionId(pPciDev, DEVICE_PCI_REVISION_ID_VIRTIO_V1);
2439
2440 PDMPciDevSetSubSystemId(pPciDev, DEVICE_PCI_NETWORK_SUBSYSTEM);
2441 PDMPciDevSetSubSystemVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
2442 PDMPciDevSetClassBase(pPciDev, pPciParams->uClassBase);
2443 PDMPciDevSetClassSub(pPciDev, pPciParams->uClassSub);
2444 PDMPciDevSetClassProg(pPciDev, pPciParams->uClassProg);
2445 PDMPciDevSetInterruptLine(pPciDev, pPciParams->uInterruptLine);
2446 PDMPciDevSetInterruptPin(pPciDev, pPciParams->uInterruptPin);
2447
2448 /* Register PCI device */
2449 int rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
2450 if (RT_FAILURE(rc))
2451 return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Device")); /* can we put params in this error? */
2452
2453 rc = PDMDevHlpPCIInterceptConfigAccesses(pDevIns, pPciDev, virtioR3PciConfigRead, virtioR3PciConfigWrite);
2454 AssertRCReturn(rc, rc);
2455
2456 /* Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */
2457
2458#define CFG_ADDR_2_IDX(addr) ((uint8_t)(((uintptr_t)(addr) - (uintptr_t)&pPciDev->abConfig[0])))
2459#define SET_PCI_CAP_LOC(a_pPciDev, a_pCfg, a_LocCap, a_uMmioLengthAlign) \
2460 do { \
2461 (a_LocCap).offMmio = (a_pCfg)->uOffset; \
2462 (a_LocCap).cbMmio = RT_ALIGN_T((a_pCfg)->uLength, a_uMmioLengthAlign, uint16_t); \
2463 (a_LocCap).offPci = (uint16_t)(uintptr_t)((uint8_t *)(a_pCfg) - &(a_pPciDev)->abConfig[0]); \
2464 (a_LocCap).cbPci = (a_pCfg)->uCapLen; \
2465 } while (0)
2466
2467 PVIRTIO_PCI_CAP_T pCfg;
2468 uint32_t cbRegion = 0;
2469
2470 /*
2471 * Common capability (VirtIO 1.0, section 4.1.4.3)
2472 */
2473 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[0x40];
2474 pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
2475 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2476 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
2477 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
2478 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2479 pCfg->uOffset = RT_ALIGN_32(0, 4); /* Currently 0, but reminder to 32-bit align if changing this */
2480 pCfg->uLength = sizeof(VIRTIO_PCI_COMMON_CFG_T);
2481 cbRegion += pCfg->uLength;
2482 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocCommonCfgCap, 2);
2483 pVirtioCC->pCommonCfgCap = pCfg;
2484
2485 /*
2486 * Notify capability (VirtIO 1.0, section 4.1.4.4).
2487 *
2488 * The size of the spec-defined subregion described by this VirtIO capability is
2489 * based on this implementation's choice to make each queue's notification offset
2490 * equal to its ordinal position (i.e. the queue selector value). The VirtIO
2491 * specification leaves the queue notification area layout up to the implementation.
2492 */
2493 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
2494 pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
2495 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2496 pCfg->uCapLen = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
2497 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
2498 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2499 pCfg->uOffset = pVirtioCC->pCommonCfgCap->uOffset + pVirtioCC->pCommonCfgCap->uLength;
2500 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
2501 pCfg->uLength = VIRTQ_MAX_COUNT * VIRTIO_NOTIFY_OFFSET_MULTIPLIER + 2; /* will change in VirtIO 1.1 */
2502 cbRegion += pCfg->uLength;
2503 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocNotifyCap, 1);
2504 pVirtioCC->pNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)pCfg;
2505 pVirtioCC->pNotifyCap->uNotifyOffMultiplier = VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
2506
2507 /* ISR capability (VirtIO 1.0, section 4.1.4.5)
2508 *
2509 * VirtIO 1.0 spec says 8-bit, unaligned in MMIO space. The specification example/diagram
2510 * illustrates this capability as 32-bit field with upper bits 'reserved'. Those depictions
2511 * differ. The spec's wording, not the diagram, is seen to work in practice.
2512 */
2513 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
2514 pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
2515 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2516 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
2517 pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
2518 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2519 pCfg->uOffset = pVirtioCC->pNotifyCap->pciCap.uOffset + pVirtioCC->pNotifyCap->pciCap.uLength;
2520 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
2521 pCfg->uLength = sizeof(uint8_t);
2522 cbRegion += pCfg->uLength;
2523 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocIsrCap, 4);
2524 pVirtioCC->pIsrCap = pCfg;
2525
2526 /* PCI Cfg capability (VirtIO 1.0, section 4.1.4.7)
2527 *
2528 * This capability facilitates early-boot access to this device (BIOS).
2529 * This region isn't page-MMIO mapped. PCI configuration accesses are intercepted,
2530 * wherein uBar, uOffset and uLength are modulated by consumers to locate and read/write
2531 * values in any part of any region. (NOTE: Linux driver doesn't utilize this feature.
2532 * This capability only appears in lspci output on Linux if uLength is non-zero, 4-byte aligned,
2533 * during initialization of linux virtio driver).
2534 */
2535 pVirtio->uPciCfgDataOff = pCfg->uCapNext + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, uPciCfgData);
2536 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
2537 pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
2538 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2539 pCfg->uCapLen = sizeof(VIRTIO_PCI_CFG_CAP_T);
2540 pCfg->uCapNext = (pVirtio->fMsiSupport || pVirtioCC->pbDevSpecificCfg) ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
2541 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2542 pCfg->uOffset = 0;
2543 pCfg->uLength = 4;
2544 cbRegion += pCfg->uLength;
2545 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocPciCfgCap, 1);
2546 pVirtioCC->pPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)pCfg;
2547
2548 if (pVirtioCC->pbDevSpecificCfg)
2549 {
2550 /* Device-specific config capability (VirtIO 1.0, section 4.1.4.6).
2551 *
2552 * Client defines the device-specific config struct and passes size to virtioCoreR3Init()
2553 * to inform this.
2554 */
2555 pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
2556 pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
2557 pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
2558 pCfg->uCapLen = sizeof(VIRTIO_PCI_CAP_T);
2559 pCfg->uCapNext = pVirtio->fMsiSupport ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
2560 pCfg->uBar = VIRTIO_REGION_PCI_CAP;
2561 pCfg->uOffset = pVirtioCC->pIsrCap->uOffset + pVirtioCC->pIsrCap->uLength;
2562 pCfg->uOffset = RT_ALIGN_32(pCfg->uOffset, 4);
2563 pCfg->uLength = cbDevSpecificCfg;
2564 cbRegion += pCfg->uLength;
2565 SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocDeviceCap, 4);
2566 pVirtioCC->pDeviceCap = pCfg;
2567 }
2568 else
2569 Assert(pVirtio->LocDeviceCap.cbMmio == 0 && pVirtio->LocDeviceCap.cbPci == 0);
2570
2571 if (pVirtio->fMsiSupport)
2572 {
2573 PDMMSIREG aMsiReg;
2574 RT_ZERO(aMsiReg);
2575 aMsiReg.iMsixCapOffset = pCfg->uCapNext;
2576 aMsiReg.iMsixNextOffset = 0;
2577 aMsiReg.iMsixBar = VIRTIO_REGION_MSIX_CAP;
2578 aMsiReg.cMsixVectors = VBOX_MSIX_MAX_ENTRIES;
2579 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
2580 if (RT_FAILURE(rc))
2581 {
2582 /* See PDMDevHlp.cpp:pdmR3DevHlp_PCIRegisterMsi */
2583 LogFunc(("Failed to configure MSI-X (%Rrc). Reverting to INTx\n", rc));
2584 pVirtio->fMsiSupport = false;
2585 }
2586 else
2587 Log2Func(("Using MSI-X for guest driver notification\n"));
2588 }
2589 else
2590 LogFunc(("MSI-X not available for VBox, using INTx notification\n"));
2591
2592 /* Set offset to first capability and enable PCI dev capabilities */
2593 PDMPciDevSetCapabilityList(pPciDev, 0x40);
2594 PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST);
2595
2596 size_t cbSize = RTStrPrintf(pVirtioCC->szMmioName, sizeof(pVirtioCC->szMmioName), "%s (modern)", pcszInstance);
2597 if (cbSize == 0) /* size_t is unsigned; RTStrPrintf returns the length written */
2598 return PDMDEV_SET_ERROR(pDevIns, VERR_BUFFER_OVERFLOW, N_("virtio: out of memory allocating string")); /* can we put params in this error? */
2599
2600 cbSize = RTStrPrintf(pVirtioCC->szPortIoName, sizeof(pVirtioCC->szPortIoName), "%s (legacy)", pcszInstance);
2601 if (cbSize == 0)
2602 return PDMDEV_SET_ERROR(pDevIns, VERR_BUFFER_OVERFLOW, N_("virtio: out of memory allocating string")); /* can we put params in this error? */
2603
2604 if (pVirtio->fOfferLegacy)
2605 {
2606 /* As a transitional device that supports legacy VirtIO drivers, this VirtIO device generic implementation presents
2607 * legacy driver interface in I/O space at BAR0. The following maps the common (e.g. device independent)
2608 * dev config area as well as device-specific dev config area (whose size is passed to init function of this VirtIO
2609 * generic device code) for access via Port I/O, since legacy drivers (e.g. pre VirtIO 1.0) don't use MMIO callbacks.
2610 * (See VirtIO 1.1, Section 4.1.4.8).
2611 */
2612 rc = PDMDevHlpPCIIORegionCreateIo(pDevIns, VIRTIO_REGION_LEGACY_IO, sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T) + cbDevSpecificCfg,
2613 virtioLegacyIOPortOut, virtioLegacyIOPortIn, NULL /*pvUser*/, pVirtioCC->szPortIoName,
2614 NULL /*paExtDescs*/, &pVirtio->hLegacyIoPorts);
2615 AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register legacy config in I/O space at BAR0")));
2616 }
2617
2618 /* Note: The Linux driver at drivers/virtio/virtio_pci_modern.c tries to map at least a page for the
2619 * 'unknown' device-specific capability without querying the capability to determine size, so pad w/extra page.
2620 */
2621 rc = PDMDevHlpPCIIORegionCreateMmio(pDevIns, VIRTIO_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + VIRTIO_PAGE_SIZE, VIRTIO_PAGE_SIZE),
2622 PCI_ADDRESS_SPACE_MEM, virtioMmioWrite, virtioMmioRead, pVirtio,
2623 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
2624 pVirtioCC->szMmioName,
2625 &pVirtio->hMmioPciCap);
2626 AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Capabilities address space")));
2627 /*
2628 * Statistics.
2629 */
2630# ifdef VBOX_WITH_STATISTICS
2631 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsAllocated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2632 "Total number of allocated descriptor chains", "DescChainsAllocated");
2633 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsFreed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2634 "Total number of freed descriptor chains", "DescChainsFreed");
2635 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsIn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2636 "Total number of inbound segments", "DescChainsSegsIn");
2637 PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsOut, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
2638 "Total number of outbound segments", "DescChainsSegsOut");
2639 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatReadR3, STAMTYPE_PROFILE, "IO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3");
2640 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatReadR0, STAMTYPE_PROFILE, "IO/ReadR0", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R0");
2641 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatReadRC, STAMTYPE_PROFILE, "IO/ReadRC", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RC");
2642 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatWriteR3, STAMTYPE_PROFILE, "IO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3");
2643 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatWriteR0, STAMTYPE_PROFILE, "IO/WriteR0", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R0");
2644 PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatWriteRC, STAMTYPE_PROFILE, "IO/WriteRC", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RC");
2645# endif /* VBOX_WITH_STATISTICS */
2646
2647 return VINF_SUCCESS;
2648}
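
/*
 * Editor's note: an abbreviated sketch (hypothetical values and client struct names)
 * of a device constructing the core from its pfnConstruct, in the spirit of
 * DevVirtioNet. 0x1000 is shown purely as an example device id:
 *
 * @code
 *  VIRTIOPCIPARAMS PciParams;
 *  RT_ZERO(PciParams);
 *  PciParams.uDeviceId  = 0x1000;   // example: transitional network device
 *  PciParams.uClassBase = 0x02;     // example: network controller
 *  // ... remaining PCI identity fields ...
 *  rc = virtioCoreR3Init(pDevIns, pVirtio, pVirtioCC, &PciParams, pThis->szInstanceName,
 *                        fDevSpecificFeatures, fOfferLegacy,
 *                        &pThis->Config, sizeof(pThis->Config));
 * @endcode
 */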
2649
2650#else /* !IN_RING3 */
2651
2652/**
2653 * Sets up the core ring-0/raw-mode virtio bits.
2654 *
2655 * @returns VBox status code.
2656 * @param pDevIns The device instance.
2657 * @param pVirtio Pointer to the shared virtio state. This must be the first
2658 * member in the shared device instance data!
2659 */
2660int virtioCoreRZInit(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
2661{
2662 AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
2663 int rc;
2664#ifdef FUTURE_OPTIMIZATION
2665 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
2666 AssertRCReturn(rc, rc);
2667#endif
2668 rc = PDMDevHlpMmioSetUpContext(pDevIns, pVirtio->hMmioPciCap, virtioMmioWrite, virtioMmioRead, pVirtio);
2669 AssertRCReturn(rc, rc);
2670
2671 if (pVirtio->fOfferLegacy)
2672 {
2673 rc = PDMDevHlpIoPortSetUpContext(pDevIns, pVirtio->hLegacyIoPorts, virtioLegacyIOPortOut, virtioLegacyIOPortIn, NULL /*pvUser*/);
2674 AssertRCReturn(rc, rc);
2675 }
2676 return rc;
2677}
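
/*
 * Editor's note: a minimal sketch of the ring-0/raw-mode construction path
 * (hypothetical device code); the R3 constructor has already laid out the shared
 * instance data that this re-registers handlers against:
 *
 * @code
 *  static DECLCALLBACK(int) exampleDevRZConstruct(PPDMDEVINS pDevIns)
 *  {
 *      PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
 *      return virtioCoreRZInit(pDevIns, pVirtio);
 *  }
 * @endcode
 */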
2678
2679#endif /* !IN_RING3 */
2680