VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/VirtioCore.h@ 91999

Last change on this file since 91999 was 91703, checked in by vboxsync, 3 years ago:

DevVirtioNet_1_0.cpp: Convert VirtIO to be a 'transitional' device that handles both legacy (0.9) guests and modern (1.0+) guests. Various other small improvements and reduced/improved formatting of logging. See BugRef:8561, Comment #137.

/* $Id: VirtioCore.h 91703 2021-10-13 02:24:30Z vboxsync $ */

/** @file
 * VirtioCore.h - Virtio Declarations
 */

/*
 * Copyright (C) 2009-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef VBOX_INCLUDED_SRC_VirtIO_VirtioCore_h
#define VBOX_INCLUDED_SRC_VirtIO_VirtioCore_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <iprt/ctype.h>
#include <iprt/sg.h>
#include <iprt/types.h>

#ifdef LOG_ENABLED
# define VIRTIO_HEX_DUMP(logLevel, pv, cb, base, title) \
    do { \
        if (LogIsItEnabled(logLevel, LOG_GROUP)) \
            virtioCoreHexDump((pv), (cb), (base), (title)); \
    } while (0)
#else
# define VIRTIO_HEX_DUMP(logLevel, pv, cb, base, title) do { } while (0)
#endif

/** Pointer to the shared VirtIO state. */
typedef struct VIRTIOCORE *PVIRTIOCORE;
/** Pointer to the ring-3 VirtIO state. */
typedef struct VIRTIOCORER3 *PVIRTIOCORER3;
/** Pointer to the ring-0 VirtIO state. */
typedef struct VIRTIOCORER0 *PVIRTIOCORER0;
/** Pointer to the raw-mode VirtIO state. */
typedef struct VIRTIOCORERC *PVIRTIOCORERC;
/** Pointer to the instance data for the current context. */
typedef CTX_SUFF(PVIRTIOCORE) PVIRTIOCORECC;

#define VIRTIO_MAX_VIRTQ_NAME_SIZE          32      /**< Maximum length of a queue name           */
#define VIRTQ_SIZE                        1024      /**< Max size (# entries) of a virtq          */
#define VIRTQ_MAX_COUNT                     24      /**< Max queues we allow guest to create      */
#define VIRTIO_NOTIFY_OFFSET_MULTIPLIER      2      /**< VirtIO Notify Cap. MMIO config param     */
#define VIRTIO_REGION_LEGACY_IO              0      /**< BAR for VirtIO legacy drivers MBZ        */
#define VIRTIO_REGION_PCI_CAP                2      /**< BAR for VirtIO Cap. MMIO (impl specific) */
#define VIRTIO_REGION_MSIX_CAP               0      /**< BAR for MSI-X handling                   */
#define VIRTIO_PAGE_SIZE                  4096      /**< Page size used by VirtIO specification   */

/* Note: The VirtIO specification, particularly rev. 0.95, clarified in rev. 1.0 for transitional devices,
   says the page size used for Queue Size calculations is usually 4096 bytes, but is dependent on the
   transport. An appendix of the 0.95 spec describes an 'mmio device' (not implemented by the VirtualBox
   legacy device), for which the guest must report the page size. For now the page size is set to a
   static 4096, following the original VirtualBox legacy VirtIO implementation, which tied it to
   PAGE_SIZE; this appears to work (or at least is good enough for most practical purposes). */

/** The following virtioCoreGCPhysChain*() functions mimic the functionality of the related RT s/g functions,
 * except they work with the data type GCPhys rather than void *.
 */
typedef struct VIRTIOSGSEG                                      /**< An S/G entry                              */
{
    RTGCPHYS        GCPhys;                                     /**< Pointer to the segment buffer             */
    size_t          cbSeg;                                      /**< Size of the segment buffer                */
} VIRTIOSGSEG;

typedef VIRTIOSGSEG *PVIRTIOSGSEG, **PPVIRTIOSGSEG;
typedef const VIRTIOSGSEG *PCVIRTIOSGSEG;

typedef struct VIRTIOSGBUF
{
    PVIRTIOSGSEG    paSegs;                                     /**< Pointer to the scatter/gather array       */
    unsigned        cSegs;                                      /**< Number of segs in scatter/gather array    */
    unsigned        idxSeg;                                     /**< Current segment we are in                 */
    RTGCPHYS        GCPhysCur;                                  /**< Ptr to byte within the current seg        */
    size_t          cbSegLeft;                                  /**< # of bytes left in the current segment    */
} VIRTIOSGBUF;

typedef VIRTIOSGBUF *PVIRTIOSGBUF, **PPVIRTIOSGBUF;
typedef const VIRTIOSGBUF *PCVIRTIOSGBUF;
/**
 * VirtIO buffers are descriptor chains (scatter-gather vectors). Each buffer is described
 * by the index of its head descriptor, which optionally chains to another descriptor
 * and so on.
 *
 * Each descriptor, a [len, GCPhys] pair in the chain, represents either an OUT segment (e.g. guest-to-host)
 * or an IN segment (host-to-guest). A VIRTQBUF is created and returned from a call to
 * virtioCoreR3VirtqAvailBufPeek() or virtioCoreR3VirtqAvailBufGet(). That function consolidates
 * the VirtIO descriptor chain into a representation where pSgPhysSend is a GCPhys s/g buffer containing
 * all of the OUT descriptors and pSgPhysReturn is a GCPhys s/g buffer containing all of the IN descriptors
 * to be filled with data on the host to return to the guest.
 */
typedef struct VIRTQBUF
{
    uint32_t            u32Magic;                               /**< Magic value, VIRTQBUF_MAGIC.              */
    uint16_t            uVirtq;                                 /**< VirtIO index of associated virtq          */
    uint16_t            pad;
    uint32_t volatile   cRefs;                                  /**< Reference counter.                        */
    uint32_t            uHeadIdx;                               /**< Head idx of associated desc chain         */
    size_t              cbPhysSend;                             /**< Total size of src buffer                  */
    PVIRTIOSGBUF        pSgPhysSend;                            /**< Phys S/G buf for data from guest          */
    size_t              cbPhysReturn;                           /**< Total size of dst buffer                  */
    PVIRTIOSGBUF        pSgPhysReturn;                          /**< Phys S/G buf to store result for guest    */

    /** @name Internal (bird combined 5 allocations into a single), fingers off.
     * @{ */
    VIRTIOSGBUF         SgBufIn;
    VIRTIOSGBUF         SgBufOut;
    VIRTIOSGSEG         aSegsIn[VIRTQ_SIZE];
    VIRTIOSGSEG         aSegsOut[VIRTQ_SIZE];
    /** @} */
} VIRTQBUF_T;

/** Pointers to a Virtio descriptor chain. */
typedef VIRTQBUF_T *PVIRTQBUF, **PPVIRTQBUF;

/** Magic value for VIRTQBUF_T::u32Magic. */
#define VIRTQBUF_MAGIC      UINT32_C(0x19600219)
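
/*
 * Usage sketch (illustrative only, not part of the original header): the VIRTQBUF
 * lifecycle described above, assuming a hypothetical ring-3 device with access to
 * this header's API. The reference returned by virtioCoreR3VirtqAvailBufGet()
 * must be released with virtioCoreR3VirtqBufRelease() when done.
 */
static int myDevProcessOneBuf(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
{
    PVIRTQBUF pVirtqBuf = NULL;
    int rc = virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtqNbr, &pVirtqBuf, true /*fRemove*/);
    if (rc == VERR_NOT_AVAILABLE)
        return rc;                                      /* Queue is empty. */
    AssertRCReturn(rc, rc);

    /* pSgPhysSend holds the guest's OUT data; pSgPhysReturn receives the IN (response) data. */
    Log(("%u bytes of OUT data, %u bytes of IN (return) space\n",
         (unsigned)pVirtqBuf->cbPhysSend, (unsigned)pVirtqBuf->cbPhysReturn));

    virtioCoreR3VirtqBufRelease(pVirtio, pVirtqBuf);
    return VINF_SUCCESS;
}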

typedef struct VIRTIOPCIPARAMS
{
    uint16_t        uDeviceId;                                  /**< PCI Cfg Device ID                         */
    uint16_t        uClassBase;                                 /**< PCI Cfg Base Class                        */
    uint16_t        uClassSub;                                  /**< PCI Cfg Subclass                          */
    uint16_t        uClassProg;                                 /**< PCI Cfg Programming Interface Class       */
    uint16_t        uSubsystemId;                               /**< PCI Cfg Card Manufacturer Vendor ID       */
    uint16_t        uInterruptLine;                             /**< PCI Cfg Interrupt line                    */
    uint16_t        uInterruptPin;                              /**< PCI Cfg Interrupt pin                     */
} VIRTIOPCIPARAMS, *PVIRTIOPCIPARAMS;

/* Virtio Platform Independent Reserved Feature Bits (see section 6 of the 1.1 specification) */

#define VIRTIO_F_NOTIFY_ON_EMPTY            RT_BIT_64(24)       /**< Legacy feature: Force intr if no AVAIL    */
#define VIRTIO_F_ANY_LAYOUT                 RT_BIT_64(27)       /**< Doc bug: Goes under two names in spec     */
#define VIRTIO_F_INDIRECT_DESC              RT_BIT_64(28)       /**< Allow descs to point to list of descs     */
#define VIRTIO_F_RING_INDIRECT_DESC         RT_BIT_64(28)       /**< Doc bug: Goes under two names in spec     */
#define VIRTIO_F_EVENT_IDX                  RT_BIT_64(29)       /**< Allow notification disable for n elems    */
#define VIRTIO_F_RING_EVENT_IDX             RT_BIT_64(29)       /**< Doc bug: Goes under two names in spec     */
#define VIRTIO_F_BAD_FEATURE                RT_BIT_64(30)       /**< QEMU kludge. UNUSED as of >= VirtIO 1.0   */
#define VIRTIO_F_VERSION_1                  RT_BIT_64(32)       /**< Required feature bit for 1.0 devices      */
#define VIRTIO_F_ACCESS_PLATFORM            RT_BIT_64(33)       /**< Funky guest mem access   (VirtIO 1.1 NYI) */
#define VIRTIO_F_RING_PACKED                RT_BIT_64(34)       /**< Packed Queue Layout      (VirtIO 1.1 NYI) */
#define VIRTIO_F_IN_ORDER                   RT_BIT_64(35)       /**< Honor guest buf order    (VirtIO 1.1 NYI) */
#define VIRTIO_F_ORDER_PLATFORM             RT_BIT_64(36)       /**< Host mem access honored  (VirtIO 1.1 NYI) */
#define VIRTIO_F_SR_IOV                     RT_BIT_64(37)       /**< Dev Single Root I/O virt (VirtIO 1.1 NYI) */
#define VIRTIO_F_NOTIFICAITON_DATA          RT_BIT_64(38)       /**< Driver passes extra data (VirtIO 1.1 NYI) */

#define VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED         ( 0 )   /**< TBD: Add VIRTIO_F_INDIRECT_DESC           */
#define VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED  ( 0 )   /**< Only offered to legacy drivers            */
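
/*
 * Usage sketch (illustrative only): how a device-specific implementation might compose
 * the feature bitmask it offers via virtioCoreR3Init(). MYDEV_F_SOME_FEATURE is a
 * hypothetical device-specific bit; VIRTIO_F_VERSION_1 handling is the core's business.
 */
#if 0 /* illustration only */
# define MYDEV_F_SOME_FEATURE   RT_BIT_64(5)    /* hypothetical device-specific feature bit */
uint64_t const fMyDevFeatures = VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED | MYDEV_F_SOME_FEATURE;
/* ... later passed as the fDevSpecificFeatures argument to virtioCoreR3Init(). */
#endif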

#define VIRTIO_ISR_VIRTQ_INTERRUPT          RT_BIT_32(0)        /**< Virtq interrupt bit of ISR register       */
#define VIRTIO_ISR_DEVICE_CONFIG            RT_BIT_32(1)        /**< Device configuration changed bit of ISR   */
#define DEVICE_PCI_NETWORK_SUBSYSTEM        1                   /**< Network Card, per VirtIO legacy spec.     */
#define DEVICE_PCI_VENDOR_ID_VIRTIO         0x1AF4              /**< Guest driver locates dev via (mandatory)  */
#define DEVICE_PCI_REVISION_ID_VIRTIO       0                   /**< VirtIO Modern Transitional driver rev MBZ */

/** Reserved (*negotiated*) Feature Bits (e.g. device independent features, VirtIO 1.0 spec, section 6) */

#define VIRTIO_MSI_NO_VECTOR                0xffff              /**< Vector value to disable MSI for queue     */

/** Device Status field constants (from Virtio 1.0 spec) */
#define VIRTIO_STATUS_ACKNOWLEDGE           0x01                /**< Guest driver: Located this VirtIO device  */
#define VIRTIO_STATUS_DRIVER                0x02                /**< Guest driver: Can drive this VirtIO dev.  */
#define VIRTIO_STATUS_DRIVER_OK             0x04                /**< Guest driver: Driver set-up and ready     */
#define VIRTIO_STATUS_FEATURES_OK           0x08                /**< Guest driver: Feature negotiation done    */
#define VIRTIO_STATUS_FAILED                0x80                /**< Guest driver: Fatal error, gave up        */
#define VIRTIO_STATUS_DEVICE_NEEDS_RESET    0x40                /**< Device experienced unrecoverable error    */

typedef enum VIRTIOVMSTATECHANGED
{
    kvirtIoVmStateChangedInvalid = 0,
    kvirtIoVmStateChangedReset,
    kvirtIoVmStateChangedSuspend,
    kvirtIoVmStateChangedPowerOff,
    kvirtIoVmStateChangedResume,
    kvirtIoVmStateChangedFor32BitHack = 0x7fffffff
} VIRTIOVMSTATECHANGED;

/* Virtio Device PCI Capabilities type codes */
#define VIRTIO_PCI_CAP_COMMON_CFG           1                   /**< Common configuration PCI capability ID    */
#define VIRTIO_PCI_CAP_NOTIFY_CFG           2                   /**< Notification area PCI capability ID       */
#define VIRTIO_PCI_CAP_ISR_CFG              3                   /**< ISR PCI capability id                     */
#define VIRTIO_PCI_CAP_DEVICE_CFG           4                   /**< Device-specific PCI cfg capability ID     */
#define VIRTIO_PCI_CAP_PCI_CFG              5                   /**< PCI CFG capability ID                     */

#define VIRTIO_PCI_CAP_ID_VENDOR            0x09                /**< Vendor-specific PCI CFG Device Cap. ID    */

/**
 * The following is the PCI capability struct common to all VirtIO capability types.
 */
typedef struct virtio_pci_cap
{
    /* All little-endian */
    uint8_t     uCapVndr;                                       /**< Generic PCI field: PCI_CAP_ID_VNDR        */
    uint8_t     uCapNext;                                       /**< Generic PCI field: next ptr.              */
    uint8_t     uCapLen;                                        /**< Generic PCI field: capability length      */
    uint8_t     uCfgType;                                       /**< Identifies the structure.                 */
    uint8_t     uBar;                                           /**< Where to find it.                         */
    uint8_t     uPadding[3];                                    /**< Pad to full dword.                        */
    uint32_t    uOffset;                                        /**< Offset within bar.  (L.E.)                */
    uint32_t    uLength;                                        /**< Length of struct, in bytes. (L.E.)        */
} VIRTIO_PCI_CAP_T, *PVIRTIO_PCI_CAP_T;

/**
 * VirtIO Legacy Capabilities' related MMIO-mapped structs (see virtio-0.9.5 spec)
 *
 * Note: virtio_pci_device_cap is dev-specific, implemented by client. Definition unknown here.
 */
typedef struct virtio_legacy_pci_common_cfg
{
    /* Device-specific fields */
    uint32_t    uDeviceFeatures;                                /**< RO (device reports features to driver)    */
    uint32_t    uDriverFeatures;                                /**< RW (driver-accepted device features)      */
    uint32_t    uVirtqPfn;                                      /**< RW (driver writes queue page number)      */
    uint16_t    uQueueSize;                                     /**< RW (queue size, 0 - 2^n)                  */
    uint16_t    uVirtqSelect;                                   /**< RW (selects queue focus for these fields) */
    uint16_t    uQueueNotify;                                   /**< RO (offset into virtqueue; see spec)      */
    uint8_t     fDeviceStatus;                                  /**< RW (driver writes device status, 0=reset) */
    uint8_t     fIsrStatus;                                     /**< RW (driver writes ISR status, 0=reset)    */
//  uint16_t    uMsixConfig;                                    /**< RW (driver sets MSI-X config vector)      */
//  uint16_t    uMsixVector;                                    /**< RW (driver sets MSI-X queue vector)       */
} VIRTIO_LEGACY_PCI_COMMON_CFG_T, *PVIRTIO_LEGACY_PCI_COMMON_CFG_T;

/**
 * VirtIO 1.0 Capabilities' related MMIO-mapped structs:
 *
 * Note: virtio_pci_device_cap is dev-specific, implemented by client. Definition unknown here.
 */
typedef struct virtio_pci_common_cfg
{
    /* Device-specific fields */
    uint32_t    uDeviceFeaturesSelect;                          /**< RW (driver selects device features)       */
    uint32_t    uDeviceFeatures;                                /**< RO (device reports features to driver)    */
    uint32_t    uDriverFeaturesSelect;                          /**< RW (driver selects driver features)       */
    uint32_t    uDriverFeatures;                                /**< RW (driver-accepted device features)      */
    uint16_t    uMsixConfig;                                    /**< RW (driver sets MSI-X config vector)      */
    uint16_t    uNumVirtqs;                                     /**< RO (device specifies max queues)          */
    uint8_t     fDeviceStatus;                                  /**< RW (driver writes device status, 0=reset) */
    uint8_t     uConfigGeneration;                              /**< RO (device changes when changing configs) */

    /* Virtq-specific fields (values reflect, via MMIO, info related to the queue indicated by uVirtqSelect). */
    uint16_t    uVirtqSelect;                                   /**< RW (selects queue focus for these fields) */
    uint16_t    uQueueSize;                                     /**< RW (queue size, 0 - 2^n)                  */
    uint16_t    uMsixVector;                                    /**< RW (driver selects MSI-X queue vector)    */
    uint16_t    uEnable;                                        /**< RW (driver controls usability of queue)   */
    uint16_t    uNotifyOffset;                                  /**< RO (offset into virtqueue; see spec)      */
    uint64_t    GCPhysVirtqDesc;                                /**< RW (driver writes desc table phys addr)   */
    uint64_t    GCPhysVirtqAvail;                               /**< RW (driver writes avail ring phys addr)   */
    uint64_t    GCPhysVirtqUsed;                                /**< RW (driver writes used ring phys addr)    */
} VIRTIO_PCI_COMMON_CFG_T, *PVIRTIO_PCI_COMMON_CFG_T;

typedef struct virtio_pci_notify_cap
{
    struct virtio_pci_cap pciCap;                               /**< Notification MMIO mapping capability      */
    uint32_t uNotifyOffMultiplier;                              /**< notify_off_multiplier                     */
} VIRTIO_PCI_NOTIFY_CAP_T, *PVIRTIO_PCI_NOTIFY_CAP_T;

typedef struct virtio_pci_cfg_cap
{
    struct virtio_pci_cap pciCap;                               /**< Cap. defines the BAR/off/len to access    */
    uint8_t uPciCfgData[4];                                     /**< I/O buf for above cap.                    */
} VIRTIO_PCI_CFG_CAP_T, *PVIRTIO_PCI_CFG_CAP_T;

/**
 * PCI capability data locations (PCI CFG and MMIO).
 */
typedef struct VIRTIO_PCI_CAP_LOCATIONS_T
{
    uint16_t    offMmio;
    uint16_t    cbMmio;
    uint16_t    offPci;
    uint16_t    cbPci;
} VIRTIO_PCI_CAP_LOCATIONS_T;

typedef struct VIRTQUEUE
{
    RTGCPHYS    GCPhysVirtqDesc;                                /**< (MMIO) PhysAdr per-Q desc structs   GUEST */
    RTGCPHYS    GCPhysVirtqAvail;                               /**< (MMIO) PhysAdr per-Q avail structs  GUEST */
    RTGCPHYS    GCPhysVirtqUsed;                                /**< (MMIO) PhysAdr per-Q used structs   GUEST */
    uint16_t    uMsixVector;                                    /**< (MMIO) Per-queue vector for MSI-X   GUEST */
    uint16_t    uEnable;                                        /**< (MMIO) Per-queue enable             GUEST */
    uint16_t    uNotifyOffset;                                  /**< (MMIO) per-Q notify offset           HOST */
    uint16_t    uQueueSize;                                     /**< (MMIO) Per-queue size          HOST/GUEST */
    uint16_t    uAvailIdxShadow;                                /**< Consumer's position in avail ring         */
    uint16_t    uUsedIdxShadow;                                 /**< Consumer's position in used ring          */
    uint16_t    uVirtq;                                         /**< Index of this queue                       */
    char        szName[32];                                     /**< Dev-specific name of queue                */
    bool        fUsedRingEvent;                                 /**< Flags if used idx to notify guest reached */
    uint8_t     padding[3];
} VIRTQUEUE, *PVIRTQUEUE;

/**
 * The core/common state of the VirtIO PCI devices, shared edition.
 */
typedef struct VIRTIOCORE
{
    char                        szInstance[16];                 /**< Instance name, e.g. "VIRTIOSCSI0"         */
    PPDMDEVINS                  pDevInsR0;                      /**< Client device instance                    */
    PPDMDEVINS                  pDevInsR3;                      /**< Client device instance                    */
    VIRTQUEUE                   aVirtqueues[VIRTQ_MAX_COUNT];   /**< (MMIO) VirtIO contexts for queues         */
    uint64_t                    uDeviceFeatures;                /**< (MMIO) Host features offered         HOST */
    uint64_t                    uDriverFeatures;                /**< (MMIO) Host features accepted       GUEST */
    uint32_t                    uDeviceFeaturesSelect;          /**< (MMIO) hi/lo select uDeviceFeatures GUEST */
    uint32_t                    uDriverFeaturesSelect;          /**< (MMIO) hi/lo select uDriverFeatures GUEST */
    uint32_t                    uMsixConfig;                    /**< (MMIO) MSI-X vector                 GUEST */
    uint8_t                     fDeviceStatus;                  /**< (MMIO) Device Status                GUEST */
    uint8_t                     fPrevDeviceStatus;              /**< (MMIO) Prev Device Status           GUEST */
    uint8_t                     uConfigGeneration;              /**< (MMIO) Device config sequencer       HOST */
    uint16_t                    uQueueNotify;                   /**< Caches queue idx in legacy mode     GUEST */
    bool                        fGenUpdatePending;              /**< If set, update cfg gen after driver reads */
    uint8_t                     uPciCfgDataOff;                 /**< Offset to PCI configuration data area     */
    uint8_t                     uISR;                           /**< Interrupt Status Register.                */
    uint8_t                     fMsiSupport;                    /**< Flag set if using MSI instead of ISR      */
    uint8_t                     fLegacyDriver;                  /**< Set if guest driver < VirtIO 1.0          */
    uint16_t                    uVirtqSelect;                   /**< (MMIO) queue selector               GUEST */

    /** @name The locations of the capability structures in PCI config space and the BAR.
     * @{ */
    VIRTIO_PCI_CAP_LOCATIONS_T  LocPciCfgCap;                   /**< VIRTIO_PCI_CFG_CAP_T                      */
    VIRTIO_PCI_CAP_LOCATIONS_T  LocNotifyCap;                   /**< VIRTIO_PCI_NOTIFY_CAP_T                   */
    VIRTIO_PCI_CAP_LOCATIONS_T  LocCommonCfgCap;                /**< VIRTIO_PCI_CAP_T                          */
    VIRTIO_PCI_CAP_LOCATIONS_T  LocIsrCap;                      /**< VIRTIO_PCI_CAP_T                          */
    VIRTIO_PCI_CAP_LOCATIONS_T  LocDeviceCap;                   /**< VIRTIO_PCI_CAP_T + custom data.           */
    /** @} */

    IOMMMIOHANDLE               hMmioPciCap;                    /**< MMIO handle of PCI cap. region (\#2)      */
    IOMIOPORTHANDLE             hLegacyIoPorts;                 /**< Handle of legacy I/O port range.          */

#ifdef VBOX_WITH_STATISTICS
    /** @name Statistics
     * @{ */
    STAMCOUNTER                 StatDescChainsAllocated;
    STAMCOUNTER                 StatDescChainsFreed;
    STAMCOUNTER                 StatDescChainsSegsIn;
    STAMCOUNTER                 StatDescChainsSegsOut;
    STAMPROFILEADV              StatReadR3;                     /**< I/O port and MMIO R3 Read profiling       */
    STAMPROFILEADV              StatReadR0;                     /**< I/O port and MMIO R0 Read profiling       */
    STAMPROFILEADV              StatReadRC;                     /**< I/O port and MMIO RC Read profiling       */
    STAMPROFILEADV              StatWriteR3;                    /**< I/O port and MMIO R3 Write profiling      */
    STAMPROFILEADV              StatWriteR0;                    /**< I/O port and MMIO R0 Write profiling      */
    STAMPROFILEADV              StatWriteRC;                    /**< I/O port and MMIO RC Write profiling      */
    /** @} */
#endif
} VIRTIOCORE;

#define MAX_NAME 64

/**
 * The core/common state of the VirtIO PCI devices, ring-3 edition.
 */
typedef struct VIRTIOCORER3
{
    /** @name Callbacks filled by the device before calling virtioCoreR3Init.
     * @{ */
    /**
     * Implementation-specific client callback to notify client of significant device status
     * changes.
     *
     * @param pVirtio    Pointer to the shared virtio state.
     * @param pVirtioCC  Pointer to the ring-3 virtio state.
     * @param fDriverOk  True if guest driver is okay (thus queues, etc... are
     *                   valid)
     */
    DECLCALLBACKMEMBER(void, pfnStatusChanged,(PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, uint32_t fDriverOk));

    /**
     * Implementation-specific client callback to access VirtIO Device-specific capabilities
     * (other VirtIO capabilities and features are handled in VirtIO implementation)
     *
     * @param pDevIns    The device instance.
     * @param offCap     Offset within device specific capabilities struct.
     * @param pvBuf      Buffer in which to save read data.
     * @param cbToRead   Number of bytes to read.
     */
    DECLCALLBACKMEMBER(int, pfnDevCapRead,(PPDMDEVINS pDevIns, uint32_t offCap, void *pvBuf, uint32_t cbToRead));

    /**
     * Implementation-specific client callback to access VirtIO Device-specific capabilities
     * (other VirtIO capabilities and features are handled in VirtIO implementation)
     *
     * @param pDevIns    The device instance.
     * @param offCap     Offset within device specific capabilities struct.
     * @param pvBuf      Buffer with the bytes to write.
     * @param cbWrite    Number of bytes to write.
     */
    DECLCALLBACKMEMBER(int, pfnDevCapWrite,(PPDMDEVINS pDevIns, uint32_t offCap, const void *pvBuf, uint32_t cbWrite));

    /**
     * When guest-to-host queue notifications are enabled, the guest driver notifies the host
     * that the avail queue has buffers, and this callback informs the client.
     *
     * @param pDevIns    The device instance.
     * @param pVirtio    Pointer to the shared virtio state.
     * @param uVirtqNbr  Index of the notified queue
     */
    DECLCALLBACKMEMBER(void, pfnVirtqNotified,(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr));

    /** @} */

    R3PTRTYPE(PVIRTIO_PCI_CFG_CAP_T)    pPciCfgCap;             /**< Pointer to struct in PCI config area.     */
    R3PTRTYPE(PVIRTIO_PCI_NOTIFY_CAP_T) pNotifyCap;             /**< Pointer to struct in PCI config area.     */
    R3PTRTYPE(PVIRTIO_PCI_CAP_T)        pCommonCfgCap;          /**< Pointer to struct in PCI config area.     */
    R3PTRTYPE(PVIRTIO_PCI_CAP_T)        pIsrCap;                /**< Pointer to struct in PCI config area.     */
    R3PTRTYPE(PVIRTIO_PCI_CAP_T)        pDeviceCap;             /**< Pointer to struct in PCI config area.     */

    uint32_t                            cbDevSpecificCfg;       /**< Size of client's dev-specific config data */
    R3PTRTYPE(uint8_t *)                pbDevSpecificCfg;       /**< Pointer to client's struct                */
    R3PTRTYPE(uint8_t *)                pbPrevDevSpecificCfg;   /**< Previous read dev-specific cfg of client  */
    bool                                fGenUpdatePending;      /**< If set, update cfg gen after driver reads */
    char                                pcszMmioName[MAX_NAME]; /**< MMIO mapping name                         */
    char                                pcszPortIoName[MAX_NAME]; /**< PORT mapping name                       */
} VIRTIOCORER3;

/**
 * The core/common state of the VirtIO PCI devices, ring-0 edition.
 */
typedef struct VIRTIOCORER0
{
    /**
     * When guest-to-host queue notifications are enabled, the guest driver notifies the host
     * that the avail queue has buffers, and this callback informs the client.
     *
     * @param pDevIns    The device instance.
     * @param pVirtio    Pointer to the shared virtio state.
     * @param uVirtqNbr  Index of the notified queue
     */
    DECLCALLBACKMEMBER(void, pfnVirtqNotified,(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr));
} VIRTIOCORER0;

/**
 * The core/common state of the VirtIO PCI devices, raw-mode edition.
 */
typedef struct VIRTIOCORERC
{
    uint64_t                    uUnusedAtTheMoment;
} VIRTIOCORERC;

/** @typedef VIRTIOCORECC
 * The instance data for the current context. */
typedef CTX_SUFF(VIRTIOCORE) VIRTIOCORECC;

/** @name API for VirtIO parent device
 * @{ */

/**
 * Sets up the PCI device controller and VirtIO state.
 *
 * This should be called from PDMDEVREGR3::pfnConstruct.
 *
 * @param pDevIns               The device instance.
 * @param pVirtio               Pointer to the shared virtio state. This
 *                              must be the first member in the shared
 *                              device instance data!
 * @param pVirtioCC             Pointer to the ring-3 virtio state. This
 *                              must be the first member in the ring-3
 *                              device instance data!
 * @param pPciParams            Values to populate industry-standard PCI Configuration Space data structure.
 * @param pcszInstance          Device instance name (format-specifier).
 * @param fDevSpecificFeatures  VirtIO device-specific features offered by
 *                              client.
 * @param pvDevSpecificCfg      Address of client's dev-specific
 *                              configuration struct.
 * @param cbDevSpecificCfg      Size of virtio_pci_device_cap device-specific struct.
 */
int virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
                     PVIRTIOPCIPARAMS pPciParams, const char *pcszInstance,
                     uint64_t fDevSpecificFeatures, void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg);
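
/*
 * Usage sketch (illustrative only): calling virtioCoreR3Init() from a device's
 * pfnConstruct. The callback members of pVirtioCC (pfnStatusChanged, pfnVirtqNotified,
 * etc.) are assumed to have been filled in first. All values below are placeholders.
 */
static int myDevConstructExample(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
                                 void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg)
{
    VIRTIOPCIPARAMS PciParams;
    RT_ZERO(PciParams);
    PciParams.uDeviceId     = 0x1000;                           /* e.g. a transitional virtio-net ID (placeholder) */
    PciParams.uSubsystemId  = DEVICE_PCI_NETWORK_SUBSYSTEM;     /* device type per legacy spec                     */
    PciParams.uClassBase    = 0x02;                             /* network controller (placeholder)                */
    PciParams.uInterruptPin = 1;

    int rc = virtioCoreR3Init(pDevIns, pVirtio, pVirtioCC, &PciParams, "MYDEV0",
                              0 /*fDevSpecificFeatures*/, pvDevSpecificCfg, cbDevSpecificCfg);
    AssertRCReturn(rc, rc);

    /* Attach the queue(s) the device type defines; queue numbers are spec-defined. */
    return virtioCoreR3VirtqAttach(pVirtio, 0 /*uVirtqNbr*/, "requestq");
}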

/**
 * Initiate orderly reset procedure. This is an exposed API for clients that might need it.
 * Invoked by client to reset the device and driver (see VirtIO 1.0 section 2.1.1/2.1.2).
 *
 * @param pVirtio    Pointer to the virtio state.
 */
void virtioCoreResetAll(PVIRTIOCORE pVirtio);

/**
 * 'Attaches' the host device-specific implementation's queue state to the host VirtIO core
 * virtqueue management infrastructure, informing the virtio core of the name of the
 * queue associated with the queue number. uVirtqNbr is used as the 'handle' for virtqueues
 * in this API (and is opaquely the index into the VirtIO core's array of queue state).
 *
 * Virtqueue numbers are VirtIO specification defined (i.e. they are unique within each
 * VirtIO device type).
 *
 * @param pVirtio    Pointer to the shared virtio state.
 * @param uVirtqNbr  Virtq number
 * @param pcszName   Name to give queue
 *
 * @returns VBox status code.
 */
int virtioCoreR3VirtqAttach(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, const char *pcszName);

/**
 * Enables or disables a virtq.
 *
 * @param pVirtio    Pointer to the shared virtio state.
 * @param uVirtqNbr  Virtq number
 * @param fEnable    Whether to enable or disable the virtq
 */
void virtioCoreVirtqEnable(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, bool fEnable);

/**
 * Enable or disable notification for the specified queue.
 *
 * With notification enabled, the guest driver notifies the host device (via MMIO
 * to the queue notification offset described in VirtIO 1.0, 4.1.4.4 "Notification Structure Layout")
 * whenever the guest driver adds a new entry to the avail ring of the respective queue.
 *
 * Note: In the VirtIO world, the device sets flags in the used ring to communicate to the driver how to
 * handle notifications for the avail ring, and the driver sets flags in the avail ring to communicate
 * to the device how to handle sending interrupts for the used ring.
 *
 * @param pVirtio    Pointer to the shared virtio state.
 * @param uVirtqNbr  Virtq number
 * @param fEnable    Selects notification mode (enabled or disabled)
 */
void virtioCoreVirtqEnableNotify(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, bool fEnable);

/**
 * Notifies guest (via ISR or MSI-X) of device configuration change.
 *
 * @param pVirtio    Pointer to the shared virtio state.
 */
void virtioCoreNotifyConfigChanged(PVIRTIOCORE pVirtio);

/**
 * Displays the VirtIO spec-related features offered by the core component,
 * as well as which features have been negotiated and accepted or declined by the guest driver,
 * providing a summary view of the configuration the device is operating with.
 *
 * @param pVirtio    Pointer to the shared virtio state.
 * @param pHlp       Pointer to the debug info hlp struct
 */
void virtioCorePrintFeatures(VIRTIOCORE *pVirtio, PCDBGFINFOHLP pHlp);

/**
 * Debugging assist feature that displays the state of the VirtIO core code, which includes
 * an overview of the state of all of the queues.
 *
 * This can be invoked when running the VirtualBox debugger, or from the command line
 * using the command: "VBoxManage debugvm <VM name or id> info <device name> [args]"
 *
 * Example: VBoxManage debugvm myVnetVm info "virtio-net" help
 *
 * This is implemented currently to be invoked by the inheriting device-specific code
 * (see DevVirtioNet for an example, which receives the debugvm callback directly).
 * DevVirtioNet lists the available sub-options if no arguments are provided. In that
 * example this virtq info related function is invoked hierarchically when virtio-net
 * displays its device-specific queue info.
 *
 * @param pDevIns    The device instance.
 * @param pHlp       Pointer to the debug info hlp struct
 * @param pszArgs    Arguments to function
 * @param uVirtqNbr  Virtq number
 */
void virtioCoreR3VirtqInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs, int uVirtqNbr);

/**
 * Returns the number of avail bufs in the virtq.
 *
 * @param pDevIns    The device instance.
 * @param pVirtio    Pointer to the shared virtio state.
 * @param uVirtqNbr  Virtqueue to return the count of buffers available for.
 */
uint16_t virtioCoreVirtqAvailBufCount(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);

/**
 * This function is identical to virtioCoreR3VirtqAvailBufGet(), except it doesn't 'consume'
 * the buffer from the avail ring of the virtq. The peek operation becomes identical to a get
 * operation if virtioCoreR3VirtqAvailBufNext() is called to consume the buffer from the avail ring,
 * at which point virtioCoreR3VirtqUsedBufPut() must be called to complete the roundtrip
 * transaction by putting the descriptor on the used ring.
 *
 * @param pDevIns     The device instance.
 * @param pVirtio     Pointer to the shared virtio state.
 * @param uVirtqNbr   Virtq number
 * @param ppVirtqBuf  Address to store pointer to descriptor chain that contains the
 *                    pre-processed transaction information pulled from the virtq.
 *
 * @returns VBox status code:
 * @retval  VINF_SUCCESS         Success
 * @retval  VERR_INVALID_STATE   VirtIO not in ready state (asserted).
 * @retval  VERR_NOT_AVAILABLE   If the queue is empty.
 */
int virtioCoreR3VirtqAvailBufPeek(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
                                  PPVIRTQBUF ppVirtqBuf);
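
/*
 * Usage sketch (illustrative only): the peek/consume pattern described above. The buffer
 * stays on the avail ring until virtioCoreR3VirtqAvailBufNext() consumes it, so a device
 * can inspect a request and defer it. Assumes the peeked reference must be released, as
 * with virtioCoreR3VirtqAvailBufGet().
 */
static int myDevPeekThenConsume(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
{
    PVIRTQBUF pVirtqBuf = NULL;
    int rc = virtioCoreR3VirtqAvailBufPeek(pDevIns, pVirtio, uVirtqNbr, &pVirtqBuf);
    if (RT_FAILURE(rc))
        return rc;                                      /* Empty queue or not ready. */

    if (!pVirtqBuf->cbPhysSend)                         /* e.g. nothing we can process yet */
    {
        virtioCoreR3VirtqBufRelease(pVirtio, pVirtqBuf);
        return VINF_TRY_AGAIN;
    }

    virtioCoreR3VirtqAvailBufNext(pVirtio, uVirtqNbr);  /* The peek has now become a 'get'. */
    /* ... process, then complete via virtioCoreR3VirtqUsedBufPut() ... */
    virtioCoreR3VirtqBufRelease(pVirtio, pVirtqBuf);
    return VINF_SUCCESS;
}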

/**
 * This function fetches the next buffer (descriptor chain) from the VirtIO "avail" ring of
 * the indicated queue, and converts the buf's s/g vectors into OUT (e.g. guest-to-host)
 * components and IN (host-to-guest) components.
 *
 * The caller is responsible for GCPhys to host virtual memory conversions. If the
 * virtq buffer being peeked at is "consumed", virtioCoreR3VirtqAvailBufNext() must
 * be called, and in that case virtioCoreR3VirtqUsedBufPut() must be called to
 * complete the roundtrip virtq transaction.
 *
 * @param pDevIns     The device instance.
 * @param pVirtio     Pointer to the shared virtio state.
 * @param uVirtqNbr   Virtq number
 * @param ppVirtqBuf  Address to store pointer to descriptor chain that contains the
 *                    pre-processed transaction information pulled from the virtq.
 *                    Returned reference must be released by calling
 *                    virtioCoreR3VirtqBufRelease().
 * @param fRemove     Flags whether to remove desc chain from queue (false = peek)
 *
 * @returns VBox status code:
 * @retval  VINF_SUCCESS         Success
 * @retval  VERR_INVALID_STATE   VirtIO not in ready state (asserted).
 * @retval  VERR_NOT_AVAILABLE   If the queue is empty.
 */
int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
                                 PPVIRTQBUF ppVirtqBuf, bool fRemove);

/**
 * Fetches a specific descriptor chain using the avail ring of the indicated queue and converts the
 * descriptor chain into its OUT (to device) and IN (to guest) components.
 *
 * The caller is responsible for GCPhys to host virtual memory conversions and *must*
 * return the virtq buffer using virtioCoreR3VirtqUsedBufPut() to complete the roundtrip
 * virtq transaction.
 *
 * @param pDevIns     The device instance.
 * @param pVirtio     Pointer to the shared virtio state.
 * @param uVirtqNbr   Virtq number
 * @param uHeadIdx    Head index of the descriptor chain to fetch.
 * @param ppVirtqBuf  Address to store pointer to descriptor chain that contains the
 *                    pre-processed transaction information pulled from the virtq.
 *                    Returned reference must be released by calling
 *                    virtioCoreR3VirtqBufRelease().
 *
 * @returns VBox status code:
 * @retval  VINF_SUCCESS         Success
 * @retval  VERR_INVALID_STATE   VirtIO not in ready state (asserted).
 * @retval  VERR_NOT_AVAILABLE   If the queue is empty.
 */
int virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
                                 uint16_t uHeadIdx, PPVIRTQBUF ppVirtqBuf);

/**
 * Returns data to the guest to complete a transaction initiated by virtioCoreR3VirtqAvailBufGet(),
 * or by virtioCoreR3VirtqAvailBufPeek()/virtioCoreR3VirtqAvailBufNext() call pairs, ultimately
 * putting each descriptor chain pulled from the avail ring of a queue onto the used ring of the
 * queue. I/O transactions are always initiated by the guest and completed by the host. In other
 * words, for the host to send any data to the guest, the guest must provide buffers for the host
 * to fill, via the avail ring of the virtq.
 *
 * At some point virtioCoreVirtqUsedRingSync() must be called to return data to the guest,
 * completing all pending virtioCoreR3VirtqUsedBufPut() transactions that have accumulated since
 * the last call to virtioCoreVirtqUsedRingSync().
 *
 * @note This does a write-ahead to the used ring of the guest's queue. The data
 *       written won't be seen by the guest until the next call to virtioCoreVirtqUsedRingSync().
 *
 * @param pDevIns        The device instance (for reading).
 * @param pVirtio        Pointer to the shared virtio state.
 * @param uVirtqNbr      Virtq number
 *
 * @param pSgVirtReturn  Points to scatter-gather buffer of virtual memory
 *                       segments the caller is returning to the guest.
 *
 * @param pVirtqBuf      This contains the context of the scatter-gather
 *                       buffer originally pulled from the queue.
 *
 * @param fFence         If true, put up copy fence (memory barrier) after
 *                       copying to guest phys. mem.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS         Success
 * @retval  VERR_INVALID_STATE   VirtIO not in ready state
 * @retval  VERR_NOT_AVAILABLE   Virtq is empty
 *
 * @note This function will not release any reference to pVirtqBuf. The
 *       caller must take care of that.
 */
int virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr, PRTSGBUF pSgVirtReturn,
                                PVIRTQBUF pVirtqBuf, bool fFence);
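
/*
 * Usage sketch (illustrative only): completing a transaction. After one or more
 * virtioCoreR3VirtqUsedBufPut() calls, virtioCoreVirtqUsedRingSync() makes the
 * completions visible to the guest. pSgVirtReturn is a host-virtual RTSGBUF the
 * caller has prepared with the response data (ring-3 context assumed).
 */
static int myDevCompleteRequest(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr,
                                PRTSGBUF pSgVirtReturn, PVIRTQBUF pVirtqBuf)
{
    int rc = virtioCoreR3VirtqUsedBufPut(pDevIns, pVirtio, uVirtqNbr, pSgVirtReturn, pVirtqBuf, true /*fFence*/);
    AssertRCReturn(rc, rc);

    /* Expose the used-ring index update and notify the guest if it asked for notifications. */
    return virtioCoreVirtqUsedRingSync(pDevIns, pVirtio, uVirtqNbr);
}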

/**
 * Advances the index of the avail ring to the next entry in the specified virtq
 * (see virtioCoreR3VirtqAvailBufPeek()).
 *
 * @param pVirtio    Pointer to the virtio state.
 * @param uVirtqNbr  Index of queue
 */
int virtioCoreR3VirtqAvailBufNext(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);

/**
 * Checks to see if the guest has acknowledged the device's VIRTIO_F_VERSION_1 feature.
 * If not, it's presumed to be a VirtIO legacy guest driver. Note that legacy drivers
 * may start using the device prematurely, rather than following the rigorously sane
 * protocol prescribed by the "modern" VirtIO spec. Doing so is suggestive of a legacy
 * driver; therefore legacy mode is assumed unless proven otherwise.
 *
 * @param pVirtio    Pointer to the virtio state.
 */
int virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio);

DECLINLINE(void) virtioCoreGCPhysChainInit(PVIRTIOSGBUF pGcSgBuf, PVIRTIOSGSEG paSegs, size_t cSegs)
{
    AssertPtr(pGcSgBuf);
    Assert((cSegs > 0 && RT_VALID_PTR(paSegs)) || (!cSegs && !paSegs));
    Assert(cSegs < (~(unsigned)0 >> 1));

    pGcSgBuf->paSegs = paSegs;
    pGcSgBuf->cSegs  = (unsigned)cSegs;
    pGcSgBuf->idxSeg = 0;
    if (cSegs && paSegs)
    {
        pGcSgBuf->GCPhysCur = paSegs[0].GCPhys;
        pGcSgBuf->cbSegLeft = paSegs[0].cbSeg;
    }
    else
    {
        pGcSgBuf->GCPhysCur = 0;
        pGcSgBuf->cbSegLeft = 0;
    }
}

DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGet(PVIRTIOSGBUF pGcSgBuf, size_t *pcbData)
{
    size_t   cbData;
    RTGCPHYS pGcBuf;

    /* Check that the S/G buffer has memory left. */
    if (RT_LIKELY(pGcSgBuf->idxSeg < pGcSgBuf->cSegs && pGcSgBuf->cbSegLeft))
    { /* likely */ }
    else
    {
        *pcbData = 0;
        return 0;
    }

    AssertMsg(   pGcSgBuf->cbSegLeft <= 128 * _1M
              && (RTGCPHYS)pGcSgBuf->GCPhysCur >= (RTGCPHYS)pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys
              && (RTGCPHYS)pGcSgBuf->GCPhysCur + pGcSgBuf->cbSegLeft <=
                     (RTGCPHYS)pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys + pGcSgBuf->paSegs[pGcSgBuf->idxSeg].cbSeg,
              ("pGcSgBuf->idxSeg=%d pGcSgBuf->cSegs=%d pGcSgBuf->GCPhysCur=%p pGcSgBuf->cbSegLeft=%zd "
               "pGcSgBuf->paSegs[%d].GCPhys=%p pGcSgBuf->paSegs[%d].cbSeg=%zd\n",
               pGcSgBuf->idxSeg, pGcSgBuf->cSegs, pGcSgBuf->GCPhysCur, pGcSgBuf->cbSegLeft,
               pGcSgBuf->idxSeg, pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys, pGcSgBuf->idxSeg,
               pGcSgBuf->paSegs[pGcSgBuf->idxSeg].cbSeg));

    cbData = RT_MIN(*pcbData, pGcSgBuf->cbSegLeft);
    pGcBuf = pGcSgBuf->GCPhysCur;
    pGcSgBuf->cbSegLeft -= cbData;
    if (!pGcSgBuf->cbSegLeft)
    {
        pGcSgBuf->idxSeg++;

        if (pGcSgBuf->idxSeg < pGcSgBuf->cSegs)
        {
            pGcSgBuf->GCPhysCur = pGcSgBuf->paSegs[pGcSgBuf->idxSeg].GCPhys;
            pGcSgBuf->cbSegLeft = pGcSgBuf->paSegs[pGcSgBuf->idxSeg].cbSeg;
        }
        *pcbData = cbData;
    }
    else
        pGcSgBuf->GCPhysCur = pGcSgBuf->GCPhysCur + cbData;

    return pGcBuf;
}

DECLINLINE(void) virtioCoreGCPhysChainReset(PVIRTIOSGBUF pGcSgBuf)
{
    AssertPtrReturnVoid(pGcSgBuf);

    pGcSgBuf->idxSeg = 0;
    if (pGcSgBuf->cSegs)
    {
        pGcSgBuf->GCPhysCur = pGcSgBuf->paSegs[0].GCPhys;
        pGcSgBuf->cbSegLeft = pGcSgBuf->paSegs[0].cbSeg;
    }
    else
    {
        pGcSgBuf->GCPhysCur = 0;
        pGcSgBuf->cbSegLeft = 0;
    }
}

DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainAdvance(PVIRTIOSGBUF pGcSgBuf, size_t cbAdvance)
{
    AssertReturn(pGcSgBuf, 0);

    size_t cbLeft = cbAdvance;
    while (cbLeft)
    {
        size_t cbThisAdvance = cbLeft;
        virtioCoreGCPhysChainGet(pGcSgBuf, &cbThisAdvance);
        if (!cbThisAdvance)
            break;

        cbLeft -= cbThisAdvance;
    }
    return cbAdvance - cbLeft;
}

DECLINLINE(RTGCPHYS) virtioCoreGCPhysChainGetNextSeg(PVIRTIOSGBUF pGcSgBuf, size_t *pcbSeg)
{
    AssertReturn(pGcSgBuf, 0);
    AssertPtrReturn(pcbSeg, 0);

    if (!*pcbSeg)
        *pcbSeg = pGcSgBuf->cbSegLeft;

    return virtioCoreGCPhysChainGet(pGcSgBuf, pcbSeg);
}

DECLINLINE(size_t) virtioCoreGCPhysChainCalcBufSize(PVIRTIOSGBUF pGcSgBuf)
{
    size_t   cb = 0;
    unsigned i  = pGcSgBuf->cSegs;
    while (i-- > 0)
        cb += pGcSgBuf->paSegs[i].cbSeg;
    return cb;
}
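
/*
 * Usage sketch (illustrative only): walking a guest-physical S/G chain with the inline
 * helpers above, copying it into a host bounce buffer via PDMDevHlpPhysRead(). (See the
 * virtioCoreGCPhysRead() wrapper further below for the legacy-mode-aware variant.)
 */
static void myDevCopyChainToBuf(PPDMDEVINS pDevIns, PVIRTIOSGBUF pGcSgBuf, uint8_t *pbDst, size_t cbDst)
{
    virtioCoreGCPhysChainReset(pGcSgBuf);
    size_t cbLeft = RT_MIN(cbDst, virtioCoreGCPhysChainCalcBufSize(pGcSgBuf));
    while (cbLeft)
    {
        size_t cbSeg = cbLeft;
        RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(pGcSgBuf, &cbSeg);
        if (!cbSeg)
            break;                              /* Chain exhausted. */
        PDMDevHlpPhysRead(pDevIns, GCPhys, pbDst, cbSeg);
        pbDst  += cbSeg;
        cbLeft -= cbSeg;
    }
}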

#define VIRTQNAME(a_pVirtio, a_uVirtq)  ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName)

/**
 * Add some bytes to a virtq (s/g) buffer, converting them from virtual memory to GCPhys.
 *
 * To be performant it is left to the caller to validate the size of the buffer with regard
 * to data being pulled from it to avoid overruns/underruns.
 *
 * @param pVirtio    Pointer to the shared virtio state.
 * @param pVirtqBuf  output: virtq buffer
 * @param pv         input: virtual memory buffer containing the bytes to add
 * @param cb         number of bytes to add to the s/g buffer.
 */
DECLINLINE(void) virtioCoreR3VirqBufFill(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf, void *pv, size_t cb)
{
    uint8_t *pb = (uint8_t *)pv;
    size_t cbLim = RT_MIN(pVirtqBuf->cbPhysReturn, cb);
    while (cbLim)
    {
        size_t cbSeg = cbLim;
        RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(pVirtqBuf->pSgPhysReturn, &cbSeg);
        PDMDevHlpPCIPhysWrite(pVirtio->pDevInsR3, GCPhys, pb, cbSeg);
        pb    += cbSeg;
        cbLim -= cbSeg;
        pVirtqBuf->cbPhysReturn -= cbSeg;   /* Track remaining space in the return (IN) buffer. */
    }
    LogFunc(("Added %d/%d bytes to %s buffer, head idx: %u (%d bytes remain)\n",
             cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtq),
             pVirtqBuf->uHeadIdx, pVirtqBuf->cbPhysReturn));
}

/**
 * Extract some bytes from a virtq (s/g) buffer, converting them from GCPhys to virtual memory.
 *
 * To be performant it is left to the caller to validate the size of the buffer with regard
 * to data being pulled from it to avoid overruns/underruns.
 *
 * @param pVirtio    Pointer to the shared virtio state.
 * @param pVirtqBuf  input: virtq buffer
 * @param pv         output: virtual memory buffer to receive bytes
 * @param cb         number of bytes to drain from buffer
 */
DECLINLINE(void) virtioCoreR3VirtqBufDrain(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf, void *pv, size_t cb)
{
    uint8_t *pb = (uint8_t *)pv;
    size_t cbLim = RT_MIN(pVirtqBuf->cbPhysSend, cb);
    while (cbLim)
    {
        size_t cbSeg = cbLim;
        RTGCPHYS GCPhys = virtioCoreGCPhysChainGetNextSeg(pVirtqBuf->pSgPhysSend, &cbSeg);
        PDMDevHlpPCIPhysRead(pVirtio->pDevInsR3, GCPhys, pb, cbSeg);
        pb    += cbSeg;
        cbLim -= cbSeg;
        pVirtqBuf->cbPhysSend -= cbSeg;
    }
    LogFunc(("Drained %d/%d bytes from %s buffer, head idx: %u (%d bytes left)\n",
             cb - cbLim, cb, VIRTQNAME(pVirtio, pVirtqBuf->uVirtq),
             pVirtqBuf->uHeadIdx, pVirtqBuf->cbPhysSend));
}

#undef VIRTQNAME
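
/*
 * Usage sketch (illustrative only): a simple request/response exchange using the
 * drain/fill helpers above. The 4-byte request code and 1-byte status reply are
 * hypothetical stand-ins for whatever wire format a real device defines.
 */
static void myDevHandleRequest(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf)
{
    uint32_t uRequest = 0;      /* hypothetical request code (guest OUT data) */
    uint8_t  uStatus  = 0;      /* hypothetical status reply (host IN data)   */

    /* Pull the guest's OUT data into host-virtual memory ... */
    virtioCoreR3VirtqBufDrain(pVirtio, pVirtqBuf, &uRequest, sizeof(uRequest));

    /* ... process it, then push the reply into the guest's IN buffers. */
    virtioCoreR3VirqBufFill(pVirtio, pVirtqBuf, &uStatus, sizeof(uStatus));
}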

/**
 * Updates the indicated virtq's "used ring" descriptor index to match the "shadow" index that tracks
 * pending buffers added to the used ring, thus exposing all the data added by virtioCoreR3VirtqUsedBufPut()
 * to the "used ring" since the last virtioCoreVirtqUsedRingSync().
 *
 * This *must* be invoked after one or more virtioCoreR3VirtqUsedBufPut() calls to inform the guest driver
 * there is data in the queue. If enabled by the guest, IRQ or MSI-X signalling will notify the guest
 * proactively; otherwise the guest detects updates by polling (see VirtIO 1.0, Section 2.4 "Virtqueues").
 *
 * @param pDevIns    The device instance.
 * @param pVirtio    Pointer to the shared virtio state.
 * @param uVirtqNbr  Virtq number
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS         Success
 * @retval  VERR_INVALID_STATE   VirtIO not in ready state
 */
int virtioCoreVirtqUsedRingSync(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtqNbr);

/**
 * Retains a reference to the given descriptor chain.
 *
 * @param pVirtqBuf  The descriptor chain to reference.
 *
 * @returns New reference count.
 * @retval  UINT32_MAX on invalid parameter.
 */
uint32_t virtioCoreR3VirtqBufRetain(PVIRTQBUF pVirtqBuf);

/**
 * Releases a reference to the given descriptor chain.
 *
 * @param pVirtio    Pointer to the shared virtio state.
 * @param pVirtqBuf  The descriptor chain to reference. NULL is quietly
 *                   ignored (returns 0).
 * @returns New reference count.
 * @retval  0 if freed or invalid parameter.
 */
uint32_t virtioCoreR3VirtqBufRelease(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf);

/**
 * Return queue enable state.
 *
 * @param pVirtio    Pointer to the virtio state.
 * @param uVirtqNbr  Virtq number.
 *
 * @returns true or false indicating whether the queue is enabled.
 */
DECLINLINE(bool) virtioCoreIsVirtqEnabled(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
{
    Assert(uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqueues));
    if (pVirtio->fLegacyDriver)
        return pVirtio->aVirtqueues[uVirtqNbr].GCPhysVirtqDesc != 0;
    return pVirtio->aVirtqueues[uVirtqNbr].uEnable != 0;
}

/**
 * Get the name of the queue, via uVirtqNbr, assigned during virtioCoreR3VirtqAttach().
 *
 * @param pVirtio    Pointer to the virtio state.
 * @param uVirtqNbr  Virtq number.
 *
 * @returns Pointer to read-only queue name.
 */
DECLINLINE(const char *) virtioCoreVirtqGetName(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
{
    Assert((size_t)uVirtqNbr < RT_ELEMENTS(pVirtio->aVirtqueues));
    return pVirtio->aVirtqueues[uVirtqNbr].szName;
}

/**
 * Get the bitmask of features VirtIO is running with. This is called by the device-specific
 * VirtIO implementation to identify this device's operational configuration after features
 * have been negotiated with the guest VirtIO driver. Feature negotiation entails the host
 * indicating to the guest which features it supports, then the guest accepting, from among
 * those offered, the features it will enable. That becomes the agreement between the host
 * and the guest. The bitmask containing virtio core features plus device-specific features
 * is provided as a parameter to virtioCoreR3Init() by the host-side device-specific virtio
 * implementation.
 *
 * @param pVirtio    Pointer to the virtio state.
 *
 * @returns Features the guest driver has accepted, finalizing the operational features.
 */
DECLINLINE(uint64_t) virtioCoreGetNegotiatedFeatures(PVIRTIOCORE pVirtio)
{
    return pVirtio->uDriverFeatures;
}
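
/*
 * Usage sketch (illustrative only): checking a negotiated feature once the guest driver
 * has completed negotiation (e.g. from pfnStatusChanged after fDriverOk is set).
 */
static bool myDevUsesEventIdx(PVIRTIOCORE pVirtio)
{
    return RT_BOOL(virtioCoreGetNegotiatedFeatures(pVirtio) & VIRTIO_F_EVENT_IDX);
}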

/**
 * Get the name of the VM state change associated with the enumeration variable.
 *
 * @param enmState   VM state (enumeration value)
 *
 * @returns associated text.
 */
const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState);

/**
 * Debug assist code for any consumer that inherits VIRTIOCORE.
 * Log memory-mapped I/O input or output value.
 *
 * This is to be invoked by macros that assume they are invoked in functions with
 * the relevant arguments. (See Virtio_1_0.cpp).
 *
 * It is exposed via the API so inheriting device-specific clients can provide similar
 * logging capabilities for a consistent look-and-feel.
 *
 * @param pszFunc      Function name to display (avoids showing this helper's own name
 *                     via __FUNCTION__ or LogFunc())
 * @param pszMember    Name of struct member
 * @param uMemberSize  Size of the struct member
 * @param pv           pointer to value
 * @param cb           size of value
 * @param uOffset      offset into member where value starts
 * @param fWrite       True if write I/O
 * @param fHasIndex    True if the member is indexed
 * @param idx          The index if fHasIndex
 */
void virtioCoreLogMappedIoValue(const char *pszFunc, const char *pszMember, uint32_t uMemberSize,
                                const void *pv, uint32_t cb, uint32_t uOffset,
                                int fWrite, int fHasIndex, uint32_t idx);

/**
 * Debug assist for any consumer.
 *
 * Does a formatted hex dump using Log(()); it is recommended to use the VIRTIO_HEX_DUMP() macro to
 * control enabling of logging efficiently.
 *
 * @param pv         pointer to buffer to dump contents of
 * @param cb         count of characters to dump from buffer
 * @param uBase      base address of per-row address prefixing of hex output
 * @param pszTitle   Optional title. If present displays title that lists
 *                   provided text with value of cb to indicate size next to it.
 */
void virtioCoreHexDump(uint8_t *pv, uint32_t cb, uint32_t uBase, const char *pszTitle);

/**
 * Debug assist for any consumer device code.
 *
 * Does a hex dump of memory in guest physical context.
 *
 * @param GCPhys     Guest physical address of buffer to dump contents of
 * @param cb         count of characters to dump from buffer
 * @param uBase      base address of per-row address prefixing of hex output
 * @param pszTitle   Optional title. If present displays title that lists
 *                   provided text with value of cb to indicate size next to it.
 */
void virtioCoreGCPhysHexDump(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint16_t cb, uint32_t uBase, const char *pszTitle);

/**
 * The following API functions identically to the similarly-named calls pertaining to the RTSGBUF.
 */

/**
 * Calculate the length of a GCPhys s/g buffer by tallying the size of each segment.
 *
 * @param pGcSgBuf   Guest Context (GCPhys) S/G buffer to calculate length of
 */
DECLINLINE(size_t) virtioCoreGCPhysChainCalcBufSize(PCVIRTIOSGBUF pGcSgBuf)
{
    size_t   cb = 0;
    unsigned i  = pGcSgBuf->cSegs;
    while (i-- > 0)
        cb += pGcSgBuf->paSegs[i].cbSeg;
    return cb;
}

/**
 * This VirtIO transitional device supports "modern" (rev 1.0+) as well as "legacy" (e.g. < 1.0) VirtIO drivers.
 * Some legacy guest drivers are known to mishandle PCI bus mastering, wherein the PCI flavor of GC phys
 * access functions can't be used. The following wrappers select the mem access method based on whether the
 * device is operating in legacy mode or not.
 */
DECLINLINE(int) virtioCoreGCPhysWrite(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbWrite)
{
    int rc;
    if (virtioCoreIsLegacyMode(pVirtio))
        rc = PDMDevHlpPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
    else
        rc = PDMDevHlpPCIPhysWrite(pDevIns, GCPhys, pvBuf, cbWrite);
    return rc;
}

DECLINLINE(int) virtioCoreGCPhysRead(PVIRTIOCORE pVirtio, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
{
    int rc;
    if (virtioCoreIsLegacyMode(pVirtio))
        rc = PDMDevHlpPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
    else
        rc = PDMDevHlpPCIPhysRead(pDevIns, GCPhys, pvBuf, cbRead);
    return rc;
}

/** Misc VM and PDM boilerplate */
int         virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM);
int         virtioCoreR3LoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM);
void        virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState);
void        virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC);
int         virtioCoreRZInit(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio);
const char *virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState);

/*
 * The following macros assist with handling/logging MMIO accesses to the VirtIO dev-specific config area,
 * in a way that enhances code readability and debug logging consistency.
 *
 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
 */

#ifdef LOG_ENABLED

# define VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uOffsetOfAccess) \
    if (LogIs7Enabled()) { \
        uint32_t uMbrOffset = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        uint32_t uMbrSize   = RT_SIZEOFMEMB(tCfgStruct, member); \
        virtioCoreLogMappedIoValue(__FUNCTION__, #member, uMbrSize, pv, cb, uMbrOffset, fWrite, false, 0); \
    }

# define VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uOffsetOfAccess, uIdx) \
    if (LogIs7Enabled()) { \
        uint32_t uMbrOffset = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        uint32_t uMbrSize   = RT_SIZEOFMEMB(tCfgStruct, member); \
        virtioCoreLogMappedIoValue(__FUNCTION__, #member, uMbrSize, pv, cb, uMbrOffset, fWrite, true, uIdx); \
    }
#else
# define VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uMbrOffset) do { } while (0)
# define VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uMbrOffset, uIdx) do { } while (0)
#endif

DECLINLINE(bool) virtioCoreMatchMember(uint32_t uOffset, uint32_t cb, uint32_t uMemberOff,
                                       size_t uMemberSize, bool fSubFieldMatch)
{
    /* Test for 8-byte field (always accessed as two 32-bit components) */
    if (uMemberSize == 8)
        return (cb == sizeof(uint32_t)) && (uOffset == uMemberOff || uOffset == (uMemberOff + sizeof(uint32_t)));

    if (fSubFieldMatch)
        return (uOffset >= uMemberOff) && (cb <= uMemberSize - (uOffset - uMemberOff));

    /* Test for exact match */
    return (uOffset == uMemberOff) && (cb == uMemberSize);
}

/**
 * Yields boolean true if uOffsetOfAccess falls within the bytes of the specified member of the config struct.
 */
#define VIRTIO_DEV_CONFIG_SUBMATCH_MEMBER(member, tCfgStruct, uOffsetOfAccess) \
    virtioCoreMatchMember(uOffsetOfAccess, cb, \
                          RT_UOFFSETOF(tCfgStruct, member), \
                          RT_SIZEOFMEMB(tCfgStruct, member), true /* fSubfieldMatch */)

#define VIRTIO_DEV_CONFIG_MATCH_MEMBER(member, tCfgStruct, uOffsetOfAccess) \
    virtioCoreMatchMember(uOffsetOfAccess, cb, \
                          RT_UOFFSETOF(tCfgStruct, member), \
                          RT_SIZEOFMEMB(tCfgStruct, member), false /* fSubfieldMatch */)

/**
 * Copies into or out of the specified member field of the config struct (based on fWrite),
 * the memory described by cb and pv.
 *
 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
 */
#define VIRTIO_DEV_CONFIG_ACCESS(member, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            memcpy(((char *)&(pCfgStruct)->member) + uOffsetInMember, pv, cb); \
        else \
            memcpy(pv, ((const char *)&(pCfgStruct)->member) + uOffsetInMember, cb); \
        VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uOffsetOfAccess); \
    } while(0)

/**
 * Copies bytes into the memory described by cb, pv from the specified member field of the config struct.
 * The operation is a no-op and logs an error if the implied parameter fWrite is true.
 *
 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
 */
#define VIRTIO_DEV_CONFIG_ACCESS_READONLY(member, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio config struct (member %s)\n", #member)); \
        else \
        { \
            memcpy(pv, ((const char *)&(pCfgStruct)->member) + uOffsetInMember, cb); \
            VIRTIO_DEV_CONFIG_LOG_ACCESS(member, tCfgStruct, uOffsetOfAccess); \
        } \
    } while(0)

/**
 * Copies into or out of the specified member field of the indexed config struct (based on fWrite),
 * the memory described by cb and pv.
 *
 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
 */
#define VIRTIO_DEV_CONFIG_ACCESS_INDEXED(member, uIdx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            memcpy(((char *)&(pCfgStruct[uIdx].member)) + uOffsetInMember, pv, cb); \
        else \
            memcpy(pv, ((const char *)&(pCfgStruct[uIdx].member)) + uOffsetInMember, cb); \
        VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uOffsetOfAccess, uIdx); \
    } while(0)

/**
 * Copies bytes into the memory described by cb, pv from the specified member field of the indexed
 * config struct. The operation is a no-op and logs an error if the implied parameter fWrite is true.
 *
 * cb, pv and fWrite are implicit parameters and must be defined by the invoker.
 */
#define VIRTIO_DEV_CONFIG_ACCESS_INDEXED_READONLY(member, uIdx, tCfgStruct, uOffsetOfAccess, pCfgStruct) \
    do \
    { \
        uint32_t uOffsetInMember = uOffsetOfAccess - RT_UOFFSETOF(tCfgStruct, member); \
        if (fWrite) \
            LogFunc(("Guest attempted to write readonly virtio config struct (member %s)\n", #member)); \
        else \
        { \
            memcpy(pv, ((const char *)&(pCfgStruct[uIdx].member)) + uOffsetInMember, cb); \
            VIRTIO_DEV_CONFIG_LOG_INDEXED_ACCESS(member, tCfgStruct, uOffsetOfAccess, uIdx); \
        } \
    } while(0)
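
/*
 * Usage sketch (illustrative only): how a device's pfnDevCapRead/pfnDevCapWrite handler
 * might use the matcher/accessor macros above. MYDEVCONFIG and its members are
 * hypothetical, and cb, pv and fWrite must be in scope, as the macros require.
 */
#if 0 /* illustration only: MYDEVCONFIG is not defined by this header */
typedef struct MYDEVCONFIG { uint32_t uFoo; uint16_t uBar; } MYDEVCONFIG;

static int myDevCfgAccessed(MYDEVCONFIG *pCfg, uint32_t uOffsetOfAccess, void *pv, uint32_t cb, bool fWrite)
{
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(   uFoo, MYDEVCONFIG, uOffsetOfAccess))
        VIRTIO_DEV_CONFIG_ACCESS(         uFoo, MYDEVCONFIG, uOffsetOfAccess, pCfg);
    else
    if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(   uBar, MYDEVCONFIG, uOffsetOfAccess))
        VIRTIO_DEV_CONFIG_ACCESS_READONLY(uBar, MYDEVCONFIG, uOffsetOfAccess, pCfg);
    else
        return VERR_ACCESS_DENIED;
    return VINF_SUCCESS;
}
#endif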

/** @} */

#endif /* !VBOX_INCLUDED_SRC_VirtIO_VirtioCore_h */