VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/Virtio.cpp@ 97405

Last change on this file since 97405 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 34.6 KB
Line 
1/* $Id: Virtio.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
2/** @file
3 * Virtio - Virtio Common Functions (VRing, VQueue, Virtio PCI)
4 */
5
6/*
7 * Copyright (C) 2009-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
33
34#include <iprt/param.h>
35#include <iprt/uuid.h>
36#include <VBox/vmm/pdmdev.h>
37#include <VBox/AssertGuest.h>
38#include "Virtio.h"
39
40
41/*********************************************************************************************************************************
42* Defined Constants And Macros *
43*********************************************************************************************************************************/
44#define INSTANCE(pThis) (pThis->szInstance)
45
46
47static void vqueueReset(PVQUEUE pQueue)
48{
49 pQueue->VRing.addrDescriptors = 0;
50 pQueue->VRing.addrAvail = 0;
51 pQueue->VRing.addrUsed = 0;
52 pQueue->uNextAvailIndex = 0;
53 pQueue->uNextUsedIndex = 0;
54 pQueue->uPageNumber = 0;
55}
56
57static void vqueueInit(PVQUEUE pQueue, uint32_t uPageNumber)
58{
59 pQueue->VRing.addrDescriptors = (uint64_t)uPageNumber << GUEST_PAGE_SHIFT;
60 pQueue->VRing.addrAvail = pQueue->VRing.addrDescriptors + sizeof(VRINGDESC) * pQueue->VRing.uSize;
61 pQueue->VRing.addrUsed = RT_ALIGN(pQueue->VRing.addrAvail + RT_UOFFSETOF_DYN(VRINGAVAIL, auRing[pQueue->VRing.uSize]),
62 GUEST_PAGE_SIZE); /* The used ring must start from the next page. */
63 pQueue->uNextAvailIndex = 0;
64 pQueue->uNextUsedIndex = 0;
65}
66
67// void vqueueElemFree(PVQUEUEELEM pElem)
68// {
69// }
70
71static void vringReadDesc(PPDMDEVINS pDevIns, PVRING pVRing, uint32_t uIndex, PVRINGDESC pDesc)
72{
73 //Log(("%s vringReadDesc: ring=%p idx=%u\n", INSTANCE(pThis), pVRing, uIndex));
74 PDMDevHlpPhysRead(pDevIns,
75 pVRing->addrDescriptors + sizeof(VRINGDESC) * (uIndex % pVRing->uSize),
76 pDesc, sizeof(VRINGDESC));
77 /** @todo r=bird: Why exactly are we sometimes using PDMDevHlpPhysRead rather
78 * than PDMDevHlpPCIPhysRead? */
79}
80
81static uint16_t vringReadAvail(PPDMDEVINS pDevIns, PVRING pVRing, uint32_t uIndex)
82{
83 uint16_t tmp = 0;
84 PDMDevHlpPhysRead(pDevIns, pVRing->addrAvail + RT_UOFFSETOF_DYN(VRINGAVAIL, auRing[uIndex % pVRing->uSize]),
85 &tmp, sizeof(tmp));
86 return tmp;
87}
88
89static uint16_t vringReadAvailFlags(PPDMDEVINS pDevIns, PVRING pVRing)
90{
91 uint16_t tmp = 0;
92 PDMDevHlpPhysRead(pDevIns, pVRing->addrAvail + RT_UOFFSETOF(VRINGAVAIL, uFlags), &tmp, sizeof(tmp));
93 return tmp;
94}
95
96void vringSetNotification(PPDMDEVINS pDevIns, PVRING pVRing, bool fEnabled)
97{
98 uint16_t fState = 0;
99 PDMDevHlpPhysRead(pDevIns, pVRing->addrUsed + RT_UOFFSETOF(VRINGUSED, uFlags), &fState, sizeof(fState));
100
101 if (fEnabled)
102 fState &= ~ VRINGUSED_F_NO_NOTIFY;
103 else
104 fState |= VRINGUSED_F_NO_NOTIFY;
105
106 PDMDevHlpPCIPhysWrite(pDevIns, pVRing->addrUsed + RT_UOFFSETOF(VRINGUSED, uFlags), &fState, sizeof(fState));
107}
108
109bool vqueueSkip(PPDMDEVINS pDevIns, PVPCISTATE pThis, PVQUEUE pQueue)
110{
111 if (vqueueIsEmpty(pDevIns, pQueue))
112 return false;
113
114 Log2(("%s vqueueSkip: %s avail_idx=%u\n", INSTANCE(pThis), pQueue->szName, pQueue->uNextAvailIndex));
115 RT_NOREF(pThis);
116 pQueue->uNextAvailIndex++;
117 return true;
118}
119
/**
 * Fetches the next available descriptor chain from a virtqueue, splitting it
 * into device-writable (aSegsIn) and device-readable (aSegsOut) segments.
 *
 * @returns true if an element was fetched, false if the queue was empty.
 * @param pDevIns   The device instance.
 * @param pThis     The shared virtio core instance data (logging only).
 * @param pQueue    The queue to fetch from.
 * @param pElem     Where to store the gathered segment lists.
 * @param fRemove   Whether to advance the avail cursor (false = peek only).
 */
bool vqueueGet(PPDMDEVINS pDevIns, PVPCISTATE pThis, PVQUEUE pQueue, PVQUEUEELEM pElem, bool fRemove)
{
    if (vqueueIsEmpty(pDevIns, pQueue))
        return false;

    pElem->cIn = pElem->cOut = 0;

    Log2(("%s vqueueGet: %s avail_idx=%u\n", INSTANCE(pThis), pQueue->szName, pQueue->uNextAvailIndex));

    VRINGDESC desc;
    /* The avail ring entry holds the head index of the descriptor chain. */
    uint16_t idx = vringReadAvail(pDevIns, &pQueue->VRing, pQueue->uNextAvailIndex);
    if (fRemove)
        pQueue->uNextAvailIndex++;
    pElem->uIndex = idx;
    do
    {
        VQUEUESEG *pSeg;

        /*
         * Malicious guests may try to trick us into writing beyond aSegsIn or
         * aSegsOut boundaries by linking several descriptors into a loop. We
         * cannot possibly get a sequence of linked descriptors exceeding the
         * total number of descriptors in the ring (see @bugref{8620}).
         */
        if (pElem->cIn + pElem->cOut >= VRING_MAX_SIZE)
        {
            /* Rate-limited release logging: report on the 1st, 10th, 100th, ... occurrence. */
            static volatile uint32_t s_cMessages = 0;
            static volatile uint32_t s_cThreshold = 1;
            if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
            {
                LogRel(("%s: too many linked descriptors; check if the guest arranges descriptors in a loop.\n",
                        INSTANCE(pThis)));
                if (ASMAtomicReadU32(&s_cMessages) != 1)
                    LogRel(("%s: (the above error has occured %u times so far)\n",
                            INSTANCE(pThis), ASMAtomicReadU32(&s_cMessages)));
                ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
            }
            break;
        }
        /* Speculation barrier after validating the untrusted counter bound. */
        RT_UNTRUSTED_VALIDATED_FENCE();

        vringReadDesc(pDevIns, &pQueue->VRing, idx, &desc);
        if (desc.u16Flags & VRINGDESC_F_WRITE)
        {
            /* Device-writable buffer -> IN segment. */
            Log2(("%s vqueueGet: %s IN seg=%u desc_idx=%u addr=%p cb=%u\n", INSTANCE(pThis),
                  pQueue->szName, pElem->cIn, idx, desc.u64Addr, desc.uLen));
            pSeg = &pElem->aSegsIn[pElem->cIn++];
        }
        else
        {
            /* Device-readable buffer -> OUT segment. */
            Log2(("%s vqueueGet: %s OUT seg=%u desc_idx=%u addr=%p cb=%u\n", INSTANCE(pThis),
                  pQueue->szName, pElem->cOut, idx, desc.u64Addr, desc.uLen));
            pSeg = &pElem->aSegsOut[pElem->cOut++];
        }

        pSeg->addr = desc.u64Addr;
        pSeg->cb = desc.uLen;
        pSeg->pv = NULL;

        /* Follow the chain link; terminates when VRINGDESC_F_NEXT is clear. */
        idx = desc.u16Next;
    } while (desc.u16Flags & VRINGDESC_F_NEXT);

    Log2(("%s vqueueGet: %s head_desc_idx=%u nIn=%u nOut=%u\n", INSTANCE(pThis),
          pQueue->szName, pElem->uIndex, pElem->cIn, pElem->cOut));
    return true;
}
186
#ifdef LOG_ENABLED
/**
 * Reads back the used ring index from guest memory (logging builds only).
 *
 * @returns The used index currently visible to the guest.
 */
static uint16_t vringReadUsedIndex(PPDMDEVINS pDevIns, PVRING pVRing)
{
    uint16_t uUsedIdx = 0;
    PDMDevHlpPhysRead(pDevIns, pVRing->addrUsed + RT_UOFFSETOF(VRINGUSED, uIndex), &uUsedIdx, sizeof(uUsedIdx));
    return uUsedIdx;
}
#endif
195
196static void vringWriteUsedIndex(PPDMDEVINS pDevIns, PVRING pVRing, uint16_t u16Value)
197{
198 PDMDevHlpPCIPhysWrite(pDevIns,
199 pVRing->addrUsed + RT_UOFFSETOF(VRINGUSED, uIndex),
200 &u16Value, sizeof(u16Value));
201}
202
203static void vringWriteUsedElem(PPDMDEVINS pDevIns, PVRING pVRing, uint32_t uIndex, uint32_t uId, uint32_t uLen)
204{
205 VRINGUSEDELEM elem;
206
207 elem.uId = uId;
208 elem.uLen = uLen;
209 PDMDevHlpPCIPhysWrite(pDevIns,
210 pVRing->addrUsed + RT_UOFFSETOF_DYN(VRINGUSED, aRing[uIndex % pVRing->uSize]),
211 &elem, sizeof(elem));
212}
213
/**
 * Returns a consumed element to the guest by copying the gathered IN segment
 * data back to guest memory and recording the chain in the used ring.
 *
 * Note: the used index itself is published later by vqueueSync().
 *
 * @param pDevIns    The device instance.
 * @param pThis      The shared virtio core instance data (logging only).
 * @param pQueue     The queue the element belongs to.
 * @param pElem      The element being returned.
 * @param uTotalLen  Total number of bytes reported as used, including the
 *                   reserved prefix.
 * @param uReserved  Number of leading bytes the caller has already written
 *                   directly; skipped when copying segment data here.
 */
void vqueuePut(PPDMDEVINS pDevIns, PVPCISTATE pThis, PVQUEUE pQueue, PVQUEUEELEM pElem, uint32_t uTotalLen, uint32_t uReserved)
{
    Log2(("%s vqueuePut: %s desc_idx=%u acb=%u (%u)\n", INSTANCE(pThis), pQueue->szName, pElem->uIndex, uTotalLen, uReserved));
    RT_NOREF(pThis);

    Assert(uReserved < uTotalLen);

    uint32_t cbLen = uTotalLen - uReserved;
    uint32_t cbSkip = uReserved;

    /* Walk the IN segments, skipping the reserved prefix, until cbLen bytes are copied. */
    for (unsigned i = 0; i < pElem->cIn && cbLen > 0; ++i)
    {
        if (cbSkip >= pElem->aSegsIn[i].cb) /* segment completely skipped? */
        {
            cbSkip -= pElem->aSegsIn[i].cb;
            continue;
        }

        uint32_t cbSegLen = pElem->aSegsIn[i].cb - cbSkip;
        if (cbSegLen > cbLen) /* last segment only partially used? */
            cbSegLen = cbLen;

        /*
         * XXX: We should assert pv != NULL, but we need to check and
         * fix all callers first.
         */
        if (pElem->aSegsIn[i].pv != NULL)
        {
            Log2(("%s vqueuePut: %s used_idx=%u seg=%u addr=%RGp pv=%p cb=%u acb=%u\n", INSTANCE(pThis), pQueue->szName,
                  pQueue->uNextUsedIndex, i, pElem->aSegsIn[i].addr, pElem->aSegsIn[i].pv, pElem->aSegsIn[i].cb, cbSegLen));

            PDMDevHlpPCIPhysWrite(pDevIns,
                                  pElem->aSegsIn[i].addr + cbSkip,
                                  pElem->aSegsIn[i].pv,
                                  cbSegLen);
        }

        /* The skip prefix only affects the first (partially skipped) segment. */
        cbSkip = 0;
        cbLen -= cbSegLen;
    }

    Log2(("%s vqueuePut: %s used_idx=%u guest_used_idx=%u id=%u len=%u\n", INSTANCE(pThis), pQueue->szName,
          pQueue->uNextUsedIndex, vringReadUsedIndex(pDevIns, &pQueue->VRing), pElem->uIndex, uTotalLen));

    vringWriteUsedElem(pDevIns, &pQueue->VRing,
                       pQueue->uNextUsedIndex++,
                       pElem->uIndex, uTotalLen);
}
262
263static void vqueueNotify(PPDMDEVINS pDevIns, PVPCISTATE pThis, PVQUEUE pQueue)
264{
265 uint16_t const fAvail = vringReadAvailFlags(pDevIns, &pQueue->VRing);
266 LogFlow(("%s vqueueNotify: %s availFlags=%x guestFeatures=%x vqueue is %sempty\n", INSTANCE(pThis), pQueue->szName,
267 fAvail, pThis->uGuestFeatures, vqueueIsEmpty(pDevIns, pQueue)?"":"not "));
268 if ( !(fAvail & VRINGAVAIL_F_NO_INTERRUPT)
269 || ((pThis->uGuestFeatures & VPCI_F_NOTIFY_ON_EMPTY) && vqueueIsEmpty(pDevIns, pQueue)))
270 {
271 int rc = vpciRaiseInterrupt(pDevIns, pThis, VERR_INTERNAL_ERROR, VPCI_ISR_QUEUE);
272 if (RT_FAILURE(rc))
273 Log(("%s vqueueNotify: Failed to raise an interrupt (%Rrc).\n", INSTANCE(pThis), rc));
274 }
275 else
276 STAM_REL_COUNTER_INC(&pThis->StatIntsSkipped);
277
278}
279
280void vqueueSync(PPDMDEVINS pDevIns, PVPCISTATE pThis, PVQUEUE pQueue)
281{
282 Log2(("%s vqueueSync: %s old_used_idx=%u new_used_idx=%u\n", INSTANCE(pThis),
283 pQueue->szName, vringReadUsedIndex(pDevIns, &pQueue->VRing), pQueue->uNextUsedIndex));
284 vringWriteUsedIndex(pDevIns, &pQueue->VRing, pQueue->uNextUsedIndex);
285 vqueueNotify(pDevIns, pThis, pQueue);
286}
287
288
289/**
290 * Raise interrupt.
291 *
292 * @param pDevIns The device instance.
293 * @param pThis The shared virtio core instance data.
294 * @param rcBusy Status code to return when the critical section is busy.
295 * @param u8IntCause Interrupt cause bit mask to set in PCI ISR port.
296 */
297int vpciRaiseInterrupt(PPDMDEVINS pDevIns, PVPCISTATE pThis, int rcBusy, uint8_t u8IntCause)
298{
299 RT_NOREF_PV(rcBusy);
300 // int rc = vpciCsEnter(pThis, rcBusy);
301 // if (RT_UNLIKELY(rc != VINF_SUCCESS))
302 // return rc;
303
304 STAM_REL_COUNTER_INC(&pThis->StatIntsRaised);
305 LogFlow(("%s vpciRaiseInterrupt: u8IntCause=%x\n", INSTANCE(pThis), u8IntCause));
306
307 pThis->uISR |= u8IntCause;
308 PDMDevHlpPCISetIrq(pDevIns, 0, 1);
309 // vpciCsLeave(pThis);
310 return VINF_SUCCESS;
311}
312
313/**
314 * Lower interrupt.
315 *
316 * @param pDevIns The device instance.
317 * @param pThis The shared virtio core instance data.
318 */
319static void vpciLowerInterrupt(PPDMDEVINS pDevIns, PVPCISTATE pThis)
320{
321 LogFlow(("%s vpciLowerInterrupt\n", INSTANCE(pThis)));
322 RT_NOREF(pThis);
323 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
324}
325
326
327void vpciReset(PPDMDEVINS pDevIns, PVPCISTATE pThis)
328{
329 /* No interrupts should survive device reset, see @bugref(9556). */
330 if (pThis->uISR)
331 vpciLowerInterrupt(pDevIns, pThis);
332
333 pThis->uGuestFeatures = 0;
334 pThis->uQueueSelector = 0;
335 pThis->uStatus = 0;
336 pThis->uISR = 0;
337
338 for (unsigned i = 0; i < pThis->cQueues; i++)
339 vqueueReset(&pThis->Queues[i]);
340}
341
342
343DECLINLINE(uint32_t) vpciGetHostFeatures(PVPCISTATE pThis, PCVPCIIOCALLBACKS pCallbacks)
344{
345 return pCallbacks->pfnGetHostFeatures(pThis) | VPCI_F_NOTIFY_ON_EMPTY;
346}
347
/**
 * Port I/O Handler for IN operations.
 *
 * @returns VBox status code.
 *
 * @param pDevIns       The device instance.
 * @param pThis         The shared virtio core instance data.
 * @param offPort       The offset into the I/O range of the port being read.
 * @param pu32          Where to store the result.
 * @param cb            Number of bytes read.
 * @param pCallbacks    Pointer to the callbacks.
 * @thread EMT
 */
int vpciIOPortIn(PPDMDEVINS pDevIns,
                 PVPCISTATE pThis,
                 RTIOPORT offPort,
                 uint32_t *pu32,
                 unsigned cb,
                 PCVPCIIOCALLBACKS pCallbacks)
{
    STAM_PROFILE_ADV_START(&pThis->CTX_SUFF(StatIORead), a);

    /*
     * We probably do not need to enter critical section when reading registers
     * as the most of them are either constant or being changed during
     * initialization only, the exception being ISR which can be raced by all
     * threads but I see no big harm in it. It also happens to be the most read
     * register as it gets read in interrupt handler. By dropping cs protection
     * here we gain the ability to deliver RX packets to the guest while TX is
     * holding cs transmitting queued packets.
     *
    int rc = vpciCsEnter(pThis, VINF_IOM_R3_IOPORT_READ);
    if (RT_UNLIKELY(rc != VINF_SUCCESS))
    {
        STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF(StatIORead), a);
        return rc;
    }*/
    int rc = VINF_SUCCESS;

    switch (offPort)
    {
        case VPCI_HOST_FEATURES:
            /* Tell the guest what features we support. */
            ASSERT_GUEST_MSG(cb == 4, ("%d\n", cb));
            *pu32 = vpciGetHostFeatures(pThis, pCallbacks) | VPCI_F_BAD_FEATURE;
            break;

        case VPCI_GUEST_FEATURES:
            ASSERT_GUEST_MSG(cb == 4, ("%d\n", cb));
            *pu32 = pThis->uGuestFeatures;
            break;

        case VPCI_QUEUE_PFN:
            /* Page number of the currently selected queue's descriptor table. */
            ASSERT_GUEST_MSG(cb == 4, ("%d\n", cb));
            *pu32 = pThis->Queues[pThis->uQueueSelector].uPageNumber;
            break;

        case VPCI_QUEUE_NUM:
            ASSERT_GUEST_MSG(cb == 2, ("%d\n", cb));
            *pu32 = pThis->Queues[pThis->uQueueSelector].VRing.uSize;
            break;

        case VPCI_QUEUE_SEL:
            ASSERT_GUEST_MSG(cb == 2, ("%d\n", cb));
            *pu32 = pThis->uQueueSelector;
            break;

        case VPCI_STATUS:
            ASSERT_GUEST_MSG(cb == 1, ("%d\n", cb));
            *pu32 = pThis->uStatus;
            break;

        case VPCI_ISR:
            ASSERT_GUEST_MSG(cb == 1, ("%d\n", cb));
            *pu32 = pThis->uISR;
            pThis->uISR = 0; /* read clears all interrupts */
            vpciLowerInterrupt(pDevIns, pThis);
            break;

        default:
            /* Offsets at/above VPCI_CONFIG belong to the device-specific config space. */
            if (offPort >= VPCI_CONFIG)
                rc = pCallbacks->pfnGetConfig(pThis, offPort - VPCI_CONFIG, cb, pu32);
            else
            {
                *pu32 = UINT32_MAX;
                rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s vpciIOPortIn: no valid port at offset port=%RTiop cb=%08x\n",
                                       INSTANCE(pThis), offPort, cb);
            }
            break;
    }
    Log3(("%s vpciIOPortIn: At %RTiop in %0*x\n", INSTANCE(pThis), offPort, cb*2, *pu32));

    //vpciCsLeave(pThis);

    STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF(StatIORead), a);
    return rc;
}
445
446
/**
 * Port I/O Handler for OUT operations.
 *
 * @returns VBox status code.
 *
 * @param pDevIns       The device instance.
 * @param pThis         The shared virtio core instance data.
 * @param pThisCC       The current-context instance data (queue callbacks,
 *                      used in ring-3 only).
 * @param offPort       The offset into the I/O range of the port being written.
 * @param u32           The value to output.
 * @param cb            The value size in bytes.
 * @param pCallbacks    Pointer to the callbacks.
 * @thread EMT
 */
int vpciIOPortOut(PPDMDEVINS pDevIns,
                  PVPCISTATE pThis,
                  PVPCISTATECC pThisCC,
                  RTIOPORT offPort,
                  uint32_t u32,
                  unsigned cb,
                  PCVPCIIOCALLBACKS pCallbacks)
{
    STAM_PROFILE_ADV_START(&pThis->CTX_SUFF(StatIOWrite), a);
    int rc = VINF_SUCCESS;
    bool fHasBecomeReady;
#ifndef IN_RING3
    RT_NOREF_PV(pThisCC);
#endif

    Log3(("%s virtioIOPortOut: At offPort=%RTiop out %0*x\n", INSTANCE(pThis), offPort, cb*2, u32));

    switch (offPort)
    {
        case VPCI_GUEST_FEATURES:
        {
            const uint32_t fHostFeatures = vpciGetHostFeatures(pThis, pCallbacks);

            if (RT_LIKELY((u32 & ~fHostFeatures) == 0))
                pThis->uGuestFeatures = u32;
            else
            {
                /*
                 * Guest requests features we don't advertise. Stick
                 * to the minimum if negotiation looks completely
                 * botched, otherwise restrict to advertised features.
                 */
                if (u32 & VPCI_F_BAD_FEATURE)
                {
                    Log(("%s WARNING! Guest failed to negotiate properly (guest=%x)\n",
                         INSTANCE(pThis), u32));
                    pThis->uGuestFeatures = pCallbacks->pfnGetHostMinimalFeatures(pThis);
                }
                else
                {
                    Log(("%s Guest asked for features host does not support! (host=%x guest=%x)\n",
                         INSTANCE(pThis), fHostFeatures, u32));
                    pThis->uGuestFeatures = u32 & fHostFeatures;
                }
            }
            pCallbacks->pfnSetHostFeatures(pThis, pThis->uGuestFeatures);
            break;
        }

        case VPCI_QUEUE_PFN:
            /*
             * The guest is responsible for allocating the pages for queues,
             * here it provides us with the page number of descriptor table.
             * Note that we provide the size of the queue to the guest via
             * VIRTIO_PCI_QUEUE_NUM.
             */
            pThis->Queues[pThis->uQueueSelector].uPageNumber = u32;
            if (u32)
                vqueueInit(&pThis->Queues[pThis->uQueueSelector], u32);
            else
                rc = pCallbacks->pfnReset(pDevIns); /* Writing PFN 0 resets the device. */
            break;

        case VPCI_QUEUE_SEL:
            ASSERT_GUEST_MSG(cb == 2, ("cb=%u\n", cb));
            u32 &= 0xFFFF;
            if (u32 < pThis->cQueues)
                pThis->uQueueSelector = u32;
            else
                Log3(("%s vpciIOPortOut: Invalid queue selector %08x\n", INSTANCE(pThis), u32));
            break;

        case VPCI_QUEUE_NOTIFY:
#ifdef IN_RING3
            ASSERT_GUEST_MSG(cb == 2, ("cb=%u\n", cb));
            u32 &= 0xFFFF;
            if (u32 < pThis->cQueues)
            {
                /* Speculation barrier after validating the untrusted queue index. */
                RT_UNTRUSTED_VALIDATED_FENCE();
                if (pThis->Queues[u32].VRing.addrDescriptors)
                {

                    // rc = vpciCsEnter(pThis, VERR_SEM_BUSY);
                    // if (RT_LIKELY(rc == VINF_SUCCESS))
                    // {
                    pThisCC->Queues[u32].pfnCallback(pDevIns, &pThis->Queues[u32]);
                    // vpciCsLeave(pThis);
                    // }
                }
                else
                    Log(("%s The queue (#%d) being notified has not been initialized.\n",
                         INSTANCE(pThis), u32));
            }
            else
                Log(("%s Invalid queue number (%d)\n", INSTANCE(pThis), u32));
#else
            /* Queue callbacks are ring-3 only; bounce the write there. */
            rc = VINF_IOM_R3_IOPORT_WRITE;
#endif
            break;

        case VPCI_STATUS:
            ASSERT_GUEST_MSG(cb == 1, ("cb=%u\n", cb));
            u32 &= 0xFF;
            fHasBecomeReady = !(pThis->uStatus & VPCI_STATUS_DRV_OK) && (u32 & VPCI_STATUS_DRV_OK);
            pThis->uStatus = u32;
            /* Writing 0 to the status port triggers device reset. */
            if (u32 == 0)
                rc = pCallbacks->pfnReset(pDevIns);
            else if (fHasBecomeReady)
            {
                /* Older hypervisors were lax and did not enforce bus mastering. Older guests
                 * (Linux prior to 2.6.34, NetBSD 6.x) were lazy and did not enable bus mastering.
                 * We automagically enable bus mastering on driver initialization to make existing
                 * drivers work.
                 */
                PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
                PDMPciDevSetCommand(pPciDev, PDMPciDevGetCommand(pPciDev) | PCI_COMMAND_BUSMASTER);

                pCallbacks->pfnReady(pDevIns);
            }
            break;

        default:
            /* Offsets at/above VPCI_CONFIG belong to the device-specific config space. */
            if (offPort >= VPCI_CONFIG)
                rc = pCallbacks->pfnSetConfig(pThis, offPort - VPCI_CONFIG, cb, &u32);
            else
                rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s vpciIOPortOut: no valid port at offset offPort=%RTiop cb=%08x\n",
                                       INSTANCE(pThis), offPort, cb);
            break;
    }

    STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF(StatIOWrite), a);
    return rc;
}
594
595#ifdef IN_RING3
596
/**
 * Handles common IBase.pfnQueryInterface requests.
 *
 * @returns Pointer to the requested interface, or NULL if unsupported.
 * @param pThisCC   The ring-3 instance data owning the interfaces.
 * @param pszIID    The requested interface ID string.
 */
void *vpciR3QueryInterface(PVPCISTATECC pThisCC, const char *pszIID)
{
    PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThisCC->IBase);
    PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThisCC->ILeds);
    return NULL;
}
606
607/**
608 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
609 */
610static DECLCALLBACK(int) vpciR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
611{
612 PVPCISTATECC pThisCC = RT_FROM_MEMBER(pInterface, VPCISTATECC, ILeds);
613 if (iLUN == 0)
614 {
615 *ppLed = &pThisCC->pShared->led;
616 return VINF_SUCCESS;
617 }
618 return VERR_PDM_LUN_NOT_FOUND;
619}
620
621/**
622 * Turns on/off the write status LED.
623 *
624 * @returns VBox status code.
625 * @param pThis Pointer to the device state structure.
626 * @param fOn New LED state.
627 */
628void vpciR3SetWriteLed(PVPCISTATE pThis, bool fOn)
629{
630 LogFlow(("%s vpciR3SetWriteLed: %s\n", INSTANCE(pThis), fOn?"on":"off"));
631 if (fOn)
632 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
633 else
634 pThis->led.Actual.s.fWriting = fOn;
635}
636
637/**
638 * Turns on/off the read status LED.
639 *
640 * @returns VBox status code.
641 * @param pThis Pointer to the device state structure.
642 * @param fOn New LED state.
643 */
644void vpciR3SetReadLed(PVPCISTATE pThis, bool fOn)
645{
646 LogFlow(("%s vpciR3SetReadLed: %s\n", INSTANCE(pThis), fOn?"on":"off"));
647 if (fOn)
648 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
649 else
650 pThis->led.Actual.s.fReading = fOn;
651}
652
# if 0 /* unused */
/**
 * Sets a 32-bit register in the PCI configuration space (currently compiled
 * out; kept for reference).
 *
 * @param refPciDev The PCI device.
 * @param uOffset   The register offset.
 * @param u32Value  The value to store in the register.
 * @thread EMT
 */
DECLINLINE(void) vpciCfgSetU32(PDMPCIDEV& refPciDev, uint32_t uOffset, uint32_t u32Value)
{
    Assert(uOffset+sizeof(u32Value) <= sizeof(refPciDev.config));
    *(uint32_t*)&refPciDev.config[uOffset] = u32Value;
}
# endif /* unused */
667
668/**
669 * Dumps the state (useful for both logging and info items).
670 */
671void vpciR3DumpStateWorker(PVPCISTATE pThis, PCDBGFINFOHLP pHlp)
672{
673
674 pHlp->pfnPrintf(pHlp,
675 " uGuestFeatures = 0x%08x\n"
676 " uQueueSelector = 0x%04x\n"
677 " uStatus = 0x%02x\n"
678 " uISR = 0x%02x\n",
679 pThis->uGuestFeatures,
680 pThis->uQueueSelector,
681 pThis->uStatus,
682 pThis->uISR);
683
684 for (unsigned i = 0; i < pThis->cQueues; i++)
685 pHlp->pfnPrintf(pHlp,
686 " %s queue:\n"
687 " VRing.uSize = %u\n"
688 " VRing.addrDescriptors = %p\n"
689 " VRing.addrAvail = %p\n"
690 " VRing.addrUsed = %p\n"
691 " uNextAvailIndex = %u\n"
692 " uNextUsedIndex = %u\n"
693 " uPageNumber = %x\n",
694 pThis->Queues[i].szName,
695 pThis->Queues[i].VRing.uSize,
696 pThis->Queues[i].VRing.addrDescriptors,
697 pThis->Queues[i].VRing.addrAvail,
698 pThis->Queues[i].VRing.addrUsed,
699 pThis->Queues[i].uNextAvailIndex,
700 pThis->Queues[i].uNextUsedIndex,
701 pThis->Queues[i].uPageNumber);
702}
703
# ifdef LOG_ENABLED
/**
 * Dumps the core state to the log at level 2; compiles to a no-op when
 * logging is disabled (see the #else branch).
 *
 * @param pDevIns       The device instance.
 * @param pThis         The shared virtio core instance data.
 * @param pcszCaller    Name of the calling function, included in the log line.
 */
void vpciR3DumpState(PPDMDEVINS pDevIns, PVPCISTATE pThis, const char *pcszCaller)
{
    if (LogIs2Enabled())
    {
        Log2(("vpciR3DumpState: (called from %s)\n", pcszCaller));
        vpciR3DumpStateWorker(pThis, PDMDevHlpDBGFInfoLogHlp(pDevIns));
    }
}
# else
# define vpciR3DumpState(d, x, s) do { } while (0)
# endif
716
/**
 * Saves the core virtio state.
 *
 * Note: field order here is the saved-state wire format and must match
 * vpciR3LoadExec exactly.
 *
 * @returns VBox status code.
 * @param pDevIns   The device instance data.
 * @param pHlp      The device helpers.
 * @param pThis     The shared virtio core instance data.
 * @param pSSM      The handle to the saved state.
 */
int vpciR3SaveExec(PPDMDEVINS pDevIns, PCPDMDEVHLPR3 pHlp, PVPCISTATE pThis, PSSMHANDLE pSSM)
{
    vpciR3DumpState(pDevIns, pThis, "vpciR3SaveExec"); RT_NOREF(pDevIns);

    /* Core registers. */
    pHlp->pfnSSMPutU32(pSSM, pThis->uGuestFeatures);
    pHlp->pfnSSMPutU16(pSSM, pThis->uQueueSelector);
    pHlp->pfnSSMPutU8( pSSM, pThis->uStatus);
    pHlp->pfnSSMPutU8( pSSM, pThis->uISR);

    /* Save queue states */
    int rc = pHlp->pfnSSMPutU32(pSSM, pThis->cQueues);
    AssertRCReturn(rc, rc);
    for (unsigned i = 0; i < pThis->cQueues; i++)
    {
        pHlp->pfnSSMPutU16(pSSM, pThis->Queues[i].VRing.uSize);
        pHlp->pfnSSMPutU32(pSSM, pThis->Queues[i].uPageNumber);
        pHlp->pfnSSMPutU16(pSSM, pThis->Queues[i].uNextAvailIndex);
        rc = pHlp->pfnSSMPutU16(pSSM, pThis->Queues[i].uNextUsedIndex);
        AssertRCReturn(rc, rc);
    }

    return VINF_SUCCESS;
}
749
/**
 * Loads a saved device state.
 *
 * Note: field order here is the saved-state wire format and must match
 * vpciR3SaveExec exactly.
 *
 * @returns VBox status code.
 * @param pDevIns   The device instance data.
 * @param pHlp      The device helpers.
 * @param pThis     The shared virtio core instance data.
 * @param pSSM      The handle to the saved state.
 * @param uVersion  The data unit version number.
 * @param uPass     The data pass.
 * @param cQueues   The default queue count (for old states).
 */
int vpciR3LoadExec(PPDMDEVINS pDevIns, PCPDMDEVHLPR3 pHlp, PVPCISTATE pThis, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass, uint32_t cQueues)
{
    int rc;

    if (uPass == SSM_PASS_FINAL)
    {
        /* Restore state data */
        pHlp->pfnSSMGetU32(pSSM, &pThis->uGuestFeatures);
        pHlp->pfnSSMGetU16(pSSM, &pThis->uQueueSelector);
        pHlp->pfnSSMGetU8( pSSM, &pThis->uStatus);
        pHlp->pfnSSMGetU8( pSSM, &pThis->uISR);

        /* Restore queues */
        if (uVersion > VIRTIO_SAVEDSTATE_VERSION_3_1_BETA1)
        {
            rc = pHlp->pfnSSMGetU32(pSSM, &pThis->cQueues);
            AssertRCReturn(rc, rc);
        }
        else
            pThis->cQueues = cQueues; /* Old states did not store the queue count. */
        /* Validate loaded (untrusted) values before using them as array bounds. */
        AssertLogRelMsgReturn(pThis->cQueues <= VIRTIO_MAX_NQUEUES, ("%#x\n", pThis->cQueues), VERR_SSM_LOAD_CONFIG_MISMATCH);
        /* NOTE(review): the second clause accepts cQueues==0 only with a non-zero
         * selector, which looks inverted (one would expect !uQueueSelector) —
         * confirm against upstream intent before changing. */
        AssertLogRelMsgReturn(pThis->uQueueSelector < pThis->cQueues || (pThis->cQueues == 0 && pThis->uQueueSelector),
                              ("uQueueSelector=%u cQueues=%u\n", pThis->uQueueSelector, pThis->cQueues),
                              VERR_SSM_LOAD_CONFIG_MISMATCH);

        for (unsigned i = 0; i < pThis->cQueues; i++)
        {
            rc = pHlp->pfnSSMGetU16(pSSM, &pThis->Queues[i].VRing.uSize);
            AssertRCReturn(rc, rc);
            rc = pHlp->pfnSSMGetU32(pSSM, &pThis->Queues[i].uPageNumber);
            AssertRCReturn(rc, rc);

            /* Recompute the ring layout from the restored page number. */
            if (pThis->Queues[i].uPageNumber)
                vqueueInit(&pThis->Queues[i], pThis->Queues[i].uPageNumber);

            rc = pHlp->pfnSSMGetU16(pSSM, &pThis->Queues[i].uNextAvailIndex);
            AssertRCReturn(rc, rc);
            rc = pHlp->pfnSSMGetU16(pSSM, &pThis->Queues[i].uNextUsedIndex);
            AssertRCReturn(rc, rc);
        }
    }

    vpciR3DumpState(pDevIns, pThis, "vpciLoadExec"); RT_NOREF(pDevIns);

    return VINF_SUCCESS;
}
808
809PVQUEUE vpciR3AddQueue(PVPCISTATE pThis, PVPCISTATECC pThisCC, unsigned uSize,
810 PFNVPCIQUEUECALLBACK pfnCallback, const char *pcszName)
811{
812 /* Find an empty queue slot */
813 for (unsigned i = 0; i < pThis->cQueues; i++)
814 {
815 if (pThis->Queues[i].VRing.uSize == 0)
816 {
817 PVQUEUE pQueue = &pThis->Queues[i];
818 pQueue->VRing.uSize = uSize;
819 pQueue->VRing.addrDescriptors = 0;
820 pQueue->uPageNumber = 0;
821 int rc = RTStrCopy(pQueue->szName, sizeof(pQueue->szName), pcszName);
822 AssertRC(rc);
823 pThisCC->Queues[i].pfnCallback = pfnCallback;
824 return pQueue;
825 }
826 }
827 AssertMsgFailedReturn(("%s Too many queues being added, no empty slots available!\n", INSTANCE(pThis)), NULL);
828}
829
/**
 * Destruct PCI-related part of device.
 *
 * We need to free non-VM resources only.
 *
 * @returns VBox status code.
 * @param pDevIns   The device instance.
 * @param pThis     The shared virtio core instance data.
 */
int vpciR3Term(PPDMDEVINS pDevIns, PVPCISTATE pThis)
{
    Log(("%s Destroying PCI instance\n", INSTANCE(pThis)));

    /* Only delete the critical section if init actually got far enough to create it. */
    if (PDMDevHlpCritSectIsInitialized(pDevIns, &pThis->cs))
        PDMDevHlpCritSectDelete(pDevIns, &pThis->cs);

    return VINF_SUCCESS;
}
847
/**
 * Set PCI configuration space registers.
 *
 * @param pPciDev       Pointer to the PCI device structure.
 * @param uDeviceId     VirtiO Device Id
 * @param uClass        Class of PCI device (network, etc)
 * @thread EMT
 */
static void vpciConfigure(PPDMPCIDEV pPciDev, uint16_t uDeviceId, uint16_t uClass)
{
    /* Configure PCI Device, assume 32-bit mode ******************************/
    PDMPciDevSetVendorId(pPciDev, DEVICE_PCI_VENDOR_ID);
    PDMPciDevSetDeviceId(pPciDev, DEVICE_PCI_BASE_ID + uDeviceId);
    PDMPciDevSetWord(pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, DEVICE_PCI_SUBSYSTEM_VENDOR_ID);
    PDMPciDevSetWord(pPciDev, VBOX_PCI_SUBSYSTEM_ID, DEVICE_PCI_SUBSYSTEM_BASE_ID + uDeviceId);

    /* ABI version, must be equal 0 as of 2.6.30 kernel. */
    PDMPciDevSetByte(pPciDev, VBOX_PCI_REVISION_ID, 0x00);
    /* Programming interface; class code comes from the caller's uClass. */
    PDMPciDevSetByte(pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
    PDMPciDevSetWord(pPciDev, VBOX_PCI_CLASS_DEVICE, uClass);
    /* Interrupt Pin: INTA# */
    PDMPciDevSetByte(pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);

# ifdef VBOX_WITH_MSI_DEVICES
    /* Capability list starts at 0x80 when MSI support is compiled in. */
    PDMPciDevSetCapabilityList(pPciDev, 0x80);
    PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST);
# endif
}
877
/**
 * Ring-3 initialization of the common virtio PCI state: data members, the
 * critical section, the PCI device registration, the optional status LUN
 * driver, and statistics.
 *
 * @returns VBox status code.
 * @param pDevIns   The device instance.
 * @param pThis     The shared virtio core instance data (szInstance and
 *                  IBase.pfnQueryInterface must already be set by the caller).
 * @param pThisCC   The ring-3 instance data.
 * @param uDeviceId VirtiO device id used for PCI identification.
 * @param uClass    PCI class code of the device.
 * @param cQueues   Number of queue slots to expose.
 */
int vpciR3Init(PPDMDEVINS pDevIns, PVPCISTATE pThis, PVPCISTATECC pThisCC, uint16_t uDeviceId, uint16_t uClass, uint32_t cQueues)
{
    /* Init data members. */
    pThis->cQueues = cQueues;
    pThis->led.u32Magic = PDMLED_MAGIC;
    pThisCC->pShared = pThis;
    pThisCC->ILeds.pfnQueryStatusLed = vpciR3QueryStatusLed;
    AssertReturn(pThisCC->IBase.pfnQueryInterface, VERR_INVALID_POINTER);
    AssertReturn(pThis->szInstance[0], VERR_INVALID_PARAMETER);
    AssertReturn(strlen(pThis->szInstance) < sizeof(pThis->szInstance), VERR_INVALID_PARAMETER);

    /* Initialize critical section. */
    int rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "%s", pThis->szInstance);
    AssertRCReturn(rc, rc);

    /*
     * Set up the PCI device.
     */
    PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
    PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);

    /* Set PCI config registers */
    vpciConfigure(pPciDev, uDeviceId, uClass);

    /* Register PCI device */
    rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
    AssertRCReturn(rc, rc);

# ifdef VBOX_WITH_MSI_DEVICES
# if 0
    {
        PDMMSIREG aMsiReg;

        RT_ZERO(aMsiReg);
        aMsiReg.cMsixVectors = 1;
        aMsiReg.iMsixCapOffset = 0x80;
        aMsiReg.iMsixNextOffset = 0x0;
        aMsiReg.iMsixBar = 0;
        rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg);
        if (RT_FAILURE (rc))
            PCIDevSetCapabilityList(&pThis->pciDevice, 0x0);
    }
# endif
# endif

    /*
     * Attach the status driver (optional).
     */
    PPDMIBASE pBase;
    rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThisCC->IBase, &pBase, "Status Port");
    if (RT_SUCCESS(rc))
        pThisCC->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
    else if (rc != VERR_PDM_NO_ATTACHED_DRIVER) /* no driver attached is fine */
        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));

    /*
     * Statistics.
     */
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, "Interrupts/Raised", STAMUNIT_OCCURENCES, "Number of raised interrupts");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntsSkipped, STAMTYPE_COUNTER, "Interrupts/Skipped", STAMUNIT_OCCURENCES, "Number of skipped interrupts");
# ifdef VBOX_WITH_STATISTICS
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, "IO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadR0, STAMTYPE_PROFILE, "IO/ReadR0", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R0");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadRC, STAMTYPE_PROFILE, "IO/ReadRC", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RC");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, "IO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteR0, STAMTYPE_PROFILE, "IO/WriteR0", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R0");
    PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteRC, STAMTYPE_PROFILE, "IO/WriteRC", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RC");
# endif /* VBOX_WITH_STATISTICS */

    return VINF_SUCCESS;
}
949
950#else /* !IN_RING3 */
951
952/**
953 * Does ring-0/raw-mode initialization.
954 */
955int vpciRZInit(PPDMDEVINS pDevIns, PVPCISTATE pThis, PVPCISTATECC pThisCC)
956{
957 RT_NOREF(pDevIns, pThis, pThisCC);
958 return VINF_SUCCESS;
959}
960
961#endif /* !IN_RING3 */
962
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette