source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@ 71136

Last change on this file since 71136 was 71098, checked in by vboxsync, 7 years ago

Dev/E1000: (bugref:9113) PXE boot fix: SLU write sets STATUS.LU unless cable is disconnected or LU timer is active.

1/* $Id: DevE1000.cpp 71098 2018-02-22 10:06:25Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2017 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** @name E1000 Build Options
54 * @{ */
55/** @def E1K_INIT_RA0
56 * E1K_INIT_RA0 forces E1000 to set the first entry in Receive Address filter
57 * table to the MAC address obtained from CFGM. Most guests read the MAC address
58 * from the EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on it
59 * being already set (see @bugref{4657}).
60 */
61#define E1K_INIT_RA0
62/** @def E1K_LSC_ON_RESET
63 * E1K_LSC_ON_RESET causes e1000 to generate a Link Status Change
64 * interrupt after hard reset. This makes the E1K_LSC_ON_SLU option unnecessary.
65 * With the cable unplugged, LSC is triggered for the 82543GC only.
66 */
67#define E1K_LSC_ON_RESET
68/** @def E1K_LSC_ON_SLU
69 * E1K_LSC_ON_SLU causes E1000 to generate Link Status Change interrupt when
70 * the guest driver brings up the link via STATUS.LU bit. Again the only guest
71 * that requires it is Mac OS X (see @bugref{4657}).
72 */
73//#define E1K_LSC_ON_SLU
74/** @def E1K_INIT_LINKUP_DELAY
75 * E1K_INIT_LINKUP_DELAY prevents the link from going up while the driver is
76 * still initializing (see @bugref{8624}).
77 */
78#define E1K_INIT_LINKUP_DELAY_US (2000 * 1000)
79/** @def E1K_IMS_INT_DELAY_NS
80 * E1K_IMS_INT_DELAY_NS prevents interrupt storms in Windows guests on enabling
81 * interrupts (see @bugref{8624}).
82 */
83#define E1K_IMS_INT_DELAY_NS 100
84/** @def E1K_TX_DELAY
85 * E1K_TX_DELAY aims to improve the guest-to-host transfer rate for TCP streams by
86 * preventing packets from being sent immediately. It allows several packets to be
87 * sent in a batch, reducing the number of acknowledgments. Note that it
88 * effectively disables the R0 TX path, forcing sending in R3.
89 */
90//#define E1K_TX_DELAY 150
91/** @def E1K_USE_TX_TIMERS
92 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
93 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
94 * register. Enabling it showed no positive effects on existing guests so it
95 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
96 * Ethernet Controllers Software Developer’s Manual" for more detailed
97 * explanation.
98 */
99//#define E1K_USE_TX_TIMERS
100/** @def E1K_NO_TAD
101 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
102 * Transmit Absolute Delay time. This timer sets the maximum time interval
103 * during which TX interrupts can be postponed (delayed). It has no effect
104 * if E1K_USE_TX_TIMERS is not defined.
105 */
106//#define E1K_NO_TAD
107/** @def E1K_REL_DEBUG
108 * E1K_REL_DEBUG enables debug logging of l1, l2, l3 in release builds.
109 */
110//#define E1K_REL_DEBUG
111/** @def E1K_INT_STATS
112 * E1K_INT_STATS enables collection of internal statistics used for
113 * debugging of delayed interrupts, etc.
114 */
115#define E1K_INT_STATS
116/** @def E1K_WITH_MSI
117 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
118 */
119//#define E1K_WITH_MSI
120/** @def E1K_WITH_TX_CS
121 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
122 */
123#define E1K_WITH_TX_CS
124/** @def E1K_WITH_TXD_CACHE
125 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
126 * single physical memory read (or two if it wraps around the end of TX
127 * descriptor ring). It is required for proper functioning of bandwidth
128 * resource control as it allows computing the exact sizes of packets prior
129 * to allocating their buffers (see @bugref{5582}).
130 */
131#define E1K_WITH_TXD_CACHE
132/** @def E1K_WITH_RXD_CACHE
133 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
134 * single physical memory read (or two if it wraps around the end of RX
135 * descriptor ring). Intel's packet driver for DOS needs this option in
136 * order to work properly (see @bugref{6217}).
137 */
138#define E1K_WITH_RXD_CACHE
139/** @def E1K_WITH_PREREG_MMIO
140 * E1K_WITH_PREREG_MMIO enables a new style MMIO registration and is
141 * currently only done for testing the related PDM, IOM and PGM code. */
142//#define E1K_WITH_PREREG_MMIO
143/* @} */
144/* End of Options ************************************************************/
145
146#ifdef E1K_WITH_TXD_CACHE
147/**
148 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
149 * in the state structure. It limits the number of descriptors loaded in one
150 * batch read. For example, a Linux guest may use up to 20 descriptors per
151 * TSE packet. The largest TSE packet seen (Windows guest) was 45 descriptors.
152 */
153# define E1K_TXD_CACHE_SIZE 64u
154#endif /* E1K_WITH_TXD_CACHE */
155
156#ifdef E1K_WITH_RXD_CACHE
157/**
158 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
159 * in the state structure. It limits the number of descriptors loaded in one
160 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
161 */
162# define E1K_RXD_CACHE_SIZE 16u
163#endif /* E1K_WITH_RXD_CACHE */
164
165
166/* Little helpers ************************************************************/
167#undef htons
168#undef ntohs
169#undef htonl
170#undef ntohl
171#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
172#define ntohs(x) htons(x)
173#define htonl(x) ASMByteSwapU32(x)
174#define ntohl(x) htonl(x)
175
176#ifndef DEBUG
177# ifdef E1K_REL_DEBUG
178# define DEBUG
179# define E1kLog(a) LogRel(a)
180# define E1kLog2(a) LogRel(a)
181# define E1kLog3(a) LogRel(a)
182# define E1kLogX(x, a) LogRel(a)
183//# define E1kLog3(a) do {} while (0)
184# else
185# define E1kLog(a) do {} while (0)
186# define E1kLog2(a) do {} while (0)
187# define E1kLog3(a) do {} while (0)
188# define E1kLogX(x, a) do {} while (0)
189# endif
190#else
191# define E1kLog(a) Log(a)
192# define E1kLog2(a) Log2(a)
193# define E1kLog3(a) Log3(a)
194# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
195//# define E1kLog(a) do {} while (0)
196//# define E1kLog2(a) do {} while (0)
197//# define E1kLog3(a) do {} while (0)
198#endif
199
200#if 0
201# define LOG_ENABLED
202# define E1kLogRel(a) LogRel(a)
203# undef Log6
204# define Log6(a) LogRel(a)
205#else
206# define E1kLogRel(a) do { } while (0)
207#endif
208
209//#undef DEBUG
210
211#define STATE_TO_DEVINS(pThis) (((PE1KSTATE )pThis)->CTX_SUFF(pDevIns))
212#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
213
214#define E1K_INC_CNT32(cnt) \
215do { \
216 if (cnt < UINT32_MAX) \
217 cnt++; \
218} while (0)
219
220#define E1K_ADD_CNT64(cntLo, cntHi, val) \
221do { \
222 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
223 uint64_t tmp = u64Cnt; \
224 u64Cnt += val; \
225 if (tmp > u64Cnt ) \
226 u64Cnt = UINT64_MAX; \
227 cntLo = (uint32_t)u64Cnt; \
228 cntHi = (uint32_t)(u64Cnt >> 32); \
229} while (0)
230
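/*
 * Editor's note: a minimal usage sketch for the saturating counter helpers above,
 * e.g. to account a received frame in the GORCL/GORCH octet pair and the GPRC
 * packet counter (register aliases defined further down). Illustrative only;
 * 'cbFrame' is a hypothetical local holding the frame size in bytes.
 */
#if 0
    E1K_ADD_CNT64(GORCL, GORCH, cbFrame); /* 64-bit add split over two 32-bit registers, saturates at UINT64_MAX */
    E1K_INC_CNT32(GPRC);                  /* 32-bit increment, saturates at UINT32_MAX instead of wrapping */
#endif
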
231#ifdef E1K_INT_STATS
232# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
233#else /* E1K_INT_STATS */
234# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
235#endif /* E1K_INT_STATS */
236
237
238/*****************************************************************************/
239
240typedef uint32_t E1KCHIP;
241#define E1K_CHIP_82540EM 0
242#define E1K_CHIP_82543GC 1
243#define E1K_CHIP_82545EM 2
244
245#ifdef IN_RING3
246/** Different E1000 chips. */
247static const struct E1kChips
248{
249 uint16_t uPCIVendorId;
250 uint16_t uPCIDeviceId;
251 uint16_t uPCISubsystemVendorId;
252 uint16_t uPCISubsystemId;
253 const char *pcszName;
254} g_aChips[] =
255{
256 /* Vendor Device SSVendor SubSys Name */
257 { 0x8086,
258      /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
259# ifdef E1K_WITH_MSI
260 0x105E,
261# else
262 0x100E,
263# endif
264 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
265 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
266 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
267};
268#endif /* IN_RING3 */
269
270
271/* The size of register area mapped to I/O space */
272#define E1K_IOPORT_SIZE 0x8
273/* The size of memory-mapped register area */
274#define E1K_MM_SIZE 0x20000
275
276#define E1K_MAX_TX_PKT_SIZE 16288
277#define E1K_MAX_RX_PKT_SIZE 16384
278
279/*****************************************************************************/
280
281/** Gets the specified bits from the register. */
282#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
283#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
284#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
285#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
286#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
287
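/*
 * Editor's note: a short usage sketch for the field accessors above, assuming the
 * RCTL register alias and the RCTL_BSIZE_MASK/SHIFT constants defined below. Token
 * pasting turns GET_BITS(RCTL, BSIZE) into (RCTL & RCTL_BSIZE_MASK) >> RCTL_BSIZE_SHIFT.
 * Illustrative only.
 */
#if 0
    uint32_t uBSize = GET_BITS(RCTL, BSIZE); /* extract the receive buffer size field */
    SET_BITS(RCTL, BSIZE, 1);                /* replace the BSIZE field with the value 1 */
#endif
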
288#define CTRL_SLU UINT32_C(0x00000040)
289#define CTRL_MDIO UINT32_C(0x00100000)
290#define CTRL_MDC UINT32_C(0x00200000)
291#define CTRL_MDIO_DIR UINT32_C(0x01000000)
292#define CTRL_MDC_DIR UINT32_C(0x02000000)
293#define CTRL_RESET UINT32_C(0x04000000)
294#define CTRL_VME UINT32_C(0x40000000)
295
296#define STATUS_LU UINT32_C(0x00000002)
297#define STATUS_TXOFF UINT32_C(0x00000010)
298
299#define EECD_EE_WIRES UINT32_C(0x0F)
300#define EECD_EE_REQ UINT32_C(0x40)
301#define EECD_EE_GNT UINT32_C(0x80)
302
303#define EERD_START UINT32_C(0x00000001)
304#define EERD_DONE UINT32_C(0x00000010)
305#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
306#define EERD_DATA_SHIFT 16
307#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
308#define EERD_ADDR_SHIFT 8
309
310#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
311#define MDIC_DATA_SHIFT 0
312#define MDIC_REG_MASK UINT32_C(0x001F0000)
313#define MDIC_REG_SHIFT 16
314#define MDIC_PHY_MASK UINT32_C(0x03E00000)
315#define MDIC_PHY_SHIFT 21
316#define MDIC_OP_WRITE UINT32_C(0x04000000)
317#define MDIC_OP_READ UINT32_C(0x08000000)
318#define MDIC_READY UINT32_C(0x10000000)
319#define MDIC_INT_EN UINT32_C(0x20000000)
320#define MDIC_ERROR UINT32_C(0x40000000)
321
322#define TCTL_EN UINT32_C(0x00000002)
323#define TCTL_PSP UINT32_C(0x00000008)
324
325#define RCTL_EN UINT32_C(0x00000002)
326#define RCTL_UPE UINT32_C(0x00000008)
327#define RCTL_MPE UINT32_C(0x00000010)
328#define RCTL_LPE UINT32_C(0x00000020)
329#define RCTL_LBM_MASK UINT32_C(0x000000C0)
330#define RCTL_LBM_SHIFT 6
331#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
332#define RCTL_RDMTS_SHIFT 8
333#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
334#define RCTL_MO_MASK UINT32_C(0x00003000)
335#define RCTL_MO_SHIFT 12
336#define RCTL_BAM UINT32_C(0x00008000)
337#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
338#define RCTL_BSIZE_SHIFT 16
339#define RCTL_VFE UINT32_C(0x00040000)
340#define RCTL_CFIEN UINT32_C(0x00080000)
341#define RCTL_CFI UINT32_C(0x00100000)
342#define RCTL_BSEX UINT32_C(0x02000000)
343#define RCTL_SECRC UINT32_C(0x04000000)
344
345#define ICR_TXDW UINT32_C(0x00000001)
346#define ICR_TXQE UINT32_C(0x00000002)
347#define ICR_LSC UINT32_C(0x00000004)
348#define ICR_RXDMT0 UINT32_C(0x00000010)
349#define ICR_RXT0 UINT32_C(0x00000080)
350#define ICR_TXD_LOW UINT32_C(0x00008000)
351#define RDTR_FPD UINT32_C(0x80000000)
352
353#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
354typedef struct
355{
356 unsigned rxa : 7;
357 unsigned rxa_r : 9;
358 unsigned txa : 16;
359} PBAST;
360AssertCompileSize(PBAST, 4);
361
362#define TXDCTL_WTHRESH_MASK 0x003F0000
363#define TXDCTL_WTHRESH_SHIFT 16
364#define TXDCTL_LWTHRESH_MASK 0xFE000000
365#define TXDCTL_LWTHRESH_SHIFT 25
366
367#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
368#define RXCSUM_PCSS_SHIFT 0
369
370/** @name Register access macros
371 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
372 * @{ */
373#define CTRL pThis->auRegs[CTRL_IDX]
374#define STATUS pThis->auRegs[STATUS_IDX]
375#define EECD pThis->auRegs[EECD_IDX]
376#define EERD pThis->auRegs[EERD_IDX]
377#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
378#define FLA pThis->auRegs[FLA_IDX]
379#define MDIC pThis->auRegs[MDIC_IDX]
380#define FCAL pThis->auRegs[FCAL_IDX]
381#define FCAH pThis->auRegs[FCAH_IDX]
382#define FCT pThis->auRegs[FCT_IDX]
383#define VET pThis->auRegs[VET_IDX]
384#define ICR pThis->auRegs[ICR_IDX]
385#define ITR pThis->auRegs[ITR_IDX]
386#define ICS pThis->auRegs[ICS_IDX]
387#define IMS pThis->auRegs[IMS_IDX]
388#define IMC pThis->auRegs[IMC_IDX]
389#define RCTL pThis->auRegs[RCTL_IDX]
390#define FCTTV pThis->auRegs[FCTTV_IDX]
391#define TXCW pThis->auRegs[TXCW_IDX]
392#define RXCW pThis->auRegs[RXCW_IDX]
393#define TCTL pThis->auRegs[TCTL_IDX]
394#define TIPG pThis->auRegs[TIPG_IDX]
395#define AIFS pThis->auRegs[AIFS_IDX]
396#define LEDCTL pThis->auRegs[LEDCTL_IDX]
397#define PBA pThis->auRegs[PBA_IDX]
398#define FCRTL pThis->auRegs[FCRTL_IDX]
399#define FCRTH pThis->auRegs[FCRTH_IDX]
400#define RDFH pThis->auRegs[RDFH_IDX]
401#define RDFT pThis->auRegs[RDFT_IDX]
402#define RDFHS pThis->auRegs[RDFHS_IDX]
403#define RDFTS pThis->auRegs[RDFTS_IDX]
404#define RDFPC pThis->auRegs[RDFPC_IDX]
405#define RDBAL pThis->auRegs[RDBAL_IDX]
406#define RDBAH pThis->auRegs[RDBAH_IDX]
407#define RDLEN pThis->auRegs[RDLEN_IDX]
408#define RDH pThis->auRegs[RDH_IDX]
409#define RDT pThis->auRegs[RDT_IDX]
410#define RDTR pThis->auRegs[RDTR_IDX]
411#define RXDCTL pThis->auRegs[RXDCTL_IDX]
412#define RADV pThis->auRegs[RADV_IDX]
413#define RSRPD pThis->auRegs[RSRPD_IDX]
414#define TXDMAC pThis->auRegs[TXDMAC_IDX]
415#define TDFH pThis->auRegs[TDFH_IDX]
416#define TDFT pThis->auRegs[TDFT_IDX]
417#define TDFHS pThis->auRegs[TDFHS_IDX]
418#define TDFTS pThis->auRegs[TDFTS_IDX]
419#define TDFPC pThis->auRegs[TDFPC_IDX]
420#define TDBAL pThis->auRegs[TDBAL_IDX]
421#define TDBAH pThis->auRegs[TDBAH_IDX]
422#define TDLEN pThis->auRegs[TDLEN_IDX]
423#define TDH pThis->auRegs[TDH_IDX]
424#define TDT pThis->auRegs[TDT_IDX]
425#define TIDV pThis->auRegs[TIDV_IDX]
426#define TXDCTL pThis->auRegs[TXDCTL_IDX]
427#define TADV pThis->auRegs[TADV_IDX]
428#define TSPMT pThis->auRegs[TSPMT_IDX]
429#define CRCERRS pThis->auRegs[CRCERRS_IDX]
430#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
431#define SYMERRS pThis->auRegs[SYMERRS_IDX]
432#define RXERRC pThis->auRegs[RXERRC_IDX]
433#define MPC pThis->auRegs[MPC_IDX]
434#define SCC pThis->auRegs[SCC_IDX]
435#define ECOL pThis->auRegs[ECOL_IDX]
436#define MCC pThis->auRegs[MCC_IDX]
437#define LATECOL pThis->auRegs[LATECOL_IDX]
438#define COLC pThis->auRegs[COLC_IDX]
439#define DC pThis->auRegs[DC_IDX]
440#define TNCRS pThis->auRegs[TNCRS_IDX]
441/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
442#define CEXTERR pThis->auRegs[CEXTERR_IDX]
443#define RLEC pThis->auRegs[RLEC_IDX]
444#define XONRXC pThis->auRegs[XONRXC_IDX]
445#define XONTXC pThis->auRegs[XONTXC_IDX]
446#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
447#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
448#define FCRUC pThis->auRegs[FCRUC_IDX]
449#define PRC64 pThis->auRegs[PRC64_IDX]
450#define PRC127 pThis->auRegs[PRC127_IDX]
451#define PRC255 pThis->auRegs[PRC255_IDX]
452#define PRC511 pThis->auRegs[PRC511_IDX]
453#define PRC1023 pThis->auRegs[PRC1023_IDX]
454#define PRC1522 pThis->auRegs[PRC1522_IDX]
455#define GPRC pThis->auRegs[GPRC_IDX]
456#define BPRC pThis->auRegs[BPRC_IDX]
457#define MPRC pThis->auRegs[MPRC_IDX]
458#define GPTC pThis->auRegs[GPTC_IDX]
459#define GORCL pThis->auRegs[GORCL_IDX]
460#define GORCH pThis->auRegs[GORCH_IDX]
461#define GOTCL pThis->auRegs[GOTCL_IDX]
462#define GOTCH pThis->auRegs[GOTCH_IDX]
463#define RNBC pThis->auRegs[RNBC_IDX]
464#define RUC pThis->auRegs[RUC_IDX]
465#define RFC pThis->auRegs[RFC_IDX]
466#define ROC pThis->auRegs[ROC_IDX]
467#define RJC pThis->auRegs[RJC_IDX]
468#define MGTPRC pThis->auRegs[MGTPRC_IDX]
469#define MGTPDC pThis->auRegs[MGTPDC_IDX]
470#define MGTPTC pThis->auRegs[MGTPTC_IDX]
471#define TORL pThis->auRegs[TORL_IDX]
472#define TORH pThis->auRegs[TORH_IDX]
473#define TOTL pThis->auRegs[TOTL_IDX]
474#define TOTH pThis->auRegs[TOTH_IDX]
475#define TPR pThis->auRegs[TPR_IDX]
476#define TPT pThis->auRegs[TPT_IDX]
477#define PTC64 pThis->auRegs[PTC64_IDX]
478#define PTC127 pThis->auRegs[PTC127_IDX]
479#define PTC255 pThis->auRegs[PTC255_IDX]
480#define PTC511 pThis->auRegs[PTC511_IDX]
481#define PTC1023 pThis->auRegs[PTC1023_IDX]
482#define PTC1522 pThis->auRegs[PTC1522_IDX]
483#define MPTC pThis->auRegs[MPTC_IDX]
484#define BPTC pThis->auRegs[BPTC_IDX]
485#define TSCTC pThis->auRegs[TSCTC_IDX]
486#define TSCTFC pThis->auRegs[TSCTFC_IDX]
487#define RXCSUM pThis->auRegs[RXCSUM_IDX]
488#define WUC pThis->auRegs[WUC_IDX]
489#define WUFC pThis->auRegs[WUFC_IDX]
490#define WUS pThis->auRegs[WUS_IDX]
491#define MANC pThis->auRegs[MANC_IDX]
492#define IPAV pThis->auRegs[IPAV_IDX]
493#define WUPL pThis->auRegs[WUPL_IDX]
494/** @} */
495
496/**
497 * Indices of memory-mapped registers in register table.
498 */
499typedef enum
500{
501 CTRL_IDX,
502 STATUS_IDX,
503 EECD_IDX,
504 EERD_IDX,
505 CTRL_EXT_IDX,
506 FLA_IDX,
507 MDIC_IDX,
508 FCAL_IDX,
509 FCAH_IDX,
510 FCT_IDX,
511 VET_IDX,
512 ICR_IDX,
513 ITR_IDX,
514 ICS_IDX,
515 IMS_IDX,
516 IMC_IDX,
517 RCTL_IDX,
518 FCTTV_IDX,
519 TXCW_IDX,
520 RXCW_IDX,
521 TCTL_IDX,
522 TIPG_IDX,
523 AIFS_IDX,
524 LEDCTL_IDX,
525 PBA_IDX,
526 FCRTL_IDX,
527 FCRTH_IDX,
528 RDFH_IDX,
529 RDFT_IDX,
530 RDFHS_IDX,
531 RDFTS_IDX,
532 RDFPC_IDX,
533 RDBAL_IDX,
534 RDBAH_IDX,
535 RDLEN_IDX,
536 RDH_IDX,
537 RDT_IDX,
538 RDTR_IDX,
539 RXDCTL_IDX,
540 RADV_IDX,
541 RSRPD_IDX,
542 TXDMAC_IDX,
543 TDFH_IDX,
544 TDFT_IDX,
545 TDFHS_IDX,
546 TDFTS_IDX,
547 TDFPC_IDX,
548 TDBAL_IDX,
549 TDBAH_IDX,
550 TDLEN_IDX,
551 TDH_IDX,
552 TDT_IDX,
553 TIDV_IDX,
554 TXDCTL_IDX,
555 TADV_IDX,
556 TSPMT_IDX,
557 CRCERRS_IDX,
558 ALGNERRC_IDX,
559 SYMERRS_IDX,
560 RXERRC_IDX,
561 MPC_IDX,
562 SCC_IDX,
563 ECOL_IDX,
564 MCC_IDX,
565 LATECOL_IDX,
566 COLC_IDX,
567 DC_IDX,
568 TNCRS_IDX,
569 SEC_IDX,
570 CEXTERR_IDX,
571 RLEC_IDX,
572 XONRXC_IDX,
573 XONTXC_IDX,
574 XOFFRXC_IDX,
575 XOFFTXC_IDX,
576 FCRUC_IDX,
577 PRC64_IDX,
578 PRC127_IDX,
579 PRC255_IDX,
580 PRC511_IDX,
581 PRC1023_IDX,
582 PRC1522_IDX,
583 GPRC_IDX,
584 BPRC_IDX,
585 MPRC_IDX,
586 GPTC_IDX,
587 GORCL_IDX,
588 GORCH_IDX,
589 GOTCL_IDX,
590 GOTCH_IDX,
591 RNBC_IDX,
592 RUC_IDX,
593 RFC_IDX,
594 ROC_IDX,
595 RJC_IDX,
596 MGTPRC_IDX,
597 MGTPDC_IDX,
598 MGTPTC_IDX,
599 TORL_IDX,
600 TORH_IDX,
601 TOTL_IDX,
602 TOTH_IDX,
603 TPR_IDX,
604 TPT_IDX,
605 PTC64_IDX,
606 PTC127_IDX,
607 PTC255_IDX,
608 PTC511_IDX,
609 PTC1023_IDX,
610 PTC1522_IDX,
611 MPTC_IDX,
612 BPTC_IDX,
613 TSCTC_IDX,
614 TSCTFC_IDX,
615 RXCSUM_IDX,
616 WUC_IDX,
617 WUFC_IDX,
618 WUS_IDX,
619 MANC_IDX,
620 IPAV_IDX,
621 WUPL_IDX,
622 MTA_IDX,
623 RA_IDX,
624 VFTA_IDX,
625 IP4AT_IDX,
626 IP6AT_IDX,
627 WUPM_IDX,
628 FFLT_IDX,
629 FFMT_IDX,
630 FFVT_IDX,
631 PBM_IDX,
632 RA_82542_IDX,
633 MTA_82542_IDX,
634 VFTA_82542_IDX,
635 E1K_NUM_OF_REGS
636} E1kRegIndex;
637
638#define E1K_NUM_OF_32BIT_REGS MTA_IDX
639/** The number of registers with strictly increasing offset. */
640#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
641
642
643/**
644 * Define E1000-specific EEPROM layout.
645 */
646struct E1kEEPROM
647{
648 public:
649 EEPROM93C46 eeprom;
650
651#ifdef IN_RING3
652 /**
653 * Initialize EEPROM content.
654 *
655 * @param macAddr MAC address of E1000.
656 */
657 void init(RTMAC &macAddr)
658 {
659 eeprom.init();
660 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
661 eeprom.m_au16Data[0x04] = 0xFFFF;
662 /*
663 * bit 3 - full support for power management
664 * bit 10 - full duplex
665 */
666 eeprom.m_au16Data[0x0A] = 0x4408;
667 eeprom.m_au16Data[0x0B] = 0x001E;
668 eeprom.m_au16Data[0x0C] = 0x8086;
669 eeprom.m_au16Data[0x0D] = 0x100E;
670 eeprom.m_au16Data[0x0E] = 0x8086;
671 eeprom.m_au16Data[0x0F] = 0x3040;
672 eeprom.m_au16Data[0x21] = 0x7061;
673 eeprom.m_au16Data[0x22] = 0x280C;
674 eeprom.m_au16Data[0x23] = 0x00C8;
675 eeprom.m_au16Data[0x24] = 0x00C8;
676 eeprom.m_au16Data[0x2F] = 0x0602;
677 updateChecksum();
678 };
679
680 /**
681 * Compute the checksum as required by E1000 and store it
682 * in the last word.
683 */
684 void updateChecksum()
685 {
686 uint16_t u16Checksum = 0;
687
688 for (int i = 0; i < eeprom.SIZE-1; i++)
689 u16Checksum += eeprom.m_au16Data[i];
690 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
691 };
692
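    /*
     * Editor's note: the invariant established by updateChecksum() is that the
     * 16-bit sum of all EEPROM words, including the checksum word itself, equals
     * 0xBABA. A minimal self-check sketch (illustrative only):
     *
     *   uint16_t u16Sum = 0;
     *   for (int i = 0; i < eeprom.SIZE; i++)
     *       u16Sum += eeprom.m_au16Data[i];
     *   Assert(u16Sum == 0xBABA);
     */
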
693 /**
694 * First 6 bytes of EEPROM contain MAC address.
695 *
696 * @returns MAC address of E1000.
697 */
698 void getMac(PRTMAC pMac)
699 {
700 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
701 };
702
703 uint32_t read()
704 {
705 return eeprom.read();
706 }
707
708 void write(uint32_t u32Wires)
709 {
710 eeprom.write(u32Wires);
711 }
712
713 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
714 {
715 return eeprom.readWord(u32Addr, pu16Value);
716 }
717
718 int load(PSSMHANDLE pSSM)
719 {
720 return eeprom.load(pSSM);
721 }
722
723 void save(PSSMHANDLE pSSM)
724 {
725 eeprom.save(pSSM);
726 }
727#endif /* IN_RING3 */
728};
729
730
731#define E1K_SPEC_VLAN(s) (s & 0xFFF)
732#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
733#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
734
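/*
 * Editor's note: a decoding sketch for the 16-bit 'special' field using the macros
 * above (illustrative only; 0xA00A is an arbitrary example value).
 */
#if 0
    uint16_t u16Special = 0xA00A;
    uint16_t uVlanId = E1K_SPEC_VLAN(u16Special); /* bits 0-11  -> VLAN identifier 0x00A (10) */
    bool     fCfi    = E1K_SPEC_CFI(u16Special);  /* bit  12    -> canonical format indicator (0 here) */
    unsigned uPri    = E1K_SPEC_PRI(u16Special);  /* bits 13-15 -> user priority 5 */
#endif
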
735struct E1kRxDStatus
736{
737 /** @name Descriptor Status field (3.2.3.1)
738 * @{ */
739 unsigned fDD : 1; /**< Descriptor Done. */
740 unsigned fEOP : 1; /**< End of packet. */
741 unsigned fIXSM : 1; /**< Ignore checksum indication. */
742 unsigned fVP : 1; /**< VLAN, matches VET. */
743 unsigned : 1;
744 unsigned fTCPCS : 1; /**< TCP checksum calculated on the packet. */
745 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
746 unsigned fPIF : 1; /**< Passed in-exact filter */
747 /** @} */
748 /** @name Descriptor Errors field (3.2.3.2)
749 * (Only valid when fEOP and fDD are set.)
750 * @{ */
751 unsigned fCE : 1; /**< CRC or alignment error. */
752 unsigned : 4; /**< Reserved, varies with different models... */
753 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
754 unsigned fIPE : 1; /**< IP Checksum error. */
755 unsigned fRXE : 1; /**< RX Data error. */
756 /** @} */
757 /** @name Descriptor Special field (3.2.3.3)
758 * @{ */
759 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
760 /** @} */
761};
762typedef struct E1kRxDStatus E1KRXDST;
763
764struct E1kRxDesc_st
765{
766 uint64_t u64BufAddr; /**< Address of data buffer */
767 uint16_t u16Length; /**< Length of data in buffer */
768 uint16_t u16Checksum; /**< Packet checksum */
769 E1KRXDST status;
770};
771typedef struct E1kRxDesc_st E1KRXDESC;
772AssertCompileSize(E1KRXDESC, 16);
773
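/*
 * Editor's note: a sketch of how the guest-physical address of the RX descriptor
 * currently pointed to by RDH can be derived from the ring registers (aliases
 * defined further down). Illustrative only; ring wrap-around is not shown.
 */
#if 0
    RTGCPHYS GCPhysDesc = ((RTGCPHYS)RDBAH << 32 | RDBAL) + RDH * sizeof(E1KRXDESC);
#endif
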
774#define E1K_DTYP_LEGACY -1
775#define E1K_DTYP_CONTEXT 0
776#define E1K_DTYP_DATA 1
777
778struct E1kTDLegacy
779{
780 uint64_t u64BufAddr; /**< Address of data buffer */
781 struct TDLCmd_st
782 {
783 unsigned u16Length : 16;
784 unsigned u8CSO : 8;
785 /* CMD field : 8 */
786 unsigned fEOP : 1;
787 unsigned fIFCS : 1;
788 unsigned fIC : 1;
789 unsigned fRS : 1;
790 unsigned fRPS : 1;
791 unsigned fDEXT : 1;
792 unsigned fVLE : 1;
793 unsigned fIDE : 1;
794 } cmd;
795 struct TDLDw3_st
796 {
797 /* STA field */
798 unsigned fDD : 1;
799 unsigned fEC : 1;
800 unsigned fLC : 1;
801 unsigned fTURSV : 1;
802 /* RSV field */
803 unsigned u4RSV : 4;
804 /* CSS field */
805 unsigned u8CSS : 8;
806 /* Special field*/
807 unsigned u16Special: 16;
808 } dw3;
809};
810
811/**
812 * TCP/IP Context Transmit Descriptor, section 3.3.6.
813 */
814struct E1kTDContext
815{
816 struct CheckSum_st
817 {
818 /** TSE: Header start. !TSE: Checksum start. */
819 unsigned u8CSS : 8;
820 /** Checksum offset - where to store it. */
821 unsigned u8CSO : 8;
822 /** Checksum ending (inclusive) offset, 0 = end of packet. */
823 unsigned u16CSE : 16;
824 } ip;
825 struct CheckSum_st tu;
826 struct TDCDw2_st
827 {
828 /** TSE: The total number of payload bytes for this context. Sans header. */
829 unsigned u20PAYLEN : 20;
830 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
831 unsigned u4DTYP : 4;
832 /** TUCMD field, 8 bits
833 * @{ */
834 /** TSE: TCP (set) or UDP (clear). */
835 unsigned fTCP : 1;
836 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
837 * the IP header. Does not affect the checksumming.
838 * @remarks 82544GC/EI interprets a cleared field differently. */
839 unsigned fIP : 1;
840 /** TSE: TCP segmentation enable. When clear, the context only describes checksum offloading. */
841 unsigned fTSE : 1;
842 /** Report status (only applies to dw3.fDD for here). */
843 unsigned fRS : 1;
844 /** Reserved, MBZ. */
845 unsigned fRSV1 : 1;
846 /** Descriptor extension, must be set for this descriptor type. */
847 unsigned fDEXT : 1;
848 /** Reserved, MBZ. */
849 unsigned fRSV2 : 1;
850 /** Interrupt delay enable. */
851 unsigned fIDE : 1;
852 /** @} */
853 } dw2;
854 struct TDCDw3_st
855 {
856 /** Descriptor Done. */
857 unsigned fDD : 1;
858 /** Reserved, MBZ. */
859 unsigned u7RSV : 7;
860 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
861 unsigned u8HDRLEN : 8;
862 /** TSO: Maximum segment size. */
863 unsigned u16MSS : 16;
864 } dw3;
865};
866typedef struct E1kTDContext E1KTXCTX;
867
868/**
869 * TCP/IP Data Transmit Descriptor, section 3.3.7.
870 */
871struct E1kTDData
872{
873 uint64_t u64BufAddr; /**< Address of data buffer */
874 struct TDDCmd_st
875 {
876 /** The total length of data pointed to by this descriptor. */
877 unsigned u20DTALEN : 20;
878 /** The descriptor type - E1K_DTYP_DATA (1). */
879 unsigned u4DTYP : 4;
880 /** @name DCMD field, 8 bits (3.3.7.1).
881 * @{ */
882 /** End of packet. Note TSCTFC update. */
883 unsigned fEOP : 1;
884 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
885 unsigned fIFCS : 1;
886 /** Use the TSE context when set and the normal when clear. */
887 unsigned fTSE : 1;
888 /** Report status (dw3.STA). */
889 unsigned fRS : 1;
890 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
891 unsigned fRPS : 1;
892 /** Descriptor extension, must be set for this descriptor type. */
893 unsigned fDEXT : 1;
894 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
895 * Insert dw3.SPECIAL after ethernet header. */
896 unsigned fVLE : 1;
897 /** Interrupt delay enable. */
898 unsigned fIDE : 1;
899 /** @} */
900 } cmd;
901 struct TDDDw3_st
902 {
903 /** @name STA field (3.3.7.2)
904 * @{ */
905 unsigned fDD : 1; /**< Descriptor done. */
906 unsigned fEC : 1; /**< Excess collision. */
907 unsigned fLC : 1; /**< Late collision. */
908 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
909 unsigned fTURSV : 1;
910 /** @} */
911 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
912 /** @name POPTS (Packet Option) field (3.3.7.3)
913 * @{ */
914 unsigned fIXSM : 1; /**< Insert IP checksum. */
915 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
916 unsigned u6RSV : 6; /**< Reserved, MBZ. */
917 /** @} */
918 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
919 * Requires fEOP, fVLE and CTRL.VME to be set.
920 * @{ */
921 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
922 /** @} */
923 } dw3;
924};
925typedef struct E1kTDData E1KTXDAT;
926
927union E1kTxDesc
928{
929 struct E1kTDLegacy legacy;
930 struct E1kTDContext context;
931 struct E1kTDData data;
932};
933typedef union E1kTxDesc E1KTXDESC;
934AssertCompileSize(E1KTXDESC, 16);
935
936#define RA_CTL_AS 0x0003
937#define RA_CTL_AV 0x8000
938
939union E1kRecAddr
940{
941 uint32_t au32[32];
942 struct RAArray
943 {
944 uint8_t addr[6];
945 uint16_t ctl;
946 } array[16];
947};
948typedef struct E1kRecAddr::RAArray E1KRAELEM;
949typedef union E1kRecAddr E1KRA;
950AssertCompileSize(E1KRA, 8*16);
951
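/*
 * Editor's note: a filtering sketch using the layout above: receive-address slot 0
 * matches a unicast destination only if its Address Valid (RA_CTL_AV) bit is set
 * and all six address bytes compare equal. Illustrative only; 'pDstMac' is a
 * hypothetical pointer to the destination MAC of an incoming frame, and aRecAddr
 * is the member declared in the state structure further down.
 */
#if 0
    E1KRAELEM *pRa = &pThis->aRecAddr.array[0];
    if (   (pRa->ctl & RA_CTL_AV)
        && memcmp(pRa->addr, pDstMac, sizeof(pRa->addr)) == 0)
    {
        /* frame passes the unicast receive-address filter */
    }
#endif
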
952#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
953#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
954#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
955#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
956
957/** @todo use+extend RTNETIPV4 */
958struct E1kIpHeader
959{
960 /* type of service / version / header length */
961 uint16_t tos_ver_hl;
962 /* total length */
963 uint16_t total_len;
964 /* identification */
965 uint16_t ident;
966 /* fragment offset field */
967 uint16_t offset;
968 /* time to live / protocol*/
969 uint16_t ttl_proto;
970 /* checksum */
971 uint16_t chksum;
972 /* source IP address */
973 uint32_t src;
974 /* destination IP address */
975 uint32_t dest;
976};
977AssertCompileSize(struct E1kIpHeader, 20);
978
979#define E1K_TCP_FIN UINT16_C(0x01)
980#define E1K_TCP_SYN UINT16_C(0x02)
981#define E1K_TCP_RST UINT16_C(0x04)
982#define E1K_TCP_PSH UINT16_C(0x08)
983#define E1K_TCP_ACK UINT16_C(0x10)
984#define E1K_TCP_URG UINT16_C(0x20)
985#define E1K_TCP_ECE UINT16_C(0x40)
986#define E1K_TCP_CWR UINT16_C(0x80)
987#define E1K_TCP_FLAGS UINT16_C(0x3f)
988
989/** @todo use+extend RTNETTCP */
990struct E1kTcpHeader
991{
992 uint16_t src;
993 uint16_t dest;
994 uint32_t seqno;
995 uint32_t ackno;
996 uint16_t hdrlen_flags;
997 uint16_t wnd;
998 uint16_t chksum;
999 uint16_t urgp;
1000};
1001AssertCompileSize(struct E1kTcpHeader, 20);
1002
1003
1004#ifdef E1K_WITH_TXD_CACHE
1005/** The current Saved state version. */
1006# define E1K_SAVEDSTATE_VERSION 4
1007/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
1008# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
1009#else /* !E1K_WITH_TXD_CACHE */
1010/** The current Saved state version. */
1011# define E1K_SAVEDSTATE_VERSION 3
1012#endif /* !E1K_WITH_TXD_CACHE */
1013/** Saved state version for VirtualBox 4.1 and earlier.
1014 * These did not include VLAN tag fields. */
1015#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
1016/** Saved state version for VirtualBox 3.0 and earlier.
1017 * This did not include the configuration part nor the E1kEEPROM. */
1018#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
1019
1020/**
1021 * Device state structure.
1022 *
1023 * Holds the current state of the device.
1024 *
1025 * @implements PDMINETWORKDOWN
1026 * @implements PDMINETWORKCONFIG
1027 * @implements PDMILEDPORTS
1028 */
1029struct E1kState_st
1030{
1031 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1032 PDMIBASE IBase;
1033 PDMINETWORKDOWN INetworkDown;
1034 PDMINETWORKCONFIG INetworkConfig;
1035 PDMILEDPORTS ILeds; /**< LED interface */
1036 R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
1037 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1038
1039 PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
1040 R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
1041 R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
1042 PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
1043 PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
1044 PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
1045 PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
1046 PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
1047 PTMTIMERR3 pTXDTimerR3; /**< Transmit Delay Timer - R3. */
1048 PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
1049 PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
1050 /** The scatter / gather buffer used for the current outgoing packet - R3. */
1051 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1052
1053 PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
1054 R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
1055 R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
1056 PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
1057 PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
1058 PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
1059 PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
1060 PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
1061 PTMTIMERR0 pTXDTimerR0; /**< Transmit Delay Timer - R0. */
1062 PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
1063 PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
1064 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1065 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1066
1067 PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
1068 RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
1069 RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
1070 PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
1071 PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
1072 PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
1073 PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
1074 PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
1075 PTMTIMERRC pTXDTimerRC; /**< Transmit Delay Timer - RC. */
1076 PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
1077 PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
1078 /** The scatter / gather buffer used for the current outgoing packet - RC. */
1079 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1080 RTRCPTR RCPtrAlignment;
1081
1082#if HC_ARCH_BITS != 32
1083 uint32_t Alignment1;
1084#endif
1085 PDMCRITSECT cs; /**< Critical section - what is it protecting? */
1086 PDMCRITSECT csRx; /**< RX Critical section. */
1087#ifdef E1K_WITH_TX_CS
1088 PDMCRITSECT csTx; /**< TX Critical section. */
1089#endif /* E1K_WITH_TX_CS */
1090 /** Base address of memory-mapped registers. */
1091 RTGCPHYS addrMMReg;
1092 /** MAC address obtained from the configuration. */
1093 RTMAC macConfigured;
1094 /** Base port of I/O space region. */
1095 RTIOPORT IOPortBase;
1096 /** EMT: */
1097 PDMPCIDEV pciDevice;
1098 /** EMT: Last time the interrupt was acknowledged. */
1099 uint64_t u64AckedAt;
1100 /** All: Used for eliminating spurious interrupts. */
1101 bool fIntRaised;
1102 /** EMT: false if the cable is disconnected by the GUI. */
1103 bool fCableConnected;
1104 /** EMT: */
1105 bool fR0Enabled;
1106 /** EMT: */
1107 bool fRCEnabled;
1108 /** EMT: Compute Ethernet CRC for RX packets. */
1109 bool fEthernetCRC;
1110 /** All: throttle interrupts. */
1111 bool fItrEnabled;
1112 /** All: throttle RX interrupts. */
1113 bool fItrRxEnabled;
1114 /** All: Delay TX interrupts using TIDV/TADV. */
1115 bool fTidEnabled;
1116 /** Link up delay (in milliseconds). */
1117 uint32_t cMsLinkUpDelay;
1118
1119 /** All: Device register storage. */
1120 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1121 /** TX/RX: Status LED. */
1122 PDMLED led;
1123 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1124 uint32_t u32PktNo;
1125
1126 /** EMT: Offset of the register to be read via IO. */
1127 uint32_t uSelectedReg;
1128 /** EMT: Multicast Table Array. */
1129 uint32_t auMTA[128];
1130 /** EMT: Receive Address registers. */
1131 E1KRA aRecAddr;
1132 /** EMT: VLAN filter table array. */
1133 uint32_t auVFTA[128];
1134 /** EMT: Receive buffer size. */
1135 uint16_t u16RxBSize;
1136 /** EMT: Locked state -- no state alteration possible. */
1137 bool fLocked;
1138 /** EMT: */
1139 bool fDelayInts;
1140 /** All: */
1141 bool fIntMaskUsed;
1142
1143 /** N/A: */
1144 bool volatile fMaybeOutOfSpace;
1145 /** EMT: Gets signalled when more RX descriptors become available. */
1146 RTSEMEVENT hEventMoreRxDescAvail;
1147#ifdef E1K_WITH_RXD_CACHE
1148 /** RX: Fetched RX descriptors. */
1149 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1150 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1151 /** RX: Actual number of fetched RX descriptors. */
1152 uint32_t nRxDFetched;
1153 /** RX: Index in cache of RX descriptor being processed. */
1154 uint32_t iRxDCurrent;
1155#endif /* E1K_WITH_RXD_CACHE */
1156
1157 /** TX: Context used for TCP segmentation packets. */
1158 E1KTXCTX contextTSE;
1159 /** TX: Context used for ordinary packets. */
1160 E1KTXCTX contextNormal;
1161#ifdef E1K_WITH_TXD_CACHE
1162 /** TX: Fetched TX descriptors. */
1163 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1164 /** TX: Actual number of fetched TX descriptors. */
1165 uint8_t nTxDFetched;
1166 /** TX: Index in cache of TX descriptor being processed. */
1167 uint8_t iTxDCurrent;
1168 /** TX: Will this frame be sent as GSO. */
1169 bool fGSO;
1170 /** Alignment padding. */
1171 bool fReserved;
1172 /** TX: Number of bytes in next packet. */
1173 uint32_t cbTxAlloc;
1174
1175#endif /* E1K_WITH_TXD_CACHE */
1176 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1177 * applicable to the current TSE mode. */
1178 PDMNETWORKGSO GsoCtx;
1179 /** Scratch space for holding the loopback / fallback scatter / gather
1180 * descriptor. */
1181 union
1182 {
1183 PDMSCATTERGATHER Sg;
1184 uint8_t padding[8 * sizeof(RTUINTPTR)];
1185 } uTxFallback;
1186 /** TX: Transmit packet buffer use for TSE fallback and loopback. */
1187 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1188 /** TX: Number of bytes assembled in TX packet buffer. */
1189 uint16_t u16TxPktLen;
1190 /** TX: When false, forces segmentation in e1000 instead of sending frames as GSO. */
1191 bool fGSOEnabled;
1192 /** TX: IP checksum has to be inserted if true. */
1193 bool fIPcsum;
1194 /** TX: TCP/UDP checksum has to be inserted if true. */
1195 bool fTCPcsum;
1196 /** TX: VLAN tag has to be inserted if true. */
1197 bool fVTag;
1198 /** TX: TCI part of VLAN tag to be inserted. */
1199 uint16_t u16VTagTCI;
1200 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1201 uint32_t u32PayRemain;
1202 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1203 uint16_t u16HdrRemain;
1204 /** TX TSE fallback: Flags from template header. */
1205 uint16_t u16SavedFlags;
1206 /** TX TSE fallback: Partial checksum from template header. */
1207 uint32_t u32SavedCsum;
1208 /** ?: Emulated controller type. */
1209 E1KCHIP eChip;
1210
1211 /** EMT: EEPROM emulation */
1212 E1kEEPROM eeprom;
1213 /** EMT: Physical interface emulation. */
1214 PHY phy;
1215
1216#if 0
1217 /** Alignment padding. */
1218 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1219#endif
1220
1221 STAMCOUNTER StatReceiveBytes;
1222 STAMCOUNTER StatTransmitBytes;
1223#if defined(VBOX_WITH_STATISTICS)
1224 STAMPROFILEADV StatMMIOReadRZ;
1225 STAMPROFILEADV StatMMIOReadR3;
1226 STAMPROFILEADV StatMMIOWriteRZ;
1227 STAMPROFILEADV StatMMIOWriteR3;
1228 STAMPROFILEADV StatEEPROMRead;
1229 STAMPROFILEADV StatEEPROMWrite;
1230 STAMPROFILEADV StatIOReadRZ;
1231 STAMPROFILEADV StatIOReadR3;
1232 STAMPROFILEADV StatIOWriteRZ;
1233 STAMPROFILEADV StatIOWriteR3;
1234 STAMPROFILEADV StatLateIntTimer;
1235 STAMCOUNTER StatLateInts;
1236 STAMCOUNTER StatIntsRaised;
1237 STAMCOUNTER StatIntsPrevented;
1238 STAMPROFILEADV StatReceive;
1239 STAMPROFILEADV StatReceiveCRC;
1240 STAMPROFILEADV StatReceiveFilter;
1241 STAMPROFILEADV StatReceiveStore;
1242 STAMPROFILEADV StatTransmitRZ;
1243 STAMPROFILEADV StatTransmitR3;
1244 STAMPROFILE StatTransmitSendRZ;
1245 STAMPROFILE StatTransmitSendR3;
1246 STAMPROFILE StatRxOverflow;
1247 STAMCOUNTER StatRxOverflowWakeup;
1248 STAMCOUNTER StatTxDescCtxNormal;
1249 STAMCOUNTER StatTxDescCtxTSE;
1250 STAMCOUNTER StatTxDescLegacy;
1251 STAMCOUNTER StatTxDescData;
1252 STAMCOUNTER StatTxDescTSEData;
1253 STAMCOUNTER StatTxPathFallback;
1254 STAMCOUNTER StatTxPathGSO;
1255 STAMCOUNTER StatTxPathRegular;
1256 STAMCOUNTER StatPHYAccesses;
1257 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1258 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1259#endif /* VBOX_WITH_STATISTICS */
1260
1261#ifdef E1K_INT_STATS
1262 /* Internal stats */
1263 uint64_t u64ArmedAt;
1264 uint64_t uStatMaxTxDelay;
1265 uint32_t uStatInt;
1266 uint32_t uStatIntTry;
1267 uint32_t uStatIntLower;
1268 uint32_t uStatNoIntICR;
1269 int32_t iStatIntLost;
1270 int32_t iStatIntLostOne;
1271 uint32_t uStatIntIMS;
1272 uint32_t uStatIntSkip;
1273 uint32_t uStatIntLate;
1274 uint32_t uStatIntMasked;
1275 uint32_t uStatIntEarly;
1276 uint32_t uStatIntRx;
1277 uint32_t uStatIntTx;
1278 uint32_t uStatIntICS;
1279 uint32_t uStatIntRDTR;
1280 uint32_t uStatIntRXDMT0;
1281 uint32_t uStatIntTXQE;
1282 uint32_t uStatTxNoRS;
1283 uint32_t uStatTxIDE;
1284 uint32_t uStatTxDelayed;
1285 uint32_t uStatTxDelayExp;
1286 uint32_t uStatTAD;
1287 uint32_t uStatTID;
1288 uint32_t uStatRAD;
1289 uint32_t uStatRID;
1290 uint32_t uStatRxFrm;
1291 uint32_t uStatTxFrm;
1292 uint32_t uStatDescCtx;
1293 uint32_t uStatDescDat;
1294 uint32_t uStatDescLeg;
1295 uint32_t uStatTx1514;
1296 uint32_t uStatTx2962;
1297 uint32_t uStatTx4410;
1298 uint32_t uStatTx5858;
1299 uint32_t uStatTx7306;
1300 uint32_t uStatTx8754;
1301 uint32_t uStatTx16384;
1302 uint32_t uStatTx32768;
1303 uint32_t uStatTxLarge;
1304 uint32_t uStatAlign;
1305#endif /* E1K_INT_STATS */
1306};
1307typedef struct E1kState_st E1KSTATE;
1308/** Pointer to the E1000 device state. */
1309typedef E1KSTATE *PE1KSTATE;
1310
1311#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1312
1313/* Forward declarations ******************************************************/
1314static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread);
1315
1316static int e1kRegReadUnimplemented (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1317static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1318static int e1kRegReadAutoClear (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1319static int e1kRegReadDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1320static int e1kRegWriteDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1321#if 0 /* unused */
1322static int e1kRegReadCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1323#endif
1324static int e1kRegWriteCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1325static int e1kRegReadEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1326static int e1kRegWriteEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1327static int e1kRegWriteEERD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1328static int e1kRegWriteMDIC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1329static int e1kRegReadICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1330static int e1kRegWriteICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1331static int e1kRegWriteICS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1332static int e1kRegWriteIMS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1333static int e1kRegWriteIMC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1334static int e1kRegWriteRCTL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1335static int e1kRegWritePBA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1336static int e1kRegWriteRDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1337static int e1kRegWriteRDTR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1338static int e1kRegWriteTDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1339static int e1kRegReadMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1340static int e1kRegWriteMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1341static int e1kRegReadRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1342static int e1kRegWriteRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1343static int e1kRegReadVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1344static int e1kRegWriteVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1345
1346/**
1347 * Register map table.
1348 *
1349 * Override pfnRead and pfnWrite to get register-specific behavior.
1350 */
1351static const struct E1kRegMap_st
1352{
1353 /** Register offset in the register space. */
1354 uint32_t offset;
1355 /** Size in bytes. Registers of size > 4 are in fact tables. */
1356 uint32_t size;
1357 /** Readable bits. */
1358 uint32_t readable;
1359 /** Writable bits. */
1360 uint32_t writable;
1361 /** Read callback. */
1362 int (*pfnRead)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1363 /** Write callback. */
1364 int (*pfnWrite)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1365 /** Abbreviated name. */
1366 const char *abbrev;
1367 /** Full name. */
1368 const char *name;
1369} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1370{
1371 /* offset size read mask write mask read callback write callback abbrev full name */
1372 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1373 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1374 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1375 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1376 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1377 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1378 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1379 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1380 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1381 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1382 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1383 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1384 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1385 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1386 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1387 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1388 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1389 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1390 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1391 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1392 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1393 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1394 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1395 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1396 { 0x00e00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LEDCTL" , "LED Control" },
1397 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1398 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1399 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1400 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1401 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1402 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1403 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1404 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1405 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1406 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1407 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1408 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1409 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1410 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1411 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1412 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1413 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1414 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1415 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1416 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1417 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1418 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1419 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1420 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1421 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1422 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1423 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1424 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1425 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1426 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1427 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1428 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1429 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1430 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1431 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1432 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1433 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1434 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1435 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1436 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1437 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1438 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1439 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1440 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1441 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1442 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1443 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1444 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1445 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1446 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1447 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1448 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1449 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1450 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1451 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1452 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1453 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1454 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1455 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1456 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1457 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1458 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1459 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1460 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1461 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1462 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1463 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1464 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1465 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1466 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1467 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1468 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1469 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1470 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1471 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1472 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1473 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1474 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1475 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1476 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1477 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1478 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1479 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1480 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1481 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1482 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1483 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1484 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1485 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1486 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1487 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1488 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1489 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1490 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1491 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1492 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1493 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1494 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1495 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1496 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1497 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1498 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1499 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1500 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1501 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1502 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1503 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1504 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1505 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1506 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1507};
1508
1509#ifdef LOG_ENABLED
1510
1511/**
1512 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1513 *
1514 * @remarks The mask has nibble (not bit) granularity: any non-zero nibble in the mask selects the corresponding hex digit (e.g. 000000FF selects the two lowest digits).
1515 *
1516 * @returns The buffer.
1517 *
1518 * @param u32 The word to convert into string.
1519 * @param mask Selects which bytes to convert.
1520 * @param buf Where to put the result.
1521 */
1522static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1523{
1524 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1525 {
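/* Hex digit trick: '7' == 'A' - 10, so nibble values 10..15 map to 'A'..'F'. */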
1526 if (mask & 0xF)
1527 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1528 else
1529 *ptr = '.';
1530 }
1531 buf[8] = 0;
1532 return buf;
1533}
1534
1535/**
1536 * Returns timer name for debug purposes.
1537 *
1538 * @returns The timer name.
1539 *
1540 * @param pThis The device state structure.
1541 * @param pTimer The timer to get the name for.
1542 */
1543DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, PTMTIMER pTimer)
1544{
1545 if (pTimer == pThis->CTX_SUFF(pTIDTimer))
1546 return "TID";
1547 if (pTimer == pThis->CTX_SUFF(pTADTimer))
1548 return "TAD";
1549 if (pTimer == pThis->CTX_SUFF(pRIDTimer))
1550 return "RID";
1551 if (pTimer == pThis->CTX_SUFF(pRADTimer))
1552 return "RAD";
1553 if (pTimer == pThis->CTX_SUFF(pIntTimer))
1554 return "Int";
1555 if (pTimer == pThis->CTX_SUFF(pTXDTimer))
1556 return "TXD";
1557 if (pTimer == pThis->CTX_SUFF(pLUTimer))
1558 return "LinkUp";
1559 return "unknown";
1560}
1561
1562 #endif /* LOG_ENABLED */
1563
1564/**
1565 * Arm a timer.
1566 *
1567 * @param pThis Pointer to the device state structure.
1568 * @param pTimer Pointer to the timer.
1569 * @param uExpireIn Expiration interval in microseconds.
1570 */
1571DECLINLINE(void) e1kArmTimer(PE1KSTATE pThis, PTMTIMER pTimer, uint32_t uExpireIn)
1572{
1573 if (pThis->fLocked)
1574 return;
1575
1576 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1577 pThis->szPrf, e1kGetTimerName(pThis, pTimer), uExpireIn));
1578 TMTimerSetMicro(pTimer, uExpireIn);
1579}
1580
1581#ifdef IN_RING3
1582/**
1583 * Cancel a timer.
1584 *
1585 * @param pThis Pointer to the device state structure.
1586 * @param pTimer Pointer to the timer.
1587 */
1588DECLINLINE(void) e1kCancelTimer(PE1KSTATE pThis, PTMTIMER pTimer)
1589{
1590 E1kLog2(("%s Stopping %s timer...\n",
1591 pThis->szPrf, e1kGetTimerName(pThis, pTimer)));
1592 int rc = TMTimerStop(pTimer);
1593 if (RT_FAILURE(rc))
1594 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1595 pThis->szPrf, rc));
1596 RT_NOREF1(pThis);
1597}
1598#endif /* IN_RING3 */
1599
1600#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1601#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1602
1603#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1604#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1605#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
1606
1607#ifndef E1K_WITH_TX_CS
1608# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1609# define e1kCsTxLeave(ps) do { } while (0)
1610#else /* E1K_WITH_TX_CS */
1611# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1612# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1613#endif /* E1K_WITH_TX_CS */
1614
1615#ifdef IN_RING3
1616
1617/**
1618 * Wakeup the RX thread.
1619 */
1620static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1621{
1622 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
1623 if ( pThis->fMaybeOutOfSpace
1624 && pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1625 {
1626 STAM_COUNTER_INC(&pThis->StatRxOverflowWakeup);
1627 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1628 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
1629 }
1630}
1631
1632/**
1633 * Hardware reset. Revert all registers to initial values.
1634 *
1635 * @param pThis The device state structure.
1636 */
1637static void e1kHardReset(PE1KSTATE pThis)
1638{
1639 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1640 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1641 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1642#ifdef E1K_INIT_RA0
1643 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1644 sizeof(pThis->macConfigured.au8));
1645 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1646#endif /* E1K_INIT_RA0 */
1647 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1648 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1649 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1650 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1651 Assert(GET_BITS(RCTL, BSIZE) == 0);
1652 pThis->u16RxBSize = 2048;
1653
1654 /* Reset promiscuous mode */
1655 if (pThis->pDrvR3)
1656 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, false);
1657
1658#ifdef E1K_WITH_TXD_CACHE
1659 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1660 if (RT_LIKELY(rc == VINF_SUCCESS))
1661 {
1662 pThis->nTxDFetched = 0;
1663 pThis->iTxDCurrent = 0;
1664 pThis->fGSO = false;
1665 pThis->cbTxAlloc = 0;
1666 e1kCsTxLeave(pThis);
1667 }
1668#endif /* E1K_WITH_TXD_CACHE */
1669#ifdef E1K_WITH_RXD_CACHE
1670 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1671 {
1672 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1673 e1kCsRxLeave(pThis);
1674 }
1675#endif /* E1K_WITH_RXD_CACHE */
1676#ifdef E1K_LSC_ON_RESET
1677 E1kLog(("%s Will trigger LSC in %d seconds...\n",
1678 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
1679 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), pThis->cMsLinkUpDelay * 1000);
1680#endif /* E1K_LSC_ON_RESET */
1681}
1682
1683#endif /* IN_RING3 */
1684
1685/**
1686 * Compute Internet checksum.
1687 *
1688 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1689 *
1690 * @param pvBuf The buffer containing the data to checksum.
1691 * @param cb The size of the buffer in bytes.
1694 *
1695 * @return The 1's complement of the 1's complement sum.
1696 *
1697 * @thread E1000_TX
1698 */
1699static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1700{
1701 uint32_t csum = 0;
1702 uint16_t *pu16 = (uint16_t *)pvBuf;
1703
1704 while (cb > 1)
1705 {
1706 csum += *pu16++;
1707 cb -= 2;
1708 }
1709 if (cb)
1710 csum += *(uint8_t*)pu16;
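/* Fold the 32-bit accumulator into 16 bits by adding the carries back in (end-around carry). */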
1711 while (csum >> 16)
1712 csum = (csum >> 16) + (csum & 0xFFFF);
1713 return ~csum;
1714}
1715
1716/**
1717 * Dump a packet to debug log.
1718 *
1719 * @param pThis The device state structure.
1720 * @param cpPacket The packet.
1721 * @param cb The size of the packet.
1722 * @param pszText A string denoting direction of packet transfer.
1723 * @thread E1000_TX
1724 */
1725DECLINLINE(void) e1kPacketDump(PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1726{
1727#ifdef DEBUG
1728 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1729 {
1730 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1731 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1732 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1733 {
1734 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1735 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1736 if (*(cpPacket+14+6) == 0x6)
1737 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1738 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1739 }
1740 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1741 {
1742 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1743 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1744 if (*(cpPacket+14+9) == 0x6) /* IPv4 Protocol field (offset 9): 6 = TCP */
1745 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1746 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1747 }
1748 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1749 e1kCsLeave(pThis);
1750 }
1751#else
1752 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1753 {
1754 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1755 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1756 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1757 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1758 else
1759 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1760 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1761 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1762 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1763 e1kCsLeave(pThis);
1764 }
1765 RT_NOREF2(cb, pszText);
1766#endif
1767}
1768
1769/**
1770 * Determine the type of transmit descriptor.
1771 *
1772 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1773 *
1774 * @param pDesc Pointer to descriptor union.
1775 * @thread E1000_TX
1776 */
1777DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1778{
1779 if (pDesc->legacy.cmd.fDEXT)
1780 return pDesc->context.dw2.u4DTYP;
1781 return E1K_DTYP_LEGACY;
1782}
1783
1784
1785#if defined(E1K_WITH_RXD_CACHE) && defined(IN_RING3) /* currently only used in ring-3 due to stack space requirements of the caller */
1786/**
1787 * Dump receive descriptor to debug log.
1788 *
1789 * @param pThis The device state structure.
1790 * @param pDesc Pointer to the descriptor.
1791 * @thread E1000_RX
1792 */
1793static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
1794{
1795 RT_NOREF2(pThis, pDesc);
1796 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
1797 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1798 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1799 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1800 pDesc->status.fPIF ? "PIF" : "pif",
1801 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1802 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1803 pDesc->status.fVP ? "VP" : "vp",
1804 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1805 pDesc->status.fEOP ? "EOP" : "eop",
1806 pDesc->status.fDD ? "DD" : "dd",
1807 pDesc->status.fRXE ? "RXE" : "rxe",
1808 pDesc->status.fIPE ? "IPE" : "ipe",
1809 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1810 pDesc->status.fCE ? "CE" : "ce",
1811 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1812 E1K_SPEC_VLAN(pDesc->status.u16Special),
1813 E1K_SPEC_PRI(pDesc->status.u16Special)));
1814}
1815#endif /* E1K_WITH_RXD_CACHE && IN_RING3 */
1816
1817/**
1818 * Dump transmit descriptor to debug log.
1819 *
1820 * @param pThis The device state structure.
1821 * @param pDesc Pointer to descriptor union.
1822 * @param pszDir A string denoting direction of descriptor transfer
1823 * @thread E1000_TX
1824 */
1825static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
1826 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1827{
1828 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
1829
1830 /*
1831 * Unfortunately we cannot use our format handler here, since we want R0
1832 * logging as well.
1833 */
1834 switch (e1kGetDescType(pDesc))
1835 {
1836 case E1K_DTYP_CONTEXT:
1837 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
1838 pThis->szPrf, pszDir, pszDir));
1839 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1840 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1841 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1842 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1843 pDesc->context.dw2.fIDE ? " IDE":"",
1844 pDesc->context.dw2.fRS ? " RS" :"",
1845 pDesc->context.dw2.fTSE ? " TSE":"",
1846 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1847 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1848 pDesc->context.dw2.u20PAYLEN,
1849 pDesc->context.dw3.u8HDRLEN,
1850 pDesc->context.dw3.u16MSS,
1851 pDesc->context.dw3.fDD?"DD":""));
1852 break;
1853 case E1K_DTYP_DATA:
1854 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1855 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
1856 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1857 pDesc->data.u64BufAddr,
1858 pDesc->data.cmd.u20DTALEN));
1859 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1860 pDesc->data.cmd.fIDE ? " IDE" :"",
1861 pDesc->data.cmd.fVLE ? " VLE" :"",
1862 pDesc->data.cmd.fRPS ? " RPS" :"",
1863 pDesc->data.cmd.fRS ? " RS" :"",
1864 pDesc->data.cmd.fTSE ? " TSE" :"",
1865 pDesc->data.cmd.fIFCS? " IFCS":"",
1866 pDesc->data.cmd.fEOP ? " EOP" :"",
1867 pDesc->data.dw3.fDD ? " DD" :"",
1868 pDesc->data.dw3.fEC ? " EC" :"",
1869 pDesc->data.dw3.fLC ? " LC" :"",
1870 pDesc->data.dw3.fTXSM? " TXSM":"",
1871 pDesc->data.dw3.fIXSM? " IXSM":"",
1872 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1873 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1874 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1875 break;
1876 case E1K_DTYP_LEGACY:
1877 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1878 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
1879 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1880 pDesc->data.u64BufAddr,
1881 pDesc->legacy.cmd.u16Length));
1882 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1883 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1884 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1885 pDesc->legacy.cmd.fRPS ? " RPS" :"",
1886 pDesc->legacy.cmd.fRS ? " RS" :"",
1887 pDesc->legacy.cmd.fIC ? " IC" :"",
1888 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1889 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1890 pDesc->legacy.dw3.fDD ? " DD" :"",
1891 pDesc->legacy.dw3.fEC ? " EC" :"",
1892 pDesc->legacy.dw3.fLC ? " LC" :"",
1893 pDesc->legacy.cmd.u8CSO,
1894 pDesc->legacy.dw3.u8CSS,
1895 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1896 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1897 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1898 break;
1899 default:
1900 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
1901 pThis->szPrf, pszDir, pszDir));
1902 break;
1903 }
1904}
1905
1906/**
1907 * Raise an interrupt later.
1908 *
1909 * @param pThis The device state structure.
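 * @param uNanoseconds The delay, in nanoseconds, before the postponed interrupt is delivered.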
1910 */
1911inline void e1kPostponeInterrupt(PE1KSTATE pThis, uint64_t uNanoseconds)
1912{
1913 if (!TMTimerIsActive(pThis->CTX_SUFF(pIntTimer)))
1914 TMTimerSetNano(pThis->CTX_SUFF(pIntTimer), uNanoseconds);
1915}
1916
1917/**
1918 * Raise interrupt if not masked.
1919 *
1920 * @param pThis The device state structure.
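 * @param rcBusy Status code to return when the critical section is busy.
 * @param u32IntCause Additional interrupt cause bits to set in ICR (optional, defaults to 0).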
1921 */
1922static int e1kRaiseInterrupt(PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause = 0)
1923{
1924 int rc = e1kCsEnter(pThis, rcBusy);
1925 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1926 return rc;
1927
1928 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
1929 ICR |= u32IntCause;
1930 if (ICR & IMS)
1931 {
1932 if (pThis->fIntRaised)
1933 {
1934 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
1935 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
1936 pThis->szPrf, ICR & IMS));
1937 }
1938 else
1939 {
1940 uint64_t tsNow = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
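/* ITR is expressed in 256 ns increments; postpone the interrupt if raising it now would violate the throttling interval. */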
1941 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
1942 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
1943 {
1944 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
1945 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
1946 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
1947 e1kPostponeInterrupt(pThis, ITR * 256);
1948 }
1949 else
1950 {
1951
1952 /* Since we are delivering the interrupt now
1953 * there is no need to do it later -- stop the timer.
1954 */
1955 TMTimerStop(pThis->CTX_SUFF(pIntTimer));
1956 E1K_INC_ISTAT_CNT(pThis->uStatInt);
1957 STAM_COUNTER_INC(&pThis->StatIntsRaised);
1958 /* Got at least one unmasked interrupt cause */
1959 pThis->fIntRaised = true;
1960 /* Raise(1) INTA(0) */
1961 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
1962 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 1);
1963 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
1964 pThis->szPrf, ICR & IMS));
1965 }
1966 }
1967 }
1968 else
1969 {
1970 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
1971 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
1972 pThis->szPrf, ICR, IMS));
1973 }
1974 e1kCsLeave(pThis);
1975 return VINF_SUCCESS;
1976}
1977
1978/**
1979 * Compute the physical address of the descriptor.
1980 *
1981 * @returns the physical address of the descriptor.
1982 *
1983 * @param baseHigh High-order 32 bits of descriptor table address.
1984 * @param baseLow Low-order 32 bits of descriptor table address.
1985 * @param idxDesc The descriptor index in the table.
1986 */
1987DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
1988{
1989 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
1990 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
1991}
1992
1993#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
1994/**
1995 * Advance the head pointer of the receive descriptor queue.
1996 *
1997 * @remarks RDH always points to the next available RX descriptor.
1998 *
1999 * @param pThis The device state structure.
2000 */
2001DECLINLINE(void) e1kAdvanceRDH(PE1KSTATE pThis)
2002{
2003 Assert(e1kCsRxIsOwner(pThis));
2004 //e1kCsEnter(pThis, RT_SRC_POS);
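/* RDLEN is in bytes, so compare the byte offset of the new head; wrap to 0 at the end of the ring. */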
2005 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
2006 RDH = 0;
2007 /*
2008 * Compute current receive queue length and fire RXDMT0 interrupt
2009 * if we are low on receive buffers
2010 */
2011 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
2012 /*
2013 * The minimum threshold is controlled by RDMTS bits of RCTL:
2014 * 00 = 1/2 of RDLEN
2015 * 01 = 1/4 of RDLEN
2016 * 10 = 1/8 of RDLEN
2017 * 11 = reserved
2018 */
2019 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
2020 if (uRQueueLen <= uMinRQThreshold)
2021 {
2022 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
2023 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2024 pThis->szPrf, RDH, RDT, uRQueueLen, uMinRQThreshold));
2025 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
2026 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXDMT0);
2027 }
2028 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2029 pThis->szPrf, RDH, RDT, uRQueueLen));
2030 //e1kCsLeave(pThis);
2031}
2032#endif /* IN_RING3 */
2033
2034#ifdef E1K_WITH_RXD_CACHE
2035
2036/**
2037 * Return the number of RX descriptors that belong to the hardware.
2038 *
2039 * @returns the number of available descriptors in RX ring.
2040 * @param pThis The device state structure.
2041 * @thread ???
2042 */
2043DECLINLINE(uint32_t) e1kGetRxLen(PE1KSTATE pThis)
2044{
2045 /*
2046 * Make sure RDT won't change during computation. EMT may modify RDT at
2047 * any moment.
2048 */
2049 uint32_t rdt = RDT;
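/* The hardware owns descriptors from RDH up to, but not including, RDT; add the ring size when the range wraps. */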
2050 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
2051}
2052
2053DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
2054{
2055 return pThis->nRxDFetched > pThis->iRxDCurrent ?
2056 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
2057}
2058
2059DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
2060{
2061 return pThis->iRxDCurrent >= pThis->nRxDFetched;
2062}
2063
2064/**
2065 * Load receive descriptors from guest memory. The caller needs to be in Rx
2066 * critical section.
2067 *
2068 * We need two physical reads in case the tail wrapped around the end of RX
2069 * descriptor ring.
2070 *
2071 * @returns the actual number of descriptors fetched.
2072 * @param pThis The device state structure.
2075 * @thread EMT, RX
2076 */
2077DECLINLINE(unsigned) e1kRxDPrefetch(PE1KSTATE pThis)
2078{
2079 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
2080 unsigned nDescsAvailable = e1kGetRxLen(pThis) - e1kRxDInCache(pThis);
2081 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
2082 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
2083 Assert(nDescsTotal != 0);
2084 if (nDescsTotal == 0)
2085 return 0;
2086 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pThis)) % nDescsTotal;
2087 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
2088 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
2089 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
2090 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
2091 nFirstNotLoaded, nDescsInSingleRead));
2092 if (nDescsToFetch == 0)
2093 return 0;
2094 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
2095 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2096 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
2097 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
2098 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
2099 // unsigned i, j;
2100 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
2101 // {
2102 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
2103 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2104 // }
2105 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
2106 pThis->szPrf, nDescsInSingleRead,
2107 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
2108 nFirstNotLoaded, RDLEN, RDH, RDT));
2109 if (nDescsToFetch > nDescsInSingleRead)
2110 {
2111 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2112 ((uint64_t)RDBAH << 32) + RDBAL,
2113 pFirstEmptyDesc + nDescsInSingleRead,
2114 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
2115 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
2116 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
2117 // {
2118 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
2119 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2120 // }
2121 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
2122 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
2123 RDBAH, RDBAL));
2124 }
2125 pThis->nRxDFetched += nDescsToFetch;
2126 return nDescsToFetch;
2127}
2128
2129# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2130
2131/**
2132 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2133 * RX ring if the cache is empty.
2134 *
2135 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2136 * go out of sync with RDH which will cause trouble when EMT checks if the
2137 * cache is empty to do pre-fetch, see @bugref{6217}.
2138 *
2139 * @param pThis The device state structure.
2140 * @thread RX
2141 */
2142DECLINLINE(E1KRXDESC*) e1kRxDGet(PE1KSTATE pThis)
2143{
2144 Assert(e1kCsRxIsOwner(pThis));
2145 /* Check the cache first. */
2146 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2147 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2148 /* Cache is empty, reset it and check if we can fetch more. */
2149 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2150 if (e1kRxDPrefetch(pThis))
2151 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2152 /* Out of Rx descriptors. */
2153 return NULL;
2154}
2155
2156
2157/**
2158 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2159 * pointer. The descriptor gets written back to the RXD ring.
2160 *
2161 * @param pThis The device state structure.
2162 * @param pDesc The descriptor being "returned" to the RX ring.
2163 * @thread RX
2164 */
2165DECLINLINE(void) e1kRxDPut(PE1KSTATE pThis, E1KRXDESC* pDesc)
2166{
2167 Assert(e1kCsRxIsOwner(pThis));
2168 pThis->iRxDCurrent++;
2169 // Assert(pDesc >= pThis->aRxDescriptors);
2170 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2171 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2172 // uint32_t rdh = RDH;
2173 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2174 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2175 e1kDescAddr(RDBAH, RDBAL, RDH),
2176 pDesc, sizeof(E1KRXDESC));
2177 e1kAdvanceRDH(pThis);
2178 e1kPrintRDesc(pThis, pDesc);
2179}
2180
2181/**
2182 * Store a fragment of a received packet at the specified address.
2183 *
2184 * @param pThis The device state structure.
2185 * @param pDesc The next available RX descriptor.
2186 * @param pvBuf The fragment.
2187 * @param cb The size of the fragment.
2188 */
2189static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2190{
2191 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2192 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2193 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2194 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2195 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2196 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2197}
2198
2199# endif
2200
2201#else /* !E1K_WITH_RXD_CACHE */
2202
2203/**
2204 * Store a fragment of received packet that fits into the next available RX
2205 * buffer.
2206 *
2207 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2208 *
2209 * @param pThis The device state structure.
2210 * @param pDesc The next available RX descriptor.
2211 * @param pvBuf The fragment.
2212 * @param cb The size of the fragment.
2213 */
2214static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2215{
2216 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2217 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2218 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2219 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2220 /* Write back the descriptor */
2221 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2222 e1kPrintRDesc(pThis, pDesc);
2223 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2224 /* Advance head */
2225 e1kAdvanceRDH(pThis);
2226 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2227 if (pDesc->status.fEOP)
2228 {
2229 /* Complete packet has been stored -- it is time to let the guest know. */
2230#ifdef E1K_USE_RX_TIMERS
2231 if (RDTR)
2232 {
2233 /* Arm the timer to fire in RDTR usec (discard .024) */
2234 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2235 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2236 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2237 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2238 }
2239 else
2240 {
2241#endif
2242 /* 0 delay means immediate interrupt */
2243 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2244 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2245#ifdef E1K_USE_RX_TIMERS
2246 }
2247#endif
2248 }
2249 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2250}
2251
2252#endif /* !E1K_WITH_RXD_CACHE */
2253
2254/**
2255 * Returns true if it is a broadcast packet.
2256 *
2257 * @returns true if destination address indicates broadcast.
2258 * @param pvBuf The ethernet packet.
2259 */
2260DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2261{
2262 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2263 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2264}
2265
2266/**
2267 * Returns true if it is a multicast packet.
2268 *
2269 * @remarks returns true for broadcast packets as well.
2270 * @returns true if destination address indicates multicast.
2271 * @param pvBuf The ethernet packet.
2272 */
2273DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2274{
2275 return (*(char*)pvBuf) & 1;
2276}
2277
2278#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2279/**
2280 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2281 *
2282 * @remarks We emulate checksum offloading for major packet types only.
2283 *
2284 * @returns VBox status code.
2285 * @param pThis The device state structure.
2286 * @param pFrame The available data.
2287 * @param cb Number of bytes available in the buffer.
2288 * @param pStatus Pointer to the receive descriptor status fields to update.
2289 */
2290static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2291{
2292 /** @todo
2293 * It is not safe to bypass checksum verification for packets coming
2294 * from the real wire. We are currently unable to tell where packets are
2295 * coming from, so we tell the driver to ignore our checksum flags
2296 * and do verification in software.
2297 */
2298# if 0
2299 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2300
2301 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2302
2303 switch (uEtherType)
2304 {
2305 case 0x800: /* IPv4 */
2306 {
2307 pStatus->fIXSM = false;
2308 pStatus->fIPCS = true;
2309 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2310 /* TCP/UDP checksum offloading works with TCP and UDP only */
2311 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2312 break;
2313 }
2314 case 0x86DD: /* IPv6 */
2315 pStatus->fIXSM = false;
2316 pStatus->fIPCS = false;
2317 pStatus->fTCPCS = true;
2318 break;
2319 default: /* ARP, VLAN, etc. */
2320 pStatus->fIXSM = true;
2321 break;
2322 }
2323# else
2324 pStatus->fIXSM = true;
2325 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2326# endif
2327 return VINF_SUCCESS;
2328}
2329#endif /* IN_RING3 */
2330
2331/**
2332 * Pad and store received packet.
2333 *
2334 * @remarks Make sure that the packet appears to upper layer as one coming
2335 * from real Ethernet: pad it and insert FCS.
2336 *
2337 * @returns VBox status code.
2338 * @param pThis The device state structure.
2339 * @param pvBuf The available data.
2340 * @param cb Number of bytes available in the buffer.
2341 * @param status Bit fields containing status info.
2342 */
2343static int e1kHandleRxPacket(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2344{
2345#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2346 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2347 uint8_t *ptr = rxPacket;
2348
2349 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2350 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2351 return rc;
2352
2353 if (cb > 70) /* unqualified guess */
2354 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2355
2356 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2357 Assert(cb > 16);
2358 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2359 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2360 if (status.fVP)
2361 {
2362 /* VLAN packet -- strip VLAN tag in VLAN mode */
2363 if ((CTRL & CTRL_VME) && cb > 16)
2364 {
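/* 802.1Q tag occupies bytes 12-15: TPID (0x8100) at 12-13, TCI at 14-15; strip the tag and keep the TCI. */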
2365 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2366 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2367 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2368 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2369 cb -= 4;
2370 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2371 pThis->szPrf, status.u16Special, cb));
2372 }
2373 else
2374 status.fVP = false; /* Set VP only if we stripped the tag */
2375 }
2376 else
2377 memcpy(rxPacket, pvBuf, cb);
2378 /* Pad short packets */
2379 if (cb < 60)
2380 {
2381 memset(rxPacket + cb, 0, 60 - cb);
2382 cb = 60;
2383 }
2384 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2385 {
2386 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2387 /*
2388 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2389 * is ignored by most of drivers we may as well save us the trouble
2390 * of calculating it (see EthernetCRC CFGM parameter).
2391 */
2392 if (pThis->fEthernetCRC)
2393 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2394 cb += sizeof(uint32_t);
2395 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2396 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2397 }
2398 /* Compute checksum of complete packet */
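/* RXCSUM.PCSS specifies the byte offset at which packet checksumming starts. */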
2399 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2400 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2401
2402 /* Update stats */
2403 E1K_INC_CNT32(GPRC);
2404 if (e1kIsBroadcast(pvBuf))
2405 E1K_INC_CNT32(BPRC);
2406 else if (e1kIsMulticast(pvBuf))
2407 E1K_INC_CNT32(MPRC);
2408 /* Update octet receive counter */
2409 E1K_ADD_CNT64(GORCL, GORCH, cb);
2410 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2411 if (cb == 64)
2412 E1K_INC_CNT32(PRC64);
2413 else if (cb < 128)
2414 E1K_INC_CNT32(PRC127);
2415 else if (cb < 256)
2416 E1K_INC_CNT32(PRC255);
2417 else if (cb < 512)
2418 E1K_INC_CNT32(PRC511);
2419 else if (cb < 1024)
2420 E1K_INC_CNT32(PRC1023);
2421 else
2422 E1K_INC_CNT32(PRC1522);
2423
2424 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2425
2426# ifdef E1K_WITH_RXD_CACHE
2427 while (cb > 0)
2428 {
2429 E1KRXDESC *pDesc = e1kRxDGet(pThis);
2430
2431 if (pDesc == NULL)
2432 {
2433 E1kLog(("%s Out of receive buffers, dropping the packet "
2434 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2435 pThis->szPrf, cb, e1kRxDInCache(pThis), RDH, RDT));
2436 break;
2437 }
2438# else /* !E1K_WITH_RXD_CACHE */
2439 if (RDH == RDT)
2440 {
2441 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2442 pThis->szPrf));
2443 }
2444 /* Store the packet to receive buffers */
2445 while (RDH != RDT)
2446 {
2447 /* Load the descriptor pointed by head */
2448 E1KRXDESC desc, *pDesc = &desc;
2449 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2450 &desc, sizeof(desc));
2451# endif /* !E1K_WITH_RXD_CACHE */
2452 if (pDesc->u64BufAddr)
2453 {
2454 /* Update descriptor */
2455 pDesc->status = status;
2456 pDesc->u16Checksum = checksum;
2457 pDesc->status.fDD = true;
2458
2459 /*
2460 * We need to leave Rx critical section here or we risk deadlocking
2461 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2462 * page or has an access handler associated with it.
2463 * Note that it is safe to leave the critical section here since
2464 * e1kRegWriteRDT() never modifies RDH. It never touches already
2465 * fetched RxD cache entries either.
2466 */
2467 if (cb > pThis->u16RxBSize)
2468 {
2469 pDesc->status.fEOP = false;
2470 e1kCsRxLeave(pThis);
2471 e1kStoreRxFragment(pThis, pDesc, ptr, pThis->u16RxBSize);
2472 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2473 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2474 return rc;
2475 ptr += pThis->u16RxBSize;
2476 cb -= pThis->u16RxBSize;
2477 }
2478 else
2479 {
2480 pDesc->status.fEOP = true;
2481 e1kCsRxLeave(pThis);
2482 e1kStoreRxFragment(pThis, pDesc, ptr, cb);
2483# ifdef E1K_WITH_RXD_CACHE
2484 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2485 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2486 return rc;
2487 cb = 0;
2488# else /* !E1K_WITH_RXD_CACHE */
2489 pThis->led.Actual.s.fReading = 0;
2490 return VINF_SUCCESS;
2491# endif /* !E1K_WITH_RXD_CACHE */
2492 }
2493 /*
2494 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2495 * is not defined.
2496 */
2497 }
2498# ifdef E1K_WITH_RXD_CACHE
2499 /* Write back the descriptor. */
2500 pDesc->status.fDD = true;
2501 e1kRxDPut(pThis, pDesc);
2502# else /* !E1K_WITH_RXD_CACHE */
2503 else
2504 {
2505 /* Write back the descriptor. */
2506 pDesc->status.fDD = true;
2507 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2508 e1kDescAddr(RDBAH, RDBAL, RDH),
2509 pDesc, sizeof(E1KRXDESC));
2510 e1kAdvanceRDH(pThis);
2511 }
2512# endif /* !E1K_WITH_RXD_CACHE */
2513 }
2514
2515 if (cb > 0)
2516 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2517
2518 pThis->led.Actual.s.fReading = 0;
2519
2520 e1kCsRxLeave(pThis);
2521# ifdef E1K_WITH_RXD_CACHE
2522 /* Complete packet has been stored -- it is time to let the guest know. */
2523# ifdef E1K_USE_RX_TIMERS
2524 if (RDTR)
2525 {
2526 /* Arm the timer to fire in RDTR usec (discard .024) */
2527 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2528 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2529 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2530 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2531 }
2532 else
2533 {
2534# endif /* E1K_USE_RX_TIMERS */
2535 /* 0 delay means immediate interrupt */
2536 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2537 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2538# ifdef E1K_USE_RX_TIMERS
2539 }
2540# endif /* E1K_USE_RX_TIMERS */
2541# endif /* E1K_WITH_RXD_CACHE */
2542
2543 return VINF_SUCCESS;
2544#else /* !IN_RING3 */
2545 RT_NOREF_PV(pThis); RT_NOREF_PV(pvBuf); RT_NOREF_PV(cb); RT_NOREF_PV(status);
2546 return VERR_INTERNAL_ERROR_2;
2547#endif /* !IN_RING3 */
2548}
2549
2550
2551#ifdef IN_RING3
2552/**
2553 * Bring the link up after the configured delay, 5 seconds by default.
2554 *
2555 * @param pThis The device state structure.
2556 * @thread any
2557 */
2558DECLINLINE(void) e1kBringLinkUpDelayed(PE1KSTATE pThis)
2559{
2560 E1kLog(("%s Will bring up the link in %d seconds...\n",
2561 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2562 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), pThis->cMsLinkUpDelay * 1000);
2563}
2564
2565/**
2566 * Bring up the link immediately.
2567 *
2568 * @param pThis The device state structure.
2569 */
2570DECLINLINE(void) e1kR3LinkUp(PE1KSTATE pThis)
2571{
2572 E1kLog(("%s Link is up\n", pThis->szPrf));
2573 STATUS |= STATUS_LU;
2574 Phy::setLinkStatus(&pThis->phy, true);
2575 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2576 if (pThis->pDrvR3)
2577 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_UP);
2578 /* Process pending TX descriptors (see @bugref{8942}) */
2579 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pTxQueue));
2580 if (RT_LIKELY(pItem))
2581 PDMQueueInsert(pThis->CTX_SUFF(pTxQueue), pItem);
2582}
2583
2584/**
2585 * Bring down the link immediately.
2586 *
2587 * @param pThis The device state structure.
2588 */
2589DECLINLINE(void) e1kR3LinkDown(PE1KSTATE pThis)
2590{
2591 E1kLog(("%s Link is down\n", pThis->szPrf));
2592 STATUS &= ~STATUS_LU;
2593#ifdef E1K_LSC_ON_RESET
2594 Phy::setLinkStatus(&pThis->phy, false);
2595#endif /* E1K_LSC_ON_RESET */
2596 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2597 if (pThis->pDrvR3)
2598 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2599}
2600
2601/**
2602 * Bring down the link temporarily.
2603 *
2604 * @param pThis The device state structure.
2605 */
2606DECLINLINE(void) e1kR3LinkDownTemp(PE1KSTATE pThis)
2607{
2608 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2609 STATUS &= ~STATUS_LU;
2610 Phy::setLinkStatus(&pThis->phy, false);
2611 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2612 /*
2613 * Notifying the associated driver that the link went down (even temporarily)
2614 * seems to be the right thing, but it was not done before. This may cause
2615 * a regression if the driver does not expect the link to go down as a result
2616 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2617 * of code notified the driver that the link was up! See @bugref{7057}.
2618 */
2619 if (pThis->pDrvR3)
2620 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2621 e1kBringLinkUpDelayed(pThis);
2622}
2623#endif /* IN_RING3 */
2624
2625#if 0 /* unused */
2626/**
2627 * Read handler for Device Status register.
2628 *
2629 * Get the link status from PHY.
2630 *
2631 * @returns VBox status code.
2632 *
2633 * @param pThis The device state structure.
2634 * @param offset Register offset in memory-mapped frame.
2635 * @param index Register index in register array.
2636 * @param mask Used to implement partial reads (8 and 16-bit).
2637 */
2638static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2639{
2640 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2641 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2642 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2643 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2644 {
2645 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2646 if (Phy::readMDIO(&pThis->phy))
2647 *pu32Value = CTRL | CTRL_MDIO;
2648 else
2649 *pu32Value = CTRL & ~CTRL_MDIO;
2650 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2651 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2652 }
2653 else
2654 {
2655 /* MDIO pin is used for output, ignore it */
2656 *pu32Value = CTRL;
2657 }
2658 return VINF_SUCCESS;
2659}
2660#endif /* unused */
2661
2662/**
2663 * A callback used by PHY to indicate that the link needs to be updated due to
2664 * reset of PHY.
2665 *
2666 * @param pPhy A pointer to phy member of the device state structure.
2667 * @thread any
2668 */
2669void e1kPhyLinkResetCallback(PPHY pPhy)
2670{
2671 /* PHY is aggregated into e1000, get pThis from pPhy. */
2672 PE1KSTATE pThis = RT_FROM_MEMBER(pPhy, E1KSTATE, phy);
2673 /* Make sure the cable is connected and the guest has set CTRL.SLU (link-up enabled) */
2674 if (pThis->fCableConnected && (CTRL & CTRL_SLU))
2675 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), E1K_INIT_LINKUP_DELAY_US);
2676}
2677
2678/**
2679 * Write handler for Device Control register.
2680 *
2681 * Handles reset.
2682 *
2683 * @param pThis The device state structure.
2684 * @param offset Register offset in memory-mapped frame.
2685 * @param index Register index in register array.
2686 * @param value The value to store.
2687 * @param mask Used to implement partial writes (8 and 16-bit).
2688 * @thread EMT
2689 */
2690static int e1kRegWriteCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2691{
2692 int rc = VINF_SUCCESS;
2693
2694 if (value & CTRL_RESET)
2695 { /* RST */
2696#ifndef IN_RING3
2697 return VINF_IOM_R3_MMIO_WRITE;
2698#else
2699 e1kHardReset(pThis);
2700#endif
2701 }
2702 else
2703 {
2704#ifdef E1K_LSC_ON_SLU
2705 /*
2706 * When the guest changes 'Set Link Up' bit from 0 to 1 we check if
2707 * the link is down and the cable is connected, and if they are we
2708 * bring the link up, see @bugref{8624}.
2709 */
2710 if ( (value & CTRL_SLU)
2711 && !(CTRL & CTRL_SLU)
2712 && pThis->fCableConnected
2713 && !(STATUS & STATUS_LU))
2714 {
2715 /* It should take about 2 seconds for the link to come up */
2716 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), E1K_INIT_LINKUP_DELAY_US);
2717 }
2718#else /* !E1K_LSC_ON_SLU */
2719 if ( (value & CTRL_SLU)
2720 && !(CTRL & CTRL_SLU)
2721 && pThis->fCableConnected
2722 && !TMTimerIsActive(pThis->CTX_SUFF(pLUTimer)))
2723 {
2724 /* PXE does not use LSC interrupts, see @bugref{9113}. */
2725 STATUS |= STATUS_LU;
2726 }
2727#endif /* !E1K_LSC_ON_SLU */
2728 if ((value & CTRL_VME) != (CTRL & CTRL_VME))
2729 {
2730 E1kLog(("%s VLAN Mode %s\n", pThis->szPrf, (value & CTRL_VME) ? "Enabled" : "Disabled"));
2731 }
2732 Log7(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2733 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2734 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2735 if (value & CTRL_MDC)
2736 {
2737 if (value & CTRL_MDIO_DIR)
2738 {
2739 Log7(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2740 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2741 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO));
2742 }
2743 else
2744 {
2745 if (Phy::readMDIO(&pThis->phy))
2746 value |= CTRL_MDIO;
2747 else
2748 value &= ~CTRL_MDIO;
2749 Log7(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2750 }
2751 }
2752 rc = e1kRegWriteDefault(pThis, offset, index, value);
2753 }
2754
2755 return rc;
2756}
2757
2758/**
2759 * Write handler for EEPROM/Flash Control/Data register.
2760 *
2761 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2762 *
2763 * @param pThis The device state structure.
2764 * @param offset Register offset in memory-mapped frame.
2765 * @param index Register index in register array.
2766 * @param value The value to store.
2767 * @param mask Used to implement partial writes (8 and 16-bit).
2768 * @thread EMT
2769 */
2770static int e1kRegWriteEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2771{
2772 RT_NOREF(offset, index);
2773#ifdef IN_RING3
2774 /* So far we are concerned with lower byte only */
2775 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2776 {
2777 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2778 /* Note: 82543GC does not need to request EEPROM access */
2779 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
2780 pThis->eeprom.write(value & EECD_EE_WIRES);
2781 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
2782 }
2783 if (value & EECD_EE_REQ)
2784 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2785 else
2786 EECD &= ~EECD_EE_GNT;
2787 //e1kRegWriteDefault(pThis, offset, index, value );
2788
2789 return VINF_SUCCESS;
2790#else /* !IN_RING3 */
2791 RT_NOREF(pThis, value);
2792 return VINF_IOM_R3_MMIO_WRITE;
2793#endif /* !IN_RING3 */
2794}
2795
2796/**
2797 * Read handler for EEPROM/Flash Control/Data register.
2798 *
2799 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2800 *
2801 * @returns VBox status code.
2802 *
2803 * @param pThis The device state structure.
2804 * @param offset Register offset in memory-mapped frame.
2805 * @param index Register index in register array.
2806 * @param mask Used to implement partial reads (8 and 16-bit).
2807 * @thread EMT
2808 */
2809static int e1kRegReadEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2810{
2811#ifdef IN_RING3
2812 uint32_t value;
2813 int rc = e1kRegReadDefault(pThis, offset, index, &value);
2814 if (RT_SUCCESS(rc))
2815 {
2816 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2817 {
2818 /* Note: 82543GC does not need to request EEPROM access */
2819 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2820 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2821 value |= pThis->eeprom.read();
2822 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2823 }
2824 *pu32Value = value;
2825 }
2826
2827 return rc;
2828#else /* !IN_RING3 */
2829 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
2830 return VINF_IOM_R3_MMIO_READ;
2831#endif /* !IN_RING3 */
2832}
2833
2834/**
2835 * Write handler for EEPROM Read register.
2836 *
2837 * Handles EEPROM word access requests, reads EEPROM and stores the result
2838 * into DATA field.
2839 *
2840 * @param pThis The device state structure.
2841 * @param offset Register offset in memory-mapped frame.
2842 * @param index Register index in register array.
2843 * @param value The value to store.
2844 * @param mask Used to implement partial writes (8 and 16-bit).
2845 * @thread EMT
2846 */
2847static int e1kRegWriteEERD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2848{
2849#ifdef IN_RING3
2850 /* Make use of 'writable' and 'readable' masks. */
2851 e1kRegWriteDefault(pThis, offset, index, value);
2852 /* DONE and DATA are set only if read was triggered by START. */
2853 if (value & EERD_START)
2854 {
2855 uint16_t tmp;
2856 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2857 if (pThis->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2858 SET_BITS(EERD, DATA, tmp);
2859 EERD |= EERD_DONE;
2860 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2861 }
2862
2863 return VINF_SUCCESS;
2864#else /* !IN_RING3 */
2865 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
2866 return VINF_IOM_R3_MMIO_WRITE;
2867#endif /* !IN_RING3 */
2868}
2869
2870
2871/**
2872 * Write handler for MDI Control register.
2873 *
2874 * Handles PHY read/write requests; forwards requests to internal PHY device.
2875 *
2876 * @param pThis The device state structure.
2877 * @param offset Register offset in memory-mapped frame.
2878 * @param index Register index in register array.
2879 * @param value The value to store.
2880 * @param mask Used to implement partial writes (8 and 16-bit).
2881 * @thread EMT
2882 */
2883static int e1kRegWriteMDIC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2884{
2885 if (value & MDIC_INT_EN)
2886 {
2887 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2888 pThis->szPrf));
2889 }
2890 else if (value & MDIC_READY)
2891 {
2892 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
2893 pThis->szPrf));
2894 }
2895 else if (GET_BITS_V(value, MDIC, PHY) != 1)
2896 {
2897 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
2898 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
2899 /*
2900 * Some drivers scan the MDIO bus for a PHY. We can work with these
2901 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
2902 * at the requested address, see @bugref{7346}.
2903 */
2904 MDIC = MDIC_READY | MDIC_ERROR;
2905 }
2906 else
2907 {
2908 /* Store the value */
2909 e1kRegWriteDefault(pThis, offset, index, value);
2910 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
2911 /* Forward op to PHY */
2912 if (value & MDIC_OP_READ)
2913 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG)));
2914 else
2915 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
2916 /* Let software know that we are done */
2917 MDIC |= MDIC_READY;
2918 }
2919
2920 return VINF_SUCCESS;
2921}
2922
2923/**
2924 * Write handler for Interrupt Cause Read register.
2925 *
2926 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2927 *
2928 * @param pThis The device state structure.
2929 * @param offset Register offset in memory-mapped frame.
2930 * @param index Register index in register array.
2931 * @param value The value to store.
2932 * @param mask Used to implement partial writes (8 and 16-bit).
2933 * @thread EMT
2934 */
2935static int e1kRegWriteICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2936{
2937 ICR &= ~value;
2938
2939 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
2940 return VINF_SUCCESS;
2941}
2942
2943/**
2944 * Read handler for Interrupt Cause Read register.
2945 *
2946 * Reading this register acknowledges all interrupts.
2947 *
2948 * @returns VBox status code.
2949 *
2950 * @param pThis The device state structure.
2951 * @param offset Register offset in memory-mapped frame.
2952 * @param index Register index in register array.
2953 * @param pu32Value Where to store the read value.
2954 * @thread EMT
2955 */
2956static int e1kRegReadICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2957{
2958 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
2959 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2960 return rc;
2961
2962 uint32_t value = 0;
2963 rc = e1kRegReadDefault(pThis, offset, index, &value);
2964 if (RT_SUCCESS(rc))
2965 {
2966 if (value)
2967 {
2968 if (!pThis->fIntRaised)
2969 E1K_INC_ISTAT_CNT(pThis->uStatNoIntICR);
2970 /*
2971 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
2972 * with disabled interrupts.
2973 */
2974 //if (IMS)
2975 if (1)
2976 {
2977 /*
2978 * Interrupts were enabled -- we are supposedly at the very
2979 * beginning of interrupt handler
2980 */
2981 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
2982 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
2983 /* Clear all pending interrupts */
2984 ICR = 0;
2985 pThis->fIntRaised = false;
2986 /* Lower(0) INTA(0) */
2987 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
2988
2989 pThis->u64AckedAt = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
2990 if (pThis->fIntMaskUsed)
2991 pThis->fDelayInts = true;
2992 }
2993 else
2994 {
2995 /*
2996 * Interrupts are disabled -- in Windows guests the ICR read is done
2997 * just before re-enabling interrupts
2998 */
2999 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
3000 }
3001 }
3002 *pu32Value = value;
3003 }
3004 e1kCsLeave(pThis);
3005
3006 return rc;
3007}
3008
3009/**
3010 * Write handler for Interrupt Cause Set register.
3011 *
3012 * Bits corresponding to 1s in 'value' will be set in ICR register.
3013 *
3014 * @param pThis The device state structure.
3015 * @param offset Register offset in memory-mapped frame.
3016 * @param index Register index in register array.
3017 * @param value The value to store.
3018 * @param mask Used to implement partial writes (8 and 16-bit).
3019 * @thread EMT
3020 */
3021static int e1kRegWriteICS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3022{
3023 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3024 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
3025 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
3026}
3027
3028/**
3029 * Write handler for Interrupt Mask Set register.
3030 *
3031 * Will trigger pending interrupts.
3032 *
3033 * @param pThis The device state structure.
3034 * @param offset Register offset in memory-mapped frame.
3035 * @param index Register index in register array.
3036 * @param value The value to store.
3037 * @param mask Used to implement partial writes (8 and 16-bit).
3038 * @thread EMT
3039 */
3040static int e1kRegWriteIMS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3041{
3042 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3043
3044 IMS |= value;
3045 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
3046 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
3047 /*
3048 * We cannot raise an interrupt here as it will occasionally cause an interrupt storm
3049 * in Windows guests (see @bugref{8624}, @bugref{5023}).
3050 */
3051 if ((ICR & IMS) && !pThis->fLocked)
3052 {
3053 E1K_INC_ISTAT_CNT(pThis->uStatIntIMS);
3054 e1kPostponeInterrupt(pThis, E1K_IMS_INT_DELAY_NS);
3055 }
3056
3057 return VINF_SUCCESS;
3058}
3059
3060/**
3061 * Write handler for Interrupt Mask Clear register.
3062 *
3063 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
3064 *
3065 * @param pThis The device state structure.
3066 * @param offset Register offset in memory-mapped frame.
3067 * @param index Register index in register array.
3068 * @param value The value to store.
3069 * @param mask Used to implement partial writes (8 and 16-bit).
3070 * @thread EMT
3071 */
3072static int e1kRegWriteIMC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3073{
3074 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3075
3076 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3077 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3078 return rc;
3079 if (pThis->fIntRaised)
3080 {
3081 /*
3082 * Technically we should reset fIntRaised in the ICR read handler, but that would
3083 * cause Windows to freeze since it may receive an interrupt while still at the very
3084 * beginning of the interrupt handler.
3085 */
3086 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3087 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3088 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3089 /* Lower(0) INTA(0) */
3090 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
3091 pThis->fIntRaised = false;
3092 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3093 }
3094 IMS &= ~value;
3095 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3096 e1kCsLeave(pThis);
3097
3098 return VINF_SUCCESS;
3099}
3100
3101/**
3102 * Write handler for Receive Control register.
3103 *
3104 * @param pThis The device state structure.
3105 * @param offset Register offset in memory-mapped frame.
3106 * @param index Register index in register array.
3107 * @param value The value to store.
3108 * @param mask Used to implement partial writes (8 and 16-bit).
3109 * @thread EMT
3110 */
3111static int e1kRegWriteRCTL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3112{
3113 /* Update promiscuous mode */
3114 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
3115 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3116 {
3117 /* Promiscuity has changed, pass the knowledge on. */
3118#ifndef IN_RING3
3119 return VINF_IOM_R3_MMIO_WRITE;
3120#else
3121 if (pThis->pDrvR3)
3122 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, fBecomePromiscous);
3123#endif
3124 }
3125
3126 /* Adjust receive buffer size */
3127 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3128 if (value & RCTL_BSEX)
3129 cbRxBuf *= 16;
3130 if (cbRxBuf != pThis->u16RxBSize)
3131 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3132 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3133 pThis->u16RxBSize = cbRxBuf;
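     /* For example, BSIZE=00b yields 2048 >> 0 = 2048 bytes, while BSIZE=11b
      * with BSEX set yields (2048 >> 3) * 16 = 4096 bytes. */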
3134
3135 /* Update the register */
3136 e1kRegWriteDefault(pThis, offset, index, value);
3137
3138 return VINF_SUCCESS;
3139}
3140
3141/**
3142 * Write handler for Packet Buffer Allocation register.
3143 *
3144 * TXA = 64 - RXA.
3145 *
3146 * @param pThis The device state structure.
3147 * @param offset Register offset in memory-mapped frame.
3148 * @param index Register index in register array.
3149 * @param value The value to store.
3150 * @param mask Used to implement partial writes (8 and 16-bit).
3151 * @thread EMT
3152 */
3153static int e1kRegWritePBA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3154{
3155 e1kRegWriteDefault(pThis, offset, index, value);
3156 PBA_st->txa = 64 - PBA_st->rxa;
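     /* For example, a guest writing RXA=48 leaves TXA = 64 - 48 = 16 for
      * transmits (the units are kilobytes of the on-chip packet buffer). */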
3157
3158 return VINF_SUCCESS;
3159}
3160
3161/**
3162 * Write handler for Receive Descriptor Tail register.
3163 *
3164 * @remarks Write into RDT forces switch to HC and signal to
3165 * e1kR3NetworkDown_WaitReceiveAvail().
3166 *
3167 * @returns VBox status code.
3168 *
3169 * @param pThis The device state structure.
3170 * @param offset Register offset in memory-mapped frame.
3171 * @param index Register index in register array.
3172 * @param value The value to store.
3173 * @param mask Used to implement partial writes (8 and 16-bit).
3174 * @thread EMT
3175 */
3176static int e1kRegWriteRDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3177{
3178#ifndef IN_RING3
3179 /* XXX */
3180// return VINF_IOM_R3_MMIO_WRITE;
3181#endif
3182 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3183 if (RT_LIKELY(rc == VINF_SUCCESS))
3184 {
3185 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3186 /*
3187 * Some drivers advance RDT too far, so that it equals RDH. This
3188 * somehow manages to work with real hardware but not with this
3189 * emulated device. We can work with these drivers if we just
3190 * write 1 less when we see a driver writing RDT equal to RDH,
3191 * see @bugref{7346}.
3192 */
3193 if (value == RDH)
3194 {
3195 if (RDH == 0)
3196 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3197 else
3198 value = RDH - 1;
3199 }
3200 rc = e1kRegWriteDefault(pThis, offset, index, value);
3201#ifdef E1K_WITH_RXD_CACHE
3202 /*
3203 * We need to fetch descriptors now as RDT may go whole circle
3204 * before we attempt to store a received packet. For example,
3205 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3206 * size being only 8 descriptors! Note that we fetch descriptors
3207 * only when the cache is empty to reduce the number of memory reads
3208 * in case of frequent RDT writes. Don't fetch anything when the
3209 * receiver is disabled either, as RDH, RDT, and RDLEN can be in some
3210 * messed-up state.
3211 * Note that even though the cache may seem empty, meaning that there are
3212 * no more available descriptors in it, it may still be in use by the RX
3213 * thread, which has not yet written the last descriptor back but has
3214 * temporarily released the RX lock in order to write the packet body
3215 * to the descriptor's buffer. At this point we are still going to do the
3216 * prefetch, but it won't actually fetch anything if there are no unused
3217 * slots in our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not
3218 * reset the cache here even if it appears empty. It will be reset at
3219 * a later point in e1kRxDGet().
3220 */
3221 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3222 e1kRxDPrefetch(pThis);
3223#endif /* E1K_WITH_RXD_CACHE */
3224 e1kCsRxLeave(pThis);
3225 if (RT_SUCCESS(rc))
3226 {
3227/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
3228 * without requiring any context switches. We should also check the
3229 * wait condition before bothering to queue the item as we're currently
3230 * queuing thousands of items per second here in a normal transmit
3231 * scenario. Expect performance changes when fixing this! */
3232#ifdef IN_RING3
3233 /* Signal that we have more receive descriptors available. */
3234 e1kWakeupReceive(pThis->CTX_SUFF(pDevIns));
3235#else
3236 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pCanRxQueue));
3237 if (pItem)
3238 PDMQueueInsert(pThis->CTX_SUFF(pCanRxQueue), pItem);
3239#endif
3240 }
3241 }
3242 return rc;
3243}
3244
3245/**
3246 * Write handler for Receive Delay Timer register.
3247 *
3248 * @param pThis The device state structure.
3249 * @param offset Register offset in memory-mapped frame.
3250 * @param index Register index in register array.
3251 * @param value The value to store.
3252 * @param mask Used to implement partial writes (8 and 16-bit).
3253 * @thread EMT
3254 */
3255static int e1kRegWriteRDTR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3256{
3257 e1kRegWriteDefault(pThis, offset, index, value);
3258 if (value & RDTR_FPD)
3259 {
3260 /* Flush requested, cancel both timers and raise interrupt */
3261#ifdef E1K_USE_RX_TIMERS
3262 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3263 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3264#endif
3265 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3266 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3267 }
3268
3269 return VINF_SUCCESS;
3270}
3271
3272DECLINLINE(uint32_t) e1kGetTxLen(PE1KSTATE pThis)
3273{
3274 /*
3275 * Make sure TDT won't change during computation. EMT may modify TDT at
3276 * any moment.
3277 */
3278 uint32_t tdt = TDT;
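     /* Example: with a 256-descriptor ring (TDLEN=4096), TDH=250 and TDT=10
      * this evaluates to 256 + 10 - 250 = 16 descriptors pending transmission. */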
3279 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3280}
3281
3282#ifdef IN_RING3
3283
3284# ifdef E1K_TX_DELAY
3285/**
3286 * Transmit Delay Timer handler.
3287 *
3288 * @remarks We only get here when the timer expires.
3289 *
3290 * @param pDevIns Pointer to device instance structure.
3291 * @param pTimer Pointer to the timer.
3292 * @param pvUser NULL.
3293 * @thread EMT
3294 */
3295static DECLCALLBACK(void) e1kTxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3296{
3297 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3298 Assert(PDMCritSectIsOwner(&pThis->csTx));
3299
3300 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3301# ifdef E1K_INT_STATS
3302 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3303 if (u64Elapsed > pThis->uStatMaxTxDelay)
3304 pThis->uStatMaxTxDelay = u64Elapsed;
3305# endif
3306 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
3307 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3308}
3309# endif /* E1K_TX_DELAY */
3310
3311//# ifdef E1K_USE_TX_TIMERS
3312
3313/**
3314 * Transmit Interrupt Delay Timer handler.
3315 *
3316 * @remarks We only get here when the timer expires.
3317 *
3318 * @param pDevIns Pointer to device instance structure.
3319 * @param pTimer Pointer to the timer.
3320 * @param pvUser NULL.
3321 * @thread EMT
3322 */
3323static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3324{
3325 RT_NOREF(pDevIns);
3326 RT_NOREF(pTimer);
3327 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3328
3329 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3330 /* Cancel absolute delay timer as we have already got attention */
3331# ifndef E1K_NO_TAD
3332 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
3333# endif
3334 e1kRaiseInterrupt(pThis, ICR_TXDW);
3335}
3336
3337/**
3338 * Transmit Absolute Delay Timer handler.
3339 *
3340 * @remarks We only get here when the timer expires.
3341 *
3342 * @param pDevIns Pointer to device instance structure.
3343 * @param pTimer Pointer to the timer.
3344 * @param pvUser NULL.
3345 * @thread EMT
3346 */
3347static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3348{
3349 RT_NOREF(pDevIns);
3350 RT_NOREF(pTimer);
3351 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3352
3353 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3354 /* Cancel interrupt delay timer as we have already got attention */
3355 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
3356 e1kRaiseInterrupt(pThis, ICR_TXDW);
3357}
3358
3359//# endif /* E1K_USE_TX_TIMERS */
3360# ifdef E1K_USE_RX_TIMERS
3361
3362/**
3363 * Receive Interrupt Delay Timer handler.
3364 *
3365 * @remarks We only get here when the timer expires.
3366 *
3367 * @param pDevIns Pointer to device instance structure.
3368 * @param pTimer Pointer to the timer.
3369 * @param pvUser NULL.
3370 * @thread EMT
3371 */
3372static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3373{
3374 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3375
3376 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3377 /* Cancel absolute delay timer as we have already got attention */
3378 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3379 e1kRaiseInterrupt(pThis, ICR_RXT0);
3380}
3381
3382/**
3383 * Receive Absolute Delay Timer handler.
3384 *
3385 * @remarks We only get here when the timer expires.
3386 *
3387 * @param pDevIns Pointer to device instance structure.
3388 * @param pTimer Pointer to the timer.
3389 * @param pvUser NULL.
3390 * @thread EMT
3391 */
3392static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3393{
3394 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3395
3396 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3397 /* Cancel interrupt delay timer as we have already got attention */
3398 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3399 e1kRaiseInterrupt(pThis, ICR_RXT0);
3400}
3401
3402# endif /* E1K_USE_RX_TIMERS */
3403
3404/**
3405 * Late Interrupt Timer handler.
3406 *
3407 * @param pDevIns Pointer to device instance structure.
3408 * @param pTimer Pointer to the timer.
3409 * @param pvUser NULL.
3410 * @thread EMT
3411 */
3412static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3413{
3414 RT_NOREF(pDevIns, pTimer);
3415 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3416
3417 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3418 STAM_COUNTER_INC(&pThis->StatLateInts);
3419 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3420# if 0
3421 if (pThis->iStatIntLost > -100)
3422 pThis->iStatIntLost--;
3423# endif
3424 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, 0);
3425 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3426}
3427
3428/**
3429 * Link Up Timer handler.
3430 *
3431 * @param pDevIns Pointer to device instance structure.
3432 * @param pTimer Pointer to the timer.
3433 * @param pvUser NULL.
3434 * @thread EMT
3435 */
3436static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3437{
3438 RT_NOREF(pDevIns, pTimer);
3439 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3440
3441 /*
3442 * This can happen if we set the link status to down when the link-up timer was
3443 * already armed (shortly after e1kLoadDone(), or when the cable was disconnected
3444 * and re-connected very quickly). Moreover, 82543GC triggers LSC
3445 * on reset even if the cable is unplugged (see @bugref{8942}).
3446 */
3447 if (pThis->fCableConnected)
3448 {
3449 /* 82543GC does not have an internal PHY */
3450 if (pThis->eChip == E1K_CHIP_82543GC || (CTRL & CTRL_SLU))
3451 e1kR3LinkUp(pThis);
3452 }
3453#ifdef E1K_LSC_ON_RESET
3454 else if (pThis->eChip == E1K_CHIP_82543GC)
3455 e1kR3LinkDown(pThis);
3456#endif /* E1K_LSC_ON_RESET */
3457}
3458
3459#endif /* IN_RING3 */
3460
3461/**
3462 * Sets up the GSO context according to the new TSE context descriptor.
3463 *
3464 * @param pGso The GSO context to setup.
3465 * @param pCtx The context descriptor.
3466 */
3467DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3468{
3469 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3470
3471 /*
3472 * See if the context descriptor describes something that could be TCP or
3473 * UDP over IPv[46].
3474 */
3475 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3476 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3477 {
3478 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3479 return;
3480 }
3481 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3482 {
3483 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3484 return;
3485 }
3486 if (RT_UNLIKELY( pCtx->dw2.fTCP
3487 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3488 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3489 {
3490 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3491 return;
3492 }
3493
3494 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
3495 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3496 {
3497 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3498 return;
3499 }
3500
3501 /* IPv4 checksum offset. */
3502 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3503 {
3504 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3505 return;
3506 }
3507
3508 /* TCP/UDP checksum offsets. */
3509 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3510 != ( pCtx->dw2.fTCP
3511 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3512 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3513 {
3514 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS, pCtx->dw2.fTCP));
3515 return;
3516 }
3517
3518 /*
3519 * Because internal networking uses a 16-bit size field for the GSO context
3520 * plus frame, we have to make sure we don't exceed it.
3521 */
3522 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3523 {
3524 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3525 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3526 return;
3527 }
3528
3529 /*
3530 * We're good for now - we'll do more checks when seeing the data.
3531 * So, figure the type of offloading and setup the context.
3532 */
3533 if (pCtx->dw2.fIP)
3534 {
3535 if (pCtx->dw2.fTCP)
3536 {
3537 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3538 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3539 }
3540 else
3541 {
3542 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3543 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3544 }
3545 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3546 * this yet it seems)... */
3547 }
3548 else
3549 {
3550 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3551 if (pCtx->dw2.fTCP)
3552 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3553 else
3554 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3555 }
3556 pGso->offHdr1 = pCtx->ip.u8CSS;
3557 pGso->offHdr2 = pCtx->tu.u8CSS;
3558 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3559 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
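     /* For instance, a typical TCP/IPv4 TSE context with IPCSS=14, TUCSS=34,
      * HDRLEN=54 and MSS=1460 yields offHdr1=14, offHdr2=34,
      * cbHdrsSeg=cbHdrsTotal=54 and cbMaxSeg=1460. */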
3560 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3561 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3562 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3563}
3564
3565/**
3566 * Checks if we can use GSO processing for the current TSE frame.
3567 *
3568 * @param pThis The device state structure.
3569 * @param pGso The GSO context.
3570 * @param pData The first data descriptor of the frame.
3571 * @param pCtx The TSO context descriptor.
3572 */
3573DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3574{
3575 if (!pData->cmd.fTSE)
3576 {
3577 E1kLog2(("e1kCanDoGso: !TSE\n"));
3578 return false;
3579 }
3580 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3581 {
3582 E1kLog(("e1kCanDoGso: VLE\n"));
3583 return false;
3584 }
3585 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3586 {
3587 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3588 return false;
3589 }
3590
3591 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3592 {
3593 case PDMNETWORKGSOTYPE_IPV4_TCP:
3594 case PDMNETWORKGSOTYPE_IPV4_UDP:
3595 if (!pData->dw3.fIXSM)
3596 {
3597 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3598 return false;
3599 }
3600 if (!pData->dw3.fTXSM)
3601 {
3602 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3603 return false;
3604 }
3605 /** @todo What other checks should we perform here? Ethernet frame type? */
3606 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3607 return true;
3608
3609 case PDMNETWORKGSOTYPE_IPV6_TCP:
3610 case PDMNETWORKGSOTYPE_IPV6_UDP:
3611 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3612 {
3613 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3614 return false;
3615 }
3616 if (!pData->dw3.fTXSM)
3617 {
3618 E1kLog(("e1kCanDoGso: TXSM (IPv6)\n"));
3619 return false;
3620 }
3621 /** @todo What other checks should we perform here? Ethernet frame type? */
3622 E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3623 return true;
3624
3625 default:
3626 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3627 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3628 return false;
3629 }
3630}
3631
3632/**
3633 * Frees the current xmit buffer.
3634 *
3635 * @param pThis The device state structure.
3636 */
3637static void e1kXmitFreeBuf(PE1KSTATE pThis)
3638{
3639 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3640 if (pSg)
3641 {
3642 pThis->CTX_SUFF(pTxSg) = NULL;
3643
3644 if (pSg->pvAllocator != pThis)
3645 {
3646 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3647 if (pDrv)
3648 pDrv->pfnFreeBuf(pDrv, pSg);
3649 }
3650 else
3651 {
3652 /* loopback */
3653 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3654 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3655 pSg->fFlags = 0;
3656 pSg->pvAllocator = NULL;
3657 }
3658 }
3659}
3660
3661#ifndef E1K_WITH_TXD_CACHE
3662/**
3663 * Allocates an xmit buffer.
3664 *
3665 * @returns See PDMINETWORKUP::pfnAllocBuf.
3666 * @param pThis The device state structure.
3667 * @param cbMin The minimum frame size.
3668 * @param fExactSize Whether cbMin is exact or if we have to max it
3669 * out to the max MTU size.
3670 * @param fGso Whether this is a GSO frame or not.
3671 */
3672DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, size_t cbMin, bool fExactSize, bool fGso)
3673{
3674 /* Adjust cbMin if necessary. */
3675 if (!fExactSize)
3676 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3677
3678 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3679 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3680 e1kXmitFreeBuf(pThis);
3681 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3682
3683 /*
3684 * Allocate the buffer.
3685 */
3686 PPDMSCATTERGATHER pSg;
3687 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3688 {
3689 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3690 if (RT_UNLIKELY(!pDrv))
3691 return VERR_NET_DOWN;
3692 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3693 if (RT_FAILURE(rc))
3694 {
3695 /* Suspend TX as we are out of buffers atm */
3696 STATUS |= STATUS_TXOFF;
3697 return rc;
3698 }
3699 }
3700 else
3701 {
3702 /* Create a loopback using the fallback buffer and preallocated SG. */
3703 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3704 pSg = &pThis->uTxFallback.Sg;
3705 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3706 pSg->cbUsed = 0;
3707 pSg->cbAvailable = 0;
3708 pSg->pvAllocator = pThis;
3709 pSg->pvUser = NULL; /* No GSO here. */
3710 pSg->cSegs = 1;
3711 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3712 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3713 }
3714
3715 pThis->CTX_SUFF(pTxSg) = pSg;
3716 return VINF_SUCCESS;
3717}
3718#else /* E1K_WITH_TXD_CACHE */
3719/**
3720 * Allocates an xmit buffer.
3721 *
3722 * @returns See PDMINETWORKUP::pfnAllocBuf.
3723 * @param pThis The device state structure.
3727 * @param fGso Whether this is a GSO frame or not.
3728 */
3729DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, bool fGso)
3730{
3731 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3732 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3733 e1kXmitFreeBuf(pThis);
3734 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3735
3736 /*
3737 * Allocate the buffer.
3738 */
3739 PPDMSCATTERGATHER pSg;
3740 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3741 {
3742 if (pThis->cbTxAlloc == 0)
3743 {
3744 /* Zero packet, no need for the buffer */
3745 return VINF_SUCCESS;
3746 }
3747
3748 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3749 if (RT_UNLIKELY(!pDrv))
3750 return VERR_NET_DOWN;
3751 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3752 if (RT_FAILURE(rc))
3753 {
3754 /* Suspend TX as we are out of buffers atm */
3755 STATUS |= STATUS_TXOFF;
3756 return rc;
3757 }
3758 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3759 pThis->szPrf, pThis->cbTxAlloc,
3760 pThis->fVTag ? "VLAN " : "",
3761 pThis->fGSO ? "GSO " : ""));
3762 }
3763 else
3764 {
3765 /* Create a loopback using the fallback buffer and preallocated SG. */
3766 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3767 pSg = &pThis->uTxFallback.Sg;
3768 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3769 pSg->cbUsed = 0;
3770 pSg->cbAvailable = 0;
3771 pSg->pvAllocator = pThis;
3772 pSg->pvUser = NULL; /* No GSO here. */
3773 pSg->cSegs = 1;
3774 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3775 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3776 }
3777 pThis->cbTxAlloc = 0;
3778
3779 pThis->CTX_SUFF(pTxSg) = pSg;
3780 return VINF_SUCCESS;
3781}
3782#endif /* E1K_WITH_TXD_CACHE */
3783
3784/**
3785 * Checks if it's a GSO buffer or not.
3786 *
3787 * @returns true / false.
3788 * @param pTxSg The scatter / gather buffer.
3789 */
3790DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3791{
3792#if 0
3793 if (!pTxSg)
3794 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3795 if (pTxSg && !pTxSg->pvUser)
3796 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3797#endif
3798 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3799}
3800
3801#ifndef E1K_WITH_TXD_CACHE
3802/**
3803 * Load transmit descriptor from guest memory.
3804 *
3805 * @param pThis The device state structure.
3806 * @param pDesc Pointer to descriptor union.
3807 * @param addr Physical address in guest context.
3808 * @thread E1000_TX
3809 */
3810DECLINLINE(void) e1kLoadDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3811{
3812 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3813}
3814#else /* E1K_WITH_TXD_CACHE */
3815/**
3816 * Load transmit descriptors from guest memory.
3817 *
3818 * We need two physical reads in case the tail wrapped around the end of the TX
3819 * descriptor ring.
3820 *
3821 * @returns the actual number of descriptors fetched.
3822 * @param pThis The device state structure.
3825 * @thread E1000_TX
3826 */
3827DECLINLINE(unsigned) e1kTxDLoadMore(PE1KSTATE pThis)
3828{
3829 Assert(pThis->iTxDCurrent == 0);
3830 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
3831 unsigned nDescsAvailable = e1kGetTxLen(pThis) - pThis->nTxDFetched;
3832 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
3833 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3834 unsigned nFirstNotLoaded = (TDH + pThis->nTxDFetched) % nDescsTotal;
3835 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
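     /* Example (for illustration): with a 16-descriptor ring, TDH=14, an empty
      * cache and 5 descriptors pending, nFirstNotLoaded=14, so the first read
      * below fetches 2 descriptors (14 and 15) and the second read fetches the
      * remaining 3 from the ring base. */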
3836 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
3837 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3838 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
3839 nFirstNotLoaded, nDescsInSingleRead));
3840 if (nDescsToFetch == 0)
3841 return 0;
3842 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
3843 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3844 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3845 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3846 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3847 pThis->szPrf, nDescsInSingleRead,
3848 TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
3849 nFirstNotLoaded, TDLEN, TDH, TDT));
3850 if (nDescsToFetch > nDescsInSingleRead)
3851 {
3852 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3853 ((uint64_t)TDBAH << 32) + TDBAL,
3854 pFirstEmptyDesc + nDescsInSingleRead,
3855 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3856 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3857 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
3858 TDBAH, TDBAL));
3859 }
3860 pThis->nTxDFetched += nDescsToFetch;
3861 return nDescsToFetch;
3862}
3863
3864/**
3865 * Load transmit descriptors from guest memory only if there are no loaded
3866 * descriptors.
3867 *
3868 * @returns true if there are descriptors in cache.
3869 * @param pThis The device state structure.
3872 * @thread E1000_TX
3873 */
3874DECLINLINE(bool) e1kTxDLazyLoad(PE1KSTATE pThis)
3875{
3876 if (pThis->nTxDFetched == 0)
3877 return e1kTxDLoadMore(pThis) != 0;
3878 return true;
3879}
3880#endif /* E1K_WITH_TXD_CACHE */
3881
3882/**
3883 * Write back transmit descriptor to guest memory.
3884 *
3885 * @param pThis The device state structure.
3886 * @param pDesc Pointer to descriptor union.
3887 * @param addr Physical address in guest context.
3888 * @thread E1000_TX
3889 */
3890DECLINLINE(void) e1kWriteBackDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3891{
3892 /* Strictly speaking only the last half of the descriptor needs to be written back, but we write back the whole descriptor for simplicity. */
3893 e1kPrintTDesc(pThis, pDesc, "^^^");
3894 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3895}
3896
3897/**
3898 * Transmit complete frame.
3899 *
3900 * @remarks We skip the FCS since we're not responsible for sending anything to
3901 * a real ethernet wire.
3902 *
3903 * @param pThis The device state structure.
3904 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3905 * @thread E1000_TX
3906 */
3907static void e1kTransmitFrame(PE1KSTATE pThis, bool fOnWorkerThread)
3908{
3909 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3910 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
3911 Assert(!pSg || pSg->cSegs == 1);
3912
3913 if (cbFrame > 70) /* unqualified guess */
3914 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
3915
3916#ifdef E1K_INT_STATS
3917 if (cbFrame <= 1514)
3918 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
3919 else if (cbFrame <= 2962)
3920 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
3921 else if (cbFrame <= 4410)
3922 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
3923 else if (cbFrame <= 5858)
3924 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
3925 else if (cbFrame <= 7306)
3926 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
3927 else if (cbFrame <= 8754)
3928 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
3929 else if (cbFrame <= 16384)
3930 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
3931 else if (cbFrame <= 32768)
3932 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
3933 else
3934 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
3935#endif /* E1K_INT_STATS */
3936
3937 /* Add VLAN tag */
3938 if (cbFrame > 12 && pThis->fVTag)
3939 {
3940 E1kLog3(("%s Inserting VLAN tag %08x\n",
3941 pThis->szPrf, RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
3942 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
3943 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
3944 pSg->cbUsed += 4;
3945 cbFrame += 4;
3946 Assert(pSg->cbUsed == cbFrame);
3947 Assert(pSg->cbUsed <= pSg->cbAvailable);
3948 }
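     /* The 4-byte 802.1Q tag (TPID taken from VET, e.g. 0x8100, followed by the
      * TCI) is inserted right after the 12 bytes of destination and source MAC
      * addresses, shifting the rest of the frame up by 4 bytes. */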
3949/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
3950 "%.*Rhxd\n"
3951 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
3952 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
3953
3954 /* Update the stats */
3955 E1K_INC_CNT32(TPT);
3956 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
3957 E1K_INC_CNT32(GPTC);
3958 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
3959 E1K_INC_CNT32(BPTC);
3960 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
3961 E1K_INC_CNT32(MPTC);
3962 /* Update octet transmit counter */
3963 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
3964 if (pThis->CTX_SUFF(pDrv))
3965 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
3966 if (cbFrame == 64)
3967 E1K_INC_CNT32(PTC64);
3968 else if (cbFrame < 128)
3969 E1K_INC_CNT32(PTC127);
3970 else if (cbFrame < 256)
3971 E1K_INC_CNT32(PTC255);
3972 else if (cbFrame < 512)
3973 E1K_INC_CNT32(PTC511);
3974 else if (cbFrame < 1024)
3975 E1K_INC_CNT32(PTC1023);
3976 else
3977 E1K_INC_CNT32(PTC1522);
3978
3979 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
3980
3981 /*
3982 * Dump and send the packet.
3983 */
3984 int rc = VERR_NET_DOWN;
3985 if (pSg && pSg->pvAllocator != pThis)
3986 {
3987 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
3988
3989 pThis->CTX_SUFF(pTxSg) = NULL;
3990 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3991 if (pDrv)
3992 {
3993 /* Release critical section to avoid deadlock in CanReceive */
3994 //e1kCsLeave(pThis);
3995 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3996 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
3997 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3998 //e1kCsEnter(pThis, RT_SRC_POS);
3999 }
4000 }
4001 else if (pSg)
4002 {
4003 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
4004 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
4005
4006 /** @todo do we actually need to check that we're in loopback mode here? */
4007 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
4008 {
4009 E1KRXDST status;
4010 RT_ZERO(status);
4011 status.fPIF = true;
4012 e1kHandleRxPacket(pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
4013 rc = VINF_SUCCESS;
4014 }
4015 e1kXmitFreeBuf(pThis);
4016 }
4017 else
4018 rc = VERR_NET_DOWN;
4019 if (RT_FAILURE(rc))
4020 {
4021 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
4022 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
4023 }
4024
4025 pThis->led.Actual.s.fWriting = 0;
4026}
4027
4028/**
4029 * Compute and write internet checksum (e1kCSum16) at the specified offset.
4030 *
4031 * @param pThis The device state structure.
4032 * @param pPkt Pointer to the packet.
4033 * @param u16PktLen Total length of the packet.
4034 * @param cso Offset in packet to write checksum at.
4035 * @param css Offset in packet to start computing
4036 * checksum from.
4037 * @param cse Offset in packet to stop computing
4038 * checksum at.
4039 * @thread E1000_TX
4040 */
4041static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
4042{
4043 RT_NOREF1(pThis);
4044
4045 if (css >= u16PktLen)
4046 {
4047 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
4048 pThis->szPrf, css, u16PktLen));
4049 return;
4050 }
4051
4052 if (cso >= u16PktLen - 1)
4053 {
4054 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
4055 pThis->szPrf, cso, u16PktLen));
4056 return;
4057 }
4058
4059 if (cse == 0)
4060 cse = u16PktLen - 1;
4061 else if (cse < css)
4062 {
4063 E1kLog2(("%s css(%X) is greater than cse(%X), checksum is not inserted\n",
4064 pThis->szPrf, css, cse));
4065 return;
4066 }
4067
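     /* For instance, for the IPv4 header checksum of a plain Ethernet frame one
      * would get css=14 (start of the IP header), cso=24 (offset of ip_sum) and
      * cse=33 (last byte of a 20-byte IP header). */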
4068 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
4069 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
4070 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
4071 *(uint16_t*)(pPkt + cso) = u16ChkSum;
4072}
4073
4074/**
4075 * Add a part of descriptor's buffer to transmit frame.
4076 *
4077 * @remarks data.u64BufAddr is used unconditionally for both data
4078 * and legacy descriptors since it is identical to
4079 * legacy.u64BufAddr.
4080 *
4081 * @param pThis The device state structure.
4082 * @param PhysAddr Physical address of the descriptor's data buffer in guest memory.
4083 * @param u16Len Length of buffer to the end of segment.
4084 * @param fSend Force packet sending.
4085 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4086 * @thread E1000_TX
4087 */
4088#ifndef E1K_WITH_TXD_CACHE
4089static void e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4090{
4091 /* TCP header being transmitted */
4092 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4093 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4094 /* IP header being transmitted */
4095 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4096 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4097
4098 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4099 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4100 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4101
4102 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4103 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4104 E1kLog3(("%s Dump of the segment:\n"
4105 "%.*Rhxd\n"
4106 "%s --- End of dump ---\n",
4107 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4108 pThis->u16TxPktLen += u16Len;
4109 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4110 pThis->szPrf, pThis->u16TxPktLen));
4111 if (pThis->u16HdrRemain > 0)
4112 {
4113 /* The header was not complete, check if it is now */
4114 if (u16Len >= pThis->u16HdrRemain)
4115 {
4116 /* The rest is payload */
4117 u16Len -= pThis->u16HdrRemain;
4118 pThis->u16HdrRemain = 0;
4119 /* Save partial checksum and flags */
4120 pThis->u32SavedCsum = pTcpHdr->chksum;
4121 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4122 /* Clear FIN and PSH flags now and set them only in the last segment */
4123 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4124 }
4125 else
4126 {
4127 /* Still not */
4128 pThis->u16HdrRemain -= u16Len;
4129 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4130 pThis->szPrf, pThis->u16HdrRemain));
4131 return;
4132 }
4133 }
4134
4135 pThis->u32PayRemain -= u16Len;
4136
4137 if (fSend)
4138 {
4139 /* Leave ethernet header intact */
4140 /* IP Total Length = payload + headers - ethernet header */
4141 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4142 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4143 pThis->szPrf, ntohs(pIpHdr->total_len)));
4144 /* Update IP Checksum */
4145 pIpHdr->chksum = 0;
4146 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4147 pThis->contextTSE.ip.u8CSO,
4148 pThis->contextTSE.ip.u8CSS,
4149 pThis->contextTSE.ip.u16CSE);
4150
4151 /* Update TCP flags */
4152 /* Restore original FIN and PSH flags for the last segment */
4153 if (pThis->u32PayRemain == 0)
4154 {
4155 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4156 E1K_INC_CNT32(TSCTC);
4157 }
4158 /* Add TCP length to partial pseudo header sum */
4159 uint32_t csum = pThis->u32SavedCsum
4160 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4161 while (csum >> 16)
4162 csum = (csum >> 16) + (csum & 0xFFFF);
4163 pTcpHdr->chksum = csum;
4164 /* Compute final checksum */
4165 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4166 pThis->contextTSE.tu.u8CSO,
4167 pThis->contextTSE.tu.u8CSS,
4168 pThis->contextTSE.tu.u16CSE);
4169
4170 /*
4171 * Transmit it. If we've used the SG already, allocate a new one before
4172 * we copy the data.
4173 */
4174 if (!pThis->CTX_SUFF(pTxSg))
4175 e1kXmitAllocBuf(pThis, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4176 if (pThis->CTX_SUFF(pTxSg))
4177 {
4178 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4179 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4180 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4181 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4182 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4183 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4184 }
4185 e1kTransmitFrame(pThis, fOnWorkerThread);
4186
4187 /* Update Sequence Number */
4188 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4189 - pThis->contextTSE.dw3.u8HDRLEN);
4190 /* Increment IP identification */
4191 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4192 }
4193}
4194#else /* E1K_WITH_TXD_CACHE */
4195static int e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4196{
4197 int rc = VINF_SUCCESS;
4198 /* TCP header being transmitted */
4199 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4200 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4201 /* IP header being transmitted */
4202 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4203 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4204
4205 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4206 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4207 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4208
4209 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4210 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4211 E1kLog3(("%s Dump of the segment:\n"
4212 "%.*Rhxd\n"
4213 "%s --- End of dump ---\n",
4214 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4215 pThis->u16TxPktLen += u16Len;
4216 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4217 pThis->szPrf, pThis->u16TxPktLen));
4218 if (pThis->u16HdrRemain > 0)
4219 {
4220 /* The header was not complete, check if it is now */
4221 if (u16Len >= pThis->u16HdrRemain)
4222 {
4223 /* The rest is payload */
4224 u16Len -= pThis->u16HdrRemain;
4225 pThis->u16HdrRemain = 0;
4226 /* Save partial checksum and flags */
4227 pThis->u32SavedCsum = pTcpHdr->chksum;
4228 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4229 /* Clear FIN and PSH flags now and set them only in the last segment */
4230 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4231 }
4232 else
4233 {
4234 /* Still not */
4235 pThis->u16HdrRemain -= u16Len;
4236 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4237 pThis->szPrf, pThis->u16HdrRemain));
4238 return rc;
4239 }
4240 }
4241
4242 pThis->u32PayRemain -= u16Len;
4243
4244 if (fSend)
4245 {
4246 /* Leave ethernet header intact */
4247 /* IP Total Length = payload + headers - ethernet header */
4248 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4249 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4250 pThis->szPrf, ntohs(pIpHdr->total_len)));
4251 /* Update IP Checksum */
4252 pIpHdr->chksum = 0;
4253 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4254 pThis->contextTSE.ip.u8CSO,
4255 pThis->contextTSE.ip.u8CSS,
4256 pThis->contextTSE.ip.u16CSE);
4257
4258 /* Update TCP flags */
4259 /* Restore original FIN and PSH flags for the last segment */
4260 if (pThis->u32PayRemain == 0)
4261 {
4262 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4263 E1K_INC_CNT32(TSCTC);
4264 }
4265 /* Add TCP length to partial pseudo header sum */
4266 uint32_t csum = pThis->u32SavedCsum
4267 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4268 while (csum >> 16)
4269 csum = (csum >> 16) + (csum & 0xFFFF);
4270 pTcpHdr->chksum = csum;
4271 /* Compute final checksum */
4272 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4273 pThis->contextTSE.tu.u8CSO,
4274 pThis->contextTSE.tu.u8CSS,
4275 pThis->contextTSE.tu.u16CSE);
4276
4277 /*
4278 * Transmit it.
4279 */
4280 if (pThis->CTX_SUFF(pTxSg))
4281 {
4282 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4283 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4284 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4285 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4286 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4287 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4288 }
4289 e1kTransmitFrame(pThis, fOnWorkerThread);
4290
4291 /* Update Sequence Number */
4292 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4293 - pThis->contextTSE.dw3.u8HDRLEN);
4294 /* Increment IP identification */
4295 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4296
4297 /* Allocate new buffer for the next segment. */
4298 if (pThis->u32PayRemain)
4299 {
4300 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4301 pThis->contextTSE.dw3.u16MSS)
4302 + pThis->contextTSE.dw3.u8HDRLEN
4303 + (pThis->fVTag ? 4 : 0);
4304 rc = e1kXmitAllocBuf(pThis, false /* fGSO */);
4305 }
4306 }
4307
4308 return rc;
4309}
4310#endif /* E1K_WITH_TXD_CACHE */
4311
4312#ifndef E1K_WITH_TXD_CACHE
4313/**
4314 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4315 * frame.
4316 *
4317 * We construct the frame in the fallback buffer first and then copy it to the SG
4318 * buffer before passing it down to the network driver code.
4319 *
4320 * @returns true if the frame should be transmitted, false if not.
4321 *
4322 * @param pThis The device state structure.
4323 * @param pDesc Pointer to the descriptor to transmit.
4324 * @param cbFragment Length of descriptor's buffer.
4325 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4326 * @thread E1000_TX
4327 */
4328static bool e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4329{
4330 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4331 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4332 Assert(pDesc->data.cmd.fTSE);
4333 Assert(!e1kXmitIsGsoBuf(pTxSg));
4334
4335 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4336 Assert(u16MaxPktLen != 0);
4337 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4338
4339 /*
4340 * Carve out segments.
4341 */
4342 do
4343 {
4344 /* Calculate how many bytes we have left in this TCP segment */
4345 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4346 if (cb > cbFragment)
4347 {
4348 /* This descriptor fits completely into current segment */
4349 cb = cbFragment;
4350 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4351 }
4352 else
4353 {
4354 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4355 /*
4356 * Rewind the packet tail pointer to the beginning of payload,
4357 * so we continue writing right beyond the header.
4358 */
4359 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4360 }
4361
4362 pDesc->data.u64BufAddr += cb;
4363 cbFragment -= cb;
4364 } while (cbFragment > 0);
4365
4366 if (pDesc->data.cmd.fEOP)
4367 {
4368 /* End of packet, next segment will contain header. */
4369 if (pThis->u32PayRemain != 0)
4370 E1K_INC_CNT32(TSCTFC);
4371 pThis->u16TxPktLen = 0;
4372 e1kXmitFreeBuf(pThis);
4373 }
4374
4375 return false;
4376}
4377#else /* E1K_WITH_TXD_CACHE */
4378/**
4379 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4380 * frame.
4381 *
4382 * We construct the frame in the fallback buffer first and then copy it to the SG
4383 * buffer before passing it down to the network driver code.
4384 *
4385 * @returns VBox status code.
4386 *
4387 * @param pThis The device state structure.
4388 * @param pDesc Pointer to the descriptor to transmit.
4390 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4391 * @thread E1000_TX
4392 */
4393static int e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4394{
4395#ifdef VBOX_STRICT
4396 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4397 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4398 Assert(pDesc->data.cmd.fTSE);
4399 Assert(!e1kXmitIsGsoBuf(pTxSg));
4400#endif
4401
4402 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4403
4404 /*
4405 * Carve out segments.
4406 */
4407 int rc = VINF_SUCCESS;
4408 do
4409 {
4410 /* Calculate how many bytes we have left in this TCP segment */
4411 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4412 if (cb > pDesc->data.cmd.u20DTALEN)
4413 {
4414 /* This descriptor fits completely into current segment */
4415 cb = pDesc->data.cmd.u20DTALEN;
4416 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4417 }
4418 else
4419 {
4420 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4421 /*
4422 * Rewind the packet tail pointer to the beginning of payload,
4423 * so we continue writing right beyond the header.
4424 */
4425 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4426 }
4427
4428 pDesc->data.u64BufAddr += cb;
4429 pDesc->data.cmd.u20DTALEN -= cb;
4430 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4431
4432 if (pDesc->data.cmd.fEOP)
4433 {
4434 /* End of packet, next segment will contain header. */
4435 if (pThis->u32PayRemain != 0)
4436 E1K_INC_CNT32(TSCTFC);
4437 pThis->u16TxPktLen = 0;
4438 e1kXmitFreeBuf(pThis);
4439 }
4440
4441 return VINF_SUCCESS; /// @todo consider rc;
4442}
4443#endif /* E1K_WITH_TXD_CACHE */
4444
4445
4446/**
4447 * Add descriptor's buffer to transmit frame.
4448 *
4449 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4450 * TSE frames we cannot handle as GSO.
4451 *
4452 * @returns true on success, false on failure.
4453 *
4454 * @param pThis The device state structure.
4455 * @param PhysAddr The physical address of the descriptor buffer.
4456 * @param cbFragment Length of descriptor's buffer.
4457 * @thread E1000_TX
4458 */
4459static bool e1kAddToFrame(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
4460{
4461 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4462 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4463 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4464
4465 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4466 {
4467 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4468 return false;
4469 }
4470 if (RT_UNLIKELY( fGso && cbNewPkt > pTxSg->cbAvailable ))
4471 {
4472 E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4473 return false;
4474 }
4475
4476 if (RT_LIKELY(pTxSg))
4477 {
4478 Assert(pTxSg->cSegs == 1);
4479 Assert(pTxSg->cbUsed == pThis->u16TxPktLen);
4480
4481 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4482 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4483
4484 pTxSg->cbUsed = cbNewPkt;
4485 }
4486 pThis->u16TxPktLen = cbNewPkt;
4487
4488 return true;
4489}
4490
4491
4492/**
4493 * Write the descriptor back to guest memory and notify the guest.
4494 *
4495 * @param pThis The device state structure.
4496 * @param pDesc Pointer to the descriptor have been transmitted.
4497 * @param addr Physical address of the descriptor in guest memory.
4498 * @thread E1000_TX
4499 */
4500static void e1kDescReport(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4501{
4502 /*
4503 * We fake descriptor write-back bursting. Descriptors are written back as they are
4504 * processed.
4505 */
4506 /* Let's pretend we process descriptors. Write back with DD set. */
4507 /*
4508 * Prior to r71586 we tried to accommodate the case when write-back bursts
4509 * are enabled without actually implementing bursting by writing back all
4510 * descriptors, even the ones that do not have RS set. This caused kernel
4511 * panics with Linux SMP kernels, as the e1000 driver tried to free up the skb
4512 * associated with a written-back descriptor if it happened to be a context
4513 * descriptor, since context descriptors do not have an skb associated with them.
4514 * Starting from r71586 we write back only the descriptors with RS set,
4515 * which is a little bit different from what the real hardware does in
4516 * case there is a chain of data descriptors where some of them have RS set
4517 * and others do not. It is a very uncommon scenario imho.
4518 * We need to check RPS as well since some legacy drivers use it instead of
4519 * RS even with newer cards.
4520 */
4521 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4522 {
4523 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4524 e1kWriteBackDesc(pThis, pDesc, addr);
4525 if (pDesc->legacy.cmd.fEOP)
4526 {
4527//#ifdef E1K_USE_TX_TIMERS
4528 if (pThis->fTidEnabled && pDesc->legacy.cmd.fIDE)
4529 {
4530 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4531 //if (pThis->fIntRaised)
4532 //{
4533 // /* Interrupt is already pending, no need for timers */
4534 // ICR |= ICR_TXDW;
4535 //}
4536 //else {
4537 /* Arm the timer to fire in TIVD usec (discard .024) */
4538 e1kArmTimer(pThis, pThis->CTX_SUFF(pTIDTimer), TIDV);
4539# ifndef E1K_NO_TAD
4540 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4541 E1kLog2(("%s Checking if TAD timer is running\n",
4542 pThis->szPrf));
4543 if (TADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pTADTimer)))
4544 e1kArmTimer(pThis, pThis->CTX_SUFF(pTADTimer), TADV);
4545# endif /* E1K_NO_TAD */
4546 }
4547 else
4548 {
4549 if (pThis->fTidEnabled)
4550 {
4551 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4552 pThis->szPrf));
4553 /* Cancel both timers if armed and fire immediately. */
4554# ifndef E1K_NO_TAD
4555 TMTimerStop(pThis->CTX_SUFF(pTADTimer));
4556# endif
4557 TMTimerStop(pThis->CTX_SUFF(pTIDTimer));
4558 }
4559//#endif /* E1K_USE_TX_TIMERS */
4560 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4561 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXDW);
4562//#ifdef E1K_USE_TX_TIMERS
4563 }
4564//#endif /* E1K_USE_TX_TIMERS */
4565 }
4566 }
4567 else
4568 {
4569 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4570 }
4571}
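
/*
 * Editor's note (illustrative, hypothetical guest-side pseudo-code): a guest
 * driver that sets RS on a descriptor typically reclaims the associated buffer
 * once we write the descriptor back with the DD status bit set:
 *
 *     while (!(ring[head].status & TXD_STAT_DD))
 *         ;                          // or wait for the TXDW/TXD_LOW interrupt
 *     release_tx_buffer(ring[head]); // names are illustrative only
 *
 * This is why writing back descriptors that never had RS set (see the comment
 * at the top of this function) could confuse guest drivers.
 */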
4572
4573#ifndef E1K_WITH_TXD_CACHE
4574
4575/**
4576 * Process Transmit Descriptor.
4577 *
4578 * E1000 supports three types of transmit descriptors:
4579 * - legacy: data descriptors of the older, context-less format.
4580 * - data: same as legacy, but with new offloading capabilities.
4581 * - context: sets up the context for the following data descriptors.
4582 *
4583 * @param pThis The device state structure.
4584 * @param pDesc Pointer to descriptor union.
4585 * @param addr Physical address of descriptor in guest memory.
4586 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4587 * @thread E1000_TX
4588 */
4589static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr, bool fOnWorkerThread)
4590{
4591 int rc = VINF_SUCCESS;
4592 uint32_t cbVTag = 0;
4593
4594 e1kPrintTDesc(pThis, pDesc, "vvv");
4595
4596//#ifdef E1K_USE_TX_TIMERS
4597 if (pThis->fTidEnabled)
4598 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4599//#endif /* E1K_USE_TX_TIMERS */
4600
4601 switch (e1kGetDescType(pDesc))
4602 {
4603 case E1K_DTYP_CONTEXT:
4604 if (pDesc->context.dw2.fTSE)
4605 {
4606 pThis->contextTSE = pDesc->context;
4607 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4608 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4609 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4610 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4611 }
4612 else
4613 {
4614 pThis->contextNormal = pDesc->context;
4615 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4616 }
4617 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4618 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4619 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4620 pDesc->context.ip.u8CSS,
4621 pDesc->context.ip.u8CSO,
4622 pDesc->context.ip.u16CSE,
4623 pDesc->context.tu.u8CSS,
4624 pDesc->context.tu.u8CSO,
4625 pDesc->context.tu.u16CSE));
4626 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4627 e1kDescReport(pThis, pDesc, addr);
4628 break;
4629
4630 case E1K_DTYP_DATA:
4631 {
4632 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4633 {
4634 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4635 /** @todo Same as legacy when !TSE. See below. */
4636 break;
4637 }
4638 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4639 &pThis->StatTxDescTSEData:
4640 &pThis->StatTxDescData);
4641 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4642 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4643
4644 /*
4645 * The last descriptor of a non-TSE packet must have the VLE flag set.
4646 * TSE packets have the VLE flag in the first descriptor. The latter
4647 * case is taken care of a bit later when cbVTag gets assigned.
4648 *
4649 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4650 */
4651 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4652 {
4653 pThis->fVTag = pDesc->data.cmd.fVLE;
4654 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4655 }
4656 /*
4657 * First fragment: Allocate new buffer and save the IXSM and TXSM
4658 * packet options as these are only valid in the first fragment.
4659 */
4660 if (pThis->u16TxPktLen == 0)
4661 {
4662 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4663 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4664 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4665 pThis->fIPcsum ? " IP" : "",
4666 pThis->fTCPcsum ? " TCP/UDP" : ""));
4667 if (pDesc->data.cmd.fTSE)
4668 {
4669 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4670 pThis->fVTag = pDesc->data.cmd.fVLE;
4671 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4672 cbVTag = pThis->fVTag ? 4 : 0;
4673 }
4674 else if (pDesc->data.cmd.fEOP)
4675 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4676 else
4677 cbVTag = 4;
4678 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4679 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4680 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4681 true /*fExactSize*/, true /*fGso*/);
4682 else if (pDesc->data.cmd.fTSE)
4683 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4684 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4685 else
4686 rc = e1kXmitAllocBuf(pThis, pDesc->data.cmd.u20DTALEN + cbVTag,
4687 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4688
4689 /**
4690 * @todo Perhaps it is not that simple for GSO packets! We may
4691 * need to unwind some changes.
4692 */
4693 if (RT_FAILURE(rc))
4694 {
4695 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4696 break;
4697 }
4698 /** @todo Is there any way of indicating errors other than collisions? Like
4699 * VERR_NET_DOWN. */
4700 }
4701
4702 /*
4703 * Add the descriptor data to the frame. If the frame is complete,
4704 * transmit it and reset the u16TxPktLen field.
4705 */
4706 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4707 {
4708 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4709 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4710 if (pDesc->data.cmd.fEOP)
4711 {
4712 if ( fRc
4713 && pThis->CTX_SUFF(pTxSg)
4714 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4715 {
4716 e1kTransmitFrame(pThis, fOnWorkerThread);
4717 E1K_INC_CNT32(TSCTC);
4718 }
4719 else
4720 {
4721 if (fRc)
4722 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4723 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4724 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4725 e1kXmitFreeBuf(pThis);
4726 E1K_INC_CNT32(TSCTFC);
4727 }
4728 pThis->u16TxPktLen = 0;
4729 }
4730 }
4731 else if (!pDesc->data.cmd.fTSE)
4732 {
4733 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4734 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4735 if (pDesc->data.cmd.fEOP)
4736 {
4737 if (fRc && pThis->CTX_SUFF(pTxSg))
4738 {
4739 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4740 if (pThis->fIPcsum)
4741 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4742 pThis->contextNormal.ip.u8CSO,
4743 pThis->contextNormal.ip.u8CSS,
4744 pThis->contextNormal.ip.u16CSE);
4745 if (pThis->fTCPcsum)
4746 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4747 pThis->contextNormal.tu.u8CSO,
4748 pThis->contextNormal.tu.u8CSS,
4749 pThis->contextNormal.tu.u16CSE);
4750 e1kTransmitFrame(pThis, fOnWorkerThread);
4751 }
4752 else
4753 e1kXmitFreeBuf(pThis);
4754 pThis->u16TxPktLen = 0;
4755 }
4756 }
4757 else
4758 {
4759 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4760 e1kFallbackAddToFrame(pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4761 }
4762
4763 e1kDescReport(pThis, pDesc, addr);
4764 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4765 break;
4766 }
4767
4768 case E1K_DTYP_LEGACY:
4769 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4770 {
4771 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4772 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4773 break;
4774 }
4775 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4776 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4777
4778 /* First fragment: allocate new buffer. */
4779 if (pThis->u16TxPktLen == 0)
4780 {
4781 if (pDesc->legacy.cmd.fEOP)
4782 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4783 else
4784 cbVTag = 4;
4785 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4786 /** @todo reset status bits? */
4787 rc = e1kXmitAllocBuf(pThis, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4788 if (RT_FAILURE(rc))
4789 {
4790 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4791 break;
4792 }
4793
4794 /** @todo Is there any way of indicating errors other than collisions? Like
4795 * VERR_NET_DOWN. */
4796 }
4797
4798 /* Add fragment to frame. */
4799 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4800 {
4801 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4802
4803 /* Last fragment: Transmit and reset the packet storage counter. */
4804 if (pDesc->legacy.cmd.fEOP)
4805 {
4806 pThis->fVTag = pDesc->legacy.cmd.fVLE;
4807 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4808 /** @todo Offload processing goes here. */
4809 e1kTransmitFrame(pThis, fOnWorkerThread);
4810 pThis->u16TxPktLen = 0;
4811 }
4812 }
4813 /* Last fragment + failure: free the buffer and reset the storage counter. */
4814 else if (pDesc->legacy.cmd.fEOP)
4815 {
4816 e1kXmitFreeBuf(pThis);
4817 pThis->u16TxPktLen = 0;
4818 }
4819
4820 e1kDescReport(pThis, pDesc, addr);
4821 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4822 break;
4823
4824 default:
4825 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4826 pThis->szPrf, e1kGetDescType(pDesc)));
4827 break;
4828 }
4829
4830 return rc;
4831}
4832
4833#else /* E1K_WITH_TXD_CACHE */
4834
4835/**
4836 * Process Transmit Descriptor.
4837 *
4838 * E1000 supports three types of transmit descriptors:
4839 * - legacy: data descriptors of the older, context-less format.
4840 * - data: same as legacy, but with new offloading capabilities.
4841 * - context: sets up the context for the following data descriptors.
4842 *
4843 * @param pThis The device state structure.
4844 * @param pDesc Pointer to descriptor union.
4845 * @param addr Physical address of descriptor in guest memory.
4846 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4848 * @thread E1000_TX
4849 */
4850static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr,
4851 bool fOnWorkerThread)
4852{
4853 int rc = VINF_SUCCESS;
4854
4855 e1kPrintTDesc(pThis, pDesc, "vvv");
4856
4857//#ifdef E1K_USE_TX_TIMERS
4858 if (pThis->fTidEnabled)
4859 TMTimerStop(pThis->CTX_SUFF(pTIDTimer));
4860//#endif /* E1K_USE_TX_TIMERS */
4861
4862 switch (e1kGetDescType(pDesc))
4863 {
4864 case E1K_DTYP_CONTEXT:
4865 /* The caller has already updated the context */
4866 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4867 e1kDescReport(pThis, pDesc, addr);
4868 break;
4869
4870 case E1K_DTYP_DATA:
4871 {
4872 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4873 &pThis->StatTxDescTSEData:
4874 &pThis->StatTxDescData);
4875 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4876 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4877 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4878 {
4879 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4880 }
4881 else
4882 {
4883 /*
4884 * Add the descriptor data to the frame. If the frame is complete,
4885 * transmit it and reset the u16TxPktLen field.
4886 */
4887 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4888 {
4889 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4890 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4891 if (pDesc->data.cmd.fEOP)
4892 {
4893 if ( fRc
4894 && pThis->CTX_SUFF(pTxSg)
4895 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4896 {
4897 e1kTransmitFrame(pThis, fOnWorkerThread);
4898 E1K_INC_CNT32(TSCTC);
4899 }
4900 else
4901 {
4902 if (fRc)
4903 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4904 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4905 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4906 e1kXmitFreeBuf(pThis);
4907 E1K_INC_CNT32(TSCTFC);
4908 }
4909 pThis->u16TxPktLen = 0;
4910 }
4911 }
4912 else if (!pDesc->data.cmd.fTSE)
4913 {
4914 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4915 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4916 if (pDesc->data.cmd.fEOP)
4917 {
4918 if (fRc && pThis->CTX_SUFF(pTxSg))
4919 {
4920 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4921 if (pThis->fIPcsum)
4922 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4923 pThis->contextNormal.ip.u8CSO,
4924 pThis->contextNormal.ip.u8CSS,
4925 pThis->contextNormal.ip.u16CSE);
4926 if (pThis->fTCPcsum)
4927 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4928 pThis->contextNormal.tu.u8CSO,
4929 pThis->contextNormal.tu.u8CSS,
4930 pThis->contextNormal.tu.u16CSE);
4931 e1kTransmitFrame(pThis, fOnWorkerThread);
4932 }
4933 else
4934 e1kXmitFreeBuf(pThis);
4935 pThis->u16TxPktLen = 0;
4936 }
4937 }
4938 else
4939 {
4940 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4941 rc = e1kFallbackAddToFrame(pThis, pDesc, fOnWorkerThread);
4942 }
4943 }
4944 e1kDescReport(pThis, pDesc, addr);
4945 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4946 break;
4947 }
4948
4949 case E1K_DTYP_LEGACY:
4950 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4951 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4952 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4953 {
4954 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4955 }
4956 else
4957 {
4958 /* Add fragment to frame. */
4959 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4960 {
4961 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4962
4963 /* Last fragment: Transmit and reset the packet storage counter. */
4964 if (pDesc->legacy.cmd.fEOP)
4965 {
4966 if (pDesc->legacy.cmd.fIC)
4967 {
4968 e1kInsertChecksum(pThis,
4969 (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
4970 pThis->u16TxPktLen,
4971 pDesc->legacy.cmd.u8CSO,
4972 pDesc->legacy.dw3.u8CSS,
4973 0);
4974 }
4975 e1kTransmitFrame(pThis, fOnWorkerThread);
4976 pThis->u16TxPktLen = 0;
4977 }
4978 }
4979 /* Last fragment + failure: free the buffer and reset the storage counter. */
4980 else if (pDesc->legacy.cmd.fEOP)
4981 {
4982 e1kXmitFreeBuf(pThis);
4983 pThis->u16TxPktLen = 0;
4984 }
4985 }
4986 e1kDescReport(pThis, pDesc, addr);
4987 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4988 break;
4989
4990 default:
4991 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4992 pThis->szPrf, e1kGetDescType(pDesc)));
4993 break;
4994 }
4995
4996 return rc;
4997}
4998
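/**
 * Update the transmit context from a context descriptor.
 *
 * Stores the TSE or normal checksum context in the device state and, for TSE,
 * clamps the MSS so that a single segment (MSS + header + VLAN tag) cannot
 * exceed E1K_MAX_TX_PKT_SIZE. Illustrative numbers only: with HDRLEN = 54 and
 * a hypothetical 16288-byte limit, an MSS of 65000 would be clamped to
 * 16288 - 54 - 4 = 16230.
 *
 * @param   pThis       The device state structure.
 * @param   pDesc       Pointer to the context descriptor.
 * @thread  E1000_TX
 */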
4999DECLINLINE(void) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
5000{
5001 if (pDesc->context.dw2.fTSE)
5002 {
5003 pThis->contextTSE = pDesc->context;
5004 uint32_t cbMaxSegmentSize = pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + 4; /*VTAG*/
5005 if (RT_UNLIKELY(cbMaxSegmentSize > E1K_MAX_TX_PKT_SIZE))
5006 {
5007 pThis->contextTSE.dw3.u16MSS = E1K_MAX_TX_PKT_SIZE - pThis->contextTSE.dw3.u8HDRLEN - 4; /*VTAG*/
5008 LogRelMax(10, ("%s Transmit packet is too large: %u > %u(max). Adjusted MSS to %u.\n",
5009 pThis->szPrf, cbMaxSegmentSize, E1K_MAX_TX_PKT_SIZE, pThis->contextTSE.dw3.u16MSS));
5010 }
5011 pThis->u32PayRemain = pThis->contextTSE.dw2.u20PAYLEN;
5012 pThis->u16HdrRemain = pThis->contextTSE.dw3.u8HDRLEN;
5013 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
5014 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
5015 }
5016 else
5017 {
5018 pThis->contextNormal = pDesc->context;
5019 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
5020 }
5021 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
5022 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
5023 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
5024 pDesc->context.ip.u8CSS,
5025 pDesc->context.ip.u8CSO,
5026 pDesc->context.ip.u16CSE,
5027 pDesc->context.tu.u8CSS,
5028 pDesc->context.tu.u8CSO,
5029 pDesc->context.tu.u16CSE));
5030}
5031
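/**
 * Locate a complete packet in the transmit descriptor cache.
 *
 * Walks the cached descriptors starting at iTxDCurrent, applying any context
 * descriptors on the way, until a descriptor with EOP is found. Computes the
 * buffer size to allocate (cbTxAlloc): the whole packet for non-TSE and GSO
 * packets, or just the first segment (MSS + header length) when we have to
 * segment in software, plus 4 bytes if a VLAN tag is to be inserted.
 *
 * @returns true if a complete packet was located (or the fetched descriptors
 *          were all empty and are processed as a dummy packet), false if more
 *          descriptors need to be fetched.
 * @param   pThis       The device state structure.
 * @thread  E1000_TX
 */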
5032static bool e1kLocateTxPacket(PE1KSTATE pThis)
5033{
5034 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
5035 pThis->szPrf, pThis->cbTxAlloc));
5036 /* Check if we have located the packet already. */
5037 if (pThis->cbTxAlloc)
5038 {
5039 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5040 pThis->szPrf, pThis->cbTxAlloc));
5041 return true;
5042 }
5043
5044 bool fTSE = false;
5045 uint32_t cbPacket = 0;
5046
5047 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
5048 {
5049 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
5050 switch (e1kGetDescType(pDesc))
5051 {
5052 case E1K_DTYP_CONTEXT:
5053 e1kUpdateTxContext(pThis, pDesc);
5054 continue;
5055 case E1K_DTYP_LEGACY:
5056 /* Skip empty descriptors. */
5057 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
5058 break;
5059 cbPacket += pDesc->legacy.cmd.u16Length;
5060 pThis->fGSO = false;
5061 break;
5062 case E1K_DTYP_DATA:
5063 /* Skip empty descriptors. */
5064 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
5065 break;
5066 if (cbPacket == 0)
5067 {
5068 /*
5069 * The first fragment: save IXSM and TXSM options
5070 * as these are only valid in the first fragment.
5071 */
5072 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
5073 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
5074 fTSE = pDesc->data.cmd.fTSE;
5075 /*
5076 * TSE descriptors have VLE bit properly set in
5077 * the first fragment.
5078 */
5079 if (fTSE)
5080 {
5081 pThis->fVTag = pDesc->data.cmd.fVLE;
5082 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5083 }
5084 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
5085 }
5086 cbPacket += pDesc->data.cmd.u20DTALEN;
5087 break;
5088 default:
5089 AssertMsgFailed(("Impossible descriptor type!"));
5090 }
5091 if (pDesc->legacy.cmd.fEOP)
5092 {
5093 /*
5094 * Non-TSE descriptors have VLE bit properly set in
5095 * the last fragment.
5096 */
5097 if (!fTSE)
5098 {
5099 pThis->fVTag = pDesc->data.cmd.fVLE;
5100 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5101 }
5102 /*
5103 * Compute the required buffer size. If we cannot do GSO but still
5104 * have to do segmentation we allocate the first segment only.
5105 */
5106 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5107 cbPacket :
5108 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
5109 if (pThis->fVTag)
5110 pThis->cbTxAlloc += 4;
5111 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5112 pThis->szPrf, pThis->cbTxAlloc));
5113 return true;
5114 }
5115 }
5116
5117 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5118 {
5119 /* All descriptors were empty, we need to process them as a dummy packet */
5120 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5121 pThis->szPrf, pThis->cbTxAlloc));
5122 return true;
5123 }
5124 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
5125 pThis->szPrf, pThis->cbTxAlloc));
5126 return false;
5127}
5128
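/**
 * Transmit the packet described by the cached descriptors.
 *
 * Calls e1kXmitDesc() for each descriptor from iTxDCurrent up to and including
 * the one with EOP set (or until the cache is exhausted), advancing TDH with
 * wrap-around and raising ICR.TXD_LOW once the number of unprocessed
 * descriptors drops to or below TXDCTL.LWTHRESH * 8 (if non-zero).
 *
 * @returns VBox status code.
 * @param   pThis            The device state structure.
 * @param   fOnWorkerThread  Whether we're on a worker thread or an EMT.
 * @thread  E1000_TX
 */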
5129static int e1kXmitPacket(PE1KSTATE pThis, bool fOnWorkerThread)
5130{
5131 int rc = VINF_SUCCESS;
5132
5133 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5134 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5135
5136 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5137 {
5138 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5139 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5140 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
5141 rc = e1kXmitDesc(pThis, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5142 if (RT_FAILURE(rc))
5143 break;
5144 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
5145 TDH = 0;
5146 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5147 if (uLowThreshold != 0 && e1kGetTxLen(pThis) <= uLowThreshold)
5148 {
5149 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5150 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5151 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5152 }
5153 ++pThis->iTxDCurrent;
5154 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5155 break;
5156 }
5157
5158 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5159 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5160 return rc;
5161}
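
/*
 * Editor's note (illustrative): the TDH advance in e1kXmitPacket() is plain
 * ring arithmetic, assuming TDLEN is a whole multiple of the descriptor size:
 *
 *     cDescs = TDLEN / sizeof(E1KTXDESC);   // ring size in descriptors
 *     TDH    = (TDH + 1) % cDescs;          // same effect as the check above
 */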
5162
5163#endif /* E1K_WITH_TXD_CACHE */
5164#ifndef E1K_WITH_TXD_CACHE
5165
5166/**
5167 * Transmit pending descriptors.
5168 *
5169 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5170 *
5171 * @param pThis The E1000 state.
5172 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5173 */
5174static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5175{
5176 int rc = VINF_SUCCESS;
5177
5178 /* Check if transmitter is enabled. */
5179 if (!(TCTL & TCTL_EN))
5180 return VINF_SUCCESS;
5181 /*
5182 * Grab the xmit lock of the driver as well as the E1K device state.
5183 */
5184 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5185 if (RT_LIKELY(rc == VINF_SUCCESS))
5186 {
5187 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5188 if (pDrv)
5189 {
5190 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5191 if (RT_FAILURE(rc))
5192 {
5193 e1kCsTxLeave(pThis);
5194 return rc;
5195 }
5196 }
5197 /*
5198 * Process all pending descriptors.
5199 * Note! Do not process descriptors in locked state
5200 */
5201 while (TDH != TDT && !pThis->fLocked)
5202 {
5203 E1KTXDESC desc;
5204 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5205 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5206
5207 e1kLoadDesc(pThis, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5208 rc = e1kXmitDesc(pThis, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5209 /* If we failed to transmit descriptor we will try it again later */
5210 if (RT_FAILURE(rc))
5211 break;
5212 if (++TDH * sizeof(desc) >= TDLEN)
5213 TDH = 0;
5214
5215 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5216 {
5217 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5218 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5219 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5220 }
5221
5222 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5223 }
5224
5225 /// @todo uncomment: pThis->uStatIntTXQE++;
5226 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5227 /*
5228 * Release the lock.
5229 */
5230 if (pDrv)
5231 pDrv->pfnEndXmit(pDrv);
5232 e1kCsTxLeave(pThis);
5233 }
5234
5235 return rc;
5236}
5237
5238#else /* E1K_WITH_TXD_CACHE */
5239
5240static void e1kDumpTxDCache(PE1KSTATE pThis)
5241{
5242 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5243 uint32_t tdh = TDH;
5244 LogRel(("-- Transmit Descriptors (%d total) --\n", cDescs));
5245 for (i = 0; i < cDescs; ++i)
5246 {
5247 E1KTXDESC desc;
5248 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(TDBAH, TDBAL, i),
5249 &desc, sizeof(desc));
5250 if (i == tdh)
5251 LogRel((">>> "));
5252 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5253 }
5254 LogRel(("-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5255 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
5256 if (tdh > pThis->iTxDCurrent)
5257 tdh -= pThis->iTxDCurrent;
5258 else
5259 tdh = cDescs + tdh - pThis->iTxDCurrent;
5260 for (i = 0; i < pThis->nTxDFetched; ++i)
5261 {
5262 if (i == pThis->iTxDCurrent)
5263 LogRel((">>> "));
5264 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5265 }
5266}
5267
5268/**
5269 * Transmit pending descriptors.
5270 *
5271 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5272 *
5273 * @param pThis The E1000 state.
5274 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5275 */
5276static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5277{
5278 int rc = VINF_SUCCESS;
5279
5280 /* Check if transmitter is enabled. */
5281 if (!(TCTL & TCTL_EN))
5282 return VINF_SUCCESS;
5283 /*
5284 * Grab the xmit lock of the driver as well as the E1K device state.
5285 */
5286 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5287 if (pDrv)
5288 {
5289 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5290 if (RT_FAILURE(rc))
5291 return rc;
5292 }
5293
5294 /*
5295 * Process all pending descriptors.
5296 * Note! Do not process descriptors in locked state
5297 */
5298 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5299 if (RT_LIKELY(rc == VINF_SUCCESS))
5300 {
5301 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5302 /*
5303 * fIncomplete is set whenever we try to fetch additional descriptors
5304 * for an incomplete packet. If we fail to locate a complete packet on
5305 * the next iteration, we need to reset the cache or we risk getting
5306 * stuck in this loop forever.
5307 */
5308 bool fIncomplete = false;
5309 while (!pThis->fLocked && e1kTxDLazyLoad(pThis))
5310 {
5311 while (e1kLocateTxPacket(pThis))
5312 {
5313 fIncomplete = false;
5314 /* Found a complete packet, allocate it. */
5315 rc = e1kXmitAllocBuf(pThis, pThis->fGSO);
5316 /* If we're out of bandwidth we'll come back later. */
5317 if (RT_FAILURE(rc))
5318 goto out;
5319 /* Copy the packet to allocated buffer and send it. */
5320 rc = e1kXmitPacket(pThis, fOnWorkerThread);
5321 /* If we're out of bandwidth we'll come back later. */
5322 if (RT_FAILURE(rc))
5323 goto out;
5324 }
5325 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5326 if (RT_UNLIKELY(fIncomplete))
5327 {
5328 static bool fTxDCacheDumped = false;
5329 /*
5330 * The descriptor cache is full, but we were unable to find
5331 * a complete packet in it. Drop the cache and hope that
5332 * the guest driver can recover from the network card error.
5333 */
5334 LogRel(("%s No complete packets in%s TxD cache! "
5335 "Fetched=%d, current=%d, TX len=%d.\n",
5336 pThis->szPrf,
5337 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5338 pThis->nTxDFetched, pThis->iTxDCurrent,
5339 e1kGetTxLen(pThis)));
5340 if (!fTxDCacheDumped)
5341 {
5342 fTxDCacheDumped = true;
5343 e1kDumpTxDCache(pThis);
5344 }
5345 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5346 /*
5347 * Returning an error at this point means Guru in R0
5348 * (see @bugref{6428}).
5349 */
5350# ifdef IN_RING3
5351 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5352# else /* !IN_RING3 */
5353 rc = VINF_IOM_R3_MMIO_WRITE;
5354# endif /* !IN_RING3 */
5355 goto out;
5356 }
5357 if (u8Remain > 0)
5358 {
5359 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5360 "%d more are available\n",
5361 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5362 e1kGetTxLen(pThis) - u8Remain));
5363
5364 /*
5365 * A packet was partially fetched. Move incomplete packet to
5366 * the beginning of cache buffer, then load more descriptors.
5367 */
5368 memmove(pThis->aTxDescriptors,
5369 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5370 u8Remain * sizeof(E1KTXDESC));
5371 pThis->iTxDCurrent = 0;
5372 pThis->nTxDFetched = u8Remain;
5373 e1kTxDLoadMore(pThis);
5374 fIncomplete = true;
5375 }
5376 else
5377 pThis->nTxDFetched = 0;
5378 pThis->iTxDCurrent = 0;
5379 }
5380 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5381 {
5382 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5383 pThis->szPrf));
5384 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5385 }
5386out:
5387 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5388
5389 /// @todo uncomment: pThis->uStatIntTXQE++;
5390 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5391
5392 e1kCsTxLeave(pThis);
5393 }
5394
5395
5396 /*
5397 * Release the lock.
5398 */
5399 if (pDrv)
5400 pDrv->pfnEndXmit(pDrv);
5401 return rc;
5402}
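
/*
 * Editor's note (illustrative): when a packet straddles the end of the fetched
 * cache, the memmove above compacts it before loading more descriptors. With a
 * hypothetical 8-entry cache where the unfinished packet starts at index 5:
 *
 *     before:  [A B C D E F G H]   iTxDCurrent = 5, nTxDFetched = 8
 *     after:   [F G H . . . . .]   iTxDCurrent = 0, nTxDFetched = 3
 *
 * e1kTxDLoadMore() then appends the next descriptors from guest memory.
 */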
5403
5404#endif /* E1K_WITH_TXD_CACHE */
5405#ifdef IN_RING3
5406
5407/**
5408 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5409 */
5410static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5411{
5412 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5413 /* Resume suspended transmission */
5414 STATUS &= ~STATUS_TXOFF;
5415 e1kXmitPending(pThis, true /*fOnWorkerThread*/);
5416}
5417
5418/**
5419 * Callback for consuming from transmit queue. It gets called in R3 whenever
5420 * we enqueue something in R0/GC.
5421 *
5422 * @returns true
5423 * @param pDevIns Pointer to device instance structure.
5424 * @param pItem Pointer to the element being dequeued (not used).
5425 * @thread ???
5426 */
5427static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5428{
5429 NOREF(pItem);
5430 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5431 E1kLog2(("%s e1kTxQueueConsumer:\n", pThis->szPrf));
5432
5433 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/); NOREF(rc);
5434#ifndef DEBUG_andy /** @todo r=andy Happens for me a lot, mute this for me. */
5435 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
5436#endif
5437 return true;
5438}
5439
5440/**
5441 * Handler for the wakeup signaller queue.
5442 */
5443static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5444{
5445 RT_NOREF(pItem);
5446 e1kWakeupReceive(pDevIns);
5447 return true;
5448}
5449
5450#endif /* IN_RING3 */
5451
5452/**
5453 * Write handler for Transmit Descriptor Tail register.
5454 *
5455 * @param pThis The device state structure.
5456 * @param offset Register offset in memory-mapped frame.
5457 * @param index Register index in register array.
5458 * @param value The value to store.
5460 * @thread EMT
5461 */
5462static int e1kRegWriteTDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5463{
5464 int rc = e1kRegWriteDefault(pThis, offset, index, value);
5465
5466 /* All descriptors starting with head and not including tail belong to us. */
5467 /* Process them. */
5468 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5469 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5470
5471 /* Ignore TDT writes when the link is down. */
5472 if (TDH != TDT && (STATUS & STATUS_LU))
5473 {
5474 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
5475 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5476 pThis->szPrf, e1kGetTxLen(pThis)));
5477
5478 /* Transmit pending packets if possible, defer it if we cannot do it
5479 in the current context. */
5480#ifdef E1K_TX_DELAY
5481 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5482 if (RT_LIKELY(rc == VINF_SUCCESS))
5483 {
5484 if (!TMTimerIsActive(pThis->CTX_SUFF(pTXDTimer)))
5485 {
5486#ifdef E1K_INT_STATS
5487 pThis->u64ArmedAt = RTTimeNanoTS();
5488#endif
5489 e1kArmTimer(pThis, pThis->CTX_SUFF(pTXDTimer), E1K_TX_DELAY);
5490 }
5491 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5492 e1kCsTxLeave(pThis);
5493 return rc;
5494 }
5495 /* We failed to enter the TX critical section -- transmit as usual. */
5496#endif /* E1K_TX_DELAY */
5497#ifndef IN_RING3
5498 if (!pThis->CTX_SUFF(pDrv))
5499 {
5500 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pTxQueue));
5501 if (RT_UNLIKELY(pItem))
5502 PDMQueueInsert(pThis->CTX_SUFF(pTxQueue), pItem);
5503 }
5504 else
5505#endif
5506 {
5507 rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
5508 if (rc == VERR_TRY_AGAIN)
5509 rc = VINF_SUCCESS;
5510 else if (rc == VERR_SEM_BUSY)
5511 rc = VINF_IOM_R3_MMIO_WRITE;
5512 AssertRC(rc);
5513 }
5514 }
5515
5516 return rc;
5517}
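
/*
 * Editor's note (illustrative, hypothetical guest-side pseudo-code): the TDT
 * write acts as the transmit "doorbell". A guest driver queues a frame roughly
 * like this (names and command bits are illustrative only):
 *
 *     ring[tail] = make_data_desc(dma_addr, len, EOP | RS | IFCS);
 *     tail = (tail + 1) % ring_size;
 *     write32(mmio_base + TDT_REG, tail);  // hands descriptors [TDH, TDT) to us
 *
 * The handler above then transmits everything between TDH and the new TDT,
 * unless the link is down or the work is deferred to a timer or to R3.
 */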
5518
5519/**
5520 * Write handler for Multicast Table Array registers.
5521 *
5522 * @param pThis The device state structure.
5523 * @param offset Register offset in memory-mapped frame.
5524 * @param index Register index in register array.
5525 * @param value The value to store.
5526 * @thread EMT
5527 */
5528static int e1kRegWriteMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5529{
5530 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5531 pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])] = value;
5532
5533 return VINF_SUCCESS;
5534}
5535
5536/**
5537 * Read handler for Multicast Table Array registers.
5538 *
5539 * @returns VBox status code.
5540 *
5541 * @param pThis The device state structure.
5542 * @param offset Register offset in memory-mapped frame.
5543 * @param index Register index in register array.
5544 * @thread EMT
5545 */
5546static int e1kRegReadMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5547{
5548 AssertReturn(offset - g_aE1kRegMap[index].offset< sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5549 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5550
5551 return VINF_SUCCESS;
5552}
5553
5554/**
5555 * Write handler for Receive Address registers.
5556 *
5557 * @param pThis The device state structure.
5558 * @param offset Register offset in memory-mapped frame.
5559 * @param index Register index in register array.
5560 * @param value The value to store.
5561 * @thread EMT
5562 */
5563static int e1kRegWriteRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5564{
5565 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5566 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5567
5568 return VINF_SUCCESS;
5569}
5570
5571/**
5572 * Read handler for Receive Address registers.
5573 *
5574 * @returns VBox status code.
5575 *
5576 * @param pThis The device state structure.
5577 * @param offset Register offset in memory-mapped frame.
5578 * @param index Register index in register array.
5579 * @thread EMT
5580 */
5581static int e1kRegReadRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5582{
5583 AssertReturn(offset - g_aE1kRegMap[index].offset< sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5584 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5585
5586 return VINF_SUCCESS;
5587}
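
/*
 * Editor's note (layout reminder, per the 8254x register map): the Receive
 * Address table is a sequence of RAL/RAH register pairs, so aRecAddr.au32[]
 * holds RAL0, RAH0, RAL1, RAH1, ... For entry 0 with MAC 08:00:27:11:22:33
 * the guest would end up writing (illustrative):
 *
 *     aRecAddr.au32[0] = 0x11270008;   // RAL0: MAC bytes 0..3, little endian
 *     aRecAddr.au32[1] = 0x80003322;   // RAH0: MAC bytes 4..5 + AV (bit 31)
 */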
5588
5589/**
5590 * Write handler for VLAN Filter Table Array registers.
5591 *
5592 * @param pThis The device state structure.
5593 * @param offset Register offset in memory-mapped frame.
5594 * @param index Register index in register array.
5595 * @param value The value to store.
5596 * @thread EMT
5597 */
5598static int e1kRegWriteVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5599{
5600 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5601 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5602
5603 return VINF_SUCCESS;
5604}
5605
5606/**
5607 * Read handler for VLAN Filter Table Array registers.
5608 *
5609 * @returns VBox status code.
5610 *
5611 * @param pThis The device state structure.
5612 * @param offset Register offset in memory-mapped frame.
5613 * @param index Register index in register array.
5614 * @thread EMT
5615 */
5616static int e1kRegReadVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5617{
5618 AssertReturn(offset - g_aE1kRegMap[index].offset< sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5619 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5620
5621 return VINF_SUCCESS;
5622}
5623
5624/**
5625 * Read handler for unimplemented registers.
5626 *
5627 * Merely reports reads from unimplemented registers.
5628 *
5629 * @returns VBox status code.
5630 *
5631 * @param pThis The device state structure.
5632 * @param offset Register offset in memory-mapped frame.
5633 * @param index Register index in register array.
5634 * @thread EMT
5635 */
5636static int e1kRegReadUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5637{
5638 RT_NOREF3(pThis, offset, index);
5639 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5640 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5641 *pu32Value = 0;
5642
5643 return VINF_SUCCESS;
5644}
5645
5646/**
5647 * Default register read handler with automatic clear operation.
5648 *
5649 * Retrieves the value of register from register array in device state structure.
5650 * Then resets all bits.
5651 *
5652 * @remarks Any masking and shifting needed for partial accesses is done in
5653 * the caller.
5654 *
5655 * @returns VBox status code.
5656 *
5657 * @param pThis The device state structure.
5658 * @param offset Register offset in memory-mapped frame.
5659 * @param index Register index in register array.
5660 * @thread EMT
5661 */
5662static int e1kRegReadAutoClear(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5663{
5664 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5665 int rc = e1kRegReadDefault(pThis, offset, index, pu32Value);
5666 pThis->auRegs[index] = 0;
5667
5668 return rc;
5669}
5670
5671/**
5672 * Default register read handler.
5673 *
5674 * Retrieves the value of register from register array in device state structure.
5675 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
5676 *
5677 * @remarks Any masking and shifting needed for partial accesses is done in
5678 * the caller.
5679 *
5680 * @returns VBox status code.
5681 *
5682 * @param pThis The device state structure.
5683 * @param offset Register offset in memory-mapped frame.
5684 * @param index Register index in register array.
5685 * @thread EMT
5686 */
5687static int e1kRegReadDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5688{
5689 RT_NOREF_PV(offset);
5690
5691 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5692 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
5693
5694 return VINF_SUCCESS;
5695}
5696
5697/**
5698 * Write handler for unimplemented registers.
5699 *
5700 * Merely reports writes to unimplemented registers.
5701 *
5702 * @param pThis The device state structure.
5703 * @param offset Register offset in memory-mapped frame.
5704 * @param index Register index in register array.
5705 * @param value The value to store.
5706 * @thread EMT
5707 */
5709static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5710{
5711 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
5712
5713 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5714 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5715
5716 return VINF_SUCCESS;
5717}
5718
5719/**
5720 * Default register write handler.
5721 *
5722 * Stores the value in the register array of the device state structure. Only
5723 * bits corresponding to 1s in the 'writable' mask will be stored.
5724 *
5725 * @returns VBox status code.
5726 *
5727 * @param pThis The device state structure.
5728 * @param offset Register offset in memory-mapped frame.
5729 * @param index Register index in register array.
5730 * @param value The value to store.
5732 * @thread EMT
5733 */
5735static int e1kRegWriteDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5736{
5737 RT_NOREF_PV(offset);
5738
5739 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5740 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
5741 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
5742
5743 return VINF_SUCCESS;
5744}
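
/*
 * Editor's note (illustrative arithmetic): the read-modify-write above keeps
 * the read-only bits intact. With a hypothetical register whose writable mask
 * is 0x0000FF00, a current value of 0x12345678 and a guest write of 0xAAAAAAAA:
 *
 *     new = (0xAAAAAAAA &  0x0000FF00)    //  0x0000AA00, from the guest
 *         | (0x12345678 & ~0x0000FF00);   //  0x12340078, preserved bits
 *         =  0x1234AA78
 */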
5745
5746/**
5747 * Search register table for matching register.
5748 *
5749 * @returns Index in the register table or -1 if not found.
5750 *
5751 * @param offReg Register offset in memory-mapped region.
5752 * @thread EMT
5753 */
5754static int e1kRegLookup(uint32_t offReg)
5755{
5756
5757#if 0
5758 int index;
5759
5760 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5761 {
5762 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
5763 {
5764 return index;
5765 }
5766 }
5767#else
5768 int iStart = 0;
5769 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
5770 for (;;)
5771 {
5772 int i = (iEnd - iStart) / 2 + iStart;
5773 uint32_t offCur = g_aE1kRegMap[i].offset;
5774 if (offReg < offCur)
5775 {
5776 if (i == iStart)
5777 break;
5778 iEnd = i;
5779 }
5780 else if (offReg >= offCur + g_aE1kRegMap[i].size)
5781 {
5782 i++;
5783 if (i == iEnd)
5784 break;
5785 iStart = i;
5786 }
5787 else
5788 return i;
5789 Assert(iEnd > iStart);
5790 }
5791
5792 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5793 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
5794 return i;
5795
5796# ifdef VBOX_STRICT
5797 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5798 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
5799# endif
5800
5801#endif
5802
5803 return -1;
5804}
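
/*
 * Editor's note: the loop above is a binary search over the first
 * E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap, which are assumed to
 * be sorted by offset; the remaining entries are scanned linearly. E.g. with
 * 8 sorted entries a lookup inspects at most 3 of them before falling through
 * to the linear tail.
 */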
5805
5806/**
5807 * Handle unaligned register read operation.
5808 *
5809 * Looks up and calls appropriate handler.
5810 *
5811 * @returns VBox status code.
5812 *
5813 * @param pThis The device state structure.
5814 * @param offReg Register offset in memory-mapped frame.
5815 * @param pv Where to store the result.
5816 * @param cb Number of bytes to read.
5817 * @thread EMT
5818 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
5819 * accesses we have to take care of that ourselves.
5820 */
5821static int e1kRegReadUnaligned(PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
5822{
5823 uint32_t u32 = 0;
5824 uint32_t shift;
5825 int rc = VINF_SUCCESS;
5826 int index = e1kRegLookup(offReg);
5827#ifdef LOG_ENABLED
5828 char buf[9];
5829#endif
5830
5831 /*
5832 * From the spec:
5833 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5834 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5835 */
5836
5837 /*
5838 * To be able to read bytes and short word we convert them to properly
5839 * shifted 32-bit words and masks. The idea is to keep register-specific
5840 * handlers simple. Most accesses will be 32-bit anyway.
5841 */
5842 uint32_t mask;
5843 switch (cb)
5844 {
5845 case 4: mask = 0xFFFFFFFF; break;
5846 case 2: mask = 0x0000FFFF; break;
5847 case 1: mask = 0x000000FF; break;
5848 default:
5849 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
5850 "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
5851 }
5852 if (index != -1)
5853 {
5854 if (g_aE1kRegMap[index].readable)
5855 {
5856 /* Make the mask correspond to the bits we are about to read. */
5857 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5858 mask <<= shift;
5859 if (!mask)
5860 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
5861 /*
5862 * Read the full 32-bit register value via its handler, then mask out
5863 * the bits we are not interested in.
5864 */
5865 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5866 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5867 return rc;
5868 //pThis->fDelayInts = false;
5869 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5870 //pThis->iStatIntLostOne = 0;
5871 rc = g_aE1kRegMap[index].pfnRead(pThis, offReg & 0xFFFFFFFC, index, &u32);
5872 u32 &= mask;
5873 //e1kCsLeave(pThis);
5874 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5875 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5876 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
5877 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5878 /* Shift back the result. */
5879 u32 >>= shift;
5880 }
5881 else
5882 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5883 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5884 if (IOM_SUCCESS(rc))
5885 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
5886 }
5887 else
5888 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5889 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
5890
5891 memcpy(pv, &u32, cb);
5892 return rc;
5893}
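
/*
 * Editor's note (illustrative arithmetic): for a 2-byte read at offset
 * "register offset + 2" the code above computes
 *
 *     shift = (2 % 4) * 8      = 16;
 *     mask  = 0x0000FFFF << 16 = 0xFFFF0000;
 *     u32   = (full32 & mask) >> 16;    // upper half-word of the register
 *
 * so byte and word accesses can reuse the ordinary 32-bit register handlers.
 */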
5894
5895/**
5896 * Handle 4 byte aligned and sized read operation.
5897 *
5898 * Looks up and calls appropriate handler.
5899 *
5900 * @returns VBox status code.
5901 *
5902 * @param pThis The device state structure.
5903 * @param offReg Register offset in memory-mapped frame.
5904 * @param pu32 Where to store the result.
5905 * @thread EMT
5906 */
5907static int e1kRegReadAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
5908{
5909 Assert(!(offReg & 3));
5910
5911 /*
5912 * Lookup the register and check that it's readable.
5913 */
5914 int rc = VINF_SUCCESS;
5915 int idxReg = e1kRegLookup(offReg);
5916 if (RT_LIKELY(idxReg != -1))
5917 {
5918 if (RT_UNLIKELY(g_aE1kRegMap[idxReg].readable))
5919 {
5920 /*
5921 * Read the full 32-bit register value via its handler; the handler
5922 * masks out any non-readable bits.
5923 */
5924 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5925 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5926 // return rc;
5927 //pThis->fDelayInts = false;
5928 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5929 //pThis->iStatIntLostOne = 0;
5930 rc = g_aE1kRegMap[idxReg].pfnRead(pThis, offReg & 0xFFFFFFFC, idxReg, pu32);
5931 //e1kCsLeave(pThis);
5932 Log6(("%s At %08X read %08X from %s (%s)\n",
5933 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5934 if (IOM_SUCCESS(rc))
5935 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
5936 }
5937 else
5938 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
5939 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5940 }
5941 else
5942 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
5943 return rc;
5944}
5945
5946/**
5947 * Handle 4 byte sized and aligned register write operation.
5948 *
5949 * Looks up and calls appropriate handler.
5950 *
5951 * @returns VBox status code.
5952 *
5953 * @param pThis The device state structure.
5954 * @param offReg Register offset in memory-mapped frame.
5955 * @param u32Value The value to write.
5956 * @thread EMT
5957 */
5958static int e1kRegWriteAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
5959{
5960 int rc = VINF_SUCCESS;
5961 int index = e1kRegLookup(offReg);
5962 if (RT_LIKELY(index != -1))
5963 {
5964 if (RT_LIKELY(g_aE1kRegMap[index].writable))
5965 {
5966 /*
5967 * Write it via the register handler; the handler masks out any
5968 * non-writable bits.
5969 */
5970 Log6(("%s At %08X write %08X to %s (%s)\n",
5971 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5972 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5973 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5974 // return rc;
5975 //pThis->fDelayInts = false;
5976 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5977 //pThis->iStatIntLostOne = 0;
5978 rc = g_aE1kRegMap[index].pfnWrite(pThis, offReg, index, u32Value);
5979 //e1kCsLeave(pThis);
5980 }
5981 else
5982 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
5983 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5984 if (IOM_SUCCESS(rc))
5985 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
5986 }
5987 else
5988 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
5989 pThis->szPrf, offReg, u32Value));
5990 return rc;
5991}
5992
5993
5994/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
5995
5996/**
5997 * @callback_method_impl{FNIOMMMIOREAD}
5998 */
5999PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
6000{
6001 RT_NOREF2(pvUser, cb);
6002 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6003 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6004
6005 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
6006 Assert(offReg < E1K_MM_SIZE);
6007 Assert(cb == 4);
6008 Assert(!(GCPhysAddr & 3));
6009
6010 int rc = e1kRegReadAlignedU32(pThis, offReg, (uint32_t *)pv);
6011
6012 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6013 return rc;
6014}
6015
6016/**
6017 * @callback_method_impl{FNIOMMMIOWRITE}
6018 */
6019PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
6020{
6021 RT_NOREF2(pvUser, cb);
6022 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6023 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6024
6025 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
6026 Assert(offReg < E1K_MM_SIZE);
6027 Assert(cb == 4);
6028 Assert(!(GCPhysAddr & 3));
6029
6030 int rc = e1kRegWriteAlignedU32(pThis, offReg, *(uint32_t const *)pv);
6031
6032 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6033 return rc;
6034}
6035
6036/**
6037 * @callback_method_impl{FNIOMIOPORTIN}
6038 */
6039PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t *pu32, unsigned cb)
6040{
6041 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6042 int rc;
6043 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
6044 RT_NOREF_PV(pvUser);
6045
6046 uPort -= pThis->IOPortBase;
6047 if (RT_LIKELY(cb == 4))
6048 switch (uPort)
6049 {
6050 case 0x00: /* IOADDR */
6051 *pu32 = pThis->uSelectedReg;
6052 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6053 rc = VINF_SUCCESS;
6054 break;
6055
6056 case 0x04: /* IODATA */
6057 if (!(pThis->uSelectedReg & 3))
6058 rc = e1kRegReadAlignedU32(pThis, pThis->uSelectedReg, pu32);
6059 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
6060 rc = e1kRegReadUnaligned(pThis, pThis->uSelectedReg, pu32, cb);
6061 if (rc == VINF_IOM_R3_MMIO_READ)
6062 rc = VINF_IOM_R3_IOPORT_READ;
6063 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6064 break;
6065
6066 default:
6067 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, uPort));
6068 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
6069 rc = VINF_SUCCESS;
6070 }
6071 else
6072 {
6073 E1kLog(("%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
6074 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb);
6075 }
6076 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
6077 return rc;
6078}
6079
6080
6081/**
6082 * @callback_method_impl{FNIOMIOPORTOUT}
6083 */
6084PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t u32, unsigned cb)
6085{
6086 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6087 int rc;
6088 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6089 RT_NOREF_PV(pvUser);
6090
6091 E1kLog2(("%s e1kIOPortOut: uPort=%RTiop value=%08x\n", pThis->szPrf, uPort, u32));
6092 if (RT_LIKELY(cb == 4))
6093 {
6094 uPort -= pThis->IOPortBase;
6095 switch (uPort)
6096 {
6097 case 0x00: /* IOADDR */
6098 pThis->uSelectedReg = u32;
6099 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6100 rc = VINF_SUCCESS;
6101 break;
6102
6103 case 0x04: /* IODATA */
6104 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6105 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6106 {
6107 rc = e1kRegWriteAlignedU32(pThis, pThis->uSelectedReg, u32);
6108 if (rc == VINF_IOM_R3_MMIO_WRITE)
6109 rc = VINF_IOM_R3_IOPORT_WRITE;
6110 }
6111 else
6112 rc = PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
6113 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6114 break;
6115
6116 default:
6117 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, uPort));
6118 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", uPort);
6119 }
6120 }
6121 else
6122 {
6123 E1kLog(("%s e1kIOPortOut: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
6124 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: uPort=%RTiop cb=%#x\n", pThis->szPrf, uPort, cb);
6125 }
6126
6127 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6128 return rc;
6129}
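
/*
 * Editor's note (illustrative, hypothetical guest-side pseudo-code): the two
 * I/O ports form an indirect window into the register space. Reading CTRL
 * (register offset 0x0000) through I/O space looks roughly like:
 *
 *     outl(iobase + 0x00, 0x0000);   // IOADDR: select the register offset
 *     val = inl(iobase + 0x04);      // IODATA: access the selected register
 *
 * The 0x00/0x04 port offsets match the cases handled above; outl/inl are
 * illustrative only.
 */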
6130
6131#ifdef IN_RING3
6132
6133/**
6134 * Dump complete device state to log.
6135 *
6136 * @param pThis Pointer to device state.
6137 */
6138static void e1kDumpState(PE1KSTATE pThis)
6139{
6140 RT_NOREF(pThis);
6141 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6142 E1kLog2(("%s %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6143# ifdef E1K_INT_STATS
6144 LogRel(("%s Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6145 LogRel(("%s Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6146 LogRel(("%s Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6147 LogRel(("%s ICR outside ISR : %d\n", pThis->szPrf, pThis->uStatNoIntICR));
6148 LogRel(("%s IMS raised ints : %d\n", pThis->szPrf, pThis->uStatIntIMS));
6149 LogRel(("%s Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6150 LogRel(("%s Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6151 LogRel(("%s Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6152 LogRel(("%s Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6153 LogRel(("%s Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6154 LogRel(("%s Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6155 LogRel(("%s Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6156 LogRel(("%s Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6157 LogRel(("%s Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6158 LogRel(("%s Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6159 LogRel(("%s Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6160 LogRel(("%s TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6161 LogRel(("%s TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6162 LogRel(("%s TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6163 LogRel(("%s TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6164 LogRel(("%s TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6165 LogRel(("%s TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6166 LogRel(("%s RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6167 LogRel(("%s RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6168 LogRel(("%s TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6169 LogRel(("%s TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6170 LogRel(("%s TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6171 LogRel(("%s Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6172 LogRel(("%s Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6173 LogRel(("%s TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6174 LogRel(("%s TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6175 LogRel(("%s TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6176 LogRel(("%s TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6177 LogRel(("%s TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6178 LogRel(("%s TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6179 LogRel(("%s TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6180 LogRel(("%s TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6181 LogRel(("%s Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6182 LogRel(("%s Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6183# endif /* E1K_INT_STATS */
6184}
6185
6186/**
6187 * @callback_method_impl{FNPCIIOREGIONMAP}
6188 */
6189static DECLCALLBACK(int) e1kMap(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
6190 RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
6191{
6192 RT_NOREF(pPciDev, iRegion);
6193 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE *);
6194 int rc;
6195
6196 switch (enmType)
6197 {
6198 case PCI_ADDRESS_SPACE_IO:
6199 pThis->IOPortBase = (RTIOPORT)GCPhysAddress;
6200 rc = PDMDevHlpIOPortRegister(pDevIns, pThis->IOPortBase, cb, NULL /*pvUser*/,
6201 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
6202 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6203 rc = PDMDevHlpIOPortRegisterR0(pDevIns, pThis->IOPortBase, cb, NIL_RTR0PTR /*pvUser*/,
6204 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6205 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6206 rc = PDMDevHlpIOPortRegisterRC(pDevIns, pThis->IOPortBase, cb, NIL_RTRCPTR /*pvUser*/,
6207 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6208 break;
6209
6210 case PCI_ADDRESS_SPACE_MEM:
6211 /*
6212 * From the spec:
6213 * For registers that should be accessed as 32-bit double words,
6214 * partial writes (less than a 32-bit double word) are ignored.
6215 * Partial reads return all 32 bits of data regardless of the
6216 * byte enables.
6217 */
6218#ifdef E1K_WITH_PREREG_MMIO
6219 pThis->addrMMReg = GCPhysAddress;
6220 if (GCPhysAddress == NIL_RTGCPHYS)
6221 rc = VINF_SUCCESS;
6222 else
6223 {
6224 Assert(!(GCPhysAddress & 7));
6225 rc = PDMDevHlpMMIOExMap(pDevIns, pPciDev, iRegion, GCPhysAddress);
6226 }
6227#else
6228 pThis->addrMMReg = GCPhysAddress; Assert(!(GCPhysAddress & 7));
6229 rc = PDMDevHlpMMIORegister(pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
6230 IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
6231 e1kMMIOWrite, e1kMMIORead, "E1000");
6232 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6233 rc = PDMDevHlpMMIORegisterR0(pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
6234 "e1kMMIOWrite", "e1kMMIORead");
6235 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6236 rc = PDMDevHlpMMIORegisterRC(pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
6237 "e1kMMIOWrite", "e1kMMIORead");
6238#endif
6239 break;
6240
6241 default:
6242 /* We should never get here */
6243 AssertMsgFailed(("Invalid PCI address space param in map callback"));
6244 rc = VERR_INTERNAL_ERROR;
6245 break;
6246 }
6247 return rc;
6248}
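
/*
 * A quick illustration (hypothetical guest-assigned addresses, not from a real
 * trace) of how this callback is used: e1kR3Construct() below registers
 * region 0 as PCI_ADDRESS_SPACE_MEM (E1K_MM_SIZE) and region 2 as
 * PCI_ADDRESS_SPACE_IO (E1K_IOPORT_SIZE), both with e1kMap as the map callback,
 * so when the guest programs the BARs we might see:
 *   e1kMap(iRegion=0, enmType=MEM, GCPhysAddress=0xF0000000) -> MMIO handlers
 *   e1kMap(iRegion=2, enmType=IO,  GCPhysAddress=0xD000)     -> I/O port handlers
 */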
6249
6250
6251/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6252
6253/**
6254 * Check if the device can receive data now.
6255 * This must be called before the pfnReceive() method is called.
6256 *
6257 * @returns VBox status code: VINF_SUCCESS if reception is possible, VERR_NET_NO_BUFFER_SPACE if not.
6258 * @param pThis Pointer to the device state structure.
6259 * @thread EMT
6260 */
6261static int e1kCanReceive(PE1KSTATE pThis)
6262{
6263#ifndef E1K_WITH_RXD_CACHE
6264 size_t cb;
6265
6266 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6267 return VERR_NET_NO_BUFFER_SPACE;
6268
6269 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6270 {
6271 E1KRXDESC desc;
6272 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6273 &desc, sizeof(desc));
6274 if (desc.status.fDD)
6275 cb = 0;
6276 else
6277 cb = pThis->u16RxBSize;
6278 }
6279 else if (RDH < RDT)
6280 cb = (RDT - RDH) * pThis->u16RxBSize;
6281 else if (RDH > RDT)
6282 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6283 else
6284 {
6285 cb = 0;
6286 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6287 }
6288 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6289 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6290
6291 e1kCsRxLeave(pThis);
6292 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6293#else /* E1K_WITH_RXD_CACHE */
6294 int rc = VINF_SUCCESS;
6295
6296 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6297 return VERR_NET_NO_BUFFER_SPACE;
6298
6299 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6300 {
6301 E1KRXDESC desc;
6302 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6303 &desc, sizeof(desc));
6304 if (desc.status.fDD)
6305 rc = VERR_NET_NO_BUFFER_SPACE;
6306 }
6307 else if (e1kRxDIsCacheEmpty(pThis) && RDH == RDT)
6308 {
6309 /* Cache is empty, so is the RX ring. */
6310 rc = VERR_NET_NO_BUFFER_SPACE;
6311 }
6312 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6313 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6314 e1kRxDInCache(pThis), RDH, RDT, RDLEN, pThis->u16RxBSize, rc));
6315
6316 e1kCsRxLeave(pThis);
6317 return rc;
6318#endif /* E1K_WITH_RXD_CACHE */
6319}
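
/*
 * A small worked example of the ring arithmetic in the non-cache branch of
 * e1kCanReceive() above (the numbers are hypothetical): with a ring of
 * RDLEN / sizeof(E1KRXDESC) = 256 descriptors, RDH = 250 and RDT = 10, the
 * RDH > RDT case applies and the available space is
 * (256 - 250 + 10) * u16RxBSize = 16 buffers. RDH == RDT means the guest has
 * not made any descriptors available, hence VERR_NET_NO_BUFFER_SPACE.
 */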
6320
6321/**
6322 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6323 */
6324static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6325{
6326 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6327 int rc = e1kCanReceive(pThis);
6328
6329 if (RT_SUCCESS(rc))
6330 return VINF_SUCCESS;
6331 if (RT_UNLIKELY(cMillies == 0))
6332 return VERR_NET_NO_BUFFER_SPACE;
6333
6334 rc = VERR_INTERRUPTED;
6335 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6336 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6337 VMSTATE enmVMState;
6338 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pThis->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
6339 || enmVMState == VMSTATE_RUNNING_LS))
6340 {
6341 int rc2 = e1kCanReceive(pThis);
6342 if (RT_SUCCESS(rc2))
6343 {
6344 rc = VINF_SUCCESS;
6345 break;
6346 }
6347 E1kLogRel(("E1000 e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6348 E1kLog(("%s e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6349 RTSemEventWait(pThis->hEventMoreRxDescAvail, cMillies);
6350 }
6351 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6352 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6353
6354 return rc;
6355}
6356
6357
6358/**
6359 * Matches the packet addresses against the Receive Address table. Looks for
6360 * exact matches only.
6361 *
6362 * @returns true if address matches.
6363 * @param pThis Pointer to the state structure.
6364 * @param pvBuf The ethernet packet.
6366 * @thread EMT
6367 */
6368static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6369{
6370 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6371 {
6372 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6373
6374 /* Valid address? */
6375 if (ra->ctl & RA_CTL_AV)
6376 {
6377 Assert((ra->ctl & RA_CTL_AS) < 2);
6378 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6379 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6380 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6381 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6382 /*
6383 * Address Select:
6384 * 00b = Destination address
6385 * 01b = Source address
6386 * 10b = Reserved
6387 * 11b = Reserved
6388 * Since the Ethernet header is (DA, SA, len), we can use the address
6389 * select value as an index.
6390 */
6391 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6392 ra->addr, sizeof(ra->addr)) == 0)
6393 return true;
6394 }
6395 }
6396
6397 return false;
6398}
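
/*
 * Sketch of how the Address Select bits are applied above (hypothetical frame
 * layout, not from a capture): with an Ethernet header laid out as
 * DA[0..5] SA[6..11] EtherType[12..13], AS = 00b compares ra->addr against
 * bytes 0..5 (the destination address) and AS = 01b against bytes 6..11 (the
 * source address), i.e. the compare offset is simply
 * sizeof(ra->addr) * (ra->ctl & RA_CTL_AS).
 */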
6399
6400/**
6401 * Matches the packet addresses against Multicast Table Array.
6402 *
6403 * @remarks This is an imperfect match since it matches a subset of addresses
6404 * rather than an exact address.
6405 *
6406 * @returns true if address matches.
6407 * @param pThis Pointer to the state structure.
6408 * @param pvBuf The ethernet packet.
6410 * @thread EMT
6411 */
6412static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6413{
6414 /* Get bits 32..47 of destination address */
6415 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6416
6417 unsigned offset = GET_BITS(RCTL, MO);
6418 /*
6419 * offset means:
6420 * 00b = bits 36..47
6421 * 01b = bits 35..46
6422 * 10b = bits 34..45
6423 * 11b = bits 32..43
6424 */
6425 if (offset < 3)
6426 u16Bit = u16Bit >> (4 - offset);
6427 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6428}
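
/*
 * Worked example for the multicast hash above (hypothetical destination
 * address, and assuming a little-endian host): for DA = 01:00:5E:00:00:FB,
 * ((uint16_t*)pvBuf)[2] reads bytes 4..5 as 0xFB00. With RCTL.MO = 00b the
 * value is shifted right by 4 bits, giving 0x0FB0, and the low 12 bits
 * (0xFB0 = 4016) select the bit that is tested in the 4096-bit MTA.
 */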
6429
6430/**
6431 * Determines if the packet is to be delivered to upper layer.
6432 *
6433 * The following filters are supported:
6434 * - Exact Unicast/Multicast
6435 * - Promiscuous Unicast/Multicast
6436 * - Multicast
6437 * - VLAN
6438 *
6439 * @returns true if packet is intended for this node.
6440 * @param pThis Pointer to the state structure.
6441 * @param pvBuf The ethernet packet.
6442 * @param cb Number of bytes available in the packet.
6443 * @param pStatus Bit field to store status bits.
6444 * @thread EMT
6445 */
6446static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6447{
6448 Assert(cb > 14);
6449 /* Assume that we fail to pass exact filter. */
6450 pStatus->fPIF = false;
6451 pStatus->fVP = false;
6452 /* Discard oversized packets */
6453 if (cb > E1K_MAX_RX_PKT_SIZE)
6454 {
6455 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6456 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6457 E1K_INC_CNT32(ROC);
6458 return false;
6459 }
6460 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6461 {
6462 /* When long packet reception is disabled, packets over 1522 bytes are discarded */
6463 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6464 pThis->szPrf, cb));
6465 E1K_INC_CNT32(ROC);
6466 return false;
6467 }
6468
6469 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6470 /* Compare TPID with VLAN Ether Type */
6471 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6472 {
6473 pStatus->fVP = true;
6474 /* Is VLAN filtering enabled? */
6475 if (RCTL & RCTL_VFE)
6476 {
6477 /* It is 802.1q packet indeed, let's filter by VID */
6478 if (RCTL & RCTL_CFIEN)
6479 {
6480 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6481 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6482 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6483 !!(RCTL & RCTL_CFI)));
6484 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6485 {
6486 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6487 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6488 return false;
6489 }
6490 }
6491 else
6492 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6493 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6494 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6495 {
6496 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6497 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6498 return false;
6499 }
6500 }
6501 }
6502 /* Broadcast filtering */
6503 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6504 return true;
6505 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6506 if (e1kIsMulticast(pvBuf))
6507 {
6508 /* Is multicast promiscuous enabled? */
6509 if (RCTL & RCTL_MPE)
6510 return true;
6511 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6512 /* Try perfect matches first */
6513 if (e1kPerfectMatch(pThis, pvBuf))
6514 {
6515 pStatus->fPIF = true;
6516 return true;
6517 }
6518 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6519 if (e1kImperfectMatch(pThis, pvBuf))
6520 return true;
6521 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6522 }
6523 else {
6524 /* Is unicast promiscuous enabled? */
6525 if (RCTL & RCTL_UPE)
6526 return true;
6527 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6528 if (e1kPerfectMatch(pThis, pvBuf))
6529 {
6530 pStatus->fPIF = true;
6531 return true;
6532 }
6533 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6534 }
6535 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6536 return false;
6537}
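
/*
 * For reference, a minimal sketch of how the 802.1q tag words used above are
 * picked apart (u16Ptr[6] holds the TPID, which is compared against VET, and
 * u16Ptr[7] holds the TCI; the local names below are illustrative only):
 *
 *   uint16_t u16Tci  = RT_BE2H_U16(u16Ptr[7]);
 *   uint16_t uVlanId = u16Tci & 0xFFF;            // bits 0..11: VLAN identifier
 *   bool     fCfi    = RT_BOOL(u16Tci & 0x1000);  // bit 12: CFI
 *   uint8_t  uPri    = (uint8_t)(u16Tci >> 13);   // bits 13..15: priority
 */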
6538
6539/**
6540 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6541 */
6542static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6543{
6544 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6545 int rc = VINF_SUCCESS;
6546
6547 /*
6548 * Drop packets if the VM is not running yet/anymore.
6549 */
6550 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pThis));
6551 if ( enmVMState != VMSTATE_RUNNING
6552 && enmVMState != VMSTATE_RUNNING_LS)
6553 {
6554 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6555 return VINF_SUCCESS;
6556 }
6557
6558 /* Discard incoming packets in locked state */
6559 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6560 {
6561 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6562 return VINF_SUCCESS;
6563 }
6564
6565 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6566
6567 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6568 // return VERR_PERMISSION_DENIED;
6569
6570 e1kPacketDump(pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6571
6572 /* Update stats */
6573 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6574 {
6575 E1K_INC_CNT32(TPR);
6576 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6577 e1kCsLeave(pThis);
6578 }
6579 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6580 E1KRXDST status;
6581 RT_ZERO(status);
6582 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6583 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6584 if (fPassed)
6585 {
6586 rc = e1kHandleRxPacket(pThis, pvBuf, cb, status);
6587 }
6588 //e1kCsLeave(pThis);
6589 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6590
6591 return rc;
6592}
6593
6594
6595/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6596
6597/**
6598 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6599 */
6600static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6601{
6602 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
6603 int rc = VERR_PDM_LUN_NOT_FOUND;
6604
6605 if (iLUN == 0)
6606 {
6607 *ppLed = &pThis->led;
6608 rc = VINF_SUCCESS;
6609 }
6610 return rc;
6611}
6612
6613
6614/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6615
6616/**
6617 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6618 */
6619static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6620{
6621 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6622 pThis->eeprom.getMac(pMac);
6623 return VINF_SUCCESS;
6624}
6625
6626/**
6627 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6628 */
6629static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6630{
6631 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6632 if (STATUS & STATUS_LU)
6633 return PDMNETWORKLINKSTATE_UP;
6634 return PDMNETWORKLINKSTATE_DOWN;
6635}
6636
6637/**
6638 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6639 */
6640static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6641{
6642 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6643
6644 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6645 switch (enmState)
6646 {
6647 case PDMNETWORKLINKSTATE_UP:
6648 pThis->fCableConnected = true;
6649 /* If link was down, bring it up after a while. */
6650 if (!(STATUS & STATUS_LU))
6651 e1kBringLinkUpDelayed(pThis);
6652 break;
6653 case PDMNETWORKLINKSTATE_DOWN:
6654 pThis->fCableConnected = false;
6655 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6656 * We might have to set the link state before the driver initializes us. */
6657 Phy::setLinkStatus(&pThis->phy, false);
6658 /* If link was up, bring it down. */
6659 if (STATUS & STATUS_LU)
6660 e1kR3LinkDown(pThis);
6661 break;
6662 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6663 /*
6664 * There is not much sense in bringing down the link if it has not come up yet.
6665 * If it is up though, we bring it down temporarily, then bring it up again.
6666 */
6667 if (STATUS & STATUS_LU)
6668 e1kR3LinkDownTemp(pThis);
6669 break;
6670 default:
6671 ;
6672 }
6673 return VINF_SUCCESS;
6674}
6675
6676
6677/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6678
6679/**
6680 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6681 */
6682static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6683{
6684 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
6685 Assert(&pThis->IBase == pInterface);
6686
6687 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
6688 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
6689 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
6690 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
6691 return NULL;
6692}
6693
6694
6695/* -=-=-=-=- Saved State -=-=-=-=- */
6696
6697/**
6698 * Saves the configuration.
6699 *
6700 * @param pThis The E1K state.
6701 * @param pSSM The handle to the saved state.
6702 */
6703static void e1kSaveConfig(PE1KSTATE pThis, PSSMHANDLE pSSM)
6704{
6705 SSMR3PutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
6706 SSMR3PutU32(pSSM, pThis->eChip);
6707}
6708
6709/**
6710 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6711 */
6712static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6713{
6714 RT_NOREF(uPass);
6715 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6716 e1kSaveConfig(pThis, pSSM);
6717 return VINF_SSM_DONT_CALL_AGAIN;
6718}
6719
6720/**
6721 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6722 */
6723static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6724{
6725 RT_NOREF(pSSM);
6726 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6727
6728 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6729 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6730 return rc;
6731 e1kCsLeave(pThis);
6732 return VINF_SUCCESS;
6733#if 0
6734 /* 1) Prevent all threads from modifying the state and memory */
6735 //pThis->fLocked = true;
6736 /* 2) Cancel all timers */
6737#ifdef E1K_TX_DELAY
6738 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
6739#endif /* E1K_TX_DELAY */
6740//#ifdef E1K_USE_TX_TIMERS
6741 if (pThis->fTidEnabled)
6742 {
6743 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
6744#ifndef E1K_NO_TAD
6745 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
6746#endif /* E1K_NO_TAD */
6747 }
6748//#endif /* E1K_USE_TX_TIMERS */
6749#ifdef E1K_USE_RX_TIMERS
6750 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
6751 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
6752#endif /* E1K_USE_RX_TIMERS */
6753 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
6754 /* 3) Did I forget anything? */
6755 E1kLog(("%s Locked\n", pThis->szPrf));
6756 return VINF_SUCCESS;
6757#endif
6758}
6759
6760/**
6761 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6762 */
6763static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6764{
6765 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6766
6767 e1kSaveConfig(pThis, pSSM);
6768 pThis->eeprom.save(pSSM);
6769 e1kDumpState(pThis);
6770 SSMR3PutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
6771 SSMR3PutBool(pSSM, pThis->fIntRaised);
6772 Phy::saveState(pSSM, &pThis->phy);
6773 SSMR3PutU32(pSSM, pThis->uSelectedReg);
6774 SSMR3PutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
6775 SSMR3PutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6776 SSMR3PutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
6777 SSMR3PutU64(pSSM, pThis->u64AckedAt);
6778 SSMR3PutU16(pSSM, pThis->u16RxBSize);
6779 //SSMR3PutBool(pSSM, pThis->fDelayInts);
6780 //SSMR3PutBool(pSSM, pThis->fIntMaskUsed);
6781 SSMR3PutU16(pSSM, pThis->u16TxPktLen);
6782/** @todo The state w.r.t. the TSE buffer is incomplete, so there is little
6783 * point in actually saving this. */
6784 SSMR3PutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
6785 SSMR3PutBool(pSSM, pThis->fIPcsum);
6786 SSMR3PutBool(pSSM, pThis->fTCPcsum);
6787 SSMR3PutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6788 SSMR3PutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6789 SSMR3PutBool(pSSM, pThis->fVTag);
6790 SSMR3PutU16(pSSM, pThis->u16VTagTCI);
6791#ifdef E1K_WITH_TXD_CACHE
6792#if 0
6793 SSMR3PutU8(pSSM, pThis->nTxDFetched);
6794 SSMR3PutMem(pSSM, pThis->aTxDescriptors,
6795 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6796#else
6797 /*
6798 * There is no point in storing TX descriptor cache entries as we can simply
6799 * fetch them again. Moreover, normally the cache is always empty when we
6800 * save the state. Store zero entries for compatibility.
6801 */
6802 SSMR3PutU8(pSSM, 0);
6803#endif
6804#endif /* E1K_WITH_TXD_CACHE */
6805/** @todo GSO requires some more state here. */
6806 E1kLog(("%s State has been saved\n", pThis->szPrf));
6807 return VINF_SUCCESS;
6808}
6809
6810#if 0
6811/**
6812 * @callback_method_impl{FNSSMDEVSAVEDONE}
6813 */
6814static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6815{
6816 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6817
6818 /* If VM is being powered off unlocking will result in assertions in PGM */
6819 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6820 pThis->fLocked = false;
6821 else
6822 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
6823 E1kLog(("%s Unlocked\n", pThis->szPrf));
6824 return VINF_SUCCESS;
6825}
6826#endif
6827
6828/**
6829 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6830 */
6831static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6832{
6833 RT_NOREF(pSSM);
6834 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6835
6836 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6837 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6838 return rc;
6839 e1kCsLeave(pThis);
6840 return VINF_SUCCESS;
6841}
6842
6843/**
6844 * @callback_method_impl{FNSSMDEVLOADEXEC}
6845 */
6846static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6847{
6848 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6849 int rc;
6850
6851 if ( uVersion != E1K_SAVEDSTATE_VERSION
6852#ifdef E1K_WITH_TXD_CACHE
6853 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6854#endif /* E1K_WITH_TXD_CACHE */
6855 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6856 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6857 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6858
6859 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6860 || uPass != SSM_PASS_FINAL)
6861 {
6862 /* config checks */
6863 RTMAC macConfigured;
6864 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6865 AssertRCReturn(rc, rc);
6866 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
6867 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6868 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
6869
6870 E1KCHIP eChip;
6871 rc = SSMR3GetU32(pSSM, &eChip);
6872 AssertRCReturn(rc, rc);
6873 if (eChip != pThis->eChip)
6874 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
6875 }
6876
6877 if (uPass == SSM_PASS_FINAL)
6878 {
6879 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6880 {
6881 rc = pThis->eeprom.load(pSSM);
6882 AssertRCReturn(rc, rc);
6883 }
6884 /* the state */
6885 SSMR3GetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
6886 SSMR3GetBool(pSSM, &pThis->fIntRaised);
6887 /** @todo PHY could be made a separate device with its own versioning */
6888 Phy::loadState(pSSM, &pThis->phy);
6889 SSMR3GetU32(pSSM, &pThis->uSelectedReg);
6890 SSMR3GetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
6891 SSMR3GetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6892 SSMR3GetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
6893 SSMR3GetU64(pSSM, &pThis->u64AckedAt);
6894 SSMR3GetU16(pSSM, &pThis->u16RxBSize);
6895 //SSMR3GetBool(pSSM, pThis->fDelayInts);
6896 //SSMR3GetBool(pSSM, pThis->fIntMaskUsed);
6897 SSMR3GetU16(pSSM, &pThis->u16TxPktLen);
6898 SSMR3GetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
6899 SSMR3GetBool(pSSM, &pThis->fIPcsum);
6900 SSMR3GetBool(pSSM, &pThis->fTCPcsum);
6901 SSMR3GetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6902 rc = SSMR3GetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6903 AssertRCReturn(rc, rc);
6904 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6905 {
6906 SSMR3GetBool(pSSM, &pThis->fVTag);
6907 rc = SSMR3GetU16(pSSM, &pThis->u16VTagTCI);
6908 AssertRCReturn(rc, rc);
6909 }
6910 else
6911 {
6912 pThis->fVTag = false;
6913 pThis->u16VTagTCI = 0;
6914 }
6915#ifdef E1K_WITH_TXD_CACHE
6916 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
6917 {
6918 rc = SSMR3GetU8(pSSM, &pThis->nTxDFetched);
6919 AssertRCReturn(rc, rc);
6920 if (pThis->nTxDFetched)
6921 SSMR3GetMem(pSSM, pThis->aTxDescriptors,
6922 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6923 }
6924 else
6925 pThis->nTxDFetched = 0;
6926 /*
6927 * @todo Perhaps we should not store the TXD cache as the entries can be
6928 * simply fetched again from the guest's memory. Or can't they?
6929 */
6930#endif /* E1K_WITH_TXD_CACHE */
6931#ifdef E1K_WITH_RXD_CACHE
6932 /*
6933 * There is no point in storing the RX descriptor cache in the saved
6934 * state, we just need to make sure it is empty.
6935 */
6936 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
6937#endif /* E1K_WITH_RXD_CACHE */
6938 /* derived state */
6939 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
6940
6941 E1kLog(("%s State has been restored\n", pThis->szPrf));
6942 e1kDumpState(pThis);
6943 }
6944 return VINF_SUCCESS;
6945}
6946
6947/**
6948 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
6949 */
6950static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6951{
6952 RT_NOREF(pSSM);
6953 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6954
6955 /* Update promiscuous mode */
6956 if (pThis->pDrvR3)
6957 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3,
6958 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
6959
6960 /*
6961 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
6962 * passed to us. We go through all this stuff if the link was up and we
6963 * weren't teleported.
6964 */
6965 if ( (STATUS & STATUS_LU)
6966 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
6967 && pThis->cMsLinkUpDelay)
6968 {
6969 e1kR3LinkDownTemp(pThis);
6970 }
6971 return VINF_SUCCESS;
6972}
6973
6974
6975
6976/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
6977
6978/**
6979 * @callback_method_impl{FNRTSTRFORMATTYPE}
6980 */
6981static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
6982 void *pvArgOutput,
6983 const char *pszType,
6984 void const *pvValue,
6985 int cchWidth,
6986 int cchPrecision,
6987 unsigned fFlags,
6988 void *pvUser)
6989{
6990 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
6991 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
6992 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
6993 if (!pDesc)
6994 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
6995
6996 size_t cbPrintf = 0;
6997 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
6998 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
6999 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
7000 pDesc->status.fPIF ? "PIF" : "pif",
7001 pDesc->status.fIPCS ? "IPCS" : "ipcs",
7002 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
7003 pDesc->status.fVP ? "VP" : "vp",
7004 pDesc->status.fIXSM ? "IXSM" : "ixsm",
7005 pDesc->status.fEOP ? "EOP" : "eop",
7006 pDesc->status.fDD ? "DD" : "dd",
7007 pDesc->status.fRXE ? "RXE" : "rxe",
7008 pDesc->status.fIPE ? "IPE" : "ipe",
7009 pDesc->status.fTCPE ? "TCPE" : "tcpe",
7010 pDesc->status.fCE ? "CE" : "ce",
7011 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
7012 E1K_SPEC_VLAN(pDesc->status.u16Special),
7013 E1K_SPEC_PRI(pDesc->status.u16Special));
7014 return cbPrintf;
7015}
7016
7017/**
7018 * @callback_method_impl{FNRTSTRFORMATTYPE}
7019 */
7020static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
7021 void *pvArgOutput,
7022 const char *pszType,
7023 void const *pvValue,
7024 int cchWidth,
7025 int cchPrecision,
7026 unsigned fFlags,
7027 void *pvUser)
7028{
7029 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7030 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
7031 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
7032 if (!pDesc)
7033 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
7034
7035 size_t cbPrintf = 0;
7036 switch (e1kGetDescType(pDesc))
7037 {
7038 case E1K_DTYP_CONTEXT:
7039 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
7040 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
7041 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
7042 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
7043 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
7044 pDesc->context.dw2.fIDE ? " IDE":"",
7045 pDesc->context.dw2.fRS ? " RS" :"",
7046 pDesc->context.dw2.fTSE ? " TSE":"",
7047 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
7048 pDesc->context.dw2.fTCP ? "TCP":"UDP",
7049 pDesc->context.dw2.u20PAYLEN,
7050 pDesc->context.dw3.u8HDRLEN,
7051 pDesc->context.dw3.u16MSS,
7052 pDesc->context.dw3.fDD?"DD":"");
7053 break;
7054 case E1K_DTYP_DATA:
7055 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
7056 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
7057 pDesc->data.u64BufAddr,
7058 pDesc->data.cmd.u20DTALEN,
7059 pDesc->data.cmd.fIDE ? " IDE" :"",
7060 pDesc->data.cmd.fVLE ? " VLE" :"",
7061 pDesc->data.cmd.fRPS ? " RPS" :"",
7062 pDesc->data.cmd.fRS ? " RS" :"",
7063 pDesc->data.cmd.fTSE ? " TSE" :"",
7064 pDesc->data.cmd.fIFCS? " IFCS":"",
7065 pDesc->data.cmd.fEOP ? " EOP" :"",
7066 pDesc->data.dw3.fDD ? " DD" :"",
7067 pDesc->data.dw3.fEC ? " EC" :"",
7068 pDesc->data.dw3.fLC ? " LC" :"",
7069 pDesc->data.dw3.fTXSM? " TXSM":"",
7070 pDesc->data.dw3.fIXSM? " IXSM":"",
7071 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
7072 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
7073 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
7074 break;
7075 case E1K_DTYP_LEGACY:
7076 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
7077 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
7078 pDesc->data.u64BufAddr,
7079 pDesc->legacy.cmd.u16Length,
7080 pDesc->legacy.cmd.fIDE ? " IDE" :"",
7081 pDesc->legacy.cmd.fVLE ? " VLE" :"",
7082 pDesc->legacy.cmd.fRPS ? " RPS" :"",
7083 pDesc->legacy.cmd.fRS ? " RS" :"",
7084 pDesc->legacy.cmd.fIC ? " IC" :"",
7085 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7086 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7087 pDesc->legacy.dw3.fDD ? " DD" :"",
7088 pDesc->legacy.dw3.fEC ? " EC" :"",
7089 pDesc->legacy.dw3.fLC ? " LC" :"",
7090 pDesc->legacy.cmd.u8CSO,
7091 pDesc->legacy.dw3.u8CSS,
7092 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7093 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7094 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7095 break;
7096 default:
7097 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7098 break;
7099 }
7100
7101 return cbPrintf;
7102}
7103
7104/** Initializes debug helpers (logging format types). */
7105static int e1kInitDebugHelpers(void)
7106{
7107 int rc = VINF_SUCCESS;
7108 static bool s_fHelpersRegistered = false;
7109 if (!s_fHelpersRegistered)
7110 {
7111 s_fHelpersRegistered = true;
7112 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
7113 AssertRCReturn(rc, rc);
7114 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
7115 AssertRCReturn(rc, rc);
7116 }
7117 return rc;
7118}
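
/*
 * Once registered, the custom format types can be used wherever IPRT/VBox
 * format strings are accepted; a minimal (hypothetical) usage sketch:
 *
 *   E1KRXDESC RxDesc;  // filled in elsewhere
 *   E1kLog(("%s RX: %R[e1krxd]\n", pThis->szPrf, &RxDesc));
 *
 * e1kInfo() below relies on the same registration for its "%R[e1krxd]" and
 * "%R[e1ktxd]" descriptor dumps.
 */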
7119
7120/**
7121 * Status info callback.
7122 *
7123 * @param pDevIns The device instance.
7124 * @param pHlp The output helpers.
7125 * @param pszArgs The arguments.
7126 */
7127static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7128{
7129 RT_NOREF(pszArgs);
7130 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7131 unsigned i;
7132 // bool fRcvRing = false;
7133 // bool fXmtRing = false;
7134
7135 /*
7136 * Parse args.
7137 if (pszArgs)
7138 {
7139 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7140 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7141 }
7142 */
7143
7144 /*
7145 * Show info.
7146 */
7147 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7148 pDevIns->iInstance, pThis->IOPortBase, pThis->addrMMReg,
7149 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7150 pThis->fRCEnabled ? " GC" : "", pThis->fR0Enabled ? " R0" : "");
7151
7152 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7153
7154 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7155 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7156
7157 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7158 {
7159 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7160 if (ra->ctl & RA_CTL_AV)
7161 {
7162 const char *pcszTmp;
7163 switch (ra->ctl & RA_CTL_AS)
7164 {
7165 case 0: pcszTmp = "DST"; break;
7166 case 1: pcszTmp = "SRC"; break;
7167 default: pcszTmp = "reserved";
7168 }
7169 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7170 }
7171 }
7172 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7173 uint32_t rdh = RDH;
7174 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7175 for (i = 0; i < cDescs; ++i)
7176 {
7177 E1KRXDESC desc;
7178 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7179 &desc, sizeof(desc));
7180 if (i == rdh)
7181 pHlp->pfnPrintf(pHlp, ">>> ");
7182 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7183 }
7184#ifdef E1K_WITH_RXD_CACHE
7185 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7186 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
7187 if (rdh > pThis->iRxDCurrent)
7188 rdh -= pThis->iRxDCurrent;
7189 else
7190 rdh = cDescs + rdh - pThis->iRxDCurrent;
7191 for (i = 0; i < pThis->nRxDFetched; ++i)
7192 {
7193 if (i == pThis->iRxDCurrent)
7194 pHlp->pfnPrintf(pHlp, ">>> ");
7195 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7196 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7197 &pThis->aRxDescriptors[i]);
7198 }
7199#endif /* E1K_WITH_RXD_CACHE */
7200
7201 cDescs = TDLEN / sizeof(E1KTXDESC);
7202 uint32_t tdh = TDH;
7203 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7204 for (i = 0; i < cDescs; ++i)
7205 {
7206 E1KTXDESC desc;
7207 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7208 &desc, sizeof(desc));
7209 if (i == tdh)
7210 pHlp->pfnPrintf(pHlp, ">>> ");
7211 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7212 }
7213#ifdef E1K_WITH_TXD_CACHE
7214 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7215 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7216 if (tdh > pThis->iTxDCurrent)
7217 tdh -= pThis->iTxDCurrent;
7218 else
7219 tdh = cDescs + tdh - pThis->iTxDCurrent;
7220 for (i = 0; i < pThis->nTxDFetched; ++i)
7221 {
7222 if (i == pThis->iTxDCurrent)
7223 pHlp->pfnPrintf(pHlp, ">>> ");
7224 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7225 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7226 &pThis->aTxDescriptors[i]);
7227 }
7228#endif /* E1K_WITH_TXD_CACHE */
7229
7230
7231#ifdef E1K_INT_STATS
7232 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7233 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7234 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7235 pHlp->pfnPrintf(pHlp, "ICR outside ISR : %d\n", pThis->uStatNoIntICR);
7236 pHlp->pfnPrintf(pHlp, "IMS raised ints : %d\n", pThis->uStatIntIMS);
7237 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7238 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7239 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7240 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7241 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7242 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7243 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7244 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7245 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7246 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7247 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7248 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7249 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7250 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pThis->uStatTxDelayExp);
7251 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7252 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7253 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7254 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7255 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7256 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7257 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7258 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7259 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7260 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7261 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7262 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7263 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7264 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7265 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7266 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7267 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7268 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7269 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7270#endif /* E1K_INT_STATS */
7271
7272 e1kCsLeave(pThis);
7273}
7274
7275
7276
7277/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7278
7279/**
7280 * Detach notification.
7281 *
7282 * One port on the network card has been disconnected from the network.
7283 *
7284 * @param pDevIns The device instance.
7285 * @param iLUN The logical unit which is being detached.
7286 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7287 */
7288static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7289{
7290 RT_NOREF(fFlags);
7291 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7292 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7293
7294 AssertLogRelReturnVoid(iLUN == 0);
7295
7296 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7297
7298 /** @todo r=pritesh still need to check whether I missed
7299 * cleaning something up in this function
7300 */
7301
7302 /*
7303 * Zero some important members.
7304 */
7305 pThis->pDrvBase = NULL;
7306 pThis->pDrvR3 = NULL;
7307 pThis->pDrvR0 = NIL_RTR0PTR;
7308 pThis->pDrvRC = NIL_RTRCPTR;
7309
7310 PDMCritSectLeave(&pThis->cs);
7311}
7312
7313/**
7314 * Attach a network driver to the card.
7315 *
7316 * One port on the network card has been connected to a network.
7317 *
7318 * @returns VBox status code.
7319 * @param pDevIns The device instance.
7320 * @param iLUN The logical unit which is being attached.
7321 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7322 *
7323 * @remarks This code path is not used during construction.
7324 */
7325static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7326{
7327 RT_NOREF(fFlags);
7328 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7329 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7330
7331 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7332
7333 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7334
7335 /*
7336 * Attach the driver.
7337 */
7338 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7339 if (RT_SUCCESS(rc))
7340 {
7341 if (rc == VINF_NAT_DNS)
7342 {
7343#ifdef RT_OS_LINUX
7344 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7345 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7346#else
7347 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7348 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7349#endif
7350 }
7351 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7352 AssertMsgStmt(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7353 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7354 if (RT_SUCCESS(rc))
7355 {
7356 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0);
7357 pThis->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
7358
7359 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC);
7360 pThis->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTR0PTR;
7361 }
7362 }
7363 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7364 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7365 {
7366 /* This should never happen because this function is not called
7367 * if there is no driver to attach! */
7368 Log(("%s No attached driver!\n", pThis->szPrf));
7369 }
7370
7371 /*
7372 * Temporarily set the link down if it was up so that the guest
7373 * will know that we have changed the configuration of the
7374 * network card.
7375 */
7376 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7377 e1kR3LinkDownTemp(pThis);
7378
7379 PDMCritSectLeave(&pThis->cs);
7380 return rc;
7381
7382}
7383
7384/**
7385 * @copydoc FNPDMDEVPOWEROFF
7386 */
7387static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7388{
7389 /* Poke thread waiting for buffer space. */
7390 e1kWakeupReceive(pDevIns);
7391}
7392
7393/**
7394 * @copydoc FNPDMDEVRESET
7395 */
7396static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7397{
7398 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7399#ifdef E1K_TX_DELAY
7400 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
7401#endif /* E1K_TX_DELAY */
7402 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
7403 e1kCancelTimer(pThis, pThis->CTX_SUFF(pLUTimer));
7404 e1kXmitFreeBuf(pThis);
7405 pThis->u16TxPktLen = 0;
7406 pThis->fIPcsum = false;
7407 pThis->fTCPcsum = false;
7408 pThis->fIntMaskUsed = false;
7409 pThis->fDelayInts = false;
7410 pThis->fLocked = false;
7411 pThis->u64AckedAt = 0;
7412 e1kHardReset(pThis);
7413}
7414
7415/**
7416 * @copydoc FNPDMDEVSUSPEND
7417 */
7418static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7419{
7420 /* Poke thread waiting for buffer space. */
7421 e1kWakeupReceive(pDevIns);
7422}
7423
7424/**
7425 * Device relocation callback.
7426 *
7427 * When this callback is called the device instance data, and if the
7428 * device have a GC component, is being relocated, or/and the selectors
7429 * have been changed. The device must use the chance to perform the
7430 * necessary pointer relocations and data updates.
7431 *
7432 * Before the GC code is executed the first time, this function will be
7433 * called with a 0 delta so GC pointer calculations can be one in one place.
7434 *
7435 * @param pDevIns Pointer to the device instance.
7436 * @param offDelta The relocation delta relative to the old location.
7437 *
7438 * @remark A relocation CANNOT fail.
7439 */
7440static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7441{
7442 RT_NOREF(offDelta);
7443 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7444 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7445 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7446 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7447#ifdef E1K_USE_RX_TIMERS
7448 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7449 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7450#endif /* E1K_USE_RX_TIMERS */
7451//#ifdef E1K_USE_TX_TIMERS
7452 if (pThis->fTidEnabled)
7453 {
7454 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7455# ifndef E1K_NO_TAD
7456 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7457# endif /* E1K_NO_TAD */
7458 }
7459//#endif /* E1K_USE_TX_TIMERS */
7460#ifdef E1K_TX_DELAY
7461 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7462#endif /* E1K_TX_DELAY */
7463 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7464 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7465}
7466
7467/**
7468 * Destruct a device instance.
7469 *
7470 * We need to free non-VM resources only.
7471 *
7472 * @returns VBox status code.
7473 * @param pDevIns The device instance data.
7474 * @thread EMT
7475 */
7476static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7477{
7478 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7479 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7480
7481 e1kDumpState(pThis);
7482 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7483 if (PDMCritSectIsInitialized(&pThis->cs))
7484 {
7485 if (pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
7486 {
7487 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
7488 RTSemEventDestroy(pThis->hEventMoreRxDescAvail);
7489 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7490 }
7491#ifdef E1K_WITH_TX_CS
7492 PDMR3CritSectDelete(&pThis->csTx);
7493#endif /* E1K_WITH_TX_CS */
7494 PDMR3CritSectDelete(&pThis->csRx);
7495 PDMR3CritSectDelete(&pThis->cs);
7496 }
7497 return VINF_SUCCESS;
7498}
7499
7500
7501/**
7502 * Set PCI configuration space registers.
7503 *
7504 * @param pPciDev Pointer to the PCI device structure.
7505 * @thread EMT
7506 */
7507static DECLCALLBACK(void) e1kConfigurePciDev(PPDMPCIDEV pPciDev, E1KCHIP eChip)
7508{
7509 Assert(eChip < RT_ELEMENTS(g_aChips));
7510 /* Configure PCI Device, assume 32-bit mode ******************************/
7511 PCIDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7512 PCIDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7513 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7514 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7515
7516 PCIDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7517 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7518 PCIDevSetWord( pPciDev, VBOX_PCI_STATUS,
7519 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7520 /* Stepping A2 */
7521 PCIDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7522 /* Ethernet adapter */
7523 PCIDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7524 PCIDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7525 /* normal single function Ethernet controller */
7526 PCIDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7527 /* Memory Register Base Address */
7528 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7529 /* Memory Flash Base Address */
7530 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7531 /* IO Register Base Address */
7532 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7533 /* Expansion ROM Base Address */
7534 PCIDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7535 /* Capabilities Pointer */
7536 PCIDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7537 /* Interrupt Pin: INTA# */
7538 PCIDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7539 /* Max_Lat/Min_Gnt: very high priority and time slice */
7540 PCIDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7541 PCIDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7542
7543 /* PCI Power Management Registers ****************************************/
7544 /* Capability ID: PCI Power Management Registers */
7545 PCIDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7546 /* Next Item Pointer: PCI-X */
7547 PCIDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7548 /* Power Management Capabilities: PM disabled, DSI */
7549 PCIDevSetWord( pPciDev, 0xDC + 2,
7550 0x0002 | VBOX_PCI_PM_CAP_DSI);
7551 /* Power Management Control / Status Register: PM disabled */
7552 PCIDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7553 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7554 PCIDevSetByte( pPciDev, 0xDC + 6, 0x00);
7555 /* Data Register: PM disabled, always 0 */
7556 PCIDevSetByte( pPciDev, 0xDC + 7, 0x00);
7557
7558 /* PCI-X Configuration Registers *****************************************/
7559 /* Capability ID: PCI-X Configuration Registers */
7560 PCIDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7561#ifdef E1K_WITH_MSI
7562 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7563#else
7564 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7565 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7566#endif
7567 /* PCI-X Command: Enable Relaxed Ordering */
7568 PCIDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7569 /* PCI-X Status: 32-bit, 66MHz */
7570 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7571 PCIDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7572}
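
/*
 * For orientation, the capability chain built above looks like this (offsets as
 * written by e1kConfigurePciDev; the MSI link exists only with E1K_WITH_MSI):
 *
 *   VBOX_PCI_CAPABILITY_LIST (0x34) -> 0xDC PCI Power Management (next = 0xE4)
 *                                      0xE4 PCI-X (next = 0x00, or 0x80/MSI
 *                                            when E1K_WITH_MSI is defined)
 */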
7573
7574/**
7575 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7576 */
7577static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7578{
7579 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7580 int rc;
7581 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7582
7583 /*
7584 * Initialize the instance data (state).
7585 * Note! Caller has initialized it to ZERO already.
7586 */
7587 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7588 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7589 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7590 pThis->pDevInsR3 = pDevIns;
7591 pThis->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
7592 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7593 pThis->u16TxPktLen = 0;
7594 pThis->fIPcsum = false;
7595 pThis->fTCPcsum = false;
7596 pThis->fIntMaskUsed = false;
7597 pThis->fDelayInts = false;
7598 pThis->fLocked = false;
7599 pThis->u64AckedAt = 0;
7600 pThis->led.u32Magic = PDMLED_MAGIC;
7601 pThis->u32PktNo = 1;
7602
7603 /* Interfaces */
7604 pThis->IBase.pfnQueryInterface = e1kR3QueryInterface;
7605
7606 pThis->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7607 pThis->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7608 pThis->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7609
7610 pThis->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7611
7612 pThis->INetworkConfig.pfnGetMac = e1kR3GetMac;
7613 pThis->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7614 pThis->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7615
7616 /*
7617 * Internal validations.
7618 */
7619 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7620 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7621 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7622 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7623 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7624 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7625 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7626 VERR_INTERNAL_ERROR_4);
7627
7628 /*
7629 * Validate configuration.
7630 */
7631 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
7632 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
7633 "ItrEnabled\0" "ItrRxEnabled\0"
7634 "EthernetCRC\0" "GSOEnabled\0" "LinkUpDelay\0"))
7635 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
7636 N_("Invalid configuration for E1000 device"));
7637
7638 /** @todo LineSpeed unused! */
7639
7640 /* Get config params */
7641 rc = CFGMR3QueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7642 if (RT_FAILURE(rc))
7643 return PDMDEV_SET_ERROR(pDevIns, rc,
7644 N_("Configuration error: Failed to get MAC address"));
7645 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7646 if (RT_FAILURE(rc))
7647 return PDMDEV_SET_ERROR(pDevIns, rc,
7648 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7649 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7650 if (RT_FAILURE(rc))
7651 return PDMDEV_SET_ERROR(pDevIns, rc,
7652 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7653 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7654 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pThis->fRCEnabled, true);
7655 if (RT_FAILURE(rc))
7656 return PDMDEV_SET_ERROR(pDevIns, rc,
7657 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
7658
7659 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pThis->fR0Enabled, true);
7660 if (RT_FAILURE(rc))
7661 return PDMDEV_SET_ERROR(pDevIns, rc,
7662 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
7663
7664 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7665 if (RT_FAILURE(rc))
7666 return PDMDEV_SET_ERROR(pDevIns, rc,
7667 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7668
7669 rc = CFGMR3QueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7670 if (RT_FAILURE(rc))
7671 return PDMDEV_SET_ERROR(pDevIns, rc,
7672 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7673
7674 rc = CFGMR3QueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, false);
7675 if (RT_FAILURE(rc))
7676 return PDMDEV_SET_ERROR(pDevIns, rc,
7677 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7678
7679 rc = CFGMR3QueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7680 if (RT_FAILURE(rc))
7681 return PDMDEV_SET_ERROR(pDevIns, rc,
7682 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7683
7684 rc = CFGMR3QueryBoolDef(pCfg, "TidEnabled", &pThis->fTidEnabled, false);
7685 if (RT_FAILURE(rc))
7686 return PDMDEV_SET_ERROR(pDevIns, rc,
7687 N_("Configuration error: Failed to get the value of 'TidEnabled'"));
7688
7689 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 3000); /* ms */
7690 if (RT_FAILURE(rc))
7691 return PDMDEV_SET_ERROR(pDevIns, rc,
7692 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7693 Assert(pThis->cMsLinkUpDelay <= 300000); /* at most 5 minutes */
7694 if (pThis->cMsLinkUpDelay > 5000)
7695 LogRel(("%s WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7696 else if (pThis->cMsLinkUpDelay == 0)
7697 LogRel(("%s WARNING! Link up delay is disabled!\n", pThis->szPrf));
7698
7699 LogRel(("%s Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s TID=%s R0=%s GC=%s\n", pThis->szPrf,
7700 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7701 pThis->fEthernetCRC ? "on" : "off",
7702 pThis->fGSOEnabled ? "enabled" : "disabled",
7703 pThis->fItrEnabled ? "enabled" : "disabled",
7704 pThis->fItrRxEnabled ? "enabled" : "disabled",
7705 pThis->fTidEnabled ? "enabled" : "disabled",
7706 pThis->fR0Enabled ? "enabled" : "disabled",
7707 pThis->fRCEnabled ? "enabled" : "disabled"));
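/*
 * Editor's illustrative sketch (not part of the original source): the CFGM
 * keys consumed above, roughly as they might appear in a per-instance
 * configuration subtree. The tree location is produced by Main and is an
 * assumption here; the key names and defaults are taken from the queries
 * above.
 *
 *   Devices/e1000/0/Config/
 *     MAC            <6 bytes>  mandatory station address
 *     CableConnected <bool>     mandatory
 *     AdapterType    <uint32>   chip selection, up to E1K_CHIP_82545EM
 *     GCEnabled      <bool>     default true  (raw-mode handlers)
 *     R0Enabled      <bool>     default true  (ring-0 handlers)
 *     EthernetCRC    <bool>     default true
 *     GSOEnabled     <bool>     default true
 *     ItrEnabled     <bool>     default false
 *     ItrRxEnabled   <bool>     default true
 *     TidEnabled     <bool>     default false
 *     LinkUpDelay    <uint32>   default 3000 (ms)
 */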
7708
7709 /* Initialize the EEPROM. */
7710 pThis->eeprom.init(pThis->macConfigured);
7711
7712 /* Initialize internal PHY. */
7713 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
7714
7715 /* Initialize critical sections. We do our own locking. */
7716 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7717 AssertRCReturn(rc, rc);
7718
7719 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
7720 if (RT_FAILURE(rc))
7721 return rc;
7722 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
7723 if (RT_FAILURE(rc))
7724 return rc;
7725#ifdef E1K_WITH_TX_CS
7726 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
7727 if (RT_FAILURE(rc))
7728 return rc;
7729#endif /* E1K_WITH_TX_CS */
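/*
 * Editor's note (hedged, not in the original source): replacing the default
 * per-device critical section with the NOP section above means PDM no longer
 * serializes calls into this device on our behalf; the handlers are assumed
 * to take pThis->cs, csRx and csTx themselves ("We do our own locking"),
 * which is why the MMIO code, timers and RX path need to agree on lock
 * ordering.
 */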
7730
7731 /* Saved state registration. */
7732 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7733 NULL, e1kLiveExec, NULL,
7734 e1kSavePrep, e1kSaveExec, NULL,
7735 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7736 if (RT_FAILURE(rc))
7737 return rc;
7738
7739 /* Set PCI config registers and register ourselves with the PCI bus. */
7740 e1kConfigurePciDev(&pThis->pciDevice, pThis->eChip);
7741 rc = PDMDevHlpPCIRegister(pDevIns, &pThis->pciDevice);
7742 if (RT_FAILURE(rc))
7743 return rc;
7744
7745#ifdef E1K_WITH_MSI
7746 PDMMSIREG MsiReg;
7747 RT_ZERO(MsiReg);
7748 MsiReg.cMsiVectors = 1;
7749 MsiReg.iMsiCapOffset = 0x80;
7750 MsiReg.iMsiNextOffset = 0x0;
7751 MsiReg.fMsi64bit = false;
7752 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
7753 AssertRCReturn(rc, rc);
7754#endif
7755
7756
7757 /* Map our registers to memory space (region 0, see e1kConfigurePciDev). */
7758 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, e1kMap);
7759 if (RT_FAILURE(rc))
7760 return rc;
7761#ifdef E1K_WITH_PREREG_MMIO
7762 rc = PDMDevHlpMMIOExPreRegister(pDevIns, 0, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD, "E1000",
7763 NULL /*pvUserR3*/, e1kMMIOWrite, e1kMMIORead, NULL /*pfnFillR3*/,
7764 NIL_RTR0PTR /*pvUserR0*/, pThis->fR0Enabled ? "e1kMMIOWrite" : NULL,
7765 pThis->fR0Enabled ? "e1kMMIORead" : NULL, NULL /*pszFillR0*/,
7766 NIL_RTRCPTR /*pvUserRC*/, pThis->fRCEnabled ? "e1kMMIOWrite" : NULL,
7767 pThis->fRCEnabled ? "e1kMMIORead" : NULL, NULL /*pszFillRC*/);
7768 AssertLogRelRCReturn(rc, rc);
7769#endif
7770 /* Map our registers to I/O space (region 2, see e1kConfigurePciDev). */
7771 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE, PCI_ADDRESS_SPACE_IO, e1kMap);
7772 if (RT_FAILURE(rc))
7773 return rc;
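/*
 * Editor's note (hedged): both PCI regions are backed by the same e1kMap
 * callback; register dispatch is assumed to go through g_aE1kRegMap (see the
 * offset/size sanity check at the top of this constructor), with
 * e1kMMIORead/e1kMMIOWrite resolving an offset to a register entry and its
 * handler.
 */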
7774
7775 /* Create transmit queue */
7776 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7777 e1kTxQueueConsumer, true, "E1000-Xmit", &pThis->pTxQueueR3);
7778 if (RT_FAILURE(rc))
7779 return rc;
7780 pThis->pTxQueueR0 = PDMQueueR0Ptr(pThis->pTxQueueR3);
7781 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7782
7783 /* Create the RX notifier signaller. */
7784 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7785 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pThis->pCanRxQueueR3);
7786 if (RT_FAILURE(rc))
7787 return rc;
7788 pThis->pCanRxQueueR0 = PDMQueueR0Ptr(pThis->pCanRxQueueR3);
7789 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7790
7791#ifdef E1K_TX_DELAY
7792 /* Create Transmit Delay Timer */
7793 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxDelayTimer, pThis,
7794 TMTIMER_FLAGS_NO_CRIT_SECT,
7795 "E1000 Transmit Delay Timer", &pThis->pTXDTimerR3);
7796 if (RT_FAILURE(rc))
7797 return rc;
7798 pThis->pTXDTimerR0 = TMTimerR0Ptr(pThis->pTXDTimerR3);
7799 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7800 TMR3TimerSetCritSect(pThis->pTXDTimerR3, &pThis->csTx);
7801#endif /* E1K_TX_DELAY */
7802
7803//#ifdef E1K_USE_TX_TIMERS
7804 if (pThis->fTidEnabled)
7805 {
7806 /* Create Transmit Interrupt Delay Timer */
7807 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pThis,
7808 TMTIMER_FLAGS_NO_CRIT_SECT,
7809 "E1000 Transmit Interrupt Delay Timer", &pThis->pTIDTimerR3);
7810 if (RT_FAILURE(rc))
7811 return rc;
7812 pThis->pTIDTimerR0 = TMTimerR0Ptr(pThis->pTIDTimerR3);
7813 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7814
7815# ifndef E1K_NO_TAD
7816 /* Create Transmit Absolute Delay Timer */
7817 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pThis,
7818 TMTIMER_FLAGS_NO_CRIT_SECT,
7819 "E1000 Transmit Absolute Delay Timer", &pThis->pTADTimerR3);
7820 if (RT_FAILURE(rc))
7821 return rc;
7822 pThis->pTADTimerR0 = TMTimerR0Ptr(pThis->pTADTimerR3);
7823 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7824 # endif /* !E1K_NO_TAD */
7825 }
7826//#endif /* E1K_USE_TX_TIMERS */
7827
7828#ifdef E1K_USE_RX_TIMERS
7829 /* Create Receive Interrupt Delay Timer */
7830 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pThis,
7831 TMTIMER_FLAGS_NO_CRIT_SECT,
7832 "E1000 Receive Interrupt Delay Timer", &pThis->pRIDTimerR3);
7833 if (RT_FAILURE(rc))
7834 return rc;
7835 pThis->pRIDTimerR0 = TMTimerR0Ptr(pThis->pRIDTimerR3);
7836 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7837
7838 /* Create Receive Absolute Delay Timer */
7839 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pThis,
7840 TMTIMER_FLAGS_NO_CRIT_SECT,
7841 "E1000 Receive Absolute Delay Timer", &pThis->pRADTimerR3);
7842 if (RT_FAILURE(rc))
7843 return rc;
7844 pThis->pRADTimerR0 = TMTimerR0Ptr(pThis->pRADTimerR3);
7845 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7846#endif /* E1K_USE_RX_TIMERS */
7847
7848 /* Create Late Interrupt Timer */
7849 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pThis,
7850 TMTIMER_FLAGS_NO_CRIT_SECT,
7851 "E1000 Late Interrupt Timer", &pThis->pIntTimerR3);
7852 if (RT_FAILURE(rc))
7853 return rc;
7854 pThis->pIntTimerR0 = TMTimerR0Ptr(pThis->pIntTimerR3);
7855 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7856
7857 /* Create Link Up Timer */
7858 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pThis,
7859 TMTIMER_FLAGS_NO_CRIT_SECT,
7860 "E1000 Link Up Timer", &pThis->pLUTimerR3);
7861 if (RT_FAILURE(rc))
7862 return rc;
7863 pThis->pLUTimerR0 = TMTimerR0Ptr(pThis->pLUTimerR3);
7864 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7865
7866 /* Register the info item */
7867 char szTmp[20];
7868 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7869 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
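/*
 * Editor's usage sketch (hedged): the info handler registered above can be
 * reached from the debug facilities, e.g. for instance 0:
 *
 *   VBoxManage debugvm "<vmname>" info e1k0
 *
 * or "info e1k0" in the VM debugger console. The front-end syntax is an
 * assumption; only the item name "e1k<instance>" comes from the code above.
 */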
7870
7871 /* Status driver */
7872 PPDMIBASE pBase;
7873 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThis->IBase, &pBase, "Status Port");
7874 if (RT_FAILURE(rc))
7875 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7876 pThis->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7877
7878 /* Network driver */
7879 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7880 if (RT_SUCCESS(rc))
7881 {
7882 if (rc == VINF_NAT_DNS)
7883 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7884 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7885 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7886 AssertMsgReturn(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
7887
7888 pThis->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7889 pThis->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7890 }
7891 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7892 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7893 {
7894 /* No error! */
7895 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
7896 }
7897 else
7898 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
7899
7900 rc = RTSemEventCreate(&pThis->hEventMoreRxDescAvail);
7901 if (RT_FAILURE(rc))
7902 return rc;
7903
7904 rc = e1kInitDebugHelpers();
7905 if (RT_FAILURE(rc))
7906 return rc;
7907
7908 e1kHardReset(pThis);
7909
7910 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Public/Net/E1k%u/BytesReceived", iInstance);
7911 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Public/Net/E1k%u/BytesTransmitted", iInstance);
7912
7913 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
7914 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
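/*
 * Editor's note (hedged): the same byte counters are registered twice above,
 * once under /Public/Net/E1k%u/ and once under /Devices/E1k%d/. The /Public/
 * names are assumed to feed the Main performance metrics API, while the
 * /Devices/ tree is assumed to be what "VBoxManage debugvm <vmname>
 * statistics" shows; only the registration paths themselves come from the
 * calls above.
 */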
7915
7916#if defined(VBOX_WITH_STATISTICS)
7917 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
7918 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
7919 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
7920 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
7921 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
7922 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
7923 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
7924 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
7925 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
7926 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
7927 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
7928 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
7929 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
7930 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
7931 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
7932 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
7933 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
7934 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
7935 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
7936 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
7937 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
7938 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
7939 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
7940 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
7941
7942 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContextNormal", iInstance);
7943 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
7944 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
7945 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
7946 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
7947 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
7948 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
7949 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
7950 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
7951 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
7952 {
7953 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7954 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Reads", iInstance, g_aE1kRegMap[iReg].abbrev);
7955 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7956 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Writes", iInstance, g_aE1kRegMap[iReg].abbrev);
7957 }
7958#endif /* VBOX_WITH_STATISTICS */
7959
7960#ifdef E1K_INT_STATS
7961 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
7962 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
7963 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
7964 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
7965 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
7966 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatNoIntICR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatNoIntICR", "/Devices/E1k%d/uStatNoIntICR", iInstance);
7967 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
7968 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
7969 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntIMS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntIMS", "/Devices/E1k%d/uStatIntIMS", iInstance);
7970 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
7971 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
7972 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
7973 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
7974 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
7975 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
7976 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
7977 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
7978 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
7979 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
7980 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
7981 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
7982 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
7983 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
7984 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
7985 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
7986 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
7987 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
7988 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
7989 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
7990 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
7991 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
7992 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
7993 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
7994 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
7995 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
7996 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
7997 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
7998 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
7999 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
8000 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
8001 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
8002#endif /* E1K_INT_STATS */
8003
8004 return VINF_SUCCESS;
8005}
8006
8007/**
8008 * The device registration structure.
8009 */
8010const PDMDEVREG g_DeviceE1000 =
8011{
8012 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
8013 PDM_DEVREG_VERSION,
8014 /* Device name. */
8015 "e1000",
8016 /* Name of guest context module (no path).
8017 * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
8018 "VBoxDDRC.rc",
8019 /* Name of ring-0 module (no path).
8020 * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
8021 "VBoxDDR0.r0",
8022 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
8023 * remain unchanged from registration till VM destruction. */
8024 "Intel PRO/1000 MT Desktop Ethernet.\n",
8025
8026 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
8027 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
8028 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
8029 PDM_DEVREG_CLASS_NETWORK,
8030 /* Maximum number of instances (per VM). */
8031 ~0U,
8032 /* Size of the instance data. */
8033 sizeof(E1KSTATE),
8034
8035 /* pfnConstruct */
8036 e1kR3Construct,
8037 /* pfnDestruct */
8038 e1kR3Destruct,
8039 /* pfnRelocate */
8040 e1kR3Relocate,
8041 /* pfnMemSetup */
8042 NULL,
8043 /* pfnPowerOn */
8044 NULL,
8045 /* pfnReset */
8046 e1kR3Reset,
8047 /* pfnSuspend */
8048 e1kR3Suspend,
8049 /* pfnResume */
8050 NULL,
8051 /* pfnAttach */
8052 e1kR3Attach,
8053 /* pfnDetach */
8054 e1kR3Detach,
8055 /* pfnQueryInterface */
8056 NULL,
8057 /* pfnInitComplete */
8058 NULL,
8059 /* pfnPowerOff */
8060 e1kR3PowerOff,
8061 /* pfnSoftReset */
8062 NULL,
8063
8064 /* u32VersionEnd */
8065 PDM_DEVREG_VERSION
8066};
8067
8068#endif /* IN_RING3 */
8069#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */