VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@81428

Last change on this file since 81428 was 81424, checked in by vboxsync, 5 years ago

DevE1000: VINF_IOM_R3_MMIO_WRITE status code inside #ifndef IN_RING3 as that status code has nothing to do in ring-3. Added 'R3' to the prefix of a bunch of ring-3 functions to make the code simpler to read. bugref:9218

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 330.5 KB
Line 
1/* $Id: DevE1000.cpp 81424 2019-10-21 18:10:31Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2019 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** @name E1000 Build Options
54 * @{ */
55/** @def E1K_INIT_RA0
56 * E1K_INIT_RA0 forces E1000 to set the first entry in the Receive Address filter
57 * table to the MAC address obtained from CFGM. Most guests read the MAC address
58 * from EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on
59 * it being already set (see @bugref{4657}).
60 */
61#define E1K_INIT_RA0
62/** @def E1K_LSC_ON_RESET
63 * E1K_LSC_ON_RESET causes E1000 to generate a Link Status Change
64 * interrupt after a hard reset. This makes the E1K_LSC_ON_SLU option unnecessary.
65 * With the cable unplugged, LSC is triggered for the 82543GC only.
66 */
67#define E1K_LSC_ON_RESET
68/** @def E1K_LSC_ON_SLU
69 * E1K_LSC_ON_SLU causes E1000 to generate Link Status Change interrupt when
70 * the guest driver brings up the link via STATUS.LU bit. Again the only guest
71 * that requires it is Mac OS X (see @bugref{4657}).
72 */
73//#define E1K_LSC_ON_SLU
74/** @def E1K_INIT_LINKUP_DELAY_US
75 * E1K_INIT_LINKUP_DELAY_US prevents the link from coming up while the driver
76 * is still initializing (see @bugref{8624}).
77 */
78#define E1K_INIT_LINKUP_DELAY_US (2000 * 1000)
79/** @def E1K_IMS_INT_DELAY_NS
80 * E1K_IMS_INT_DELAY_NS prevents interrupt storms in Windows guests on enabling
81 * interrupts (see @bugref{8624}).
82 */
83#define E1K_IMS_INT_DELAY_NS 100
84/** @def E1K_TX_DELAY
85 * E1K_TX_DELAY aims to improve the guest-to-host transfer rate for TCP streams
86 * by preventing packets from being sent immediately. It allows several packets
87 * to be sent in one batch, reducing the number of acknowledgments. Note that it
88 * effectively disables the R0 TX path, forcing transmission in R3.
89 */
90//#define E1K_TX_DELAY 150
91/** @def E1K_USE_TX_TIMERS
92 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
93 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
94 * register. Enabling it showed no positive effects on existing guests, so it
95 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
96 * Ethernet Controllers Software Developer’s Manual" for more detailed
97 * explanation.
98 */
99//#define E1K_USE_TX_TIMERS
100/** @def E1K_NO_TAD
101 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
102 * Transmit Absolute Delay timer. This timer sets the maximum time interval
103 * during which TX interrupts can be postponed (delayed). It has no effect
104 * if E1K_USE_TX_TIMERS is not defined.
105 */
106//#define E1K_NO_TAD
107/** @def E1K_REL_DEBUG
108 * E1K_REL_DEBUG enables debug logging at levels 1-3 (E1kLog, E1kLog2, E1kLog3) in release builds.
109 */
110//#define E1K_REL_DEBUG
111/** @def E1K_INT_STATS
112 * E1K_INT_STATS enables collection of internal statistics used for
113 * debugging of delayed interrupts, etc.
114 */
115#define E1K_INT_STATS
116/** @def E1K_WITH_MSI
117 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
118 */
119//#define E1K_WITH_MSI
120/** @def E1K_WITH_TX_CS
121 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
122 */
123#define E1K_WITH_TX_CS
124/** @def E1K_WITH_TXD_CACHE
125 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
126 * single physical memory read (or two if it wraps around the end of TX
127 * descriptor ring). It is required for proper functioning of bandwidth
128 * resource control as it allows computing the exact sizes of packets prior
129 * to allocating their buffers (see @bugref{5582}).
130 */
131#define E1K_WITH_TXD_CACHE
132/** @def E1K_WITH_RXD_CACHE
133 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
134 * single physical memory read (or two if it wraps around the end of RX
135 * descriptor ring). Intel's packet driver for DOS needs this option in
136 * order to work properly (see @bugref{6217}).
137 */
138#define E1K_WITH_RXD_CACHE
139/** @def E1K_WITH_PREREG_MMIO
140 * E1K_WITH_PREREG_MMIO enables new-style MMIO registration and is
141 * currently only used for testing the related PDM, IOM and PGM code. */
142//#define E1K_WITH_PREREG_MMIO
143/* @} */
144/* End of Options ************************************************************/
145
146#ifdef E1K_WITH_TXD_CACHE
147/**
148 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
150 * in the state structure. It limits the number of descriptors loaded in one
151 * batch read. For example, a Linux guest may use up to 20 descriptors per
151 * TSE packet. The largest TSE packet seen (Windows guest) was 45 descriptors.
152 */
153# define E1K_TXD_CACHE_SIZE 64u
154#endif /* E1K_WITH_TXD_CACHE */
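/*
 * Illustrative sketch (not part of the device code): how a batched TX descriptor
 * fetch is split into at most two contiguous reads when it wraps around the end
 * of the ring, as described for E1K_WITH_TXD_CACHE above.  The helper name and
 * parameters are hypothetical; the actual fetch logic lives further down in this
 * file and may differ in details.
 */
#if 0
static void e1kSketchFetchSplit(uint32_t iHead, uint32_t iTail, uint32_t cDescsInRing,
                                uint32_t cCacheSlots, uint32_t *pcFirst, uint32_t *pcSecond)
{
    /* Descriptors made available by the guest (from head up to, not including, tail). */
    uint32_t const cAvail   = iTail >= iHead ? iTail - iHead : cDescsInRing - iHead + iTail;
    /* Never fetch more than the cache can hold. */
    uint32_t const cToFetch = RT_MIN(cAvail, cCacheSlots);
    /* The first read is contiguous up to the end of the ring... */
    *pcFirst  = RT_MIN(cToFetch, cDescsInRing - iHead);
    /* ...and the remainder, if any, wraps around to the start of the ring. */
    *pcSecond = cToFetch - *pcFirst;
}
#endif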
155
156#ifdef E1K_WITH_RXD_CACHE
157/**
158 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
159 * in the state structure. It limits the number of descriptors loaded in one
160 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
161 */
162# define E1K_RXD_CACHE_SIZE 16u
163#endif /* E1K_WITH_RXD_CACHE */
164
165
166/* Little helpers ************************************************************/
167#undef htons
168#undef ntohs
169#undef htonl
170#undef ntohl
171#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
172#define ntohs(x) htons(x)
173#define htonl(x) ASMByteSwapU32(x)
174#define ntohl(x) htonl(x)
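/*
 * Illustrative note (not part of the device code): unlike the libc versions,
 * the helpers above swap bytes unconditionally, i.e. they assume a little-endian
 * host.  A couple of example values:
 */
#if 0
AssertCompile(htons(UINT16_C(0x1234)) == UINT16_C(0x3412));
/* htonl()/ntohl() go through ASMByteSwapU32(), so 0x11223344 becomes 0x44332211. */
#endif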
175
176#ifndef DEBUG
177# ifdef E1K_REL_DEBUG
178# define DEBUG
179# define E1kLog(a) LogRel(a)
180# define E1kLog2(a) LogRel(a)
181# define E1kLog3(a) LogRel(a)
182# define E1kLogX(x, a) LogRel(a)
183//# define E1kLog3(a) do {} while (0)
184# else
185# define E1kLog(a) do {} while (0)
186# define E1kLog2(a) do {} while (0)
187# define E1kLog3(a) do {} while (0)
188# define E1kLogX(x, a) do {} while (0)
189# endif
190#else
191# define E1kLog(a) Log(a)
192# define E1kLog2(a) Log2(a)
193# define E1kLog3(a) Log3(a)
194# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
195//# define E1kLog(a) do {} while (0)
196//# define E1kLog2(a) do {} while (0)
197//# define E1kLog3(a) do {} while (0)
198#endif
199
200#if 0
201# define LOG_ENABLED
202# define E1kLogRel(a) LogRel(a)
203# undef Log6
204# define Log6(a) LogRel(a)
205#else
206# define E1kLogRel(a) do { } while (0)
207#endif
208
209//#undef DEBUG
210
211#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
212
213#define E1K_INC_CNT32(cnt) \
214do { \
215 if (cnt < UINT32_MAX) \
216 cnt++; \
217} while (0)
218
219#define E1K_ADD_CNT64(cntLo, cntHi, val) \
220do { \
221 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
222 uint64_t tmp = u64Cnt; \
223 u64Cnt += val; \
224 if (tmp > u64Cnt ) \
225 u64Cnt = UINT64_MAX; \
226 cntLo = (uint32_t)u64Cnt; \
227 cntHi = (uint32_t)(u64Cnt >> 32); \
228} while (0)
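/*
 * Illustrative sketch (not part of the device code): how E1K_ADD_CNT64 is meant
 * to be used for a statistics counter split across a low/high 32-bit register
 * pair (e.g. GOTCL/GOTCH further down).  The local variables are made up for
 * the example.
 */
#if 0
{
    uint32_t uCntLo = UINT32_C(0xFFFFFFF0);  /* low 32 bits of the counter */
    uint32_t uCntHi = 0;                     /* high 32 bits of the counter */
    E1K_ADD_CNT64(uCntLo, uCntHi, 0x20);     /* 0x00000000FFFFFFF0 + 0x20 */
    /* Now uCntLo == 0x00000010 and uCntHi == 0x00000001.  If the full 64-bit
       value overflowed, the macro would saturate at UINT64_MAX instead of
       wrapping around. */
}
#endif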
229
230#ifdef E1K_INT_STATS
231# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
232#else /* E1K_INT_STATS */
233# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
234#endif /* E1K_INT_STATS */
235
236
237/*****************************************************************************/
238
239typedef uint32_t E1KCHIP;
240#define E1K_CHIP_82540EM 0
241#define E1K_CHIP_82543GC 1
242#define E1K_CHIP_82545EM 2
243
244#ifdef IN_RING3
245/** Different E1000 chips. */
246static const struct E1kChips
247{
248 uint16_t uPCIVendorId;
249 uint16_t uPCIDeviceId;
250 uint16_t uPCISubsystemVendorId;
251 uint16_t uPCISubsystemId;
252 const char *pcszName;
253} g_aChips[] =
254{
255 /* Vendor Device SSVendor SubSys Name */
256 { 0x8086,
257 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
258# ifdef E1K_WITH_MSI
259 0x105E,
260# else
261 0x100E,
262# endif
263 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
264 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
265 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
266};
267#endif /* IN_RING3 */
268
269
270/* The size of register area mapped to I/O space */
271#define E1K_IOPORT_SIZE 0x8
272/* The size of memory-mapped register area */
273#define E1K_MM_SIZE 0x20000
274
275#define E1K_MAX_TX_PKT_SIZE 16288
276#define E1K_MAX_RX_PKT_SIZE 16384
277
278/*****************************************************************************/
279
280/** Gets the specified bits from the register. */
281#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
282#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
283#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
284#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
285#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
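/*
 * Illustrative example (not part of the device code): the accessors above rely
 * on the <REG>_<FIELD>_MASK / <REG>_<FIELD>_SHIFT naming convention used by the
 * register bit definitions below.  With the EERD constants defined a few lines
 * further down, for instance:
 */
#if 0
/* GET_BITS_V(val, EERD, DATA) expands to ((val & EERD_DATA_MASK) >> EERD_DATA_SHIFT). */
uint32_t const uEerd  = UINT32_C(0xBEEF0310);          /* made-up register value */
uint16_t const u16Dat = GET_BITS_V(uEerd, EERD, DATA); /* 0xBEEF - EEPROM data word */
uint8_t  const u8Addr = GET_BITS_V(uEerd, EERD, ADDR); /* 0x03   - EEPROM word address */
#endif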
286
287#define CTRL_SLU UINT32_C(0x00000040)
288#define CTRL_MDIO UINT32_C(0x00100000)
289#define CTRL_MDC UINT32_C(0x00200000)
290#define CTRL_MDIO_DIR UINT32_C(0x01000000)
291#define CTRL_MDC_DIR UINT32_C(0x02000000)
292#define CTRL_RESET UINT32_C(0x04000000)
293#define CTRL_VME UINT32_C(0x40000000)
294
295#define STATUS_LU UINT32_C(0x00000002)
296#define STATUS_TXOFF UINT32_C(0x00000010)
297
298#define EECD_EE_WIRES UINT32_C(0x0F)
299#define EECD_EE_REQ UINT32_C(0x40)
300#define EECD_EE_GNT UINT32_C(0x80)
301
302#define EERD_START UINT32_C(0x00000001)
303#define EERD_DONE UINT32_C(0x00000010)
304#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
305#define EERD_DATA_SHIFT 16
306#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
307#define EERD_ADDR_SHIFT 8
308
309#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
310#define MDIC_DATA_SHIFT 0
311#define MDIC_REG_MASK UINT32_C(0x001F0000)
312#define MDIC_REG_SHIFT 16
313#define MDIC_PHY_MASK UINT32_C(0x03E00000)
314#define MDIC_PHY_SHIFT 21
315#define MDIC_OP_WRITE UINT32_C(0x04000000)
316#define MDIC_OP_READ UINT32_C(0x08000000)
317#define MDIC_READY UINT32_C(0x10000000)
318#define MDIC_INT_EN UINT32_C(0x20000000)
319#define MDIC_ERROR UINT32_C(0x40000000)
320
321#define TCTL_EN UINT32_C(0x00000002)
322#define TCTL_PSP UINT32_C(0x00000008)
323
324#define RCTL_EN UINT32_C(0x00000002)
325#define RCTL_UPE UINT32_C(0x00000008)
326#define RCTL_MPE UINT32_C(0x00000010)
327#define RCTL_LPE UINT32_C(0x00000020)
328#define RCTL_LBM_MASK UINT32_C(0x000000C0)
329#define RCTL_LBM_SHIFT 6
330#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
331#define RCTL_RDMTS_SHIFT 8
332#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
333#define RCTL_MO_MASK UINT32_C(0x00003000)
334#define RCTL_MO_SHIFT 12
335#define RCTL_BAM UINT32_C(0x00008000)
336#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
337#define RCTL_BSIZE_SHIFT 16
338#define RCTL_VFE UINT32_C(0x00040000)
339#define RCTL_CFIEN UINT32_C(0x00080000)
340#define RCTL_CFI UINT32_C(0x00100000)
341#define RCTL_BSEX UINT32_C(0x02000000)
342#define RCTL_SECRC UINT32_C(0x04000000)
343
344#define ICR_TXDW UINT32_C(0x00000001)
345#define ICR_TXQE UINT32_C(0x00000002)
346#define ICR_LSC UINT32_C(0x00000004)
347#define ICR_RXDMT0 UINT32_C(0x00000010)
348#define ICR_RXT0 UINT32_C(0x00000080)
349#define ICR_TXD_LOW UINT32_C(0x00008000)
350#define RDTR_FPD UINT32_C(0x80000000)
351
352#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
353typedef struct
354{
355 unsigned rxa : 7;
356 unsigned rxa_r : 9;
357 unsigned txa : 16;
358} PBAST;
359AssertCompileSize(PBAST, 4);
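/*
 * Illustrative example (not part of the device code): PBA_st simply re-interprets
 * the 32-bit PBA register as the PBAST bit-field layout above, so the RX/TX
 * packet buffer split can be read without manual masking and shifting.  It is
 * assumed here that pThis is in scope, as for the register access macros below,
 * and that the allocations are expressed in KB as per the 8254x documentation.
 */
#if 0
uint32_t const cRxAllocKb = PBA_st->rxa;    /* receive packet buffer allocation  */
uint32_t const cTxAllocKb = PBA_st->txa;    /* transmit packet buffer allocation */
#endif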
360
361#define TXDCTL_WTHRESH_MASK 0x003F0000
362#define TXDCTL_WTHRESH_SHIFT 16
363#define TXDCTL_LWTHRESH_MASK 0xFE000000
364#define TXDCTL_LWTHRESH_SHIFT 25
365
366#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
367#define RXCSUM_PCSS_SHIFT 0
368
369/** @name Register access macros
370 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
371 * @{ */
372#define CTRL pThis->auRegs[CTRL_IDX]
373#define STATUS pThis->auRegs[STATUS_IDX]
374#define EECD pThis->auRegs[EECD_IDX]
375#define EERD pThis->auRegs[EERD_IDX]
376#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
377#define FLA pThis->auRegs[FLA_IDX]
378#define MDIC pThis->auRegs[MDIC_IDX]
379#define FCAL pThis->auRegs[FCAL_IDX]
380#define FCAH pThis->auRegs[FCAH_IDX]
381#define FCT pThis->auRegs[FCT_IDX]
382#define VET pThis->auRegs[VET_IDX]
383#define ICR pThis->auRegs[ICR_IDX]
384#define ITR pThis->auRegs[ITR_IDX]
385#define ICS pThis->auRegs[ICS_IDX]
386#define IMS pThis->auRegs[IMS_IDX]
387#define IMC pThis->auRegs[IMC_IDX]
388#define RCTL pThis->auRegs[RCTL_IDX]
389#define FCTTV pThis->auRegs[FCTTV_IDX]
390#define TXCW pThis->auRegs[TXCW_IDX]
391#define RXCW pThis->auRegs[RXCW_IDX]
392#define TCTL pThis->auRegs[TCTL_IDX]
393#define TIPG pThis->auRegs[TIPG_IDX]
394#define AIFS pThis->auRegs[AIFS_IDX]
395#define LEDCTL pThis->auRegs[LEDCTL_IDX]
396#define PBA pThis->auRegs[PBA_IDX]
397#define FCRTL pThis->auRegs[FCRTL_IDX]
398#define FCRTH pThis->auRegs[FCRTH_IDX]
399#define RDFH pThis->auRegs[RDFH_IDX]
400#define RDFT pThis->auRegs[RDFT_IDX]
401#define RDFHS pThis->auRegs[RDFHS_IDX]
402#define RDFTS pThis->auRegs[RDFTS_IDX]
403#define RDFPC pThis->auRegs[RDFPC_IDX]
404#define RDBAL pThis->auRegs[RDBAL_IDX]
405#define RDBAH pThis->auRegs[RDBAH_IDX]
406#define RDLEN pThis->auRegs[RDLEN_IDX]
407#define RDH pThis->auRegs[RDH_IDX]
408#define RDT pThis->auRegs[RDT_IDX]
409#define RDTR pThis->auRegs[RDTR_IDX]
410#define RXDCTL pThis->auRegs[RXDCTL_IDX]
411#define RADV pThis->auRegs[RADV_IDX]
412#define RSRPD pThis->auRegs[RSRPD_IDX]
413#define TXDMAC pThis->auRegs[TXDMAC_IDX]
414#define TDFH pThis->auRegs[TDFH_IDX]
415#define TDFT pThis->auRegs[TDFT_IDX]
416#define TDFHS pThis->auRegs[TDFHS_IDX]
417#define TDFTS pThis->auRegs[TDFTS_IDX]
418#define TDFPC pThis->auRegs[TDFPC_IDX]
419#define TDBAL pThis->auRegs[TDBAL_IDX]
420#define TDBAH pThis->auRegs[TDBAH_IDX]
421#define TDLEN pThis->auRegs[TDLEN_IDX]
422#define TDH pThis->auRegs[TDH_IDX]
423#define TDT pThis->auRegs[TDT_IDX]
424#define TIDV pThis->auRegs[TIDV_IDX]
425#define TXDCTL pThis->auRegs[TXDCTL_IDX]
426#define TADV pThis->auRegs[TADV_IDX]
427#define TSPMT pThis->auRegs[TSPMT_IDX]
428#define CRCERRS pThis->auRegs[CRCERRS_IDX]
429#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
430#define SYMERRS pThis->auRegs[SYMERRS_IDX]
431#define RXERRC pThis->auRegs[RXERRC_IDX]
432#define MPC pThis->auRegs[MPC_IDX]
433#define SCC pThis->auRegs[SCC_IDX]
434#define ECOL pThis->auRegs[ECOL_IDX]
435#define MCC pThis->auRegs[MCC_IDX]
436#define LATECOL pThis->auRegs[LATECOL_IDX]
437#define COLC pThis->auRegs[COLC_IDX]
438#define DC pThis->auRegs[DC_IDX]
439#define TNCRS pThis->auRegs[TNCRS_IDX]
440/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
441#define CEXTERR pThis->auRegs[CEXTERR_IDX]
442#define RLEC pThis->auRegs[RLEC_IDX]
443#define XONRXC pThis->auRegs[XONRXC_IDX]
444#define XONTXC pThis->auRegs[XONTXC_IDX]
445#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
446#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
447#define FCRUC pThis->auRegs[FCRUC_IDX]
448#define PRC64 pThis->auRegs[PRC64_IDX]
449#define PRC127 pThis->auRegs[PRC127_IDX]
450#define PRC255 pThis->auRegs[PRC255_IDX]
451#define PRC511 pThis->auRegs[PRC511_IDX]
452#define PRC1023 pThis->auRegs[PRC1023_IDX]
453#define PRC1522 pThis->auRegs[PRC1522_IDX]
454#define GPRC pThis->auRegs[GPRC_IDX]
455#define BPRC pThis->auRegs[BPRC_IDX]
456#define MPRC pThis->auRegs[MPRC_IDX]
457#define GPTC pThis->auRegs[GPTC_IDX]
458#define GORCL pThis->auRegs[GORCL_IDX]
459#define GORCH pThis->auRegs[GORCH_IDX]
460#define GOTCL pThis->auRegs[GOTCL_IDX]
461#define GOTCH pThis->auRegs[GOTCH_IDX]
462#define RNBC pThis->auRegs[RNBC_IDX]
463#define RUC pThis->auRegs[RUC_IDX]
464#define RFC pThis->auRegs[RFC_IDX]
465#define ROC pThis->auRegs[ROC_IDX]
466#define RJC pThis->auRegs[RJC_IDX]
467#define MGTPRC pThis->auRegs[MGTPRC_IDX]
468#define MGTPDC pThis->auRegs[MGTPDC_IDX]
469#define MGTPTC pThis->auRegs[MGTPTC_IDX]
470#define TORL pThis->auRegs[TORL_IDX]
471#define TORH pThis->auRegs[TORH_IDX]
472#define TOTL pThis->auRegs[TOTL_IDX]
473#define TOTH pThis->auRegs[TOTH_IDX]
474#define TPR pThis->auRegs[TPR_IDX]
475#define TPT pThis->auRegs[TPT_IDX]
476#define PTC64 pThis->auRegs[PTC64_IDX]
477#define PTC127 pThis->auRegs[PTC127_IDX]
478#define PTC255 pThis->auRegs[PTC255_IDX]
479#define PTC511 pThis->auRegs[PTC511_IDX]
480#define PTC1023 pThis->auRegs[PTC1023_IDX]
481#define PTC1522 pThis->auRegs[PTC1522_IDX]
482#define MPTC pThis->auRegs[MPTC_IDX]
483#define BPTC pThis->auRegs[BPTC_IDX]
484#define TSCTC pThis->auRegs[TSCTC_IDX]
485#define TSCTFC pThis->auRegs[TSCTFC_IDX]
486#define RXCSUM pThis->auRegs[RXCSUM_IDX]
487#define WUC pThis->auRegs[WUC_IDX]
488#define WUFC pThis->auRegs[WUFC_IDX]
489#define WUS pThis->auRegs[WUS_IDX]
490#define MANC pThis->auRegs[MANC_IDX]
491#define IPAV pThis->auRegs[IPAV_IDX]
492#define WUPL pThis->auRegs[WUPL_IDX]
493/** @} */
494
495/**
496 * Indices of memory-mapped registers in register table.
497 */
498typedef enum
499{
500 CTRL_IDX,
501 STATUS_IDX,
502 EECD_IDX,
503 EERD_IDX,
504 CTRL_EXT_IDX,
505 FLA_IDX,
506 MDIC_IDX,
507 FCAL_IDX,
508 FCAH_IDX,
509 FCT_IDX,
510 VET_IDX,
511 ICR_IDX,
512 ITR_IDX,
513 ICS_IDX,
514 IMS_IDX,
515 IMC_IDX,
516 RCTL_IDX,
517 FCTTV_IDX,
518 TXCW_IDX,
519 RXCW_IDX,
520 TCTL_IDX,
521 TIPG_IDX,
522 AIFS_IDX,
523 LEDCTL_IDX,
524 PBA_IDX,
525 FCRTL_IDX,
526 FCRTH_IDX,
527 RDFH_IDX,
528 RDFT_IDX,
529 RDFHS_IDX,
530 RDFTS_IDX,
531 RDFPC_IDX,
532 RDBAL_IDX,
533 RDBAH_IDX,
534 RDLEN_IDX,
535 RDH_IDX,
536 RDT_IDX,
537 RDTR_IDX,
538 RXDCTL_IDX,
539 RADV_IDX,
540 RSRPD_IDX,
541 TXDMAC_IDX,
542 TDFH_IDX,
543 TDFT_IDX,
544 TDFHS_IDX,
545 TDFTS_IDX,
546 TDFPC_IDX,
547 TDBAL_IDX,
548 TDBAH_IDX,
549 TDLEN_IDX,
550 TDH_IDX,
551 TDT_IDX,
552 TIDV_IDX,
553 TXDCTL_IDX,
554 TADV_IDX,
555 TSPMT_IDX,
556 CRCERRS_IDX,
557 ALGNERRC_IDX,
558 SYMERRS_IDX,
559 RXERRC_IDX,
560 MPC_IDX,
561 SCC_IDX,
562 ECOL_IDX,
563 MCC_IDX,
564 LATECOL_IDX,
565 COLC_IDX,
566 DC_IDX,
567 TNCRS_IDX,
568 SEC_IDX,
569 CEXTERR_IDX,
570 RLEC_IDX,
571 XONRXC_IDX,
572 XONTXC_IDX,
573 XOFFRXC_IDX,
574 XOFFTXC_IDX,
575 FCRUC_IDX,
576 PRC64_IDX,
577 PRC127_IDX,
578 PRC255_IDX,
579 PRC511_IDX,
580 PRC1023_IDX,
581 PRC1522_IDX,
582 GPRC_IDX,
583 BPRC_IDX,
584 MPRC_IDX,
585 GPTC_IDX,
586 GORCL_IDX,
587 GORCH_IDX,
588 GOTCL_IDX,
589 GOTCH_IDX,
590 RNBC_IDX,
591 RUC_IDX,
592 RFC_IDX,
593 ROC_IDX,
594 RJC_IDX,
595 MGTPRC_IDX,
596 MGTPDC_IDX,
597 MGTPTC_IDX,
598 TORL_IDX,
599 TORH_IDX,
600 TOTL_IDX,
601 TOTH_IDX,
602 TPR_IDX,
603 TPT_IDX,
604 PTC64_IDX,
605 PTC127_IDX,
606 PTC255_IDX,
607 PTC511_IDX,
608 PTC1023_IDX,
609 PTC1522_IDX,
610 MPTC_IDX,
611 BPTC_IDX,
612 TSCTC_IDX,
613 TSCTFC_IDX,
614 RXCSUM_IDX,
615 WUC_IDX,
616 WUFC_IDX,
617 WUS_IDX,
618 MANC_IDX,
619 IPAV_IDX,
620 WUPL_IDX,
621 MTA_IDX,
622 RA_IDX,
623 VFTA_IDX,
624 IP4AT_IDX,
625 IP6AT_IDX,
626 WUPM_IDX,
627 FFLT_IDX,
628 FFMT_IDX,
629 FFVT_IDX,
630 PBM_IDX,
631 RA_82542_IDX,
632 MTA_82542_IDX,
633 VFTA_82542_IDX,
634 E1K_NUM_OF_REGS
635} E1kRegIndex;
636
637#define E1K_NUM_OF_32BIT_REGS MTA_IDX
638/** The number of registers with strictly increasing offset. */
639#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
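/*
 * Illustrative sketch (not part of the device code): because the first
 * E1K_NUM_OF_BINARY_SEARCHABLE entries of the register map (g_aE1kRegMap,
 * declared further down) have strictly increasing offsets, an MMIO offset can
 * be resolved to a register index with a plain binary search.  The helper name
 * is made up; the actual lookup routine in this file may differ in details.
 */
#if 0
static int e1kSketchLookupReg(uint32_t offReg)
{
    int iLo = 0;
    int iHi = E1K_NUM_OF_BINARY_SEARCHABLE - 1;
    while (iLo <= iHi)
    {
        int const iMid = iLo + (iHi - iLo) / 2;
        if (offReg < g_aE1kRegMap[iMid].offset)
            iHi = iMid - 1;
        else if (offReg >= g_aE1kRegMap[iMid].offset + g_aE1kRegMap[iMid].size)
            iLo = iMid + 1;
        else
            return iMid;    /* The offset falls within this register. */
    }
    return -1;              /* Not a binary-searchable register. */
}
#endif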
640
641
642/**
643 * Define E1000-specific EEPROM layout.
644 */
645struct E1kEEPROM
646{
647 public:
648 EEPROM93C46 eeprom;
649
650#ifdef IN_RING3
651 /**
652 * Initialize EEPROM content.
653 *
654 * @param macAddr MAC address of E1000.
655 */
656 void init(RTMAC &macAddr)
657 {
658 eeprom.init();
659 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
660 eeprom.m_au16Data[0x04] = 0xFFFF;
661 /*
662 * bit 3 - full support for power management
663 * bit 10 - full duplex
664 */
665 eeprom.m_au16Data[0x0A] = 0x4408;
666 eeprom.m_au16Data[0x0B] = 0x001E;
667 eeprom.m_au16Data[0x0C] = 0x8086;
668 eeprom.m_au16Data[0x0D] = 0x100E;
669 eeprom.m_au16Data[0x0E] = 0x8086;
670 eeprom.m_au16Data[0x0F] = 0x3040;
671 eeprom.m_au16Data[0x21] = 0x7061;
672 eeprom.m_au16Data[0x22] = 0x280C;
673 eeprom.m_au16Data[0x23] = 0x00C8;
674 eeprom.m_au16Data[0x24] = 0x00C8;
675 eeprom.m_au16Data[0x2F] = 0x0602;
676 updateChecksum();
677 };
678
679 /**
680 * Compute the checksum as required by E1000 and store it
681 * in the last word.
682 */
683 void updateChecksum()
684 {
685 uint16_t u16Checksum = 0;
686
687 for (int i = 0; i < eeprom.SIZE-1; i++)
688 u16Checksum += eeprom.m_au16Data[i];
689 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
690 };
691
692 /**
693 * The first 6 bytes of the EEPROM contain the MAC address.
694 *
695 * @param pMac Where to store the MAC address of the E1000.
696 */
697 void getMac(PRTMAC pMac)
698 {
699 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
700 };
701
702 uint32_t read()
703 {
704 return eeprom.read();
705 }
706
707 void write(uint32_t u32Wires)
708 {
709 eeprom.write(u32Wires);
710 }
711
712 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
713 {
714 return eeprom.readWord(u32Addr, pu16Value);
715 }
716
717 int load(PSSMHANDLE pSSM)
718 {
719 return eeprom.load(pSSM);
720 }
721
722 void save(PSSMHANDLE pSSM)
723 {
724 eeprom.save(pSSM);
725 }
726#endif /* IN_RING3 */
727};
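/*
 * Illustrative sketch (not part of the device code): the invariant established
 * by E1kEEPROM::updateChecksum() above is that all EEPROM words sum, modulo
 * 2^16, to 0xBABA, which is what guest drivers verify.  A hypothetical checker:
 */
#if 0
static bool e1kSketchIsEepromChecksumValid(uint16_t const *pau16Words, size_t cWords)
{
    uint16_t u16Sum = 0;
    for (size_t i = 0; i < cWords; i++)
        u16Sum = (uint16_t)(u16Sum + pau16Words[i]);    /* 16-bit wrap-around sum */
    return u16Sum == 0xBABA;
}
#endif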
728
729
730#define E1K_SPEC_VLAN(s) (s & 0xFFF)
731#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
732#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
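/*
 * Illustrative example (not part of the device code): decoding a VLAN TCI value
 * with the macros above.  The value 0xE07B is made up for the example.
 */
#if 0
uint16_t const u16Special = UINT16_C(0xE07B);
/* E1K_SPEC_VLAN(u16Special) == 123  (VLAN identifier, low 12 bits)       */
/* E1K_SPEC_CFI(u16Special)  == 0    (canonical format indicator, bit 12) */
/* E1K_SPEC_PRI(u16Special)  == 7    (802.1p priority, top 3 bits)        */
#endif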
733
734struct E1kRxDStatus
735{
736 /** @name Descriptor Status field (3.2.3.1)
737 * @{ */
738 unsigned fDD : 1; /**< Descriptor Done. */
739 unsigned fEOP : 1; /**< End of packet. */
740 unsigned fIXSM : 1; /**< Ignore checksum indication. */
741 unsigned fVP : 1; /**< VLAN, matches VET. */
742 unsigned : 1;
743 unsigned fTCPCS : 1; /**< TCP Checksum calculated on the packet. */
744 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
745 unsigned fPIF : 1; /**< Passed in-exact filter */
746 /** @} */
747 /** @name Descriptor Errors field (3.2.3.2)
748 * (Only valid when fEOP and fDD are set.)
749 * @{ */
750 unsigned fCE : 1; /**< CRC or alignment error. */
751 unsigned : 4; /**< Reserved, varies with different models... */
752 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
753 unsigned fIPE : 1; /**< IP Checksum error. */
754 unsigned fRXE : 1; /**< RX Data error. */
755 /** @} */
756 /** @name Descriptor Special field (3.2.3.3)
757 * @{ */
758 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
759 /** @} */
760};
761typedef struct E1kRxDStatus E1KRXDST;
762
763struct E1kRxDesc_st
764{
765 uint64_t u64BufAddr; /**< Address of data buffer */
766 uint16_t u16Length; /**< Length of data in buffer */
767 uint16_t u16Checksum; /**< Packet checksum */
768 E1KRXDST status;
769};
770typedef struct E1kRxDesc_st E1KRXDESC;
771AssertCompileSize(E1KRXDESC, 16);
772
773#define E1K_DTYP_LEGACY -1
774#define E1K_DTYP_CONTEXT 0
775#define E1K_DTYP_DATA 1
776
777struct E1kTDLegacy
778{
779 uint64_t u64BufAddr; /**< Address of data buffer */
780 struct TDLCmd_st
781 {
782 unsigned u16Length : 16;
783 unsigned u8CSO : 8;
784 /* CMD field : 8 */
785 unsigned fEOP : 1;
786 unsigned fIFCS : 1;
787 unsigned fIC : 1;
788 unsigned fRS : 1;
789 unsigned fRPS : 1;
790 unsigned fDEXT : 1;
791 unsigned fVLE : 1;
792 unsigned fIDE : 1;
793 } cmd;
794 struct TDLDw3_st
795 {
796 /* STA field */
797 unsigned fDD : 1;
798 unsigned fEC : 1;
799 unsigned fLC : 1;
800 unsigned fTURSV : 1;
801 /* RSV field */
802 unsigned u4RSV : 4;
803 /* CSS field */
804 unsigned u8CSS : 8;
805 /* Special field*/
806 unsigned u16Special: 16;
807 } dw3;
808};
809
810/**
811 * TCP/IP Context Transmit Descriptor, section 3.3.6.
812 */
813struct E1kTDContext
814{
815 struct CheckSum_st
816 {
817 /** TSE: Header start. !TSE: Checksum start. */
818 unsigned u8CSS : 8;
819 /** Checksum offset - where to store it. */
820 unsigned u8CSO : 8;
821 /** Checksum ending (inclusive) offset, 0 = end of packet. */
822 unsigned u16CSE : 16;
823 } ip;
824 struct CheckSum_st tu;
825 struct TDCDw2_st
826 {
827 /** TSE: The total number of payload bytes for this context. Sans header. */
828 unsigned u20PAYLEN : 20;
829 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
830 unsigned u4DTYP : 4;
831 /** TUCMD field, 8 bits
832 * @{ */
833 /** TSE: TCP (set) or UDP (clear). */
834 unsigned fTCP : 1;
835 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
836 * the IP header. Does not affect the checksumming.
837 * @remarks 82544GC/EI interprets a cleared field differently. */
838 unsigned fIP : 1;
839 /** TSE: TCP segmentation enable. When clear, the context describes plain checksum offloading. */
840 unsigned fTSE : 1;
841 /** Report status (only applies to dw3.fDD for here). */
842 unsigned fRS : 1;
843 /** Reserved, MBZ. */
844 unsigned fRSV1 : 1;
845 /** Descriptor extension, must be set for this descriptor type. */
846 unsigned fDEXT : 1;
847 /** Reserved, MBZ. */
848 unsigned fRSV2 : 1;
849 /** Interrupt delay enable. */
850 unsigned fIDE : 1;
851 /** @} */
852 } dw2;
853 struct TDCDw3_st
854 {
855 /** Descriptor Done. */
856 unsigned fDD : 1;
857 /** Reserved, MBZ. */
858 unsigned u7RSV : 7;
859 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
860 unsigned u8HDRLEN : 8;
861 /** TSO: Maximum segment size. */
862 unsigned u16MSS : 16;
863 } dw3;
864};
865typedef struct E1kTDContext E1KTXCTX;
866
867/**
868 * TCP/IP Data Transmit Descriptor, section 3.3.7.
869 */
870struct E1kTDData
871{
872 uint64_t u64BufAddr; /**< Address of data buffer */
873 struct TDDCmd_st
874 {
875 /** The total length of data pointed to by this descriptor. */
876 unsigned u20DTALEN : 20;
877 /** The descriptor type - E1K_DTYP_DATA (1). */
878 unsigned u4DTYP : 4;
879 /** @name DCMD field, 8 bits (3.3.7.1).
880 * @{ */
881 /** End of packet. Note TSCTFC update. */
882 unsigned fEOP : 1;
883 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
884 unsigned fIFCS : 1;
885 /** Use the TSE context when set and the normal when clear. */
886 unsigned fTSE : 1;
887 /** Report status (dw3.STA). */
888 unsigned fRS : 1;
889 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
890 unsigned fRPS : 1;
891 /** Descriptor extension, must be set for this descriptor type. */
892 unsigned fDEXT : 1;
893 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
894 * Insert dw3.SPECIAL after ethernet header. */
895 unsigned fVLE : 1;
896 /** Interrupt delay enable. */
897 unsigned fIDE : 1;
898 /** @} */
899 } cmd;
900 struct TDDDw3_st
901 {
902 /** @name STA field (3.3.7.2)
903 * @{ */
904 unsigned fDD : 1; /**< Descriptor done. */
905 unsigned fEC : 1; /**< Excess collision. */
906 unsigned fLC : 1; /**< Late collision. */
907 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
908 unsigned fTURSV : 1;
909 /** @} */
910 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
911 /** @name POPTS (Packet Option) field (3.3.7.3)
912 * @{ */
913 unsigned fIXSM : 1; /**< Insert IP checksum. */
914 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
915 unsigned u6RSV : 6; /**< Reserved, MBZ. */
916 /** @} */
917 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
918 * Requires fEOP, fVLE and CTRL.VME to be set.
919 * @{ */
920 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
921 /** @} */
922 } dw3;
923};
924typedef struct E1kTDData E1KTXDAT;
925
926union E1kTxDesc
927{
928 struct E1kTDLegacy legacy;
929 struct E1kTDContext context;
930 struct E1kTDData data;
931};
932typedef union E1kTxDesc E1KTXDESC;
933AssertCompileSize(E1KTXDESC, 16);
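/*
 * Illustrative sketch (not part of the device code): how the three descriptor
 * layouts in the union are told apart.  DEXT sits at the same bit position in
 * all of them; DEXT=0 means a legacy descriptor, and for extended descriptors
 * the DTYP field selects context (0) or data (1).  The helper name is made up;
 * the actual transmit code in this file may do this differently.
 */
#if 0
static int e1kSketchGetDescType(E1KTXDESC const *pDesc)
{
    if (!pDesc->legacy.cmd.fDEXT)
        return E1K_DTYP_LEGACY;
    return pDesc->data.cmd.u4DTYP == E1K_DTYP_DATA ? E1K_DTYP_DATA : E1K_DTYP_CONTEXT;
}
#endif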
934
935#define RA_CTL_AS 0x0003
936#define RA_CTL_AV 0x8000
937
938union E1kRecAddr
939{
940 uint32_t au32[32];
941 struct RAArray
942 {
943 uint8_t addr[6];
944 uint16_t ctl;
945 } array[16];
946};
947typedef struct E1kRecAddr::RAArray E1KRAELEM;
948typedef union E1kRecAddr E1KRA;
949AssertCompileSize(E1KRA, 8*16);
950
951#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
952#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
953#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
954#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
955
956/** @todo use+extend RTNETIPV4 */
957struct E1kIpHeader
958{
959 /* type of service / version / header length */
960 uint16_t tos_ver_hl;
961 /* total length */
962 uint16_t total_len;
963 /* identification */
964 uint16_t ident;
965 /* fragment offset field */
966 uint16_t offset;
967 /* time to live / protocol*/
968 uint16_t ttl_proto;
969 /* checksum */
970 uint16_t chksum;
971 /* source IP address */
972 uint32_t src;
973 /* destination IP address */
974 uint32_t dest;
975};
976AssertCompileSize(struct E1kIpHeader, 20);
977
978#define E1K_TCP_FIN UINT16_C(0x01)
979#define E1K_TCP_SYN UINT16_C(0x02)
980#define E1K_TCP_RST UINT16_C(0x04)
981#define E1K_TCP_PSH UINT16_C(0x08)
982#define E1K_TCP_ACK UINT16_C(0x10)
983#define E1K_TCP_URG UINT16_C(0x20)
984#define E1K_TCP_ECE UINT16_C(0x40)
985#define E1K_TCP_CWR UINT16_C(0x80)
986#define E1K_TCP_FLAGS UINT16_C(0x3f)
987
988/** @todo use+extend RTNETTCP */
989struct E1kTcpHeader
990{
991 uint16_t src;
992 uint16_t dest;
993 uint32_t seqno;
994 uint32_t ackno;
995 uint16_t hdrlen_flags;
996 uint16_t wnd;
997 uint16_t chksum;
998 uint16_t urgp;
999};
1000AssertCompileSize(struct E1kTcpHeader, 20);
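/*
 * Illustrative sketch (not part of the device code): extracting the header
 * length and the flag bits from a TCP header read out of a guest packet (the
 * fields are in network byte order).  The helper name is made up.
 */
#if 0
static unsigned e1kSketchTcpHdrLen(struct E1kTcpHeader const *pTcpHdr, bool *pfSynOrFin)
{
    uint16_t const u16HdrLenFlags = ntohs(pTcpHdr->hdrlen_flags);   /* now in host order */
    uint16_t const fFlags         = u16HdrLenFlags & E1K_TCP_FLAGS; /* FIN..URG only */
    *pfSynOrFin = RT_BOOL(fFlags & (E1K_TCP_SYN | E1K_TCP_FIN));
    return (u16HdrLenFlags >> 12) * 4;      /* data offset is given in 32-bit words */
}
#endif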
1001
1002
1003#ifdef E1K_WITH_TXD_CACHE
1004/** The current Saved state version. */
1005# define E1K_SAVEDSTATE_VERSION 4
1006/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
1007# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
1008#else /* !E1K_WITH_TXD_CACHE */
1009/** The current Saved state version. */
1010# define E1K_SAVEDSTATE_VERSION 3
1011#endif /* !E1K_WITH_TXD_CACHE */
1012/** Saved state version for VirtualBox 4.1 and earlier.
1013 * These did not include VLAN tag fields. */
1014#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
1015/** Saved state version for VirtualBox 3.0 and earlier.
1016 * This did not include the configuration part nor the E1kEEPROM. */
1017#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
1018
1019/**
1020 * E1000 shared device state.
1021 *
1022 * This is shared between ring-0 and ring-3.
1023 */
1024typedef struct E1KSTATE
1025{
1026 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1027
1028 /** Handle to PCI region \#0, the MMIO region. */
1029 IOMIOPORTHANDLE hMmioRegion;
1030 /** Handle to PCI region \#2, the I/O ports. */
1031 IOMIOPORTHANDLE hIoPorts;
1032
1033 /** Receive Interrupt Delay Timer. */
1034 TMTIMERHANDLE hRIDTimer;
1035 /** Receive Absolute Delay Timer. */
1036 TMTIMERHANDLE hRADTimer;
1037 /** Transmit Interrupt Delay Timer. */
1038 TMTIMERHANDLE hTIDTimer;
1039 /** Transmit Absolute Delay Timer. */
1040 TMTIMERHANDLE hTADTimer;
1041 /** Transmit Delay Timer. */
1042 TMTIMERHANDLE hTXDTimer;
1043 /** Late Interrupt Timer. */
1044 TMTIMERHANDLE hIntTimer;
1045 /** Link Up(/Restore) Timer. */
1046 TMTIMERHANDLE hLUTimer;
1047
1048 /** Transmit task. */
1049 PDMTASKHANDLE hTxTask;
1050
1051 /** Critical section - what is it protecting? */
1052 PDMCRITSECT cs;
1053 /** RX Critical section. */
1054 PDMCRITSECT csRx;
1055#ifdef E1K_WITH_TX_CS
1056 /** TX Critical section. */
1057 PDMCRITSECT csTx;
1058#endif /* E1K_WITH_TX_CS */
1059 /** Base address of memory-mapped registers. */
1060 RTGCPHYS addrMMReg;
1061 /** MAC address obtained from the configuration. */
1062 RTMAC macConfigured;
1063 /** Base port of I/O space region. */
1064 RTIOPORT IOPortBase;
1065 /** EMT: Last time the interrupt was acknowledged. */
1066 uint64_t u64AckedAt;
1067 /** All: Used for eliminating spurious interrupts. */
1068 bool fIntRaised;
1069 /** EMT: false if the cable is disconnected by the GUI. */
1070 bool fCableConnected;
1071 /** EMT: Compute Ethernet CRC for RX packets. */
1072 bool fEthernetCRC;
1073 /** All: throttle interrupts. */
1074 bool fItrEnabled;
1075 /** All: throttle RX interrupts. */
1076 bool fItrRxEnabled;
1077 /** All: Delay TX interrupts using TIDV/TADV. */
1078 bool fTidEnabled;
1079 bool afPadding[2];
1080 /** Link up delay (in milliseconds). */
1081 uint32_t cMsLinkUpDelay;
1082
1083 /** All: Device register storage. */
1084 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1085 /** TX/RX: Status LED. */
1086 PDMLED led;
1087 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1088 uint32_t u32PktNo;
1089
1090 /** EMT: Offset of the register to be read via IO. */
1091 uint32_t uSelectedReg;
1092 /** EMT: Multicast Table Array. */
1093 uint32_t auMTA[128];
1094 /** EMT: Receive Address registers. */
1095 E1KRA aRecAddr;
1096 /** EMT: VLAN filter table array. */
1097 uint32_t auVFTA[128];
1098 /** EMT: Receive buffer size. */
1099 uint16_t u16RxBSize;
1100 /** EMT: Locked state -- no state alteration possible. */
1101 bool fLocked;
1102 /** EMT: */
1103 bool fDelayInts;
1104 /** All: */
1105 bool fIntMaskUsed;
1106
1107 /** N/A: */
1108 bool volatile fMaybeOutOfSpace;
1109 /** EMT: Gets signalled when more RX descriptors become available. */
1110 SUPSEMEVENT hEventMoreRxDescAvail;
1111#ifdef E1K_WITH_RXD_CACHE
1112 /** RX: Fetched RX descriptors. */
1113 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1114 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1115 /** RX: Actual number of fetched RX descriptors. */
1116 uint32_t nRxDFetched;
1117 /** RX: Index in cache of RX descriptor being processed. */
1118 uint32_t iRxDCurrent;
1119#endif /* E1K_WITH_RXD_CACHE */
1120
1121 /** TX: Context used for TCP segmentation packets. */
1122 E1KTXCTX contextTSE;
1123 /** TX: Context used for ordinary packets. */
1124 E1KTXCTX contextNormal;
1125#ifdef E1K_WITH_TXD_CACHE
1126 /** TX: Fetched TX descriptors. */
1127 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1128 /** TX: Actual number of fetched TX descriptors. */
1129 uint8_t nTxDFetched;
1130 /** TX: Index in cache of TX descriptor being processed. */
1131 uint8_t iTxDCurrent;
1132 /** TX: Will this frame be sent as GSO. */
1133 bool fGSO;
1134 /** Alignment padding. */
1135 bool fReserved;
1136 /** TX: Number of bytes in next packet. */
1137 uint32_t cbTxAlloc;
1138
1139#endif /* E1K_WITH_TXD_CACHE */
1140 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1141 * applicable to the current TSE mode. */
1142 PDMNETWORKGSO GsoCtx;
1143 /** Scratch space for holding the loopback / fallback scatter / gather
1144 * descriptor. */
1145 union
1146 {
1147 PDMSCATTERGATHER Sg;
1148 uint8_t padding[8 * sizeof(RTUINTPTR)];
1149 } uTxFallback;
1150 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1151 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1152 /** TX: Number of bytes assembled in TX packet buffer. */
1153 uint16_t u16TxPktLen;
1154 /** TX: False forces segmentation in E1000 instead of sending frames as GSO. */
1155 bool fGSOEnabled;
1156 /** TX: IP checksum has to be inserted if true. */
1157 bool fIPcsum;
1158 /** TX: TCP/UDP checksum has to be inserted if true. */
1159 bool fTCPcsum;
1160 /** TX: VLAN tag has to be inserted if true. */
1161 bool fVTag;
1162 /** TX: TCI part of VLAN tag to be inserted. */
1163 uint16_t u16VTagTCI;
1164 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1165 uint32_t u32PayRemain;
1166 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1167 uint16_t u16HdrRemain;
1168 /** TX TSE fallback: Flags from template header. */
1169 uint16_t u16SavedFlags;
1170 /** TX TSE fallback: Partial checksum from template header. */
1171 uint32_t u32SavedCsum;
1172 /** ?: Emulated controller type. */
1173 E1KCHIP eChip;
1174
1175 /** EMT: Physical interface emulation. */
1176 PHY phy;
1177
1178#if 0
1179 /** Alignment padding. */
1180 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1181#endif
1182
1183 STAMCOUNTER StatReceiveBytes;
1184 STAMCOUNTER StatTransmitBytes;
1185#if defined(VBOX_WITH_STATISTICS)
1186 STAMPROFILEADV StatMMIOReadRZ;
1187 STAMPROFILEADV StatMMIOReadR3;
1188 STAMPROFILEADV StatMMIOWriteRZ;
1189 STAMPROFILEADV StatMMIOWriteR3;
1190 STAMPROFILEADV StatEEPROMRead;
1191 STAMPROFILEADV StatEEPROMWrite;
1192 STAMPROFILEADV StatIOReadRZ;
1193 STAMPROFILEADV StatIOReadR3;
1194 STAMPROFILEADV StatIOWriteRZ;
1195 STAMPROFILEADV StatIOWriteR3;
1196 STAMPROFILEADV StatLateIntTimer;
1197 STAMCOUNTER StatLateInts;
1198 STAMCOUNTER StatIntsRaised;
1199 STAMCOUNTER StatIntsPrevented;
1200 STAMPROFILEADV StatReceive;
1201 STAMPROFILEADV StatReceiveCRC;
1202 STAMPROFILEADV StatReceiveFilter;
1203 STAMPROFILEADV StatReceiveStore;
1204 STAMPROFILEADV StatTransmitRZ;
1205 STAMPROFILEADV StatTransmitR3;
1206 STAMPROFILE StatTransmitSendRZ;
1207 STAMPROFILE StatTransmitSendR3;
1208 STAMPROFILE StatRxOverflow;
1209 STAMCOUNTER StatRxOverflowWakeupRZ;
1210 STAMCOUNTER StatRxOverflowWakeupR3;
1211 STAMCOUNTER StatTxDescCtxNormal;
1212 STAMCOUNTER StatTxDescCtxTSE;
1213 STAMCOUNTER StatTxDescLegacy;
1214 STAMCOUNTER StatTxDescData;
1215 STAMCOUNTER StatTxDescTSEData;
1216 STAMCOUNTER StatTxPathFallback;
1217 STAMCOUNTER StatTxPathGSO;
1218 STAMCOUNTER StatTxPathRegular;
1219 STAMCOUNTER StatPHYAccesses;
1220 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1221 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1222#endif /* VBOX_WITH_STATISTICS */
1223
1224#ifdef E1K_INT_STATS
1225 /* Internal stats */
1226 uint64_t u64ArmedAt;
1227 uint64_t uStatMaxTxDelay;
1228 uint32_t uStatInt;
1229 uint32_t uStatIntTry;
1230 uint32_t uStatIntLower;
1231 uint32_t uStatNoIntICR;
1232 int32_t iStatIntLost;
1233 int32_t iStatIntLostOne;
1234 uint32_t uStatIntIMS;
1235 uint32_t uStatIntSkip;
1236 uint32_t uStatIntLate;
1237 uint32_t uStatIntMasked;
1238 uint32_t uStatIntEarly;
1239 uint32_t uStatIntRx;
1240 uint32_t uStatIntTx;
1241 uint32_t uStatIntICS;
1242 uint32_t uStatIntRDTR;
1243 uint32_t uStatIntRXDMT0;
1244 uint32_t uStatIntTXQE;
1245 uint32_t uStatTxNoRS;
1246 uint32_t uStatTxIDE;
1247 uint32_t uStatTxDelayed;
1248 uint32_t uStatTxDelayExp;
1249 uint32_t uStatTAD;
1250 uint32_t uStatTID;
1251 uint32_t uStatRAD;
1252 uint32_t uStatRID;
1253 uint32_t uStatRxFrm;
1254 uint32_t uStatTxFrm;
1255 uint32_t uStatDescCtx;
1256 uint32_t uStatDescDat;
1257 uint32_t uStatDescLeg;
1258 uint32_t uStatTx1514;
1259 uint32_t uStatTx2962;
1260 uint32_t uStatTx4410;
1261 uint32_t uStatTx5858;
1262 uint32_t uStatTx7306;
1263 uint32_t uStatTx8754;
1264 uint32_t uStatTx16384;
1265 uint32_t uStatTx32768;
1266 uint32_t uStatTxLarge;
1267 uint32_t uStatAlign;
1268#endif /* E1K_INT_STATS */
1269} E1KSTATE;
1270/** Pointer to the E1000 device state. */
1271typedef E1KSTATE *PE1KSTATE;
1272
1273/**
1274 * E1000 ring-3 device state
1275 *
1276 * @implements PDMINETWORKDOWN
1277 * @implements PDMINETWORKCONFIG
1278 * @implements PDMILEDPORTS
1279 */
1280typedef struct E1KSTATER3
1281{
1282 PDMIBASE IBase;
1283 PDMINETWORKDOWN INetworkDown;
1284 PDMINETWORKCONFIG INetworkConfig;
1285 /** LED interface */
1286 PDMILEDPORTS ILeds;
1287 /** Attached network driver. */
1288 R3PTRTYPE(PPDMIBASE) pDrvBase;
1289 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1290
1291 /** Pointer to the shared state. */
1292 R3PTRTYPE(PE1KSTATE) pShared;
1293
1294 /** Device instance. */
1295 PPDMDEVINSR3 pDevInsR3;
1296 /** Attached network driver. */
1297 PPDMINETWORKUPR3 pDrvR3;
1298 /** The scatter / gather buffer used for the current outgoing packet. */
1299 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1300
1301 /** EMT: EEPROM emulation */
1302 E1kEEPROM eeprom;
1303} E1KSTATER3;
1304/** Pointer to the E1000 ring-3 device state. */
1305typedef E1KSTATER3 *PE1KSTATER3;
1306
1307
1308/**
1309 * E1000 ring-0 device state
1310 */
1311typedef struct E1KSTATER0
1312{
1313 /** Device instance. */
1314 PPDMDEVINSR0 pDevInsR0;
1315 /** Attached network driver. */
1316 PPDMINETWORKUPR0 pDrvR0;
1317 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1318 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1319} E1KSTATER0;
1320/** Pointer to the E1000 ring-0 device state. */
1321typedef E1KSTATER0 *PE1KSTATER0;
1322
1323
1324/**
1325 * E1000 raw-mode device state
1326 */
1327typedef struct E1KSTATERC
1328{
1329 /** Device instance. */
1330 PPDMDEVINSRC pDevInsRC;
1331 /** Attached network driver. */
1332 PPDMINETWORKUPRC pDrvRC;
1333 /** The scatter / gather buffer used for the current outgoing packet. */
1334 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1335} E1KSTATERC;
1336/** Pointer to the E1000 raw-mode device state. */
1337typedef E1KSTATERC *PE1KSTATERC;
1338
1339
1340/** @def PE1KSTATECC
1341 * Pointer to the instance data for the current context. */
1342#ifdef IN_RING3
1343typedef E1KSTATER3 E1KSTATECC;
1344typedef PE1KSTATER3 PE1KSTATECC;
1345#elif defined(IN_RING0)
1346typedef E1KSTATER0 E1KSTATECC;
1347typedef PE1KSTATER0 PE1KSTATECC;
1348#elif defined(IN_RC)
1349typedef E1KSTATERC E1KSTATECC;
1350typedef PE1KSTATERC PE1KSTATECC;
1351#else
1352# error "Not IN_RING3, IN_RING0 or IN_RC"
1353#endif
1354
1355
1356#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1357
1358/* Forward declarations ******************************************************/
1359static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread);
1360
1361/**
1362 * E1000 register read handler.
1363 */
1364typedef int (FNE1KREGREAD)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1365/**
1366 * E1000 register write handler.
1367 */
1368typedef int (FNE1KREGWRITE)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1369
1370static FNE1KREGREAD e1kRegReadUnimplemented;
1371static FNE1KREGWRITE e1kRegWriteUnimplemented;
1372static FNE1KREGREAD e1kRegReadAutoClear;
1373static FNE1KREGREAD e1kRegReadDefault;
1374static FNE1KREGWRITE e1kRegWriteDefault;
1375#if 0 /* unused */
1376static FNE1KREGREAD e1kRegReadCTRL;
1377#endif
1378static FNE1KREGWRITE e1kRegWriteCTRL;
1379static FNE1KREGREAD e1kRegReadEECD;
1380static FNE1KREGWRITE e1kRegWriteEECD;
1381static FNE1KREGWRITE e1kRegWriteEERD;
1382static FNE1KREGWRITE e1kRegWriteMDIC;
1383static FNE1KREGREAD e1kRegReadICR;
1384static FNE1KREGWRITE e1kRegWriteICR;
1385static FNE1KREGWRITE e1kRegWriteICS;
1386static FNE1KREGWRITE e1kRegWriteIMS;
1387static FNE1KREGWRITE e1kRegWriteIMC;
1388static FNE1KREGWRITE e1kRegWriteRCTL;
1389static FNE1KREGWRITE e1kRegWritePBA;
1390static FNE1KREGWRITE e1kRegWriteRDT;
1391static FNE1KREGWRITE e1kRegWriteRDTR;
1392static FNE1KREGWRITE e1kRegWriteTDT;
1393static FNE1KREGREAD e1kRegReadMTA;
1394static FNE1KREGWRITE e1kRegWriteMTA;
1395static FNE1KREGREAD e1kRegReadRA;
1396static FNE1KREGWRITE e1kRegWriteRA;
1397static FNE1KREGREAD e1kRegReadVFTA;
1398static FNE1KREGWRITE e1kRegWriteVFTA;
1399
1400/**
1401 * Register map table.
1402 *
1403 * Override pfnRead and pfnWrite to get register-specific behavior.
1404 */
1405static const struct E1kRegMap_st
1406{
1407 /** Register offset in the register space. */
1408 uint32_t offset;
1409 /** Size in bytes. Registers of size > 4 are in fact tables. */
1410 uint32_t size;
1411 /** Readable bits. */
1412 uint32_t readable;
1413 /** Writable bits. */
1414 uint32_t writable;
1415 /** Read callback. */
1416 FNE1KREGREAD *pfnRead;
1417 /** Write callback. */
1418 FNE1KREGWRITE *pfnWrite;
1419 /** Abbreviated name. */
1420 const char *abbrev;
1421 /** Full name. */
1422 const char *name;
1423} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1424{
1425 /* offset size read mask write mask read callback write callback abbrev full name */
1426 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1427 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1428 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1429 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1430 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1431 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1432 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1433 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1434 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1435 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1436 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1437 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1438 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1439 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1440 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1441 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1442 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1443 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1444 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1445 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1446 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1447 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1448 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1449 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1450 { 0x00e00, 0x00004, 0xCFCFCFCF, 0xCFCFCFCF, e1kRegReadDefault , e1kRegWriteDefault , "LEDCTL" , "LED Control" },
1451 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1452 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1453 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1454 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1455 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1456 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1457 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1458 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1459 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1460 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1461 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1462 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1463 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1464 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1465 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1466 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1467 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1468 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1469 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1470 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1471 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1472 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1473 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1474 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1475 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1476 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1477 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1478 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1479 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1480 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1481 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1482 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1483 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1484 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1485 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1486 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1487 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1488 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1489 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1490 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1491 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1492 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1493 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1494 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1495 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1496 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1497 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1498 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1499 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1500 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1501 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1502 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1503 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1504 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1505 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1506 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1507 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1508 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1509 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1510 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1511 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1512 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1513 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1514 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1515 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1516 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1517 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1518 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1519 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1520 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1521 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1522 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1523 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1524 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1525 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1526 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1527 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1528 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1529 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1530 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1531 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1532 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1533 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1534 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1535 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1536 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1537 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1538 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1539 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1540 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1541 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1542 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1543 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1544 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1545 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1546 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1547 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1548 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1549 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1550 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1551 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1552 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1553 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1554 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1555 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1556 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1557 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1558 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1559 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1560 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1561};
1562
1563#ifdef LOG_ENABLED
1564
1565/**
1566 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1567 *
1568 * @remarks The mask has half-byte (not bit) granularity (e.g. 0000000F).
1569 *
1570 * @returns The buffer.
1571 *
1572 * @param   u32         The word to convert into a string.
1573 * @param   mask        Selects which half-bytes to convert.
1574 * @param   buf         Where to put the result (at least 9 bytes including the terminator).
1575 */
1576static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1577{
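    /* Process nibbles from the least significant up, filling the buffer right to left;
       values above 9 map to 'A'..'F' because '7' + 10 == 'A' in ASCII. */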
1578 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1579 {
1580 if (mask & 0xF)
1581 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1582 else
1583 *ptr = '.';
1584 }
1585 buf[8] = 0;
1586 return buf;
1587}
1588
1589/**
1590 * Returns timer name for debug purposes.
1591 *
1592 * @returns The timer name.
1593 *
1594 * @param pThis The device state structure.
1595 * @param hTimer The timer to name.
1596 */
1597DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1598{
1599 if (hTimer == pThis->hTIDTimer)
1600 return "TID";
1601 if (hTimer == pThis->hTADTimer)
1602 return "TAD";
1603 if (hTimer == pThis->hRIDTimer)
1604 return "RID";
1605 if (hTimer == pThis->hRADTimer)
1606 return "RAD";
1607 if (hTimer == pThis->hIntTimer)
1608 return "Int";
1609 if (hTimer == pThis->hTXDTimer)
1610 return "TXD";
1611 if (hTimer == pThis->hLUTimer)
1612 return "LinkUp";
1613 return "unknown";
1614}
1615
1616#endif /* LOG_ENABLED */
1617
1618/**
1619 * Arm a timer.
1620 *
1621 * @param pDevIns The device instance.
1622 * @param pThis Pointer to the device state structure.
1623 * @param hTimer The timer to arm.
1624 * @param uExpireIn Expiration interval in microseconds.
1625 */
1626DECLINLINE(void) e1kArmTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer, uint32_t uExpireIn)
1627{
1628 if (pThis->fLocked)
1629 return;
1630
1631 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1632 pThis->szPrf, e1kGetTimerName(pThis, hTimer), uExpireIn));
1633 int rc = PDMDevHlpTimerSetMicro(pDevIns, hTimer, uExpireIn);
1634 AssertRC(rc);
1635}
1636
1637#ifdef IN_RING3
1638/**
1639 * Cancel a timer.
1640 *
1641 * @param pDevIns The device instance.
1642 * @param pThis Pointer to the device state structure.
1643 * @param   hTimer      The timer to cancel.
1644 */
1645DECLINLINE(void) e1kCancelTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1646{
1647 E1kLog2(("%s Stopping %s timer...\n",
1648 pThis->szPrf, e1kGetTimerName(pThis, hTimer)));
1649 int rc = PDMDevHlpTimerStop(pDevIns, hTimer);
1650 if (RT_FAILURE(rc))
1651         E1kLog2(("%s e1kCancelTimer: PDMDevHlpTimerStop(%s) failed with %Rrc\n",
1652 pThis->szPrf, e1kGetTimerName(pThis, hTimer), rc));
1653 RT_NOREF_PV(pThis);
1654}
1655#endif /* IN_RING3 */
1656
1657#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1658#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1659
1660#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1661#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1662#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
1663
1664#ifndef E1K_WITH_TX_CS
1665# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1666# define e1kCsTxLeave(ps) do { } while (0)
1667#else /* E1K_WITH_TX_CS */
1668# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1669# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1670#endif /* E1K_WITH_TX_CS */
1671
1672
1673/**
1674 * Wakeup the RX thread.
1675 */
1676static void e1kWakeupReceive(PPDMDEVINS pDevIns, PE1KSTATE pThis)
1677{
1678 if ( pThis->fMaybeOutOfSpace
1679 && pThis->hEventMoreRxDescAvail != NIL_SUPSEMEVENT)
1680 {
1681 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatRxOverflowWakeup));
1682 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1683 int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventMoreRxDescAvail);
1684 AssertRC(rc);
1685 }
1686}
1687
1688#ifdef IN_RING3
1689
1690/**
1691 * Hardware reset. Revert all registers to initial values.
1692 *
1693 * @param pDevIns The device instance.
1694 * @param pThis The device state structure.
1695 * @param pThisCC The current context instance data.
1696 */
1697static void e1kR3HardReset(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
1698{
1699 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1700     /* No interrupts should survive device reset, see @bugref{9556}. */
1701 if (pThis->fIntRaised)
1702 {
1703 /* Lower(0) INTA(0) */
1704 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
1705 pThis->fIntRaised = false;
1706 E1kLog(("%s e1kR3HardReset: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
1707 }
1708 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1709 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1710#ifdef E1K_INIT_RA0
1711 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1712 sizeof(pThis->macConfigured.au8));
1713 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1714#endif /* E1K_INIT_RA0 */
1715 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1716 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1717 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1718 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1719 Assert(GET_BITS(RCTL, BSIZE) == 0);
1720 pThis->u16RxBSize = 2048;
1721
1722 uint16_t u16LedCtl = 0x0602; /* LED0/LINK_UP#, LED2/LINK100# */
1723 pThisCC->eeprom.readWord(0x2F, &u16LedCtl); /* Read LEDCTL defaults from EEPROM */
1724 LEDCTL = 0x07008300 | (((uint32_t)u16LedCtl & 0xCF00) << 8) | (u16LedCtl & 0xCF); /* Only LED0 and LED2 defaults come from EEPROM */
1725
1726 /* Reset promiscuous mode */
1727 if (pThisCC->pDrvR3)
1728 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, false);
1729
1730#ifdef E1K_WITH_TXD_CACHE
1731 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1732 if (RT_LIKELY(rc == VINF_SUCCESS))
1733 {
1734 pThis->nTxDFetched = 0;
1735 pThis->iTxDCurrent = 0;
1736 pThis->fGSO = false;
1737 pThis->cbTxAlloc = 0;
1738 e1kCsTxLeave(pThis);
1739 }
1740#endif /* E1K_WITH_TXD_CACHE */
1741#ifdef E1K_WITH_RXD_CACHE
1742 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1743 {
1744 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1745 e1kCsRxLeave(pThis);
1746 }
1747#endif /* E1K_WITH_RXD_CACHE */
1748#ifdef E1K_LSC_ON_RESET
1749 E1kLog(("%s Will trigger LSC in %d seconds...\n",
1750 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
1751 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
1752#endif /* E1K_LSC_ON_RESET */
1753}
1754
1755#endif /* IN_RING3 */
1756
1757/**
1758 * Compute Internet checksum.
1759 *
1760 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1761 *
1762 * @param   pvBuf       The buffer to compute the checksum of.
1763 * @param   cb          The size of the buffer in bytes.
1766 *
1767 * @return The 1's complement of the 1's complement sum.
1768 *
1769 * @thread E1000_TX
1770 */
1771static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1772{
1773 uint32_t csum = 0;
1774 uint16_t *pu16 = (uint16_t *)pvBuf;
1775
1776 while (cb > 1)
1777 {
1778 csum += *pu16++;
1779 cb -= 2;
1780 }
1781 if (cb)
1782 csum += *(uint8_t*)pu16;
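    /* Fold the 32-bit accumulator into 16 bits by repeatedly adding the carry back in. */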
1783 while (csum >> 16)
1784 csum = (csum >> 16) + (csum & 0xFFFF);
1785 return ~csum;
1786}
1787
1788/**
1789 * Dump a packet to debug log.
1790 *
1791 * @param pThis The device state structure.
1792 * @param cpPacket The packet.
1793 * @param cb The size of the packet.
1794 * @param pszText A string denoting direction of packet transfer.
1795 * @thread E1000_TX
1796 */
1797DECLINLINE(void) e1kPacketDump(PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1798{
1799#ifdef DEBUG
1800 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1801 {
1802 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1803 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1804 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1805 {
1806 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1807 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1808 if (*(cpPacket+14+6) == 0x6)
1809 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1810 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1811 }
1812 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1813 {
1814 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1815 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1816 if (*(cpPacket+14+6) == 0x6)
1817 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1818 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1819 }
1820 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1821 e1kCsLeave(pThis);
1822 }
1823#else
1824 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1825 {
1826 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1827 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1828 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1829 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1830 else
1831 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1832 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1833 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1834 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1835 e1kCsLeave(pThis);
1836 }
1837 RT_NOREF2(cb, pszText);
1838#endif
1839}
1840
1841/**
1842 * Determine the type of transmit descriptor.
1843 *
1844 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1845 *
1846 * @param pDesc Pointer to descriptor union.
1847 * @thread E1000_TX
1848 */
1849DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1850{
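    /* The DEXT bit marks extended descriptors; for those the DTYP field distinguishes
       context descriptors from data descriptors. Otherwise it is a legacy descriptor. */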
1851 if (pDesc->legacy.cmd.fDEXT)
1852 return pDesc->context.dw2.u4DTYP;
1853 return E1K_DTYP_LEGACY;
1854}
1855
1856
1857#ifdef E1K_WITH_RXD_CACHE
1858/**
1859 * Return the number of RX descriptors that belong to the hardware.
1860 *
1861 * @returns the number of available descriptors in the RX ring.
1862 * @param pThis The device state structure.
1863 * @thread ???
1864 */
1865DECLINLINE(uint32_t) e1kGetRxLen(PE1KSTATE pThis)
1866{
1867 /**
1868 * Make sure RDT won't change during computation. EMT may modify RDT at
1869 * any moment.
1870 */
1871 uint32_t rdt = RDT;
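    /* Descriptors from RDH up to (but not including) RDT belong to the hardware;
       add the ring size when that span wraps past the end of the ring. */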
1872 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
1873}
1874
1875DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
1876{
1877 return pThis->nRxDFetched > pThis->iRxDCurrent ?
1878 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
1879}
1880
1881DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
1882{
1883 return pThis->iRxDCurrent >= pThis->nRxDFetched;
1884}
1885
1886/**
1887 * Load receive descriptors from guest memory. The caller must own the RX
1888 * critical section.
1889 *
1890 * We need two physical reads in case the tail wrapped around the end of RX
1891 * descriptor ring.
1892 *
1893 * @returns the actual number of descriptors fetched.
1894 * @param pDevIns The device instance.
1895 * @param pThis The device state structure.
1896 * @thread EMT, RX
1897 */
1898DECLINLINE(unsigned) e1kRxDPrefetch(PPDMDEVINS pDevIns, PE1KSTATE pThis)
1899{
1900 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
1901 unsigned nDescsAvailable = e1kGetRxLen(pThis) - e1kRxDInCache(pThis);
1902 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
1903 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
1904 Assert(nDescsTotal != 0);
1905 if (nDescsTotal == 0)
1906 return 0;
1907 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pThis)) % nDescsTotal;
1908 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
1909 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
1910 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
1911 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
1912 nFirstNotLoaded, nDescsInSingleRead));
1913 if (nDescsToFetch == 0)
1914 return 0;
1915 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
1916 PDMDevHlpPhysRead(pDevIns,
1917 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
1918 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
1919 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
1920 // unsigned i, j;
1921 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
1922 // {
1923 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
1924 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
1925 // }
1926 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
1927 pThis->szPrf, nDescsInSingleRead,
1928 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
1929 nFirstNotLoaded, RDLEN, RDH, RDT));
1930 if (nDescsToFetch > nDescsInSingleRead)
1931 {
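        /* The fetch wrapped past the end of the ring -- read the remainder from the ring base. */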
1932 PDMDevHlpPhysRead(pDevIns,
1933 ((uint64_t)RDBAH << 32) + RDBAL,
1934 pFirstEmptyDesc + nDescsInSingleRead,
1935 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
1936 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
1937 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
1938 // {
1939 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
1940 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
1941 // }
1942 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
1943 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
1944 RDBAH, RDBAL));
1945 }
1946 pThis->nRxDFetched += nDescsToFetch;
1947 return nDescsToFetch;
1948}
1949
1950# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
1951/**
1952 * Dump receive descriptor to debug log.
1953 *
1954 * @param pThis The device state structure.
1955 * @param pDesc Pointer to the descriptor.
1956 * @thread E1000_RX
1957 */
1958static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
1959{
1960 RT_NOREF2(pThis, pDesc);
1961 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
1962 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1963 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1964 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1965 pDesc->status.fPIF ? "PIF" : "pif",
1966 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1967 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1968 pDesc->status.fVP ? "VP" : "vp",
1969 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1970 pDesc->status.fEOP ? "EOP" : "eop",
1971 pDesc->status.fDD ? "DD" : "dd",
1972 pDesc->status.fRXE ? "RXE" : "rxe",
1973 pDesc->status.fIPE ? "IPE" : "ipe",
1974 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1975 pDesc->status.fCE ? "CE" : "ce",
1976 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1977 E1K_SPEC_VLAN(pDesc->status.u16Special),
1978 E1K_SPEC_PRI(pDesc->status.u16Special)));
1979}
1980# endif /* IN_RING3 */
1981#endif /* E1K_WITH_RXD_CACHE */
1982
1983/**
1984 * Dump transmit descriptor to debug log.
1985 *
1986 * @param pThis The device state structure.
1987 * @param pDesc Pointer to descriptor union.
1988 * @param pszDir A string denoting direction of descriptor transfer
1989 * @thread E1000_TX
1990 */
1991static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
1992 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1993{
1994 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
1995
1996 /*
1997      * Unfortunately we cannot use our format handler here, as we want R0 logging
1998 * as well.
1999 */
2000 switch (e1kGetDescType(pDesc))
2001 {
2002 case E1K_DTYP_CONTEXT:
2003 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
2004 pThis->szPrf, pszDir, pszDir));
2005 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
2006 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
2007 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
2008 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
2009 pDesc->context.dw2.fIDE ? " IDE":"",
2010 pDesc->context.dw2.fRS ? " RS" :"",
2011 pDesc->context.dw2.fTSE ? " TSE":"",
2012 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
2013 pDesc->context.dw2.fTCP ? "TCP":"UDP",
2014 pDesc->context.dw2.u20PAYLEN,
2015 pDesc->context.dw3.u8HDRLEN,
2016 pDesc->context.dw3.u16MSS,
2017 pDesc->context.dw3.fDD?"DD":""));
2018 break;
2019 case E1K_DTYP_DATA:
2020 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
2021 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
2022 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2023 pDesc->data.u64BufAddr,
2024 pDesc->data.cmd.u20DTALEN));
2025 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
2026 pDesc->data.cmd.fIDE ? " IDE" :"",
2027 pDesc->data.cmd.fVLE ? " VLE" :"",
2028 pDesc->data.cmd.fRPS ? " RPS" :"",
2029 pDesc->data.cmd.fRS ? " RS" :"",
2030 pDesc->data.cmd.fTSE ? " TSE" :"",
2031 pDesc->data.cmd.fIFCS? " IFCS":"",
2032 pDesc->data.cmd.fEOP ? " EOP" :"",
2033 pDesc->data.dw3.fDD ? " DD" :"",
2034 pDesc->data.dw3.fEC ? " EC" :"",
2035 pDesc->data.dw3.fLC ? " LC" :"",
2036 pDesc->data.dw3.fTXSM? " TXSM":"",
2037 pDesc->data.dw3.fIXSM? " IXSM":"",
2038 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
2039 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
2040 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
2041 break;
2042 case E1K_DTYP_LEGACY:
2043 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
2044 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
2045 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2046 pDesc->data.u64BufAddr,
2047 pDesc->legacy.cmd.u16Length));
2048 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
2049 pDesc->legacy.cmd.fIDE ? " IDE" :"",
2050 pDesc->legacy.cmd.fVLE ? " VLE" :"",
2051 pDesc->legacy.cmd.fRPS ? " RPS" :"",
2052 pDesc->legacy.cmd.fRS ? " RS" :"",
2053 pDesc->legacy.cmd.fIC ? " IC" :"",
2054 pDesc->legacy.cmd.fIFCS? " IFCS":"",
2055 pDesc->legacy.cmd.fEOP ? " EOP" :"",
2056 pDesc->legacy.dw3.fDD ? " DD" :"",
2057 pDesc->legacy.dw3.fEC ? " EC" :"",
2058 pDesc->legacy.dw3.fLC ? " LC" :"",
2059 pDesc->legacy.cmd.u8CSO,
2060 pDesc->legacy.dw3.u8CSS,
2061 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
2062 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
2063 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
2064 break;
2065 default:
2066 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
2067 pThis->szPrf, pszDir, pszDir));
2068 break;
2069 }
2070}
2071
2072/**
2073 * Raise an interrupt later.
2074 *
2075 * @param pThis The device state structure.
2076 */
2077DECLINLINE(void) e1kPostponeInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint64_t nsDeadline)
2078{
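    /* If the timer is already armed, keep its (earlier) deadline; otherwise schedule delivery. */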
2079 if (!PDMDevHlpTimerIsActive(pDevIns, pThis->hIntTimer))
2080 PDMDevHlpTimerSetNano(pDevIns, pThis->hIntTimer, nsDeadline);
2081}
2082
2083/**
2084 * Raise interrupt if not masked.
2085 *
2086 * @param   pDevIns     The device instance.
 * @param   pThis       The device state structure.
 * @param   rcBusy      Status code to return when the critical section is busy.
 * @param   u32IntCause Interrupt cause bit(s) to set in ICR.
2087 */
2088static int e1kRaiseInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause = 0)
2089{
2090 int rc = e1kCsEnter(pThis, rcBusy);
2091 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2092 return rc;
2093
2094 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
2095 ICR |= u32IntCause;
2096 if (ICR & IMS)
2097 {
2098 if (pThis->fIntRaised)
2099 {
2100 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
2101 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
2102 pThis->szPrf, ICR & IMS));
2103 }
2104 else
2105 {
2106 uint64_t tsNow = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
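            /* ITR is programmed in 256 ns units; postpone the interrupt if we are still
               within the throttling interval since the last acknowledged interrupt. */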
2107 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
2108 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
2109 {
2110 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
2111 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
2112 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
2113 e1kPostponeInterrupt(pDevIns, pThis, ITR * 256);
2114 }
2115 else
2116 {
2117
2118 /* Since we are delivering the interrupt now
2119 * there is no need to do it later -- stop the timer.
2120 */
2121 PDMDevHlpTimerStop(pDevIns, pThis->hIntTimer);
2122 E1K_INC_ISTAT_CNT(pThis->uStatInt);
2123 STAM_COUNTER_INC(&pThis->StatIntsRaised);
2124 /* Got at least one unmasked interrupt cause */
2125 pThis->fIntRaised = true;
2126 /* Raise(1) INTA(0) */
2127 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
2128 PDMDevHlpPCISetIrq(pDevIns, 0, 1);
2129 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
2130 pThis->szPrf, ICR & IMS));
2131 }
2132 }
2133 }
2134 else
2135 {
2136 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
2137 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
2138 pThis->szPrf, ICR, IMS));
2139 }
2140 e1kCsLeave(pThis);
2141 return VINF_SUCCESS;
2142}
2143
2144/**
2145 * Compute the physical address of the descriptor.
2146 *
2147 * @returns the physical address of the descriptor.
2148 *
2149 * @param baseHigh High-order 32 bits of descriptor table address.
2150 * @param baseLow Low-order 32 bits of descriptor table address.
2151 * @param idxDesc The descriptor index in the table.
2152 */
2153DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
2154{
2155 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
2156 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
2157}
2158
2159#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2160/**
2161 * Advance the head pointer of the receive descriptor queue.
2162 *
2163 * @remarks RDH always points to the next available RX descriptor.
2164 *
2165 * @param pDevIns The device instance.
2166 * @param pThis The device state structure.
2167 */
2168DECLINLINE(void) e1kAdvanceRDH(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2169{
2170 Assert(e1kCsRxIsOwner(pThis));
2171 //e1kCsEnter(pThis, RT_SRC_POS);
2172 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
2173 RDH = 0;
2174#ifdef E1K_WITH_RXD_CACHE
2175 /*
2176 * We need to fetch descriptors now as the guest may advance RDT all the way
2177 * to RDH as soon as we generate RXDMT0 interrupt. This is mostly to provide
2178      * compatibility with Phar Lap ETS, see @bugref{7346}. Note that we do not
2179 * check if the receiver is enabled. It must be, otherwise we won't get here
2180 * in the first place.
2181 *
2182 * Note that we should have moved both RDH and iRxDCurrent by now.
2183 */
2184 if (e1kRxDIsCacheEmpty(pThis))
2185 {
2186 /* Cache is empty, reset it and check if we can fetch more. */
2187 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2188 E1kLog3(("%s e1kAdvanceRDH: Rx cache is empty, RDH=%x RDT=%x "
2189 "iRxDCurrent=%x nRxDFetched=%x\n",
2190 pThis->szPrf, RDH, RDT, pThis->iRxDCurrent, pThis->nRxDFetched));
2191 e1kRxDPrefetch(pDevIns, pThis);
2192 }
2193#endif /* E1K_WITH_RXD_CACHE */
2194 /*
2195 * Compute current receive queue length and fire RXDMT0 interrupt
2196 * if we are low on receive buffers
2197 */
2198 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
2199 /*
2200 * The minimum threshold is controlled by RDMTS bits of RCTL:
2201 * 00 = 1/2 of RDLEN
2202 * 01 = 1/4 of RDLEN
2203 * 10 = 1/8 of RDLEN
2204 * 11 = reserved
2205 */
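    /* 2 << RDMTS yields the divisor: 2, 4, or 8 for the encodings listed above. */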
2206 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
2207 if (uRQueueLen <= uMinRQThreshold)
2208 {
2209 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
2210 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2211 pThis->szPrf, RDH, RDT, uRQueueLen, uMinRQThreshold));
2212 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
2213 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXDMT0);
2214 }
2215 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2216 pThis->szPrf, RDH, RDT, uRQueueLen));
2217 //e1kCsLeave(pThis);
2218}
2219#endif /* IN_RING3 */
2220
2221#ifdef E1K_WITH_RXD_CACHE
2222
2223# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2224
2225/**
2226 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2227 * RX ring if the cache is empty.
2228 *
2229 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2230 * go out of sync with RDH which will cause trouble when EMT checks if the
2231 * cache is empty to do pre-fetch @bugref{6217}.
2232 *
2233 * @param pDevIns The device instance.
2234 * @param pThis The device state structure.
2235 * @thread RX
2236 */
2237DECLINLINE(E1KRXDESC *) e1kRxDGet(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2238{
2239 Assert(e1kCsRxIsOwner(pThis));
2240 /* Check the cache first. */
2241 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2242 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2243 /* Cache is empty, reset it and check if we can fetch more. */
2244 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2245 if (e1kRxDPrefetch(pDevIns, pThis))
2246 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2247 /* Out of Rx descriptors. */
2248 return NULL;
2249}
2250
2251
2252/**
2253 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2254 * pointer. The descriptor gets written back to the RXD ring.
2255 *
2256 * @param pDevIns The device instance.
2257 * @param pThis The device state structure.
2258 * @param pDesc The descriptor being "returned" to the RX ring.
2259 * @thread RX
2260 */
2261DECLINLINE(void) e1kRxDPut(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC* pDesc)
2262{
2263 Assert(e1kCsRxIsOwner(pThis));
2264 pThis->iRxDCurrent++;
2265 // Assert(pDesc >= pThis->aRxDescriptors);
2266 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2267 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2268 // uint32_t rdh = RDH;
2269 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2270 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2271 /*
2272 * We need to print the descriptor before advancing RDH as it may fetch new
2273 * descriptors into the cache.
2274 */
2275 e1kPrintRDesc(pThis, pDesc);
2276 e1kAdvanceRDH(pDevIns, pThis);
2277}
2278
2279/**
2280 * Store a fragment of a received packet at the specified address.
2281 *
2282 * @param pDevIns The device instance.
2283 * @param pThis The device state structure.
2284 * @param pDesc The next available RX descriptor.
2285 * @param pvBuf The fragment.
2286 * @param cb The size of the fragment.
2287 */
2288static void e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2289{
2290 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2291 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2292 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2293 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2294 pDesc->u16Length = (uint16_t)cb;
2295 Assert(pDesc->u16Length == cb);
2296 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2297 RT_NOREF(pThis);
2298}
2299
2300# endif /* IN_RING3 */
2301
2302#else /* !E1K_WITH_RXD_CACHE */
2303
2304/**
2305 * Store a fragment of received packet that fits into the next available RX
2306 * buffer.
2307 *
2308 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2309 *
2310 * @param pDevIns The device instance.
2311 * @param pThis The device state structure.
2312 * @param pDesc The next available RX descriptor.
2313 * @param pvBuf The fragment.
2314 * @param cb The size of the fragment.
2315 */
2316static void e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2317{
2318 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2319 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2320 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2321 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2322 /* Write back the descriptor */
2323 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2324 e1kPrintRDesc(pThis, pDesc);
2325 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2326 /* Advance head */
2327 e1kAdvanceRDH(pDevIns, pThis);
2328 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2329 if (pDesc->status.fEOP)
2330 {
2331 /* Complete packet has been stored -- it is time to let the guest know. */
2332#ifdef E1K_USE_RX_TIMERS
2333 if (RDTR)
2334 {
2335 /* Arm the timer to fire in RDTR usec (discard .024) */
2336 e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2337 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2338         if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2339             e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2340 }
2341 else
2342 {
2343#endif
2344 /* 0 delay means immediate interrupt */
2345 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2346 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2347#ifdef E1K_USE_RX_TIMERS
2348 }
2349#endif
2350 }
2351 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2352}
2353
2354#endif /* !E1K_WITH_RXD_CACHE */
2355
2356/**
2357 * Returns true if it is a broadcast packet.
2358 *
2359 * @returns true if destination address indicates broadcast.
2360 * @param pvBuf The ethernet packet.
2361 */
2362DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2363{
2364 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2365 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2366}
2367
2368/**
2369 * Returns true if it is a multicast packet.
2370 *
2371 * @remarks returns true for broadcast packets as well.
2372 * @returns true if destination address indicates multicast.
2373 * @param pvBuf The ethernet packet.
2374 */
2375DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2376{
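    /* The I/G bit -- the least significant bit of the first byte of the destination
       address -- is set for multicast (and broadcast) frames. */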
2377 return (*(char*)pvBuf) & 1;
2378}
2379
2380#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2381/**
2382 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2383 *
2384 * @remarks We emulate checksum offloading for major packet types only.
2385 *
2386 * @returns VBox status code.
2387 * @param pThis The device state structure.
2388 * @param pFrame The available data.
2389 * @param cb Number of bytes available in the buffer.
2390 * @param   pStatus     Bit fields containing status info.
2391 */
2392static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2393{
2394 /** @todo
2395     * It is not safe to bypass checksum verification for packets coming
2396     * from the real wire. We are currently unable to tell where packets
2397     * come from, so we tell the driver to ignore our checksum flags
2398     * and do the verification in software.
2399 */
2400# if 0
2401 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2402
2403 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2404
2405 switch (uEtherType)
2406 {
2407 case 0x800: /* IPv4 */
2408 {
2409 pStatus->fIXSM = false;
2410 pStatus->fIPCS = true;
2411 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2412 /* TCP/UDP checksum offloading works with TCP and UDP only */
2413 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2414 break;
2415 }
2416 case 0x86DD: /* IPv6 */
2417 pStatus->fIXSM = false;
2418 pStatus->fIPCS = false;
2419 pStatus->fTCPCS = true;
2420 break;
2421 default: /* ARP, VLAN, etc. */
2422 pStatus->fIXSM = true;
2423 break;
2424 }
2425# else
2426 pStatus->fIXSM = true;
2427 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2428# endif
2429 return VINF_SUCCESS;
2430}
2431#endif /* IN_RING3 */
2432
2433/**
2434 * Pad and store received packet.
2435 *
2436 * @remarks Make sure that the packet appears to the upper layer as one coming
2437 *          from a real Ethernet NIC: pad it and insert the FCS.
2438 *
2439 * @returns VBox status code.
2440 * @param pDevIns The device instance.
2441 * @param pThis The device state structure.
2442 * @param pvBuf The available data.
2443 * @param cb Number of bytes available in the buffer.
2444 * @param status Bit fields containing status info.
2445 */
2446static int e1kHandleRxPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2447{
2448#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2449 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2450 uint8_t *ptr = rxPacket;
2451
2452 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2453 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2454 return rc;
2455
2456 if (cb > 70) /* unqualified guess */
2457 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2458
2459 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2460 Assert(cb > 16);
2461 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2462 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2463 if (status.fVP)
2464 {
2465 /* VLAN packet -- strip VLAN tag in VLAN mode */
2466 if ((CTRL & CTRL_VME) && cb > 16)
2467 {
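            /* The 802.1Q tag occupies bytes 12..15 (TPID followed by the TCI in u16Ptr[7]);
               drop it from the copy and record the TCI in the status field. */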
2468 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2469 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2470 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2471 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2472 cb -= 4;
2473 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2474 pThis->szPrf, status.u16Special, cb));
2475 }
2476 else
2477 status.fVP = false; /* Set VP only if we stripped the tag */
2478 }
2479 else
2480 memcpy(rxPacket, pvBuf, cb);
2481 /* Pad short packets */
2482 if (cb < 60)
2483 {
2484 memset(rxPacket + cb, 0, 60 - cb);
2485 cb = 60;
2486 }
2487 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2488 {
2489 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2490 /*
2491          * Add FCS if CRC stripping is not enabled. Since the value of the CRC
2492          * is ignored by most drivers, we may as well save ourselves the trouble
2493          * of calculating it (see the EthernetCRC CFGM parameter).
2494 */
2495 if (pThis->fEthernetCRC)
2496 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2497 cb += sizeof(uint32_t);
2498 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2499 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2500 }
2501 /* Compute checksum of complete packet */
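    /* RXCSUM.PCSS selects the byte offset at which the packet checksum calculation starts. */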
2502 size_t cbCSumStart = RT_MIN(GET_BITS(RXCSUM, PCSS), cb);
2503 uint16_t checksum = e1kCSum16(rxPacket + cbCSumStart, cb - cbCSumStart);
2504 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2505
2506 /* Update stats */
2507 E1K_INC_CNT32(GPRC);
2508 if (e1kIsBroadcast(pvBuf))
2509 E1K_INC_CNT32(BPRC);
2510 else if (e1kIsMulticast(pvBuf))
2511 E1K_INC_CNT32(MPRC);
2512 /* Update octet receive counter */
2513 E1K_ADD_CNT64(GORCL, GORCH, cb);
2514 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2515 if (cb == 64)
2516 E1K_INC_CNT32(PRC64);
2517 else if (cb < 128)
2518 E1K_INC_CNT32(PRC127);
2519 else if (cb < 256)
2520 E1K_INC_CNT32(PRC255);
2521 else if (cb < 512)
2522 E1K_INC_CNT32(PRC511);
2523 else if (cb < 1024)
2524 E1K_INC_CNT32(PRC1023);
2525 else
2526 E1K_INC_CNT32(PRC1522);
2527
2528 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2529
2530# ifdef E1K_WITH_RXD_CACHE
2531 while (cb > 0)
2532 {
2533 E1KRXDESC *pDesc = e1kRxDGet(pDevIns, pThis);
2534
2535 if (pDesc == NULL)
2536 {
2537 E1kLog(("%s Out of receive buffers, dropping the packet "
2538 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2539 pThis->szPrf, cb, e1kRxDInCache(pThis), RDH, RDT));
2540 break;
2541 }
2542# else /* !E1K_WITH_RXD_CACHE */
2543 if (RDH == RDT)
2544 {
2545 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2546 pThis->szPrf));
2547 }
2548 /* Store the packet to receive buffers */
2549 while (RDH != RDT)
2550 {
2551 /* Load the descriptor pointed by head */
2552 E1KRXDESC desc, *pDesc = &desc;
2553 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
2554# endif /* !E1K_WITH_RXD_CACHE */
2555 if (pDesc->u64BufAddr)
2556 {
2557 uint16_t u16RxBufferSize = pThis->u16RxBSize; /* see @bugref{9427} */
2558
2559 /* Update descriptor */
2560 pDesc->status = status;
2561 pDesc->u16Checksum = checksum;
2562 pDesc->status.fDD = true;
2563
2564 /*
2565 * We need to leave Rx critical section here or we risk deadlocking
2566 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2567 * page or has an access handler associated with it.
2568 * Note that it is safe to leave the critical section here since
2569 * e1kRegWriteRDT() never modifies RDH. It never touches already
2570 * fetched RxD cache entries either.
2571 */
2572 if (cb > u16RxBufferSize)
2573 {
2574 pDesc->status.fEOP = false;
2575 e1kCsRxLeave(pThis);
2576 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, u16RxBufferSize);
2577 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2578 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2579 return rc;
2580 ptr += u16RxBufferSize;
2581 cb -= u16RxBufferSize;
2582 }
2583 else
2584 {
2585 pDesc->status.fEOP = true;
2586 e1kCsRxLeave(pThis);
2587 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, cb);
2588# ifdef E1K_WITH_RXD_CACHE
2589 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2590 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2591 return rc;
2592 cb = 0;
2593# else /* !E1K_WITH_RXD_CACHE */
2594 pThis->led.Actual.s.fReading = 0;
2595 return VINF_SUCCESS;
2596# endif /* !E1K_WITH_RXD_CACHE */
2597 }
2598 /*
2599 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2600 * is not defined.
2601 */
2602 }
2603# ifdef E1K_WITH_RXD_CACHE
2604 /* Write back the descriptor. */
2605 pDesc->status.fDD = true;
2606 e1kRxDPut(pDevIns, pThis, pDesc);
2607# else /* !E1K_WITH_RXD_CACHE */
2608 else
2609 {
2610 /* Write back the descriptor. */
2611 pDesc->status.fDD = true;
2612 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2613 e1kAdvanceRDH(pDevIns, pThis);
2614 }
2615# endif /* !E1K_WITH_RXD_CACHE */
2616 }
2617
2618 if (cb > 0)
2619 E1kLog(("%s Out of receive buffers, dropping %u bytes", pThis->szPrf, cb));
2620
2621 pThis->led.Actual.s.fReading = 0;
2622
2623 e1kCsRxLeave(pThis);
2624# ifdef E1K_WITH_RXD_CACHE
2625 /* Complete packet has been stored -- it is time to let the guest know. */
2626# ifdef E1K_USE_RX_TIMERS
2627 if (RDTR)
2628 {
2629 /* Arm the timer to fire in RDTR usec (discard .024) */
2630         e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2631 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2632 if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2633             e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2634 }
2635 else
2636 {
2637# endif /* E1K_USE_RX_TIMERS */
2638 /* 0 delay means immediate interrupt */
2639 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2640 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2641# ifdef E1K_USE_RX_TIMERS
2642 }
2643# endif /* E1K_USE_RX_TIMERS */
2644# endif /* E1K_WITH_RXD_CACHE */
2645
2646 return VINF_SUCCESS;
2647#else /* !IN_RING3 */
2648 RT_NOREF(pDevIns, pThis, pvBuf, cb, status);
2649 return VERR_INTERNAL_ERROR_2;
2650#endif /* !IN_RING3 */
2651}
2652
2653
2654#ifdef IN_RING3
2655/**
2656 * Bring the link up after the configured delay, 5 seconds by default.
2657 *
2658 * @param pDevIns The device instance.
2659 * @param pThis The device state structure.
2660 * @thread any
2661 */
2662DECLINLINE(void) e1kBringLinkUpDelayed(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2663{
2664 E1kLog(("%s Will bring up the link in %d seconds...\n",
2665 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2666 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
2667}
2668
2669/**
2670 * Bring up the link immediately.
2671 *
2672 * @param pDevIns The device instance.
2673 * @param pThis The device state structure.
2674 * @param pThisCC The current context instance data.
2675 */
2676DECLINLINE(void) e1kR3LinkUp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2677{
2678 E1kLog(("%s Link is up\n", pThis->szPrf));
2679 STATUS |= STATUS_LU;
2680 Phy::setLinkStatus(&pThis->phy, true);
2681 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2682 if (pThisCC->pDrvR3)
2683 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_UP);
2684 /* Trigger processing of pending TX descriptors (see @bugref{8942}). */
2685 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
2686}
2687
2688/**
2689 * Bring down the link immediately.
2690 *
2691 * @param pDevIns The device instance.
2692 * @param pThis The device state structure.
2693 * @param pThisCC The current context instance data.
2694 */
2695DECLINLINE(void) e1kR3LinkDown(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2696{
2697 E1kLog(("%s Link is down\n", pThis->szPrf));
2698 STATUS &= ~STATUS_LU;
2699#ifdef E1K_LSC_ON_RESET
2700 Phy::setLinkStatus(&pThis->phy, false);
2701#endif /* E1K_LSC_ON_RESET */
2702 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2703 if (pThisCC->pDrvR3)
2704 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2705}
2706
2707/**
2708 * Bring down the link temporarily.
2709 *
2710 * @param pDevIns The device instance.
2711 * @param pThis The device state structure.
2712 * @param pThisCC The current context instance data.
2713 */
2714DECLINLINE(void) e1kR3LinkDownTemp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2715{
2716 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2717 STATUS &= ~STATUS_LU;
2718 Phy::setLinkStatus(&pThis->phy, false);
2719 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2720 /*
2721 * Notifying the associated driver that the link went down (even temporarily)
2722 * seems to be the right thing, but it was not done before. This may cause
2723 * a regression if the driver does not expect the link to go down as a result
2724 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2725 * of code notified the driver that the link was up! See @bugref{7057}.
2726 */
2727 if (pThisCC->pDrvR3)
2728 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2729 e1kBringLinkUpDelayed(pDevIns, pThis);
2730}
2731#endif /* IN_RING3 */
2732
2733#if 0 /* unused */
2734/**
2735 * Read handler for Device Status register.
2736 *
2737 * Get the link status from PHY.
2738 *
2739 * @returns VBox status code.
2740 *
2741 * @param pThis The device state structure.
2742 * @param offset Register offset in memory-mapped frame.
2743 * @param index Register index in register array.
2744 * @param   pu32Value   Where to store the read value.
2745 */
2746static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2747{
2748 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2749 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2750 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2751 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2752 {
2753 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2754 if (Phy::readMDIO(&pThis->phy))
2755 *pu32Value = CTRL | CTRL_MDIO;
2756 else
2757 *pu32Value = CTRL & ~CTRL_MDIO;
2758 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2759 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2760 }
2761 else
2762 {
2763 /* MDIO pin is used for output, ignore it */
2764 *pu32Value = CTRL;
2765 }
2766 return VINF_SUCCESS;
2767}
2768#endif /* unused */
2769
2770/**
2771 * A callback used by PHY to indicate that the link needs to be updated due to
2772 * a reset of the PHY.
2773 *
2774 * @param pDevIns The device instance.
2775 * @thread any
2776 */
2777void e1kPhyLinkResetCallback(PPDMDEVINS pDevIns)
2778{
2779 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
2780
2781 /* Make sure we have cable connected and MAC can talk to PHY */
2782 if (pThis->fCableConnected && (CTRL & CTRL_SLU))
2783 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
2784}
2785
2786/**
2787 * Write handler for Device Control register.
2788 *
2789 * Handles reset.
2790 *
2791 * @param   pDevIns     The device instance.
2792 * @param   pThis       The device state structure.
2793 * @param   offset      Register offset in memory-mapped frame.
2794 * @param   index       Register index in register array.
2795 * @param   value       The value to store.
2796 * @thread EMT
2797 */
2798static int e1kRegWriteCTRL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2799{
2800 int rc = VINF_SUCCESS;
2801
2802 if (value & CTRL_RESET)
2803 { /* RST */
2804#ifndef IN_RING3
2805 return VINF_IOM_R3_MMIO_WRITE;
2806#else
2807 e1kR3HardReset(pDevIns, pThis, PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC));
2808#endif
2809 }
2810 else
2811 {
2812#ifdef E1K_LSC_ON_SLU
2813 /*
2814 * When the guest changes 'Set Link Up' bit from 0 to 1 we check if
2815 * the link is down and the cable is connected, and if they are we
2816 * bring the link up, see @bugref{8624}.
2817 */
2818 if ( (value & CTRL_SLU)
2819 && !(CTRL & CTRL_SLU)
2820 && pThis->fCableConnected
2821 && !(STATUS & STATUS_LU))
2822 {
2823 /* It should take about 2 seconds for the link to come up */
2824 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
2825 }
2826#else /* !E1K_LSC_ON_SLU */
2827 if ( (value & CTRL_SLU)
2828 && !(CTRL & CTRL_SLU)
2829 && pThis->fCableConnected
2830 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hLUTimer))
2831 {
2832 /* PXE does not use LSC interrupts, see @bugref{9113}. */
2833 STATUS |= STATUS_LU;
2834 }
2835#endif /* !E1K_LSC_ON_SLU */
2836 if ((value & CTRL_VME) != (CTRL & CTRL_VME))
2837 {
2838 E1kLog(("%s VLAN Mode %s\n", pThis->szPrf, (value & CTRL_VME) ? "Enabled" : "Disabled"));
2839 }
2840 Log7(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2841 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2842 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2843 if (value & CTRL_MDC)
2844 {
2845 if (value & CTRL_MDIO_DIR)
2846 {
2847 Log7(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2848 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2849 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO), pDevIns);
2850 }
2851 else
2852 {
2853 if (Phy::readMDIO(&pThis->phy))
2854 value |= CTRL_MDIO;
2855 else
2856 value &= ~CTRL_MDIO;
2857 Log7(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2858 }
2859 }
2860 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
2861 }
2862
2863 return rc;
2864}
2865
2866/**
2867 * Write handler for EEPROM/Flash Control/Data register.
2868 *
2869 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2870 *
2871 * @param   pDevIns     The device instance.
2872 * @param   pThis       The device state structure.
2873 * @param   offset      Register offset in memory-mapped frame.
2874 * @param   index       Register index in register array.
2875 * @param   value       The value to store.
2876 * @thread EMT
2877 */
2878static int e1kRegWriteEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2879{
2880 RT_NOREF(pDevIns, offset, index);
2881#ifdef IN_RING3
2882 /* So far we are concerned with lower byte only */
2883 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2884 {
2885 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2886 /* Note: 82543GC does not need to request EEPROM access */
2887 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
2888 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
2889 pThisCC->eeprom.write(value & EECD_EE_WIRES);
2890 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
2891 }
2892 if (value & EECD_EE_REQ)
2893 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2894 else
2895 EECD &= ~EECD_EE_GNT;
2896 //e1kRegWriteDefault(pThis, offset, index, value );
2897
2898 return VINF_SUCCESS;
2899#else /* !IN_RING3 */
2900 RT_NOREF(pThis, value);
2901 return VINF_IOM_R3_MMIO_WRITE;
2902#endif /* !IN_RING3 */
2903}
2904
2905/**
2906 * Read handler for EEPROM/Flash Control/Data register.
2907 *
2908 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2909 *
2910 * @returns VBox status code.
2911 *
2912 * @param   pDevIns     The device instance.
2913 * @param   pThis       The device state structure.
2914 * @param   offset      Register offset in memory-mapped frame.
2915 * @param   index       Register index in register array.
 * @param   pu32Value   Where to store the read value.
2916 * @thread EMT
2917 */
2918static int e1kRegReadEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2919{
2920#ifdef IN_RING3
2921 uint32_t value;
2922 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
2923 if (RT_SUCCESS(rc))
2924 {
2925 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2926 {
2927 /* Note: 82543GC does not need to request EEPROM access */
2928 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2929 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2930 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
2931 value |= pThisCC->eeprom.read();
2932 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2933 }
2934 *pu32Value = value;
2935 }
2936
2937 return rc;
2938#else /* !IN_RING3 */
2939 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
2940 return VINF_IOM_R3_MMIO_READ;
2941#endif /* !IN_RING3 */
2942}
2943
2944/**
2945 * Write handler for EEPROM Read register.
2946 *
2947 * Handles EEPROM word access requests, reads EEPROM and stores the result
2948 * into DATA field.
2949 *
2950 * @param pThis The device state structure.
2951 * @param offset Register offset in memory-mapped frame.
2952 * @param index Register index in register array.
2953 * @param value The value to store.
2954 * @param mask Used to implement partial writes (8 and 16-bit).
2955 * @thread EMT
2956 */
2957static int e1kRegWriteEERD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2958{
2959#ifdef IN_RING3
2960 /* Make use of 'writable' and 'readable' masks. */
2961 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
2962 /* DONE and DATA are set only if read was triggered by START. */
2963 if (value & EERD_START)
2964 {
2965 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2966 uint16_t tmp;
2967 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
2968 if (pThisCC->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2969 SET_BITS(EERD, DATA, tmp);
2970 EERD |= EERD_DONE;
2971 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2972 }
2973
2974 return VINF_SUCCESS;
2975#else /* !IN_RING3 */
2976 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
2977 return VINF_IOM_R3_MMIO_WRITE;
2978#endif /* !IN_RING3 */
2979}
2980
2981
2982/**
2983 * Write handler for MDI Control register.
2984 *
2985 * Handles PHY read/write requests; forwards requests to internal PHY device.
2986 *
2987 * @param pThis The device state structure.
2988 * @param offset Register offset in memory-mapped frame.
2989 * @param index Register index in register array.
2990 * @param value The value to store.
2991 * @param mask Used to implement partial writes (8 and 16-bit).
2992 * @thread EMT
2993 */
2994static int e1kRegWriteMDIC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2995{
2996 if (value & MDIC_INT_EN)
2997 {
2998 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2999 pThis->szPrf));
3000 }
3001 else if (value & MDIC_READY)
3002 {
3003 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
3004 pThis->szPrf));
3005 }
3006 else if (GET_BITS_V(value, MDIC, PHY) != 1)
3007 {
3008 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
3009 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
3010 /*
3011 * Some drivers scan the MDIO bus for a PHY. We can work with these
3012 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
3013 * at the requested address, see @bugref{7346}.
3014 */
3015 MDIC = MDIC_READY | MDIC_ERROR;
3016 }
3017 else
3018 {
3019 /* Store the value */
3020 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3021 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
3022 /* Forward op to PHY */
3023 if (value & MDIC_OP_READ)
3024 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), pDevIns));
3025 else
3026 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK, pDevIns);
3027 /* Let software know that we are done */
3028 MDIC |= MDIC_READY;
3029 }
3030
3031 return VINF_SUCCESS;
3032}
3033
3034/**
3035 * Write handler for Interrupt Cause Read register.
3036 *
3037 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
3038 *
3039 * @param pThis The device state structure.
3040 * @param offset Register offset in memory-mapped frame.
3041 * @param index Register index in register array.
3042 * @param value The value to store.
3043 * @param mask Used to implement partial writes (8 and 16-bit).
3044 * @thread EMT
3045 */
3046static int e1kRegWriteICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3047{
3048 ICR &= ~value;
3049
3050 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
3051 return VINF_SUCCESS;
3052}
3053
3054/**
3055 * Read handler for Interrupt Cause Read register.
3056 *
3057 * Reading this register acknowledges all interrupts.
3058 *
3059 * @returns VBox status code.
3060 *
3061 * @param pThis The device state structure.
3062 * @param offset Register offset in memory-mapped frame.
3063 * @param index Register index in register array.
3064 * @param mask Not used.
3065 * @thread EMT
3066 */
3067static int e1kRegReadICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3068{
3069 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
3070 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3071 return rc;
3072
3073 uint32_t value = 0;
3074 rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
3075 if (RT_SUCCESS(rc))
3076 {
3077 if (value)
3078 {
3079 if (!pThis->fIntRaised)
3080 E1K_INC_ISTAT_CNT(pThis->uStatNoIntICR);
3081 /*
3082 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
3083 * with disabled interrupts.
3084 */
3085 //if (IMS)
3086 if (1)
3087 {
3088 /*
3089 * Interrupts were enabled -- we are supposedly at the very
3090 * beginning of the interrupt handler
3091 */
3092 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
3093 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
3094 /* Clear all pending interrupts */
3095 ICR = 0;
3096 pThis->fIntRaised = false;
3097 /* Lower(0) INTA(0) */
3098 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3099
3100 pThis->u64AckedAt = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
3101 if (pThis->fIntMaskUsed)
3102 pThis->fDelayInts = true;
3103 }
3104 else
3105 {
3106 /*
3107 * Interrupts are disabled -- in Windows guests the ICR read is done
3108 * just before re-enabling interrupts
3109 */
3110 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
3111 }
3112 }
3113 *pu32Value = value;
3114 }
3115 e1kCsLeave(pThis);
3116
3117 return rc;
3118}
3119
3120/**
3121 * Write handler for Interrupt Cause Set register.
3122 *
3123 * Bits corresponding to 1s in 'value' will be set in ICR register.
3124 *
3125 * @param pThis The device state structure.
3126 * @param offset Register offset in memory-mapped frame.
3127 * @param index Register index in register array.
3128 * @param value The value to store.
3129 * @param mask Used to implement partial writes (8 and 16-bit).
3130 * @thread EMT
3131 */
3132static int e1kRegWriteICS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3133{
3134 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3135 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
3136 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
3137}
3138
3139/**
3140 * Write handler for Interrupt Mask Set register.
3141 *
3142 * Will trigger pending interrupts.
3143 *
3144 * @param pThis The device state structure.
3145 * @param offset Register offset in memory-mapped frame.
3146 * @param index Register index in register array.
3147 * @param value The value to store.
3148 * @param mask Used to implement partial writes (8 and 16-bit).
3149 * @thread EMT
3150 */
3151static int e1kRegWriteIMS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3152{
3153 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3154
3155 IMS |= value;
3156 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
3157 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
3158 /*
3159 * We cannot raise an interrupt here as it will occasionally cause an interrupt storm
3160 * in Windows guests (see @bugref{8624}, @bugref{5023}).
3161 */
3162 if ((ICR & IMS) && !pThis->fLocked)
3163 {
3164 E1K_INC_ISTAT_CNT(pThis->uStatIntIMS);
3165 e1kPostponeInterrupt(pDevIns, pThis, E1K_IMS_INT_DELAY_NS);
3166 }
3167
3168 return VINF_SUCCESS;
3169}
3170
3171/**
3172 * Write handler for Interrupt Mask Clear register.
3173 *
3174 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
3175 *
3176 * @param pThis The device state structure.
3177 * @param offset Register offset in memory-mapped frame.
3178 * @param index Register index in register array.
3179 * @param value The value to store.
3180 * @param mask Used to implement partial writes (8 and 16-bit).
3181 * @thread EMT
3182 */
3183static int e1kRegWriteIMC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3184{
3185 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3186
3187 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3188 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3189 return rc;
3190 if (pThis->fIntRaised)
3191 {
3192 /*
3193 * Technically we should reset fIntRaised in the ICR read handler, but that would cause
3194 * Windows to freeze since it may receive an interrupt while still at the very beginning
3195 * of the interrupt handler.
3196 */
3197 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3198 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3199 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3200 /* Lower(0) INTA(0) */
3201 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3202 pThis->fIntRaised = false;
3203 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3204 }
3205 IMS &= ~value;
3206 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3207 e1kCsLeave(pThis);
3208
3209 return VINF_SUCCESS;
3210}
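/*
 * For illustration only -- a minimal model (not part of the device code) of how
 * the four interrupt registers handled by the write handlers above interact.
 * It relies only on the fixed-width types already pulled in by this file.
 */
#if 0
typedef struct INTRMODEL { uint32_t icr, ims; } INTRMODEL;

static void modelWriteICS(INTRMODEL *p, uint32_t val) { p->icr |= val;  } /* set pending causes   */
static void modelWriteICR(INTRMODEL *p, uint32_t val) { p->icr &= ~val; } /* write-1-to-clear ack */
static void modelWriteIMS(INTRMODEL *p, uint32_t val) { p->ims |= val;  } /* unmask causes        */
static void modelWriteIMC(INTRMODEL *p, uint32_t val) { p->ims &= ~val; } /* mask causes          */
/* INTA is raised (possibly with a delay, see e1kPostponeInterrupt) only while an
 * enabled cause is pending: */
static bool modelIntPending(INTRMODEL const *p)       { return (p->icr & p->ims) != 0; }
#endif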
3211
3212/**
3213 * Write handler for Receive Control register.
3214 *
3215 * @param pThis The device state structure.
3216 * @param offset Register offset in memory-mapped frame.
3217 * @param index Register index in register array.
3218 * @param value The value to store.
3219 * @param mask Used to implement partial writes (8 and 16-bit).
3220 * @thread EMT
3221 */
3222static int e1kRegWriteRCTL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3223{
3224 /* Update promiscuous mode */
3225 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
3226 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3227 {
3228 /* Promiscuity has changed, pass the knowledge on. */
3229#ifndef IN_RING3
3230 return VINF_IOM_R3_MMIO_WRITE;
3231#else
3232 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3233 if (pThisCC->pDrvR3)
3234 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, fBecomePromiscous);
3235#endif
3236 }
3237
3238 /* Adjust receive buffer size */
3239 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3240 if (value & RCTL_BSEX)
3241 cbRxBuf *= 16;
3242 if (cbRxBuf > E1K_MAX_RX_PKT_SIZE)
3243 cbRxBuf = E1K_MAX_RX_PKT_SIZE;
3244 if (cbRxBuf != pThis->u16RxBSize)
3245 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3246 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3247 pThis->u16RxBSize = cbRxBuf;
3248
3249 /* Update the register */
3250 return e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3251}
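/*
 * For illustration only -- a minimal sketch (not part of the device code) of the
 * RCTL.BSIZE/BSEX decoding performed above: BSIZE 0..3 selects 2048/1024/512/256
 * bytes, BSEX multiplies the result by 16, and the outcome is clamped to
 * E1K_MAX_RX_PKT_SIZE just as e1kRegWriteRCTL() does.
 */
#if 0
static unsigned sketchRxBufSize(unsigned uBSize /* 0..3 */, bool fBSEX, unsigned cbMax)
{
    unsigned cb = 2048u >> uBSize;  /* 0 -> 2048, 1 -> 1024, 2 -> 512, 3 -> 256 */
    if (fBSEX)
        cb *= 16;                   /* e.g. 1 -> 16384, 2 -> 8192, 3 -> 4096    */
    return cb > cbMax ? cbMax : cb; /* clamp to the maximum packet size         */
}
#endif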
3252
3253/**
3254 * Write handler for Packet Buffer Allocation register.
3255 *
3256 * TXA = 64 - RXA.
3257 *
3258 * @param pThis The device state structure.
3259 * @param offset Register offset in memory-mapped frame.
3260 * @param index Register index in register array.
3261 * @param value The value to store.
3262 * @param mask Used to implement partial writes (8 and 16-bit).
3263 * @thread EMT
3264 */
3265static int e1kRegWritePBA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3266{
3267 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3268 PBA_st->txa = 64 - PBA_st->rxa;
3269
3270 return VINF_SUCCESS;
3271}
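/*
 * Worked example: a guest write that sets PBA.RXA = 48 makes the handler above
 * report PBA.TXA = 64 - 48 = 16, i.e. the (presumably 64 KB) packet buffer is
 * split 48/16 between the receive and transmit sides.
 */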
3272
3273/**
3274 * Write handler for Receive Descriptor Tail register.
3275 *
3276 * @remarks Write into RDT forces switch to HC and signal to
3277 * e1kR3NetworkDown_WaitReceiveAvail().
3278 *
3279 * @returns VBox status code.
3280 *
3281 * @param pThis The device state structure.
3282 * @param offset Register offset in memory-mapped frame.
3283 * @param index Register index in register array.
3284 * @param value The value to store.
3285 * @param mask Used to implement partial writes (8 and 16-bit).
3286 * @thread EMT
3287 */
3288static int e1kRegWriteRDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3289{
3290#ifndef IN_RING3
3291 /* XXX */
3292// return VINF_IOM_R3_MMIO_WRITE;
3293#endif
3294 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3295 if (RT_LIKELY(rc == VINF_SUCCESS))
3296 {
3297 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3298#ifndef E1K_WITH_RXD_CACHE
3299 /*
3300 * Some drivers advance RDT too far, so that it equals RDH. This
3301 * somehow manages to work with real hardware but not with this
3302 * emulated device. We can work with these drivers if we just
3303 * write 1 less when we see a driver writing RDT equal to RDH,
3304 * see @bugref{7346}.
3305 */
3306 if (value == RDH)
3307 {
3308 if (RDH == 0)
3309 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3310 else
3311 value = RDH - 1;
3312 }
3313#endif /* !E1K_WITH_RXD_CACHE */
3314 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3315#ifdef E1K_WITH_RXD_CACHE
3316 /*
3317 * We need to fetch descriptors now as RDT may go whole circle
3318 * before we attempt to store a received packet. For example,
3319 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3320 * size being only 8 descriptors! Note that we fetch descriptors
3321 * only when the cache is empty to reduce the number of memory reads
3322 * in case of frequent RDT writes. Don't fetch anything when the
3323 * receiver is disabled either as RDH, RDT, RDLEN can be in some
3324 * messed up state.
3325 * Note that despite the cache may seem empty, meaning that there are
3326 * no more available descriptors in it, it may still be used by RX
3327 * thread which has not yet written the last descriptor back but has
3328 * temporarily released the RX lock in order to write the packet body
3329 * to descriptor's buffer. At this point we still going to do prefetch
3330 * but it won't actually fetch anything if there are no unused slots in
3331 * our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not
3332 * reset the cache here even if it appears empty. It will be reset at
3333 * a later point in e1kRxDGet().
3334 */
3335 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3336 e1kRxDPrefetch(pDevIns, pThis);
3337#endif /* E1K_WITH_RXD_CACHE */
3338 e1kCsRxLeave(pThis);
3339 if (RT_SUCCESS(rc))
3340 {
3341 /* Signal that we have more receive descriptors available. */
3342 e1kWakeupReceive(pDevIns, pThis);
3343 }
3344 }
3345 return rc;
3346}
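/*
 * Worked example of the !E1K_WITH_RXD_CACHE adjustment above: with RDLEN
 * describing a ring of 8 descriptors, a guest write of RDT = RDH = 0 is stored
 * as 7 and RDT = RDH = 5 is stored as 4, so the emulation sees an almost fully
 * stocked receive ring -- which is what such drivers intend -- instead of an
 * empty one.
 */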
3347
3348/**
3349 * Write handler for Receive Delay Timer register.
3350 *
3351 * @param pThis The device state structure.
3352 * @param offset Register offset in memory-mapped frame.
3353 * @param index Register index in register array.
3354 * @param value The value to store.
3355 * @param mask Used to implement partial writes (8 and 16-bit).
3356 * @thread EMT
3357 */
3358static int e1kRegWriteRDTR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3359{
3360 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3361 if (value & RDTR_FPD)
3362 {
3363 /* Flush requested, cancel both timers and raise interrupt */
3364#ifdef E1K_USE_RX_TIMERS
3365 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3366 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3367#endif
3368 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3369 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3370 }
3371
3372 return VINF_SUCCESS;
3373}
3374
3375DECLINLINE(uint32_t) e1kGetTxLen(PE1KSTATE pThis)
3376{
3377 /**
3378 * Make sure TDT won't change during computation. EMT may modify TDT at
3379 * any moment.
3380 */
3381 uint32_t tdt = TDT;
3382 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3383}
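/*
 * Worked example of the ring arithmetic above: with TDLEN describing 8
 * descriptors, TDH = 2 and TDT = 6 yield 0 + 6 - 2 = 4 pending descriptors,
 * and after the tail wraps around, TDH = 6 and TDT = 2 yield 8 + 2 - 6 = 4 as
 * well.
 */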
3384
3385#ifdef IN_RING3
3386
3387# ifdef E1K_TX_DELAY
3388/**
3389 * Transmit Delay Timer handler.
3390 *
3391 * @remarks We only get here when the timer expires.
3392 *
3393 * @param pDevIns Pointer to device instance structure.
3394 * @param pTimer Pointer to the timer.
3395 * @param pvUser NULL.
3396 * @thread EMT
3397 */
3398static DECLCALLBACK(void) e1kR3TxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3399{
3400 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3401 Assert(PDMCritSectIsOwner(&pThis->csTx));
3402
3403 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3404# ifdef E1K_INT_STATS
3405 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3406 if (u64Elapsed > pThis->uStatMaxTxDelay)
3407 pThis->uStatMaxTxDelay = u64Elapsed;
3408# endif
3409 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
3410 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3411}
3412# endif /* E1K_TX_DELAY */
3413
3414//# ifdef E1K_USE_TX_TIMERS
3415
3416/**
3417 * Transmit Interrupt Delay Timer handler.
3418 *
3419 * @remarks We only get here when the timer expires.
3420 *
3421 * @param pDevIns Pointer to device instance structure.
3422 * @param pTimer Pointer to the timer.
3423 * @param pvUser NULL.
3424 * @thread EMT
3425 */
3426static DECLCALLBACK(void) e1kR3TxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3427{
3428 RT_NOREF(pDevIns);
3429 RT_NOREF(pTimer);
3430 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3431
3432 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3433 /* Cancel absolute delay timer as we have already got attention */
3434# ifndef E1K_NO_TAD
3435 e1kCancelTimer(pDevIns, pThis, pThis->hTADTimer);
3436# endif
3437 e1kRaiseInterrupt(pDevIns, pThis, ICR_TXDW);
3438}
3439
3440/**
3441 * Transmit Absolute Delay Timer handler.
3442 *
3443 * @remarks We only get here when the timer expires.
3444 *
3445 * @param pDevIns Pointer to device instance structure.
3446 * @param pTimer Pointer to the timer.
3447 * @param pvUser NULL.
3448 * @thread EMT
3449 */
3450static DECLCALLBACK(void) e1kR3TxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3451{
3452 RT_NOREF(pDevIns);
3453 RT_NOREF(pTimer);
3454 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3455
3456 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3457 /* Cancel interrupt delay timer as we have already got attention */
3458 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
3459 e1kRaiseInterrupt(pDevIns, pThis, ICR_TXDW);
3460}
3461
3462//# endif /* E1K_USE_TX_TIMERS */
3463# ifdef E1K_USE_RX_TIMERS
3464
3465/**
3466 * Receive Interrupt Delay Timer handler.
3467 *
3468 * @remarks We only get here when the timer expires.
3469 *
3470 * @param pDevIns Pointer to device instance structure.
3471 * @param pTimer Pointer to the timer.
3472 * @param pvUser NULL.
3473 * @thread EMT
3474 */
3475static DECLCALLBACK(void) e1kR3RxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3476{
3477 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3478
3479 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3480 /* Cancel absolute delay timer as we have already got attention */
3481 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3482 e1kRaiseInterrupt(pDevIns, pThis, ICR_RXT0);
3483}
3484
3485/**
3486 * Receive Absolute Delay Timer handler.
3487 *
3488 * @remarks We only get here when the timer expires.
3489 *
3490 * @param pDevIns Pointer to device instance structure.
3491 * @param pTimer Pointer to the timer.
3492 * @param pvUser NULL.
3493 * @thread EMT
3494 */
3495static DECLCALLBACK(void) e1kR3RxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3496{
3497 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3498
3499 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3500 /* Cancel interrupt delay timer as we have already got attention */
3501 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3502 e1kRaiseInterrupt(pDevIns, pThis, ICR_RXT0);
3503}
3504
3505# endif /* E1K_USE_RX_TIMERS */
3506
3507/**
3508 * Late Interrupt Timer handler.
3509 *
3510 * @param pDevIns Pointer to device instance structure.
3511 * @param pTimer Pointer to the timer.
3512 * @param pvUser NULL.
3513 * @thread EMT
3514 */
3515static DECLCALLBACK(void) e1kR3LateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3516{
3517 RT_NOREF(pDevIns, pTimer);
3518 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3519
3520 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3521 STAM_COUNTER_INC(&pThis->StatLateInts);
3522 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3523# if 0
3524 if (pThis->iStatIntLost > -100)
3525 pThis->iStatIntLost--;
3526# endif
3527 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, 0);
3528 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3529}
3530
3531/**
3532 * Link Up Timer handler.
3533 *
3534 * @param pDevIns Pointer to device instance structure.
3535 * @param pTimer Pointer to the timer.
3536 * @param pvUser NULL.
3537 * @thread EMT
3538 */
3539static DECLCALLBACK(void) e1kR3LinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3540{
3541 RT_NOREF(pTimer);
3542 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3543 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3544
3545 /*
3546 * This can happen if we set the link status to down when the link-up timer was
3547 * already armed (shortly after e1kLoadDone(), or when the cable was disconnected
3548 * and then reconnected and disconnected again very quickly). Moreover, 82543GC triggers LSC
3549 * on reset even if the cable is unplugged (see @bugref{8942}).
3550 */
3551 if (pThis->fCableConnected)
3552 {
3553 /* 82543GC does not have an internal PHY */
3554 if (pThis->eChip == E1K_CHIP_82543GC || (CTRL & CTRL_SLU))
3555 e1kR3LinkUp(pDevIns, pThis, pThisCC);
3556 }
3557# ifdef E1K_LSC_ON_RESET
3558 else if (pThis->eChip == E1K_CHIP_82543GC)
3559 e1kR3LinkDown(pDevIns, pThis, pThisCC);
3560# endif /* E1K_LSC_ON_RESET */
3561}
3562
3563#endif /* IN_RING3 */
3564
3565/**
3566 * Sets up the GSO context according to the TSE new context descriptor.
3567 *
3568 * @param pGso The GSO context to setup.
3569 * @param pCtx The context descriptor.
3570 */
3571DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3572{
3573 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3574
3575 /*
3576 * See if the context descriptor describes something that could be TCP or
3577 * UDP over IPv[46].
3578 */
3579 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3580 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3581 {
3582 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3583 return;
3584 }
3585 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3586 {
3587 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3588 return;
3589 }
3590 if (RT_UNLIKELY( pCtx->dw2.fTCP
3591 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3592 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3593 {
3594 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3595 return;
3596 }
3597
3598 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
3599 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3600 {
3601 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3602 return;
3603 }
3604
3605 /* IPv4 checksum offset. */
3606 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3607 {
3608 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3609 return;
3610 }
3611
3612 /* TCP/UDP checksum offsets. */
3613 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3614 != ( pCtx->dw2.fTCP
3615 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3616 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3617 {
3618 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3619 return;
3620 }
3621
3622 /*
3623 * Because internal networking uses a 16-bit size field for the GSO context
3624 * plus frame, we have to make sure we don't exceed it.
3625 */
3626 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3627 {
3628 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3629 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3630 return;
3631 }
3632
3633 /*
3634 * We're good for now - we'll do more checks when seeing the data.
3635 * So, figure the type of offloading and setup the context.
3636 */
3637 if (pCtx->dw2.fIP)
3638 {
3639 if (pCtx->dw2.fTCP)
3640 {
3641 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3642 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3643 }
3644 else
3645 {
3646 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3647 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3648 }
3649 /** @todo Detect IPv4-IPv6 tunneling (need a test setup since Linux doesn't
3650 * seem to do this yet)... */
3651 }
3652 else
3653 {
3654 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3655 if (pCtx->dw2.fTCP)
3656 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3657 else
3658 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3659 }
3660 pGso->offHdr1 = pCtx->ip.u8CSS;
3661 pGso->offHdr2 = pCtx->tu.u8CSS;
3662 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3663 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3664 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3665 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3666 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3667}
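/*
 * Worked example: a context descriptor for a plain Ethernet + IPv4 + TCP frame
 * (no VLAN tag) that passes the checks above would carry IPCSS = 14 (end of the
 * Ethernet header), TUCSS = 14 + 20 = 34, HDRLEN = 34 + 20 = 54, IPCSO =
 * IPCSS + 10 = 24 (the IPv4 checksum field), TUCSO = TUCSS + 16 = 50 (the TCP
 * checksum field) and, say, MSS = 1460; with PAYLEN = 4096 the guest expects
 * ceil(4096 / 1460) = 3 segments on the wire.
 */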
3668
3669/**
3670 * Checks if we can use GSO processing for the current TSE frame.
3671 *
3672 * @param pThis The device state structure.
3673 * @param pGso The GSO context.
3674 * @param pData The first data descriptor of the frame.
3675 * @param pCtx The TSO context descriptor.
3676 */
3677DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3678{
3679 if (!pData->cmd.fTSE)
3680 {
3681 E1kLog2(("e1kCanDoGso: !TSE\n"));
3682 return false;
3683 }
3684 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3685 {
3686 E1kLog(("e1kCanDoGso: VLE\n"));
3687 return false;
3688 }
3689 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3690 {
3691 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3692 return false;
3693 }
3694
3695 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3696 {
3697 case PDMNETWORKGSOTYPE_IPV4_TCP:
3698 case PDMNETWORKGSOTYPE_IPV4_UDP:
3699 if (!pData->dw3.fIXSM)
3700 {
3701 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3702 return false;
3703 }
3704 if (!pData->dw3.fTXSM)
3705 {
3706 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3707 return false;
3708 }
3709 /** @todo what more check should we perform here? Ethernet frame type? */
3710 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3711 return true;
3712
3713 case PDMNETWORKGSOTYPE_IPV6_TCP:
3714 case PDMNETWORKGSOTYPE_IPV6_UDP:
3715 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3716 {
3717 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3718 return false;
3719 }
3720 if (!pData->dw3.fTXSM)
3721 {
3722 E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3723 return false;
3724 }
3725 /** @todo what more check should we perform here? Ethernet frame type? */
3726 E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3727 return true;
3728
3729 default:
3730 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3731 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3732 return false;
3733 }
3734}
3735
3736/**
3737 * Frees the current xmit buffer.
3738 *
3739 * @param pThis The device state structure.
3740 */
3741static void e1kXmitFreeBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC)
3742{
3743 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
3744 if (pSg)
3745 {
3746 pThisCC->CTX_SUFF(pTxSg) = NULL;
3747
3748 if (pSg->pvAllocator != pThis)
3749 {
3750 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3751 if (pDrv)
3752 pDrv->pfnFreeBuf(pDrv, pSg);
3753 }
3754 else
3755 {
3756 /* loopback */
3757 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3758 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3759 pSg->fFlags = 0;
3760 pSg->pvAllocator = NULL;
3761 }
3762 }
3763}
3764
3765#ifndef E1K_WITH_TXD_CACHE
3766/**
3767 * Allocates an xmit buffer.
3768 *
3769 * @returns See PDMINETWORKUP::pfnAllocBuf.
3770 * @param pThis The device state structure.
3771 * @param cbMin The minimum frame size.
3772 * @param fExactSize Whether cbMin is exact or if we have to max it
3773 * out to the max MTU size.
3774 * @param fGso Whether this is a GSO frame or not.
3775 */
3776DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, size_t cbMin, bool fExactSize, bool fGso)
3777{
3778 /* Adjust cbMin if necessary. */
3779 if (!fExactSize)
3780 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3781
3782 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3783 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
3784 e1kXmitFreeBuf(pThis, pThisCC);
3785 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
3786
3787 /*
3788 * Allocate the buffer.
3789 */
3790 PPDMSCATTERGATHER pSg;
3791 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3792 {
3793 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3794 if (RT_UNLIKELY(!pDrv))
3795 return VERR_NET_DOWN;
3796 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3797 if (RT_FAILURE(rc))
3798 {
3799 /* Suspend TX as we are out of buffers atm */
3800 STATUS |= STATUS_TXOFF;
3801 return rc;
3802 }
3803 }
3804 else
3805 {
3806 /* Create a loopback using the fallback buffer and preallocated SG. */
3807 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3808 pSg = &pThis->uTxFallback.Sg;
3809 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3810 pSg->cbUsed = 0;
3811 pSg->cbAvailable = 0;
3812 pSg->pvAllocator = pThis;
3813 pSg->pvUser = NULL; /* No GSO here. */
3814 pSg->cSegs = 1;
3815 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3816 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3817 }
3818
3819 pThisCC->CTX_SUFF(pTxSg) = pSg;
3820 return VINF_SUCCESS;
3821}
3822#else /* E1K_WITH_TXD_CACHE */
3823/**
3824 * Allocates an xmit buffer.
3825 *
3826 * @returns See PDMINETWORKUP::pfnAllocBuf.
3827 * @param pThis The device state structure.
3828 * @param pThisCC The current context instance data.
3831 * @param fGso Whether this is a GSO frame or not.
3832 */
3833DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fGso)
3834{
3835 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3836 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
3837 e1kXmitFreeBuf(pThis, pThisCC);
3838 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
3839
3840 /*
3841 * Allocate the buffer.
3842 */
3843 PPDMSCATTERGATHER pSg;
3844 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3845 {
3846 if (pThis->cbTxAlloc == 0)
3847 {
3848 /* Zero packet, no need for the buffer */
3849 return VINF_SUCCESS;
3850 }
3851
3852 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3853 if (RT_UNLIKELY(!pDrv))
3854 return VERR_NET_DOWN;
3855 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3856 if (RT_FAILURE(rc))
3857 {
3858 /* Suspend TX as we are out of buffers atm */
3859 STATUS |= STATUS_TXOFF;
3860 return rc;
3861 }
3862 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3863 pThis->szPrf, pThis->cbTxAlloc,
3864 pThis->fVTag ? "VLAN " : "",
3865 pThis->fGSO ? "GSO " : ""));
3866 }
3867 else
3868 {
3869 /* Create a loopback using the fallback buffer and preallocated SG. */
3870 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3871 pSg = &pThis->uTxFallback.Sg;
3872 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3873 pSg->cbUsed = 0;
3874 pSg->cbAvailable = sizeof(pThis->aTxPacketFallback);
3875 pSg->pvAllocator = pThis;
3876 pSg->pvUser = NULL; /* No GSO here. */
3877 pSg->cSegs = 1;
3878 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3879 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3880 }
3881 pThis->cbTxAlloc = 0;
3882
3883 pThisCC->CTX_SUFF(pTxSg) = pSg;
3884 return VINF_SUCCESS;
3885}
3886#endif /* E1K_WITH_TXD_CACHE */
3887
3888/**
3889 * Checks if it's a GSO buffer or not.
3890 *
3891 * @returns true / false.
3892 * @param pTxSg The scatter / gather buffer.
3893 */
3894DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3895{
3896#if 0
3897 if (!pTxSg)
3898 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3899 if (pTxSg && !pTxSg->pvUser)
3900 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3901#endif
3902 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3903}
3904
3905#ifndef E1K_WITH_TXD_CACHE
3906/**
3907 * Load transmit descriptor from guest memory.
3908 *
3909 * @param pDevIns The device instance.
3910 * @param pDesc Pointer to descriptor union.
3911 * @param addr Physical address in guest context.
3912 * @thread E1000_TX
3913 */
3914DECLINLINE(void) e1kLoadDesc(PPDMDEVINS pDevIns, E1KTXDESC *pDesc, RTGCPHYS addr)
3915{
3916 PDMDevHlpPhysRead(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
3917}
3918#else /* E1K_WITH_TXD_CACHE */
3919/**
3920 * Load transmit descriptors from guest memory.
3921 *
3922 * We need two physical reads in case the tail wrapped around the end of TX
3923 * descriptor ring.
3924 *
3925 * @returns the actual number of descriptors fetched.
3926 * @param pDevIns The device instance.
3927 * @param pThis The device state structure.
3928 * @thread E1000_TX
3929 */
3930DECLINLINE(unsigned) e1kTxDLoadMore(PPDMDEVINS pDevIns, PE1KSTATE pThis)
3931{
3932 Assert(pThis->iTxDCurrent == 0);
3933 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
3934 unsigned nDescsAvailable = e1kGetTxLen(pThis) - pThis->nTxDFetched;
3935 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
3936 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3937 unsigned nFirstNotLoaded = (TDH + pThis->nTxDFetched) % nDescsTotal;
3938 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
3939 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3940 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
3941 nFirstNotLoaded, nDescsInSingleRead));
3942 if (nDescsToFetch == 0)
3943 return 0;
3944 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
3945 PDMDevHlpPhysRead(pDevIns,
3946 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3947 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3948 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3949 pThis->szPrf, nDescsInSingleRead,
3950 TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
3951 nFirstNotLoaded, TDLEN, TDH, TDT));
3952 if (nDescsToFetch > nDescsInSingleRead)
3953 {
3954 PDMDevHlpPhysRead(pDevIns,
3955 ((uint64_t)TDBAH << 32) + TDBAL,
3956 pFirstEmptyDesc + nDescsInSingleRead,
3957 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3958 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3959 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
3960 TDBAH, TDBAL));
3961 }
3962 pThis->nTxDFetched += nDescsToFetch;
3963 return nDescsToFetch;
3964}
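/*
 * Worked example of the wrap-around handling above: with nDescsTotal = 8,
 * TDH = 6, an empty cache and 4 descriptors to fetch, nFirstNotLoaded = 6 and
 * nDescsInSingleRead = 2, so the first PDMDevHlpPhysRead() covers ring entries
 * 6..7 and the second one covers entries 0..1 starting again at TDBAL.
 */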
3965
3966/**
3967 * Load transmit descriptors from guest memory only if there are no loaded
3968 * descriptors.
3969 *
3970 * @returns true if there are descriptors in cache.
3971 * @param pDevIns The device instance.
3972 * @param pThis The device state structure.
3973 * @thread E1000_TX
3974 */
3975DECLINLINE(bool) e1kTxDLazyLoad(PPDMDEVINS pDevIns, PE1KSTATE pThis)
3976{
3977 if (pThis->nTxDFetched == 0)
3978 return e1kTxDLoadMore(pDevIns, pThis) != 0;
3979 return true;
3980}
3981#endif /* E1K_WITH_TXD_CACHE */
3982
3983/**
3984 * Write back transmit descriptor to guest memory.
3985 *
3986 * @param pDevIns The device instance.
3987 * @param pThis The device state structure.
3988 * @param pDesc Pointer to descriptor union.
3989 * @param addr Physical address in guest context.
3990 * @thread E1000_TX
3991 */
3992DECLINLINE(void) e1kWriteBackDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3993{
3994 /* Only the last half of the descriptor has to be written back. */
3995 e1kPrintTDesc(pThis, pDesc, "^^^");
3996 PDMDevHlpPCIPhysWrite(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
3997}
3998
3999/**
4000 * Transmit complete frame.
4001 *
4002 * @remarks We skip the FCS since we're not responsible for sending anything to
4003 * a real ethernet wire.
4004 *
4005 * @param pDevIns The device instance.
4006 * @param pThis The device state structure.
4007 * @param pThisCC The current context instance data.
4008 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4009 * @thread E1000_TX
4010 */
4011static void e1kTransmitFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fOnWorkerThread)
4012{
4013 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
4014 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
4015 Assert(!pSg || pSg->cSegs == 1);
4016
4017 if (cbFrame > 70) /* unqualified guess */
4018 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
4019
4020#ifdef E1K_INT_STATS
4021 if (cbFrame <= 1514)
4022 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
4023 else if (cbFrame <= 2962)
4024 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
4025 else if (cbFrame <= 4410)
4026 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
4027 else if (cbFrame <= 5858)
4028 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
4029 else if (cbFrame <= 7306)
4030 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
4031 else if (cbFrame <= 8754)
4032 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
4033 else if (cbFrame <= 16384)
4034 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
4035 else if (cbFrame <= 32768)
4036 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
4037 else
4038 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
4039#endif /* E1K_INT_STATS */
4040
4041 /* Add VLAN tag */
4042 if (cbFrame > 12 && pThis->fVTag)
4043 {
4044 E1kLog3(("%s Inserting VLAN tag %08x\n",
4045 pThis->szPrf, RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
4046 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
4047 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
4048 pSg->cbUsed += 4;
4049 cbFrame += 4;
4050 Assert(pSg->cbUsed == cbFrame);
4051 Assert(pSg->cbUsed <= pSg->cbAvailable);
4052 }
4053/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
4054 "%.*Rhxd\n"
4055 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
4056 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
4057
4058 /* Update the stats */
4059 E1K_INC_CNT32(TPT);
4060 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
4061 E1K_INC_CNT32(GPTC);
4062 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
4063 E1K_INC_CNT32(BPTC);
4064 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
4065 E1K_INC_CNT32(MPTC);
4066 /* Update octet transmit counter */
4067 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
4068 if (pThisCC->CTX_SUFF(pDrv))
4069 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
4070 if (cbFrame == 64)
4071 E1K_INC_CNT32(PTC64);
4072 else if (cbFrame < 128)
4073 E1K_INC_CNT32(PTC127);
4074 else if (cbFrame < 256)
4075 E1K_INC_CNT32(PTC255);
4076 else if (cbFrame < 512)
4077 E1K_INC_CNT32(PTC511);
4078 else if (cbFrame < 1024)
4079 E1K_INC_CNT32(PTC1023);
4080 else
4081 E1K_INC_CNT32(PTC1522);
4082
4083 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
4084
4085 /*
4086 * Dump and send the packet.
4087 */
4088 int rc = VERR_NET_DOWN;
4089 if (pSg && pSg->pvAllocator != pThis)
4090 {
4091 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
4092
4093 pThisCC->CTX_SUFF(pTxSg) = NULL;
4094 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
4095 if (pDrv)
4096 {
4097 /* Release critical section to avoid deadlock in CanReceive */
4098 //e1kCsLeave(pThis);
4099 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4100 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
4101 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4102 //e1kCsEnter(pThis, RT_SRC_POS);
4103 }
4104 }
4105 else if (pSg)
4106 {
4107 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
4108 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
4109
4110 /** @todo do we actually need to check that we're in loopback mode here? */
4111 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
4112 {
4113 E1KRXDST status;
4114 RT_ZERO(status);
4115 status.fPIF = true;
4116 e1kHandleRxPacket(pDevIns, pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
4117 rc = VINF_SUCCESS;
4118 }
4119 e1kXmitFreeBuf(pThis, pThisCC);
4120 }
4121 else
4122 rc = VERR_NET_DOWN;
4123 if (RT_FAILURE(rc))
4124 {
4125 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
4126 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
4127 }
4128
4129 pThis->led.Actual.s.fWriting = 0;
4130}
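/*
 * For illustration only -- a minimal sketch (not part of the device code) of the
 * same 802.1Q tag insertion that e1kTransmitFrame() performs in place on the
 * scatter buffer: the bytes following the two MAC addresses are shifted by 4 and
 * a TPID/TCI pair is written at offset 12. The explicit big-endian stores assume
 * the standard TPID value 0x8100, whereas the real code takes both the TPID (VET)
 * and the TCI from device state; the caller must have reserved 4 spare bytes.
 */
#if 0
static void sketchInsertVlanTag(uint8_t *pbFrame, uint32_t *pcbFrame, uint16_t uTci)
{
    /* Make room after the 6+6 bytes of destination and source MAC addresses. */
    memmove(pbFrame + 16, pbFrame + 12, *pcbFrame - 12);
    pbFrame[12] = 0x81;                 /* TPID, network byte order */
    pbFrame[13] = 0x00;
    pbFrame[14] = (uint8_t)(uTci >> 8); /* TCI, network byte order  */
    pbFrame[15] = (uint8_t)uTci;
    *pcbFrame  += 4;
}
#endif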
4131
4132/**
4133 * Compute and write internet checksum (e1kCSum16) at the specified offset.
4134 *
4135 * @param pThis The device state structure.
4136 * @param pPkt Pointer to the packet.
4137 * @param u16PktLen Total length of the packet.
4138 * @param cso Offset in packet to write checksum at.
4139 * @param css Offset in packet to start computing
4140 * checksum from.
4141 * @param cse Offset in packet to stop computing
4142 * checksum at.
4143 * @thread E1000_TX
4144 */
4145static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
4146{
4147 RT_NOREF1(pThis);
4148
4149 if (css >= u16PktLen)
4150 {
4151 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
4152 pThis->szPrf, css, u16PktLen));
4153 return;
4154 }
4155
4156 if (cso >= u16PktLen - 1)
4157 {
4158 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
4159 pThis->szPrf, cso, u16PktLen));
4160 return;
4161 }
4162
4163 if (cse == 0)
4164 cse = u16PktLen - 1;
4165 else if (cse < css)
4166 {
4167 E1kLog2(("%s css(%X) is greater than cse(%X), checksum is not inserted\n",
4168 pThis->szPrf, css, cse));
4169 return;
4170 }
4171
4172 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
4173 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
4174 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
4175 *(uint16_t*)(pPkt + cso) = u16ChkSum;
4176}
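/*
 * For illustration only -- e1kCSum16() is defined earlier in this file; the
 * sketch below (not part of the device code) shows the classic RFC 1071
 * Internet checksum it is expected to compute over the [css, cse] byte range.
 * Byte-order handling in the real helper may differ; this version sums
 * big-endian 16-bit words and returns the result as a plain number.
 */
#if 0
static uint16_t sketchCSum16(uint8_t const *pb, size_t cb)
{
    uint32_t uSum = 0;
    while (cb > 1)
    {
        uSum += (uint32_t)pb[0] << 8 | pb[1];  /* next 16-bit big-endian word    */
        pb   += 2;
        cb   -= 2;
    }
    if (cb)
        uSum += (uint32_t)pb[0] << 8;          /* odd trailing byte, zero padded */
    while (uSum >> 16)
        uSum = (uSum >> 16) + (uSum & 0xffff); /* fold the carries back in       */
    return (uint16_t)~uSum;                    /* one's complement of the sum    */
}
#endif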
4177
4178/**
4179 * Add a part of descriptor's buffer to transmit frame.
4180 *
4181 * @remarks data.u64BufAddr is used unconditionally for both data
4182 * and legacy descriptors since it is identical to
4183 * legacy.u64BufAddr.
4184 *
4185 * @param pDevIns The device instance.
4186 * @param pThis The device state structure.
4187 * @param PhysAddr Physical address of the descriptor's data buffer.
4188 * @param u16Len Length of buffer to the end of segment.
4189 * @param fSend Force packet sending.
4190 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4191 * @thread E1000_TX
4192 */
4193#ifndef E1K_WITH_TXD_CACHE
4194static void e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4195{
4196 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4197 /* TCP header being transmitted */
4198 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4199 /* IP header being transmitted */
4200 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4201
4202 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4203 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4204 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4205
4206 PDMDevHlpPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4207 E1kLog3(("%s Dump of the segment:\n"
4208 "%.*Rhxd\n"
4209 "%s --- End of dump ---\n",
4210 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4211 pThis->u16TxPktLen += u16Len;
4212 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4213 pThis->szPrf, pThis->u16TxPktLen));
4214 if (pThis->u16HdrRemain > 0)
4215 {
4216 /* The header was not complete, check if it is now */
4217 if (u16Len >= pThis->u16HdrRemain)
4218 {
4219 /* The rest is payload */
4220 u16Len -= pThis->u16HdrRemain;
4221 pThis->u16HdrRemain = 0;
4222 /* Save partial checksum and flags */
4223 pThis->u32SavedCsum = pTcpHdr->chksum;
4224 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4225 /* Clear FIN and PSH flags now and set them only in the last segment */
4226 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4227 }
4228 else
4229 {
4230 /* Still not */
4231 pThis->u16HdrRemain -= u16Len;
4232 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4233 pThis->szPrf, pThis->u16HdrRemain));
4234 return;
4235 }
4236 }
4237
4238 pThis->u32PayRemain -= u16Len;
4239
4240 if (fSend)
4241 {
4242 /* Leave ethernet header intact */
4243 /* IP Total Length = payload + headers - ethernet header */
4244 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4245 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4246 pThis->szPrf, ntohs(pIpHdr->total_len)));
4247 /* Update IP Checksum */
4248 pIpHdr->chksum = 0;
4249 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4250 pThis->contextTSE.ip.u8CSO,
4251 pThis->contextTSE.ip.u8CSS,
4252 pThis->contextTSE.ip.u16CSE);
4253
4254 /* Update TCP flags */
4255 /* Restore original FIN and PSH flags for the last segment */
4256 if (pThis->u32PayRemain == 0)
4257 {
4258 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4259 E1K_INC_CNT32(TSCTC);
4260 }
4261 /* Add TCP length to partial pseudo header sum */
4262 uint32_t csum = pThis->u32SavedCsum
4263 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4264 while (csum >> 16)
4265 csum = (csum >> 16) + (csum & 0xFFFF);
4266 pTcpHdr->chksum = csum;
4267 /* Compute final checksum */
4268 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4269 pThis->contextTSE.tu.u8CSO,
4270 pThis->contextTSE.tu.u8CSS,
4271 pThis->contextTSE.tu.u16CSE);
4272
4273 /*
4274 * Transmit it. If we've used the SG already, allocate a new one before
4275 * we copy the data.
4276 */
4277 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4278 if (!pTxSg)
4279 {
4280 e1kXmitAllocBuf(pThis, pThisCC, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4281 pTxSg = pThisCC->CTX_SUFF(pTxSg);
4282 }
4283 if (pTxSg)
4284 {
4285 Assert(pThis->u16TxPktLen <= pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4286 Assert(pTxSg->cSegs == 1);
4287 if (pTxSg->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4288 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4289 pTxSg->cbUsed = pThis->u16TxPktLen;
4290 pTxSg->aSegs[0].cbSeg = pThis->u16TxPktLen;
4291 }
4292 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4293
4294 /* Update Sequence Number */
4295 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4296 - pThis->contextTSE.dw3.u8HDRLEN);
4297 /* Increment IP identification */
4298 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4299 }
4300}
4301#else /* E1K_WITH_TXD_CACHE */
4302static int e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4303{
4304 int rc = VINF_SUCCESS;
4305 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4306 /* TCP header being transmitted */
4307 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4308 /* IP header being transmitted */
4309 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4310
4311 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4312 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4313 AssertReturn(pThis->u32PayRemain + pThis->u16HdrRemain > 0, VINF_SUCCESS);
4314
4315 if (pThis->u16TxPktLen + u16Len <= sizeof(pThis->aTxPacketFallback))
4316 PDMDevHlpPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4317 else
4318 E1kLog(("%s e1kFallbackAddSegment: writing beyond aTxPacketFallback, u16TxPktLen=%d(0x%x) + u16Len=%d(0x%x) > %d\n",
4319 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, u16Len, u16Len, sizeof(pThis->aTxPacketFallback)));
4320 E1kLog3(("%s Dump of the segment:\n"
4321 "%.*Rhxd\n"
4322 "%s --- End of dump ---\n",
4323 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4324 pThis->u16TxPktLen += u16Len;
4325 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4326 pThis->szPrf, pThis->u16TxPktLen));
4327 if (pThis->u16HdrRemain > 0)
4328 {
4329 /* The header was not complete, check if it is now */
4330 if (u16Len >= pThis->u16HdrRemain)
4331 {
4332 /* The rest is payload */
4333 u16Len -= pThis->u16HdrRemain;
4334 pThis->u16HdrRemain = 0;
4335 /* Save partial checksum and flags */
4336 pThis->u32SavedCsum = pTcpHdr->chksum;
4337 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4338 /* Clear FIN and PSH flags now and set them only in the last segment */
4339 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4340 }
4341 else
4342 {
4343 /* Still not */
4344 pThis->u16HdrRemain -= u16Len;
4345 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4346 pThis->szPrf, pThis->u16HdrRemain));
4347 return rc;
4348 }
4349 }
4350
4351 if (u16Len > pThis->u32PayRemain)
4352 pThis->u32PayRemain = 0;
4353 else
4354 pThis->u32PayRemain -= u16Len;
4355
4356 if (fSend)
4357 {
4358 /* Leave ethernet header intact */
4359 /* IP Total Length = payload + headers - ethernet header */
4360 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4361 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4362 pThis->szPrf, ntohs(pIpHdr->total_len)));
4363 /* Update IP Checksum */
4364 pIpHdr->chksum = 0;
4365 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4366 pThis->contextTSE.ip.u8CSO,
4367 pThis->contextTSE.ip.u8CSS,
4368 pThis->contextTSE.ip.u16CSE);
4369
4370 /* Update TCP flags */
4371 /* Restore original FIN and PSH flags for the last segment */
4372 if (pThis->u32PayRemain == 0)
4373 {
4374 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4375 E1K_INC_CNT32(TSCTC);
4376 }
4377 /* Add TCP length to partial pseudo header sum */
4378 uint32_t csum = pThis->u32SavedCsum
4379 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4380 while (csum >> 16)
4381 csum = (csum >> 16) + (csum & 0xFFFF);
4382 pTcpHdr->chksum = csum;
4383 /* Compute final checksum */
4384 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4385 pThis->contextTSE.tu.u8CSO,
4386 pThis->contextTSE.tu.u8CSS,
4387 pThis->contextTSE.tu.u16CSE);
4388
4389 /*
4390 * Transmit it.
4391 */
4392 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4393 if (pTxSg)
4394 {
4395 /* Make sure the packet fits into the allocated buffer */
4396 size_t cbCopy = RT_MIN(pThis->u16TxPktLen, pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4397#ifdef DEBUG
4398 if (pThis->u16TxPktLen > pTxSg->cbAvailable)
4399 E1kLog(("%s e1kFallbackAddSegment: truncating packet, u16TxPktLen=%d(0x%x) > cbAvailable=%d(0x%x)\n",
4400 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, pTxSg->cbAvailable, pTxSg->cbAvailable));
4401#endif /* DEBUG */
4402 Assert(pTxSg->cSegs == 1);
4403 if (pTxSg->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4404 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, cbCopy);
4405 pTxSg->cbUsed = cbCopy;
4406 pTxSg->aSegs[0].cbSeg = cbCopy;
4407 }
4408 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4409
4410 /* Update Sequence Number */
4411 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4412 - pThis->contextTSE.dw3.u8HDRLEN);
4413 /* Increment IP identification */
4414 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4415
4416 /* Allocate new buffer for the next segment. */
4417 if (pThis->u32PayRemain)
4418 {
4419 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4420 pThis->contextTSE.dw3.u16MSS)
4421 + pThis->contextTSE.dw3.u8HDRLEN
4422 + (pThis->fVTag ? 4 : 0);
4423 rc = e1kXmitAllocBuf(pThis, pThisCC, false /* fGSO */);
4424 }
4425 }
4426
4427 return rc;
4428}
4429#endif /* E1K_WITH_TXD_CACHE */
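/*
 * Worked TSE example for the fallback segmentation above: with HDRLEN = 54,
 * MSS = 1460 and a 2920-byte TCP payload, u16MaxPktLen is 54 + 1460 = 1514, so
 * the payload goes out as two full-sized segments; each time a segment is sent
 * the TCP sequence number advances by (u16TxPktLen - HDRLEN) = 1460 and the IP
 * identification field by 1. The carry-folding loop turns, e.g., an
 * intermediate sum of 0x1FFFE into 0x1 + 0xFFFE = 0xFFFF.
 */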
4430
4431#ifndef E1K_WITH_TXD_CACHE
4432/**
4433 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4434 * frame.
4435 *
4436 * We construct the frame in the fallback buffer first and then copy it to the SG
4437 * buffer before passing it down to the network driver code.
4438 *
4439 * @returns true if the frame should be transmitted, false if not.
4440 *
4441 * @param pThis The device state structure.
4442 * @param pDesc Pointer to the descriptor to transmit.
4443 * @param cbFragment Length of descriptor's buffer.
4444 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4445 * @thread E1000_TX
4446 */
4447 static bool e1kFallbackAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4448{
4449 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4450 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4451 Assert(pDesc->data.cmd.fTSE);
4452 Assert(!e1kXmitIsGsoBuf(pTxSg));
4453
4454 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4455 Assert(u16MaxPktLen != 0);
4456 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4457
4458 /*
4459 * Carve out segments.
4460 */
4461 do
4462 {
4463 /* Calculate how many bytes we have left in this TCP segment */
4464 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4465 if (cb > cbFragment)
4466 {
4467 /* This descriptor fits completely into current segment */
4468 cb = cbFragment;
4469 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4470 }
4471 else
4472 {
4473 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4474 /*
4475 * Rewind the packet tail pointer to the beginning of payload,
4476 * so we continue writing right beyond the header.
4477 */
4478 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4479 }
4480
4481 pDesc->data.u64BufAddr += cb;
4482 cbFragment -= cb;
4483 } while (cbFragment > 0);
4484
4485 if (pDesc->data.cmd.fEOP)
4486 {
4487 /* End of packet, next segment will contain header. */
4488 if (pThis->u32PayRemain != 0)
4489 E1K_INC_CNT32(TSCTFC);
4490 pThis->u16TxPktLen = 0;
4491 e1kXmitFreeBuf(pThis, PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4492 }
4493
4494 return false;
4495}
4496#else /* E1K_WITH_TXD_CACHE */
4497/**
4498 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4499 * frame.
4500 *
4501 * We construct the frame in the fallback buffer first and then copy it to the SG
4502 * buffer before passing it down to the network driver code.
4503 *
4504 * @returns VBox status code.
4505 *
4506 * @param pDevIns The device instance.
4507 * @param pThis The device state structure.
4508 * @param pDesc Pointer to the descriptor to transmit.
4510 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4511 * @thread E1000_TX
4512 */
4513static int e1kFallbackAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4514{
4515#ifdef VBOX_STRICT
4516 PPDMSCATTERGATHER pTxSg = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC)->CTX_SUFF(pTxSg);
4517 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4518 Assert(pDesc->data.cmd.fTSE);
4519 Assert(!e1kXmitIsGsoBuf(pTxSg));
4520#endif
4521
4522 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4523 /* We cannot produce empty packets, ignore all TX descriptors (see @bugref{9571}) */
4524 if (u16MaxPktLen == 0)
4525 return VINF_SUCCESS;
4526
4527 /*
4528 * Carve out segments.
4529 */
4530 int rc = VINF_SUCCESS;
4531 do
4532 {
4533 /* Calculate how many bytes we have left in this TCP segment */
4534 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4535 if (cb > pDesc->data.cmd.u20DTALEN)
4536 {
4537 /* This descriptor fits completely into current segment */
4538 cb = pDesc->data.cmd.u20DTALEN;
4539 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4540 }
4541 else
4542 {
4543 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4544 /*
4545 * Rewind the packet tail pointer to the beginning of payload,
4546 * so we continue writing right beyond the header.
4547 */
4548 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4549 }
4550
4551 pDesc->data.u64BufAddr += cb;
4552 pDesc->data.cmd.u20DTALEN -= cb;
4553 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4554
4555 if (pDesc->data.cmd.fEOP)
4556 {
4557 /* End of packet, next segment will contain header. */
4558 if (pThis->u32PayRemain != 0)
4559 E1K_INC_CNT32(TSCTFC);
4560 pThis->u16TxPktLen = 0;
4561 e1kXmitFreeBuf(pThis, PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4562 }
4563
4564 return VINF_SUCCESS; /// @todo consider rc;
4565}
4566#endif /* E1K_WITH_TXD_CACHE */
4567
4568
4569/**
4570 * Add descriptor's buffer to transmit frame.
4571 *
4572 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4573 * TSE frames we cannot handle as GSO.
4574 *
4575 * @returns true on success, false on failure.
4576 *
4577 * @param pDevIns The device instance.
4578 * @param   pThis           The device state structure.
4579 * @param   pThisCC         The current context instance data.
4580 * @param PhysAddr The physical address of the descriptor buffer.
4581 * @param cbFragment Length of descriptor's buffer.
4582 * @thread E1000_TX
4583 */
4584static bool e1kAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, RTGCPHYS PhysAddr, uint32_t cbFragment)
4585{
4586 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4587 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4588 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4589
4590 LogFlow(("%s e1kAddToFrame: ENTER cbFragment=%d u16TxPktLen=%d cbUsed=%d cbAvailable=%d fGSO=%s\n",
4591 pThis->szPrf, cbFragment, pThis->u16TxPktLen, pTxSg->cbUsed, pTxSg->cbAvailable,
4592 fGso ? "true" : "false"));
4593 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4594 {
4595 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4596 return false;
4597 }
4598 if (RT_UNLIKELY( cbNewPkt > pTxSg->cbAvailable ))
4599 {
4600        E1kLog(("%s Transmit packet does not fit into the allocated buffer: %u > %u(available)\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4601 return false;
4602 }
4603
4604 if (RT_LIKELY(pTxSg))
4605 {
4606 Assert(pTxSg->cSegs == 1);
4607 if (pTxSg->cbUsed != pThis->u16TxPktLen)
4608 E1kLog(("%s e1kAddToFrame: pTxSg->cbUsed=%d(0x%x) != u16TxPktLen=%d(0x%x)\n",
4609 pThis->szPrf, pTxSg->cbUsed, pTxSg->cbUsed, pThis->u16TxPktLen, pThis->u16TxPktLen));
4610
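        /* Append the fragment from guest memory right after the bytes already accumulated in the single SG segment. */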
4611 PDMDevHlpPhysRead(pDevIns, PhysAddr, (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4612
4613 pTxSg->cbUsed = cbNewPkt;
4614 }
4615 pThis->u16TxPktLen = cbNewPkt;
4616
4617 return true;
4618}
4619
4620
4621/**
4622 * Write the descriptor back to guest memory and notify the guest.
4623 *
 * @param   pDevIns         The device instance.
4624 * @param   pThis           The device state structure.
4625 * @param   pDesc           Pointer to the descriptor that has been transmitted.
4626 * @param addr Physical address of the descriptor in guest memory.
4627 * @thread E1000_TX
4628 */
4629static void e1kDescReport(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4630{
4631 /*
4632     * We do not actually implement descriptor write-back bursting; descriptors are
4633     * written back one by one as they are processed.
4634 */
4635 /* Let's pretend we process descriptors. Write back with DD set. */
4636 /*
4637     * Prior to r71586 we tried to accommodate the case when write-back bursts
4638     * are enabled without actually implementing bursting by writing back all
4639     * descriptors, even the ones that do not have RS set. This caused kernel
4640     * panics with Linux SMP kernels, as the e1000 driver tried to free the skb
4641     * associated with a written-back descriptor if it happened to be a context
4642     * descriptor, since context descriptors have no skb associated with them.
4643     * Starting from r71586 we write back only the descriptors with RS set,
4644     * which is a little bit different from what the real hardware does in
4645     * case there is a chain of data descriptors where some of them have RS set
4646     * and others do not. It is a very uncommon scenario, imho.
4647 * We need to check RPS as well since some legacy drivers use it instead of
4648 * RS even with newer cards.
4649 */
4650 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4651 {
4652 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4653 e1kWriteBackDesc(pDevIns, pThis, pDesc, addr);
4654 if (pDesc->legacy.cmd.fEOP)
4655 {
4656//#ifdef E1K_USE_TX_TIMERS
4657 if (pThis->fTidEnabled && pDesc->legacy.cmd.fIDE)
4658 {
4659 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4660 //if (pThis->fIntRaised)
4661 //{
4662 // /* Interrupt is already pending, no need for timers */
4663 // ICR |= ICR_TXDW;
4664 //}
4665 //else {
4666                /* Arm the timer to fire in TIDV usec (discard .024) */
4667 e1kArmTimer(pDevIns, pThis, pThis->hTIDTimer, TIDV);
4668# ifndef E1K_NO_TAD
4669 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4670 E1kLog2(("%s Checking if TAD timer is running\n",
4671 pThis->szPrf));
4672 if (TADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hTADTimer))
4673 e1kArmTimer(pDevIns, pThis, pThis->hTADTimer, TADV);
4674# endif /* E1K_NO_TAD */
4675 }
4676 else
4677 {
4678 if (pThis->fTidEnabled)
4679 {
4680 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4681 pThis->szPrf));
4682 /* Cancel both timers if armed and fire immediately. */
4683# ifndef E1K_NO_TAD
4684 PDMDevHlpTimerStop(pDevIns, pThis->hTADTimer);
4685# endif
4686 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
4687 }
4688//#endif /* E1K_USE_TX_TIMERS */
4689 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4690 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXDW);
4691//#ifdef E1K_USE_TX_TIMERS
4692 }
4693//#endif /* E1K_USE_TX_TIMERS */
4694 }
4695 }
4696 else
4697 {
4698 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4699 }
4700}
4701
4702#ifndef E1K_WITH_TXD_CACHE
4703
4704/**
4705 * Process Transmit Descriptor.
4706 *
4707 * E1000 supports three types of transmit descriptors:
4708 * - legacy data descriptors of older format (context-less).
4709 * - data the same as legacy but providing new offloading capabilities.
4710 * - context sets up the context for following data descriptors.
4711 *
4712 * @param pDevIns The device instance.
4713 * @param pThis The device state structure.
4714 * @param pThisCC The current context instance data.
4715 * @param pDesc Pointer to descriptor union.
4716 * @param addr Physical address of descriptor in guest memory.
4717 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4718 * @thread E1000_TX
4719 */
4720static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
4721 RTGCPHYS addr, bool fOnWorkerThread)
4722{
4723 int rc = VINF_SUCCESS;
4724 uint32_t cbVTag = 0;
4725
4726 e1kPrintTDesc(pThis, pDesc, "vvv");
4727
4728//#ifdef E1K_USE_TX_TIMERS
4729 if (pThis->fTidEnabled)
4730 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
4731//#endif /* E1K_USE_TX_TIMERS */
4732
4733 switch (e1kGetDescType(pDesc))
4734 {
4735 case E1K_DTYP_CONTEXT:
4736 if (pDesc->context.dw2.fTSE)
4737 {
4738 pThis->contextTSE = pDesc->context;
4739 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4740 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4741 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4742 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4743 }
4744 else
4745 {
4746 pThis->contextNormal = pDesc->context;
4747 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4748 }
4749 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4750 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4751 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4752 pDesc->context.ip.u8CSS,
4753 pDesc->context.ip.u8CSO,
4754 pDesc->context.ip.u16CSE,
4755 pDesc->context.tu.u8CSS,
4756 pDesc->context.tu.u8CSO,
4757 pDesc->context.tu.u16CSE));
4758 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4759            e1kDescReport(pDevIns, pThis, pDesc, addr);
4760 break;
4761
4762 case E1K_DTYP_DATA:
4763 {
4764 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4765 {
4766                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4767 /** @todo Same as legacy when !TSE. See below. */
4768 break;
4769 }
4770 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4771 &pThis->StatTxDescTSEData:
4772 &pThis->StatTxDescData);
4773 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4774 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4775
4776 /*
4777 * The last descriptor of non-TSE packet must contain VLE flag.
4778 * TSE packets have VLE flag in the first descriptor. The later
4779 * case is taken care of a bit later when cbVTag gets assigned.
4780 *
4781 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4782 */
4783 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4784 {
4785 pThis->fVTag = pDesc->data.cmd.fVLE;
4786 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4787 }
4788 /*
4789 * First fragment: Allocate new buffer and save the IXSM and TXSM
4790 * packet options as these are only valid in the first fragment.
4791 */
4792 if (pThis->u16TxPktLen == 0)
4793 {
4794 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4795 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4796 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4797 pThis->fIPcsum ? " IP" : "",
4798 pThis->fTCPcsum ? " TCP/UDP" : ""));
4799 if (pDesc->data.cmd.fTSE)
4800 {
4801 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4802 pThis->fVTag = pDesc->data.cmd.fVLE;
4803 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4804 cbVTag = pThis->fVTag ? 4 : 0;
4805 }
4806 else if (pDesc->data.cmd.fEOP)
4807 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4808 else
4809 cbVTag = 4;
4810 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4811 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4812 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4813 true /*fExactSize*/, true /*fGso*/);
4814 else if (pDesc->data.cmd.fTSE)
4815                    rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4816 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4817 else
4818 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->data.cmd.u20DTALEN + cbVTag,
4819 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4820
4821 /**
4822             * @todo Perhaps it is not that simple for GSO packets! We may
4823 * need to unwind some changes.
4824 */
4825 if (RT_FAILURE(rc))
4826 {
4827 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4828 break;
4829 }
4830                /** @todo Is there any way to indicate errors other than collisions? Like
4831 * VERR_NET_DOWN. */
4832 }
4833
4834 /*
4835 * Add the descriptor data to the frame. If the frame is complete,
4836 * transmit it and reset the u16TxPktLen field.
4837 */
4838 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
4839 {
4840 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4841 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4842 if (pDesc->data.cmd.fEOP)
4843 {
4844 if ( fRc
4845 && pThisCC->CTX_SUFF(pTxSg)
4846 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4847 {
4848 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4849 E1K_INC_CNT32(TSCTC);
4850 }
4851 else
4852 {
4853 if (fRc)
4854 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4855 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
4856 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4857                        e1kXmitFreeBuf(pThis, pThisCC);
4858 E1K_INC_CNT32(TSCTFC);
4859 }
4860 pThis->u16TxPktLen = 0;
4861 }
4862 }
4863 else if (!pDesc->data.cmd.fTSE)
4864 {
4865 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4866 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4867 if (pDesc->data.cmd.fEOP)
4868 {
4869 if (fRc && pThisCC->CTX_SUFF(pTxSg))
4870 {
4871 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
4872 if (pThis->fIPcsum)
4873 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4874 pThis->contextNormal.ip.u8CSO,
4875 pThis->contextNormal.ip.u8CSS,
4876 pThis->contextNormal.ip.u16CSE);
4877 if (pThis->fTCPcsum)
4878 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4879 pThis->contextNormal.tu.u8CSO,
4880 pThis->contextNormal.tu.u8CSS,
4881 pThis->contextNormal.tu.u16CSE);
4882 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4883 }
4884 else
4885                        e1kXmitFreeBuf(pThis, pThisCC);
4886 pThis->u16TxPktLen = 0;
4887 }
4888 }
4889 else
4890 {
4891 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4892 e1kFallbackAddToFrame(pDevIns, pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4893 }
4894
4895            e1kDescReport(pDevIns, pThis, pDesc, addr);
4896 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4897 break;
4898 }
4899
4900 case E1K_DTYP_LEGACY:
4901 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4902 {
4903 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4904 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4905 break;
4906 }
4907 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4908 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4909
4910 /* First fragment: allocate new buffer. */
4911 if (pThis->u16TxPktLen == 0)
4912 {
4913 if (pDesc->legacy.cmd.fEOP)
4914 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4915 else
4916 cbVTag = 4;
4917 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4918 /** @todo reset status bits? */
4919 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4920 if (RT_FAILURE(rc))
4921 {
4922 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4923 break;
4924 }
4925
4926                /** @todo Is there any way to indicate errors other than collisions? Like
4927 * VERR_NET_DOWN. */
4928 }
4929
4930 /* Add fragment to frame. */
4931 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4932 {
4933 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4934
4935 /* Last fragment: Transmit and reset the packet storage counter. */
4936 if (pDesc->legacy.cmd.fEOP)
4937 {
4938 pThis->fVTag = pDesc->legacy.cmd.fVLE;
4939 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4940 /** @todo Offload processing goes here. */
4941 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4942 pThis->u16TxPktLen = 0;
4943 }
4944 }
4945 /* Last fragment + failure: free the buffer and reset the storage counter. */
4946 else if (pDesc->legacy.cmd.fEOP)
4947 {
4948                e1kXmitFreeBuf(pThis, pThisCC);
4949 pThis->u16TxPktLen = 0;
4950 }
4951
4952            e1kDescReport(pDevIns, pThis, pDesc, addr);
4953 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4954 break;
4955
4956 default:
4957 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4958 pThis->szPrf, e1kGetDescType(pDesc)));
4959 break;
4960 }
4961
4962 return rc;
4963}
4964
4965#else /* E1K_WITH_TXD_CACHE */
4966
4967/**
4968 * Process Transmit Descriptor.
4969 *
4970 * E1000 supports three types of transmit descriptors:
4971 * - legacy data descriptors of older format (context-less).
4972 * - data the same as legacy but providing new offloading capabilities.
4973 * - context sets up the context for following data descriptors.
4974 *
4975 * @param pDevIns The device instance.
4976 * @param pThis The device state structure.
4977 * @param pThisCC The current context instance data.
4978 * @param pDesc Pointer to descriptor union.
4979 * @param addr Physical address of descriptor in guest memory.
4980 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4982 * @thread E1000_TX
4983 */
4984static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
4985 RTGCPHYS addr, bool fOnWorkerThread)
4986{
4987 int rc = VINF_SUCCESS;
4988
4989 e1kPrintTDesc(pThis, pDesc, "vvv");
4990
4991 if (pDesc->legacy.dw3.fDD)
4992 {
4993 E1kLog(("%s e1kXmitDesc: skipping bad descriptor ^^^\n", pThis->szPrf));
4994 e1kDescReport(pDevIns, pThis, pDesc, addr);
4995 return VINF_SUCCESS;
4996 }
4997
4998//#ifdef E1K_USE_TX_TIMERS
4999 if (pThis->fTidEnabled)
5000 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
5001//#endif /* E1K_USE_TX_TIMERS */
5002
5003 switch (e1kGetDescType(pDesc))
5004 {
5005 case E1K_DTYP_CONTEXT:
5006            /* The caller has already updated the context. */
5007 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
5008 e1kDescReport(pDevIns, pThis, pDesc, addr);
5009 break;
5010
5011 case E1K_DTYP_DATA:
5012 {
5013 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
5014 &pThis->StatTxDescTSEData:
5015 &pThis->StatTxDescData);
5016 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
5017 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5018 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
5019 {
5020                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
5021 if (pDesc->data.cmd.fEOP)
5022 {
5023 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5024 pThis->u16TxPktLen = 0;
5025 }
5026 }
5027 else
5028 {
5029 /*
5030 * Add the descriptor data to the frame. If the frame is complete,
5031 * transmit it and reset the u16TxPktLen field.
5032 */
5033 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
5034 {
5035 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
5036 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5037 if (pDesc->data.cmd.fEOP)
5038 {
5039 if ( fRc
5040 && pThisCC->CTX_SUFF(pTxSg)
5041 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
5042 {
5043 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5044 E1K_INC_CNT32(TSCTC);
5045 }
5046 else
5047 {
5048 if (fRc)
5049 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
5050 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
5051 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
5052 e1kXmitFreeBuf(pThis, pThisCC);
5053 E1K_INC_CNT32(TSCTFC);
5054 }
5055 pThis->u16TxPktLen = 0;
5056 }
5057 }
5058 else if (!pDesc->data.cmd.fTSE)
5059 {
5060 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
5061 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5062 if (pDesc->data.cmd.fEOP)
5063 {
5064 if (fRc && pThisCC->CTX_SUFF(pTxSg))
5065 {
5066 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
5067 if (pThis->fIPcsum)
5068 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5069 pThis->contextNormal.ip.u8CSO,
5070 pThis->contextNormal.ip.u8CSS,
5071 pThis->contextNormal.ip.u16CSE);
5072 if (pThis->fTCPcsum)
5073 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5074 pThis->contextNormal.tu.u8CSO,
5075 pThis->contextNormal.tu.u8CSS,
5076 pThis->contextNormal.tu.u16CSE);
5077 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5078 }
5079 else
5080 e1kXmitFreeBuf(pThis, pThisCC);
5081 pThis->u16TxPktLen = 0;
5082 }
5083 }
5084 else
5085 {
5086 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
5087 rc = e1kFallbackAddToFrame(pDevIns, pThis, pDesc, fOnWorkerThread);
5088 }
5089 }
5090 e1kDescReport(pDevIns, pThis, pDesc, addr);
5091 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5092 break;
5093 }
5094
5095 case E1K_DTYP_LEGACY:
5096 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
5097 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5098 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
5099 {
5100 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
5101 }
5102 else
5103 {
5104 /* Add fragment to frame. */
5105 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
5106 {
5107 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
5108
5109 /* Last fragment: Transmit and reset the packet storage counter. */
5110 if (pDesc->legacy.cmd.fEOP)
5111 {
5112 if (pDesc->legacy.cmd.fIC)
5113 {
5114 e1kInsertChecksum(pThis,
5115 (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
5116 pThis->u16TxPktLen,
5117 pDesc->legacy.cmd.u8CSO,
5118 pDesc->legacy.dw3.u8CSS,
5119 0);
5120 }
5121 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5122 pThis->u16TxPktLen = 0;
5123 }
5124 }
5125 /* Last fragment + failure: free the buffer and reset the storage counter. */
5126 else if (pDesc->legacy.cmd.fEOP)
5127 {
5128 e1kXmitFreeBuf(pThis, pThisCC);
5129 pThis->u16TxPktLen = 0;
5130 }
5131 }
5132 e1kDescReport(pDevIns, pThis, pDesc, addr);
5133 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5134 break;
5135
5136 default:
5137 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
5138 pThis->szPrf, e1kGetDescType(pDesc)));
5139 break;
5140 }
5141
5142 return rc;
5143}
5144
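/**
 * Update the transmit context from a context descriptor.
 *
 * Copies the descriptor into the TSE or normal context, clamps the maximum
 * segment size so that MSS + header + VLAN tag does not exceed
 * E1K_MAX_TX_PKT_SIZE, resets the remaining payload/header counters and
 * re-initializes the GSO context for TSE descriptors.
 *
 * @param   pThis       The device state structure.
 * @param   pDesc       Pointer to the context descriptor.
 * @thread  E1000_TX
 */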
5145DECLINLINE(void) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
5146{
5147 if (pDesc->context.dw2.fTSE)
5148 {
5149 pThis->contextTSE = pDesc->context;
5150 uint32_t cbMaxSegmentSize = pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + 4; /*VTAG*/
5151 if (RT_UNLIKELY(cbMaxSegmentSize > E1K_MAX_TX_PKT_SIZE))
5152 {
5153 pThis->contextTSE.dw3.u16MSS = E1K_MAX_TX_PKT_SIZE - pThis->contextTSE.dw3.u8HDRLEN - 4; /*VTAG*/
5154 LogRelMax(10, ("%s: Transmit packet is too large: %u > %u(max). Adjusted MSS to %u.\n",
5155 pThis->szPrf, cbMaxSegmentSize, E1K_MAX_TX_PKT_SIZE, pThis->contextTSE.dw3.u16MSS));
5156 }
5157 pThis->u32PayRemain = pThis->contextTSE.dw2.u20PAYLEN;
5158 pThis->u16HdrRemain = pThis->contextTSE.dw3.u8HDRLEN;
5159 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
5160 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
5161 }
5162 else
5163 {
5164 pThis->contextNormal = pDesc->context;
5165 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
5166 }
5167 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
5168 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
5169 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
5170 pDesc->context.ip.u8CSS,
5171 pDesc->context.ip.u8CSO,
5172 pDesc->context.ip.u16CSE,
5173 pDesc->context.tu.u8CSS,
5174 pDesc->context.tu.u8CSO,
5175 pDesc->context.tu.u16CSE));
5176}
5177
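/**
 * Locate a complete packet in the transmit descriptor cache.
 *
 * Walks the fetched descriptors starting at iTxDCurrent: context descriptors
 * update the transmit context, invalid and empty descriptors are skipped, and
 * data/legacy descriptors are accumulated until one with EOP is found. On
 * success cbTxAlloc is set to the buffer size required for the (first) frame.
 *
 * @returns true if a complete packet (or a run of empty descriptors to be
 *          processed as a dummy packet) was found, false otherwise.
 * @param   pThis       The device state structure.
 * @thread  E1000_TX
 */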
5178static bool e1kLocateTxPacket(PE1KSTATE pThis)
5179{
5180 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
5181 pThis->szPrf, pThis->cbTxAlloc));
5182 /* Check if we have located the packet already. */
5183 if (pThis->cbTxAlloc)
5184 {
5185 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5186 pThis->szPrf, pThis->cbTxAlloc));
5187 return true;
5188 }
5189
5190 bool fTSE = false;
5191 uint32_t cbPacket = 0;
5192
5193 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
5194 {
5195 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
5196 switch (e1kGetDescType(pDesc))
5197 {
5198 case E1K_DTYP_CONTEXT:
5199 if (cbPacket == 0)
5200 e1kUpdateTxContext(pThis, pDesc);
5201 else
5202 E1kLog(("%s e1kLocateTxPacket: ignoring a context descriptor in the middle of a packet, cbPacket=%d\n",
5203 pThis->szPrf, cbPacket));
5204 continue;
5205 case E1K_DTYP_LEGACY:
5206 /* Skip invalid descriptors. */
5207 if (cbPacket > 0 && (pThis->fGSO || fTSE))
5208 {
5209 E1kLog(("%s e1kLocateTxPacket: ignoring a legacy descriptor in the segmentation context, cbPacket=%d\n",
5210 pThis->szPrf, cbPacket));
5211 pDesc->legacy.dw3.fDD = true; /* Make sure it is skipped by processing */
5212 continue;
5213 }
5214 /* Skip empty descriptors. */
5215 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
5216 break;
5217 cbPacket += pDesc->legacy.cmd.u16Length;
5218 pThis->fGSO = false;
5219 break;
5220 case E1K_DTYP_DATA:
5221 /* Skip invalid descriptors. */
5222 if (cbPacket > 0 && (bool)pDesc->data.cmd.fTSE != fTSE)
5223 {
5224 E1kLog(("%s e1kLocateTxPacket: ignoring %sTSE descriptor in the %ssegmentation context, cbPacket=%d\n",
5225 pThis->szPrf, pDesc->data.cmd.fTSE ? "" : "non-", fTSE ? "" : "non-", cbPacket));
5226 pDesc->data.dw3.fDD = true; /* Make sure it is skipped by processing */
5227 continue;
5228 }
5229 /* Skip empty descriptors. */
5230 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
5231 break;
5232 if (cbPacket == 0)
5233 {
5234 /*
5235 * The first fragment: save IXSM and TXSM options
5236 * as these are only valid in the first fragment.
5237 */
5238 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
5239 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
5240 fTSE = pDesc->data.cmd.fTSE;
5241 /*
5242 * TSE descriptors have VLE bit properly set in
5243 * the first fragment.
5244 */
5245 if (fTSE)
5246 {
5247 pThis->fVTag = pDesc->data.cmd.fVLE;
5248 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5249 }
5250 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
5251 }
5252 cbPacket += pDesc->data.cmd.u20DTALEN;
5253 break;
5254 default:
5255 AssertMsgFailed(("Impossible descriptor type!"));
5256 }
5257 if (pDesc->legacy.cmd.fEOP)
5258 {
5259 /*
5260 * Non-TSE descriptors have VLE bit properly set in
5261 * the last fragment.
5262 */
5263 if (!fTSE)
5264 {
5265 pThis->fVTag = pDesc->data.cmd.fVLE;
5266 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5267 }
5268 /*
5269 * Compute the required buffer size. If we cannot do GSO but still
5270 * have to do segmentation we allocate the first segment only.
5271 */
5272 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5273 cbPacket :
5274 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
5275 if (pThis->fVTag)
5276 pThis->cbTxAlloc += 4;
5277 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d cbPacket=%d%s%s\n",
5278 pThis->szPrf, pThis->cbTxAlloc, cbPacket,
5279 pThis->fGSO ? " GSO" : "", fTSE ? " TSE" : ""));
5280 return true;
5281 }
5282 }
5283
5284 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5285 {
5286 /* All descriptors were empty, we need to process them as a dummy packet */
5287 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5288 pThis->szPrf, pThis->cbTxAlloc));
5289 return true;
5290 }
5291 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d cbPacket=%d\n",
5292 pThis->szPrf, pThis->cbTxAlloc, cbPacket));
5293 return false;
5294}
5295
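/**
 * Process the descriptors of the currently located packet.
 *
 * Feeds descriptors from the cache to e1kXmitDesc() one by one, advancing TDH
 * (with wrap-around) and raising ICR.TXD_LOW when the number of unprocessed
 * descriptors falls below the LWTHRESH low threshold, until the end-of-packet
 * descriptor has been handled or an error occurs.
 *
 * @returns VBox status code.
 * @param   pDevIns         The device instance.
 * @param   pThis           The device state structure.
 * @param   fOnWorkerThread Whether we're on a worker thread or an EMT.
 * @thread  E1000_TX
 */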
5296static int e1kXmitPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5297{
5298 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5299 int rc = VINF_SUCCESS;
5300
5301 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5302 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5303
5304 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5305 {
5306 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5307 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5308 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
5309 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5310 if (RT_FAILURE(rc))
5311 break;
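        /* Advance the head and wrap around at the end of the descriptor ring (TDLEN is in bytes). */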
5312 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
5313 TDH = 0;
5314 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5315 if (uLowThreshold != 0 && e1kGetTxLen(pThis) <= uLowThreshold)
5316 {
5317 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5318 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5319 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5320 }
5321 ++pThis->iTxDCurrent;
5322 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5323 break;
5324 }
5325
5326 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5327 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5328 return rc;
5329}
5330
5331#endif /* E1K_WITH_TXD_CACHE */
5332#ifndef E1K_WITH_TXD_CACHE
5333
5334/**
5335 * Transmit pending descriptors.
5336 *
5337 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5338 *
5339 * @param pDevIns The device instance.
5340 * @param pThis The E1000 state.
5341 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5342 */
5343static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5344{
5345 int rc = VINF_SUCCESS;
5346 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5347
5348 /* Check if transmitter is enabled. */
5349 if (!(TCTL & TCTL_EN))
5350 return VINF_SUCCESS;
5351 /*
5352 * Grab the xmit lock of the driver as well as the E1K device state.
5353 */
5354 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5355 if (RT_LIKELY(rc == VINF_SUCCESS))
5356 {
5357 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5358 if (pDrv)
5359 {
5360 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5361 if (RT_FAILURE(rc))
5362 {
5363 e1kCsTxLeave(pThis);
5364 return rc;
5365 }
5366 }
5367 /*
5368 * Process all pending descriptors.
5369 * Note! Do not process descriptors in locked state
5370 */
5371 while (TDH != TDT && !pThis->fLocked)
5372 {
5373 E1KTXDESC desc;
5374 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5375 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5376
5377 e1kLoadDesc(pDevIns, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5378 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5379 /* If we failed to transmit descriptor we will try it again later */
5380 if (RT_FAILURE(rc))
5381 break;
5382 if (++TDH * sizeof(desc) >= TDLEN)
5383 TDH = 0;
5384
5385 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5386 {
5387 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5388 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5389 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5390 }
5391
5392 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5393 }
5394
5395 /// @todo uncomment: pThis->uStatIntTXQE++;
5396 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5397 /*
5398 * Release the lock.
5399 */
5400 if (pDrv)
5401 pDrv->pfnEndXmit(pDrv);
5402 e1kCsTxLeave(pThis);
5403 }
5404
5405 return rc;
5406}
5407
5408#else /* E1K_WITH_TXD_CACHE */
5409
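/**
 * Dump the transmit descriptor ring and the descriptor cache to the release log.
 *
 * Used for diagnostics when no complete packet can be located in the cache.
 *
 * @param   pDevIns     The device instance.
 * @param   pThis       The device state structure.
 */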
5410static void e1kDumpTxDCache(PPDMDEVINS pDevIns, PE1KSTATE pThis)
5411{
5412 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5413 uint32_t tdh = TDH;
5414 LogRel(("E1000: -- Transmit Descriptors (%d total) --\n", cDescs));
5415 for (i = 0; i < cDescs; ++i)
5416 {
5417 E1KTXDESC desc;
5418        PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i), &desc, sizeof(desc));
5419 if (i == tdh)
5420 LogRel(("E1000: >>> "));
5421 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5422 }
5423 LogRel(("E1000: -- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5424 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
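    /* Compute the ring index of the first cached descriptor: cache entry iTxDCurrent corresponds to TDH. */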
5425 if (tdh > pThis->iTxDCurrent)
5426 tdh -= pThis->iTxDCurrent;
5427 else
5428 tdh = cDescs + tdh - pThis->iTxDCurrent;
5429 for (i = 0; i < pThis->nTxDFetched; ++i)
5430 {
5431 if (i == pThis->iTxDCurrent)
5432 LogRel(("E1000: >>> "));
5433 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5434 }
5435}
5436
5437/**
5438 * Transmit pending descriptors.
5439 *
5440 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5441 *
5442 * @param pDevIns The device instance.
5443 * @param pThis The E1000 state.
5444 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5445 */
5446static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5447{
5448 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5449 int rc = VINF_SUCCESS;
5450
5451 /* Check if transmitter is enabled. */
5452 if (!(TCTL & TCTL_EN))
5453 return VINF_SUCCESS;
5454 /*
5455 * Grab the xmit lock of the driver as well as the E1K device state.
5456 */
5457 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
5458 if (pDrv)
5459 {
5460 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5461 if (RT_FAILURE(rc))
5462 return rc;
5463 }
5464
5465 /*
5466 * Process all pending descriptors.
5467 * Note! Do not process descriptors in locked state
5468 */
5469 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5470 if (RT_LIKELY(rc == VINF_SUCCESS))
5471 {
5472 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5473 /*
5474 * fIncomplete is set whenever we try to fetch additional descriptors
5475         * for an incomplete packet. If we fail to locate a complete packet on
5476         * the next iteration we need to reset the cache or we risk getting
5477 * stuck in this loop forever.
5478 */
5479 bool fIncomplete = false;
5480 while (!pThis->fLocked && e1kTxDLazyLoad(pDevIns, pThis))
5481 {
5482 while (e1kLocateTxPacket(pThis))
5483 {
5484 fIncomplete = false;
5485 /* Found a complete packet, allocate it. */
5486 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->fGSO);
5487 /* If we're out of bandwidth we'll come back later. */
5488 if (RT_FAILURE(rc))
5489 goto out;
5490 /* Copy the packet to allocated buffer and send it. */
5491 rc = e1kXmitPacket(pDevIns, pThis, fOnWorkerThread);
5492 /* If we're out of bandwidth we'll come back later. */
5493 if (RT_FAILURE(rc))
5494 goto out;
5495 }
5496 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5497 if (RT_UNLIKELY(fIncomplete))
5498 {
5499 static bool fTxDCacheDumped = false;
5500 /*
5501 * The descriptor cache is full, but we were unable to find
5502 * a complete packet in it. Drop the cache and hope that
5503                 * the guest driver can recover from the network card error.
5504 */
5505 LogRel(("%s: No complete packets in%s TxD cache! "
5506 "Fetched=%d, current=%d, TX len=%d.\n",
5507 pThis->szPrf,
5508 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5509 pThis->nTxDFetched, pThis->iTxDCurrent,
5510 e1kGetTxLen(pThis)));
5511 if (!fTxDCacheDumped)
5512 {
5513 fTxDCacheDumped = true;
5514 e1kDumpTxDCache(pDevIns, pThis);
5515 }
5516 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5517 /*
5518 * Returning an error at this point means Guru in R0
5519 * (see @bugref{6428}).
5520 */
5521# ifdef IN_RING3
5522 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5523# else /* !IN_RING3 */
5524 rc = VINF_IOM_R3_MMIO_WRITE;
5525# endif /* !IN_RING3 */
5526 goto out;
5527 }
5528 if (u8Remain > 0)
5529 {
5530 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5531 "%d more are available\n",
5532 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5533 e1kGetTxLen(pThis) - u8Remain));
5534
5535 /*
5536 * A packet was partially fetched. Move incomplete packet to
5537 * the beginning of cache buffer, then load more descriptors.
5538 */
5539 memmove(pThis->aTxDescriptors,
5540 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5541 u8Remain * sizeof(E1KTXDESC));
5542 pThis->iTxDCurrent = 0;
5543 pThis->nTxDFetched = u8Remain;
5544 e1kTxDLoadMore(pDevIns, pThis);
5545 fIncomplete = true;
5546 }
5547 else
5548 pThis->nTxDFetched = 0;
5549 pThis->iTxDCurrent = 0;
5550 }
5551 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5552 {
5553 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5554 pThis->szPrf));
5555 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5556 }
5557out:
5558 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5559
5560 /// @todo uncomment: pThis->uStatIntTXQE++;
5561 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5562
5563 e1kCsTxLeave(pThis);
5564 }
5565
5566
5567 /*
5568 * Release the lock.
5569 */
5570 if (pDrv)
5571 pDrv->pfnEndXmit(pDrv);
5572 return rc;
5573}
5574
5575#endif /* E1K_WITH_TXD_CACHE */
5576#ifdef IN_RING3
5577
5578/**
5579 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5580 */
5581static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5582{
5583 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
5584 PE1KSTATE pThis = pThisCC->pShared;
5585 /* Resume suspended transmission */
5586 STATUS &= ~STATUS_TXOFF;
5587 e1kXmitPending(pThisCC->pDevInsR3, pThis, true /*fOnWorkerThread*/);
5588}
5589
5590/**
5591 * @callback_method_impl{FNPDMTASKDEV,
5592 * Executes e1kXmitPending at the behest of ring-0/raw-mode.}
5593 * @note Not executed on EMT.
5594 */
5595static DECLCALLBACK(void) e1kR3TxTaskCallback(PPDMDEVINS pDevIns, void *pvUser)
5596{
5597 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5598 E1kLog2(("%s e1kR3TxTaskCallback:\n", pThis->szPrf));
5599
5600 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5601 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN || rc == VERR_NET_DOWN, ("%Rrc\n", rc));
5602
5603 RT_NOREF(rc, pvUser);
5604}
5605
5606#endif /* IN_RING3 */
5607
5608/**
5609 * Write handler for Transmit Descriptor Tail register.
5610 *
5611 * @param pThis The device state structure.
5612 * @param offset Register offset in memory-mapped frame.
5613 * @param index Register index in register array.
5614 * @param value The value to store.
5615 * @param mask Used to implement partial writes (8 and 16-bit).
5617 */
5618static int e1kRegWriteTDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5619{
5620 int rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
5621
5622 /* All descriptors starting with head and not including tail belong to us. */
5623 /* Process them. */
5624 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5625 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5626
5627 /* Ignore TDT writes when the link is down. */
5628 if (TDH != TDT && (STATUS & STATUS_LU))
5629 {
5630 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
5631 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5632 pThis->szPrf, e1kGetTxLen(pThis)));
5633
5634 /* Transmit pending packets if possible, defer it if we cannot do it
5635 in the current context. */
5636#ifdef E1K_TX_DELAY
5637 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5638 if (RT_LIKELY(rc == VINF_SUCCESS))
5639 {
5640            if (!PDMDevHlpTimerIsActive(pDevIns, pThis->hTXDTimer))
5641 {
5642# ifdef E1K_INT_STATS
5643 pThis->u64ArmedAt = RTTimeNanoTS();
5644# endif
5645 e1kArmTimer(pDevIns, pThis, pThis->hTXDTimer, E1K_TX_DELAY);
5646 }
5647 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5648 e1kCsTxLeave(pThis);
5649 return rc;
5650 }
5651 /* We failed to enter the TX critical section -- transmit as usual. */
5652#endif /* E1K_TX_DELAY */
5653#ifndef IN_RING3
5654 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5655 if (!pThisCC->CTX_SUFF(pDrv))
5656 {
5657 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
5658 rc = VINF_SUCCESS;
5659 }
5660 else
5661#endif
5662 {
5663 rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5664 if (rc == VERR_TRY_AGAIN)
5665 rc = VINF_SUCCESS;
5666#ifndef IN_RING3
5667 else if (rc == VERR_SEM_BUSY)
5668 rc = VINF_IOM_R3_MMIO_WRITE;
5669#endif
5670 AssertRC(rc);
5671 }
5672 }
5673
5674 return rc;
5675}
5676
5677/**
5678 * Write handler for Multicast Table Array registers.
5679 *
5680 * @param pThis The device state structure.
5681 * @param offset Register offset in memory-mapped frame.
5682 * @param index Register index in register array.
5683 * @param value The value to store.
5684 * @thread EMT
5685 */
5686static int e1kRegWriteMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5687{
5688 RT_NOREF_PV(pDevIns);
5689 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5690 pThis->auMTA[(offset - g_aE1kRegMap[index].offset) / sizeof(pThis->auMTA[0])] = value;
5691
5692 return VINF_SUCCESS;
5693}
5694
5695/**
5696 * Read handler for Multicast Table Array registers.
5697 *
5698 * @returns VBox status code.
5699 *
5700 * @param pThis The device state structure.
5701 * @param offset Register offset in memory-mapped frame.
5702 * @param index Register index in register array.
5703 * @thread EMT
5704 */
5705static int e1kRegReadMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5706{
5707 RT_NOREF_PV(pDevIns);
5708 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5709 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5710
5711 return VINF_SUCCESS;
5712}
5713
5714/**
5715 * Write handler for Receive Address registers.
5716 *
5717 * @param pThis The device state structure.
5718 * @param offset Register offset in memory-mapped frame.
5719 * @param index Register index in register array.
5720 * @param value The value to store.
5721 * @thread EMT
5722 */
5723static int e1kRegWriteRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5724{
5725 RT_NOREF_PV(pDevIns);
5726 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5727 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5728
5729 return VINF_SUCCESS;
5730}
5731
5732/**
5733 * Read handler for Receive Address registers.
5734 *
5735 * @returns VBox status code.
5736 *
5737 * @param pThis The device state structure.
5738 * @param offset Register offset in memory-mapped frame.
5739 * @param index Register index in register array.
5740 * @thread EMT
5741 */
5742static int e1kRegReadRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5743{
5744 RT_NOREF_PV(pDevIns);
5745    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5746 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5747
5748 return VINF_SUCCESS;
5749}
5750
5751/**
5752 * Write handler for VLAN Filter Table Array registers.
5753 *
5754 * @param pThis The device state structure.
5755 * @param offset Register offset in memory-mapped frame.
5756 * @param index Register index in register array.
5757 * @param value The value to store.
5758 * @thread EMT
5759 */
5760static int e1kRegWriteVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5761{
5762 RT_NOREF_PV(pDevIns);
5763 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5764 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5765
5766 return VINF_SUCCESS;
5767}
5768
5769/**
5770 * Read handler for VLAN Filter Table Array registers.
5771 *
5772 * @returns VBox status code.
5773 *
5774 * @param pThis The device state structure.
5775 * @param offset Register offset in memory-mapped frame.
5776 * @param index Register index in register array.
5777 * @thread EMT
5778 */
5779static int e1kRegReadVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5780{
5781 RT_NOREF_PV(pDevIns);
5782    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5783 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5784
5785 return VINF_SUCCESS;
5786}
5787
5788/**
5789 * Read handler for unimplemented registers.
5790 *
5791 * Merely reports reads from unimplemented registers.
5792 *
5793 * @returns VBox status code.
5794 *
5795 * @param pThis The device state structure.
5796 * @param offset Register offset in memory-mapped frame.
5797 * @param index Register index in register array.
5798 * @thread EMT
5799 */
5800static int e1kRegReadUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5801{
5802 RT_NOREF(pDevIns, pThis, offset, index);
5803 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5804 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5805 *pu32Value = 0;
5806
5807 return VINF_SUCCESS;
5808}
5809
5810/**
5811 * Default register read handler with automatic clear operation.
5812 *
5813 * Retrieves the value of register from register array in device state structure.
5814 * Then resets all bits.
5815 *
5816 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5817 * done in the caller.
5818 *
5819 * @returns VBox status code.
5820 *
5821 * @param pThis The device state structure.
5822 * @param offset Register offset in memory-mapped frame.
5823 * @param index Register index in register array.
5824 * @thread EMT
5825 */
5826static int e1kRegReadAutoClear(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5827{
5828 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5829 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, pu32Value);
5830 pThis->auRegs[index] = 0;
5831
5832 return rc;
5833}
5834
5835/**
5836 * Default register read handler.
5837 *
5838 * Retrieves the value of register from register array in device state structure.
5839 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
5840 *
5841 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5842 * done in the caller.
5843 *
5844 * @returns VBox status code.
5845 *
5846 * @param pThis The device state structure.
5847 * @param offset Register offset in memory-mapped frame.
5848 * @param index Register index in register array.
5849 * @thread EMT
5850 */
5851static int e1kRegReadDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5852{
5853 RT_NOREF_PV(pDevIns); RT_NOREF_PV(offset);
5854
5855 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5856 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
5857
5858 return VINF_SUCCESS;
5859}
5860
5861/**
5862 * Write handler for unimplemented registers.
5863 *
5864 * Merely reports writes to unimplemented registers.
5865 *
5866 * @param pThis The device state structure.
5867 * @param offset Register offset in memory-mapped frame.
5868 * @param index Register index in register array.
5869 * @param value The value to store.
5870 * @thread EMT
5871 */
5872
5873static int e1kRegWriteUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5874{
5875 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
5876
5877 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5878 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5879
5880 return VINF_SUCCESS;
5881}
5882
5883/**
5884 * Default register write handler.
5885 *
5886 * Stores the value to the register array in the device state structure. Only bits
5887 * corresponding to 1s in the 'writable' mask will be stored.
5888 *
5889 * @returns VBox status code.
5890 *
5891 * @param pThis The device state structure.
5892 * @param offset Register offset in memory-mapped frame.
5893 * @param index Register index in register array.
5894 * @param value The value to store.
5896 * @thread EMT
5897 */
5898
5899static int e1kRegWriteDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5900{
5901 RT_NOREF(pDevIns, offset);
5902
5903 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5904 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
5905 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
5906
5907 return VINF_SUCCESS;
5908}
5909
5910/**
5911 * Search register table for matching register.
5912 *
5913 * @returns Index in the register table or -1 if not found.
5914 *
5915 * @param offReg Register offset in memory-mapped region.
5916 * @thread EMT
5917 */
5918static int e1kRegLookup(uint32_t offReg)
5919{
5920
5921#if 0
5922 int index;
5923
5924 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5925 {
5926 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
5927 {
5928 return index;
5929 }
5930 }
5931#else
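    /* Binary search the offset-sorted first E1K_NUM_OF_BINARY_SEARCHABLE entries of the
       table, then fall back to a linear scan over the remaining entries below. */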
5932 int iStart = 0;
5933 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
5934 for (;;)
5935 {
5936 int i = (iEnd - iStart) / 2 + iStart;
5937 uint32_t offCur = g_aE1kRegMap[i].offset;
5938 if (offReg < offCur)
5939 {
5940 if (i == iStart)
5941 break;
5942 iEnd = i;
5943 }
5944 else if (offReg >= offCur + g_aE1kRegMap[i].size)
5945 {
5946 i++;
5947 if (i == iEnd)
5948 break;
5949 iStart = i;
5950 }
5951 else
5952 return i;
5953 Assert(iEnd > iStart);
5954 }
5955
5956 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5957 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
5958 return i;
5959
5960# ifdef VBOX_STRICT
5961 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5962 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
5963# endif
5964
5965#endif
5966
5967 return -1;
5968}
5969
5970/**
5971 * Handle unaligned register read operation.
5972 *
5973 * Looks up and calls appropriate handler.
5974 *
5975 * @returns VBox status code.
5976 *
5977 * @param pDevIns The device instance.
5978 * @param pThis The device state structure.
5979 * @param offReg Register offset in memory-mapped frame.
5980 * @param pv Where to store the result.
5981 * @param cb Number of bytes to read.
5982 * @thread EMT
5983 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
5984 * accesses we have to take care of that ourselves.
5985 */
5986static int e1kRegReadUnaligned(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
5987{
5988 uint32_t u32 = 0;
5989 uint32_t shift;
5990 int rc = VINF_SUCCESS;
5991 int index = e1kRegLookup(offReg);
5992#ifdef LOG_ENABLED
5993 char buf[9];
5994#endif
5995
5996 /*
5997 * From the spec:
5998 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5999 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
6000 */
6001
6002 /*
6003     * To be able to read bytes and short words we convert them to properly
6004 * shifted 32-bit words and masks. The idea is to keep register-specific
6005 * handlers simple. Most accesses will be 32-bit anyway.
6006 */
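    /* Example: a 2-byte read at offset 0x102 of a register based at 0x100 gives shift = 16 and
     * mask = 0xFFFF0000; the full 32-bit value returned by the handler is masked with it and
     * then shifted back down below. */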
6007 uint32_t mask;
6008 switch (cb)
6009 {
6010 case 4: mask = 0xFFFFFFFF; break;
6011 case 2: mask = 0x0000FFFF; break;
6012 case 1: mask = 0x000000FF; break;
6013 default:
6014 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
6015 }
6016 if (index != -1)
6017 {
6018 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6019 if (g_aE1kRegMap[index].readable)
6020 {
6021 /* Make the mask correspond to the bits we are about to read. */
6022 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
6023 mask <<= shift;
6024 if (!mask)
6025 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
6026 /*
6027 * Read it. Pass the mask so the handler knows what has to be read.
6028 * Mask out irrelevant bits.
6029 */
6030 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6031 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6032 return rc;
6033 //pThis->fDelayInts = false;
6034 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6035 //pThis->iStatIntLostOne = 0;
6036 rc = g_aE1kRegMap[index].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, index, &u32);
6037 u32 &= mask;
6038 //e1kCsLeave(pThis);
6039 E1kLog2(("%s At %08X read %s from %s (%s)\n",
6040 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6041 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
6042 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6043 /* Shift back the result. */
6044 u32 >>= shift;
6045 }
6046 else
6047 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
6048 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6049 if (IOM_SUCCESS(rc))
6050 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
6051 }
6052 else
6053 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
6054 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
6055
6056 memcpy(pv, &u32, cb);
6057 return rc;
6058}
6059
6060/**
6061 * Handle 4 byte aligned and sized read operation.
6062 *
6063 * Looks up and calls appropriate handler.
6064 *
6065 * @returns VBox status code.
6066 *
6067 * @param pDevIns The device instance.
6068 * @param pThis The device state structure.
6069 * @param offReg Register offset in memory-mapped frame.
6070 * @param pu32 Where to store the result.
6071 * @thread EMT
6072 */
6073static VBOXSTRICTRC e1kRegReadAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
6074{
6075 Assert(!(offReg & 3));
6076
6077 /*
6078 * Lookup the register and check that it's readable.
6079 */
6080 VBOXSTRICTRC rc = VINF_SUCCESS;
6081 int idxReg = e1kRegLookup(offReg);
6082 if (RT_LIKELY(idxReg != -1))
6083 {
6084 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6085        if (RT_LIKELY(g_aE1kRegMap[idxReg].readable))
6086 {
6087 /*
6088 * Read it. Pass the mask so the handler knows what has to be read.
6089 * Mask out irrelevant bits.
6090 */
6091 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6092 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
6093 // return rc;
6094 //pThis->fDelayInts = false;
6095 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6096 //pThis->iStatIntLostOne = 0;
6097 rc = g_aE1kRegMap[idxReg].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, idxReg, pu32);
6098 //e1kCsLeave(pThis);
6099 Log6(("%s At %08X read %08X from %s (%s)\n",
6100 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6101 if (IOM_SUCCESS(rc))
6102 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
6103 }
6104 else
6105 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
6106 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6107 }
6108 else
6109 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
6110 return rc;
6111}
6112
6113/**
6114 * Handle 4 byte sized and aligned register write operation.
6115 *
6116 * Looks up and calls appropriate handler.
6117 *
6118 * @returns VBox status code.
6119 *
6120 * @param pDevIns The device instance.
6121 * @param pThis The device state structure.
6122 * @param offReg Register offset in memory-mapped frame.
6123 * @param u32Value The value to write.
6124 * @thread EMT
6125 */
6126static VBOXSTRICTRC e1kRegWriteAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
6127{
6128 VBOXSTRICTRC rc = VINF_SUCCESS;
6129 int index = e1kRegLookup(offReg);
6130 if (RT_LIKELY(index != -1))
6131 {
6132 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6133 if (RT_LIKELY(g_aE1kRegMap[index].writable))
6134 {
6135 /*
6136 * Write it. Pass the mask so the handler knows what has to be written.
6137 * Mask out irrelevant bits.
6138 */
6139 Log6(("%s At %08X write %08X to %s (%s)\n",
6140 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6141 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
6142 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
6143 // return rc;
6144 //pThis->fDelayInts = false;
6145 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6146 //pThis->iStatIntLostOne = 0;
6147 rc = g_aE1kRegMap[index].pfnWrite(pDevIns, pThis, offReg, index, u32Value);
6148 //e1kCsLeave(pThis);
6149 }
6150 else
6151 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
6152 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6153 if (IOM_SUCCESS(rc))
6154 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
6155 }
6156 else
6157 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
6158 pThis->szPrf, offReg, u32Value));
6159 return rc;
6160}
6161
6162
6163/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
6164
6165/**
6166 * @callback_method_impl{FNIOMMMIONEWREAD}
6167 */
6168static DECLCALLBACK(VBOXSTRICTRC) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, uint32_t cb)
6169{
6170 RT_NOREF2(pvUser, cb);
6171 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6172 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6173
6174 Assert(off < E1K_MM_SIZE);
6175 Assert(cb == 4);
6176 Assert(!(off & 3));
6177
6178 VBOXSTRICTRC rcStrict = e1kRegReadAlignedU32(pDevIns, pThis, (uint32_t)off, (uint32_t *)pv);
6179
6180 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6181 return rcStrict;
6182}
6183
6184/**
6185 * @callback_method_impl{FNIOMMMIONEWWRITE}
6186 */
6187static DECLCALLBACK(VBOXSTRICTRC) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, uint32_t cb)
6188{
6189 RT_NOREF2(pvUser, cb);
6190 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6191 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6192
6193 Assert(off < E1K_MM_SIZE);
6194 Assert(cb == 4);
6195 Assert(!(off & 3));
6196
6197 VBOXSTRICTRC rcStrict = e1kRegWriteAlignedU32(pDevIns, pThis, (uint32_t)off, *(uint32_t const *)pv);
6198
6199 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6200 return rcStrict;
6201}
6202
6203/**
6204 * @callback_method_impl{FNIOMIOPORTNEWIN}
6205 */
6206static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
6207{
6208 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6209 VBOXSTRICTRC rc;
6210 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
6211 RT_NOREF_PV(pvUser);
6212
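    /* The whole register set is also reachable through a two-register window:
     * a write to IOADDR (offset 0) selects a register, and accesses to IODATA
     * (offset 4) read or write the selected register. */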
6213 if (RT_LIKELY(cb == 4))
6214 switch (offPort)
6215 {
6216 case 0x00: /* IOADDR */
6217 *pu32 = pThis->uSelectedReg;
6218 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6219 rc = VINF_SUCCESS;
6220 break;
6221
6222 case 0x04: /* IODATA */
6223 if (!(pThis->uSelectedReg & 3))
6224 rc = e1kRegReadAlignedU32(pDevIns, pThis, pThis->uSelectedReg, pu32);
6225 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
6226 rc = e1kRegReadUnaligned(pDevIns, pThis, pThis->uSelectedReg, pu32, cb);
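            /* The register handlers are shared with the MMIO path, so a request to
             * defer the access to ring-3 comes back as VINF_IOM_R3_MMIO_READ;
             * translate it into the I/O port equivalent here. */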
6227 if (rc == VINF_IOM_R3_MMIO_READ)
6228 rc = VINF_IOM_R3_IOPORT_READ;
6229 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6230 break;
6231
6232 default:
6233 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, offPort));
6234 /** @todo r=bird: Check what real hardware returns here. */
6235 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
6236 rc = VINF_IOM_MMIO_UNUSED_00; /* used to return VINF_SUCCESS and not touch *pu32, which amounted to this. */
6237 break;
6238 }
6239 else
6240 {
6241 E1kLog(("%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb));
6242 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb);
6243 *pu32 = 0; /** @todo r=bird: Check what real hardware returns here. (Didn't used to set a value here, picked zero as that's what we'd end up in most cases.) */
6244 }
6245 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
6246 return rc;
6247}
6248
6249
6250/**
6251 * @callback_method_impl{FNIOMIOPORTNEWOUT}
6252 */
6253static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
6254{
6255 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6256 VBOXSTRICTRC rc;
6257 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6258 RT_NOREF_PV(pvUser);
6259
6260 E1kLog2(("%s e1kIOPortOut: offPort=%RTiop value=%08x\n", pThis->szPrf, offPort, u32));
6261 if (RT_LIKELY(cb == 4))
6262 {
6263 switch (offPort)
6264 {
6265 case 0x00: /* IOADDR */
6266 pThis->uSelectedReg = u32;
6267 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6268 rc = VINF_SUCCESS;
6269 break;
6270
6271 case 0x04: /* IODATA */
6272 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6273 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6274 {
6275 rc = e1kRegWriteAlignedU32(pDevIns, pThis, pThis->uSelectedReg, u32);
6276 if (rc == VINF_IOM_R3_MMIO_WRITE)
6277 rc = VINF_IOM_R3_IOPORT_WRITE;
6278 }
6279 else
6280 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
6281 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6282 break;
6283
6284 default:
6285 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, offPort));
6286 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", offPort);
6287 }
6288 }
6289 else
6290 {
6291 E1kLog(("%s e1kIOPortOut: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb));
6292 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: offPort=%RTiop cb=%#x\n", pThis->szPrf, offPort, cb);
6293 }
6294
6295 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6296 return rc;
6297}
6298
6299#ifdef IN_RING3
6300
6301/**
6302 * Dump complete device state to log.
6303 *
6304 * @param pThis Pointer to device state.
6305 */
6306static void e1kDumpState(PE1KSTATE pThis)
6307{
6308 RT_NOREF(pThis);
6309 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6310 E1kLog2(("%s: %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6311# ifdef E1K_INT_STATS
6312 LogRel(("%s: Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6313 LogRel(("%s: Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6314 LogRel(("%s: Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6315 LogRel(("%s: ICR outside ISR : %d\n", pThis->szPrf, pThis->uStatNoIntICR));
6316 LogRel(("%s: IMS raised ints : %d\n", pThis->szPrf, pThis->uStatIntIMS));
6317 LogRel(("%s: Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6318 LogRel(("%s: Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6319 LogRel(("%s: Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6320 LogRel(("%s: Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6321 LogRel(("%s: Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6322 LogRel(("%s: Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6323 LogRel(("%s: Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6324 LogRel(("%s: Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6325 LogRel(("%s: Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6326 LogRel(("%s: Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6327 LogRel(("%s: Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6328 LogRel(("%s: TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6329 LogRel(("%s: TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6330 LogRel(("%s: TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6331 LogRel(("%s: TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6332 LogRel(("%s: TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6333 LogRel(("%s: TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6334 LogRel(("%s: RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6335 LogRel(("%s: RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6336 LogRel(("%s: TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6337 LogRel(("%s: TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6338 LogRel(("%s: TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6339 LogRel(("%s: Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6340 LogRel(("%s: Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6341 LogRel(("%s: TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6342 LogRel(("%s: TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6343 LogRel(("%s: TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6344 LogRel(("%s: TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6345 LogRel(("%s: TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6346 LogRel(("%s: TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6347 LogRel(("%s: TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6348 LogRel(("%s: TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6349 LogRel(("%s: Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6350 LogRel(("%s: Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6351# endif /* E1K_INT_STATS */
6352}
6353
6354/**
6355 * @callback_method_impl{FNPCIIOREGIONMAP}
6356 *
6357 * @todo Can remove this one later, it's really just here for taking down
6358 * addresses for e1kInfo(), an alignment assertion and sentimentality.
6359 */
6360static DECLCALLBACK(int) e1kR3Map(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev, uint32_t iRegion,
6361 RTGCPHYS GCPhysAddress, RTGCPHYS cb, PCIADDRESSSPACE enmType)
6362{
6363 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6364 E1kLog(("%s e1kR3Map: iRegion=%u GCPhysAddress=%RGp\n", pThis->szPrf, iRegion, GCPhysAddress));
6365 RT_NOREF(pPciDev, iRegion, cb);
6366 Assert(pPciDev == pDevIns->apPciDevs[0]);
6367
6368 switch (enmType)
6369 {
6370 case PCI_ADDRESS_SPACE_IO:
6371 pThis->IOPortBase = (RTIOPORT)GCPhysAddress;
6372 break;
6373
6374 case PCI_ADDRESS_SPACE_MEM:
6375 pThis->addrMMReg = GCPhysAddress;
6376 Assert(!(GCPhysAddress & 7) || GCPhysAddress == NIL_RTGCPHYS);
6377 break;
6378
6379 default:
6380 /* We should never get here */
6381 AssertMsgFailedReturn(("Invalid PCI address space param in map callback"), VERR_INTERNAL_ERROR);
6382 }
6383 return VINF_SUCCESS;
6384}
6385
6386
6387/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6388
6389/**
6390 * Check if the device can receive data now.
6391 * This must be called before the pfnReceive() method is called.
6392 *
6393 * @returns VINF_SUCCESS if the device can receive, VERR_NET_NO_BUFFER_SPACE otherwise.
6394 * @param pDevIns The device instance.
6395 * @param pThis The instance data.
6396 * @thread EMT
6397 */
6398static int e1kCanReceive(PPDMDEVINS pDevIns, PE1KSTATE pThis)
6399{
6400#ifndef E1K_WITH_RXD_CACHE
6401 size_t cb;
6402
6403 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6404 return VERR_NET_NO_BUFFER_SPACE;
6405
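    /* Special case: with a single-descriptor ring RDH always equals RDT, so probe
     * the descriptor's DD bit instead -- if it is still clear the descriptor is
     * available for receiving. */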
6406 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6407 {
6408 E1KRXDESC desc;
6409 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
6410 if (desc.status.fDD)
6411 cb = 0;
6412 else
6413 cb = pThis->u16RxBSize;
6414 }
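    /* Count the free descriptors between head and tail, taking ring wrap-around
     * into account; each free descriptor supplies one buffer of u16RxBSize bytes. */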
6415 else if (RDH < RDT)
6416 cb = (RDT - RDH) * pThis->u16RxBSize;
6417 else if (RDH > RDT)
6418 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6419 else
6420 {
6421 cb = 0;
6422 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6423 }
6424 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6425 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6426
6427 e1kCsRxLeave(pThis);
6428 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6429#else /* E1K_WITH_RXD_CACHE */
6430 int rc = VINF_SUCCESS;
6431
6432 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6433 return VERR_NET_NO_BUFFER_SPACE;
6434
6435 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6436 {
6437 E1KRXDESC desc;
6438 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
6439 if (desc.status.fDD)
6440 rc = VERR_NET_NO_BUFFER_SPACE;
6441 }
6442 else if (e1kRxDIsCacheEmpty(pThis) && RDH == RDT)
6443 {
6444 /* Cache is empty, so is the RX ring. */
6445 rc = VERR_NET_NO_BUFFER_SPACE;
6446 }
6447 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6448 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6449 e1kRxDInCache(pThis), RDH, RDT, RDLEN, pThis->u16RxBSize, rc));
6450
6451 e1kCsRxLeave(pThis);
6452 return rc;
6453#endif /* E1K_WITH_RXD_CACHE */
6454}
6455
6456/**
6457 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6458 */
6459static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6460{
6461 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6462 PE1KSTATE pThis = pThisCC->pShared;
6463 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6464
6465 int rc = e1kCanReceive(pDevIns, pThis);
6466
6467 if (RT_SUCCESS(rc))
6468 return VINF_SUCCESS;
6469 if (RT_UNLIKELY(cMillies == 0))
6470 return VERR_NET_NO_BUFFER_SPACE;
6471
6472 rc = VERR_INTERRUPTED;
6473 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
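    /* Let the rest of the device code know we are waiting; hEventMoreRxDescAvail is
     * expected to be signalled when more RX descriptors may have become available,
     * as well as on suspend/power-off (see e1kWakeupReceive), so the loop below
     * simply re-checks after each wakeup. */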
6474 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6475 VMSTATE enmVMState;
6476 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pDevIns)) == VMSTATE_RUNNING
6477 || enmVMState == VMSTATE_RUNNING_LS))
6478 {
6479 int rc2 = e1kCanReceive(pDevIns, pThis);
6480 if (RT_SUCCESS(rc2))
6481 {
6482 rc = VINF_SUCCESS;
6483 break;
6484 }
6485 E1kLogRel(("E1000: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6486 E1kLog(("%s: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6487 PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pThis->hEventMoreRxDescAvail, cMillies);
6488 }
6489 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6490 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6491
6492 return rc;
6493}
6494
6495
6496/**
6497 * Matches the packet addresses against the Receive Address table. Looks for
6498 * exact matches only.
6499 *
6500 * @returns true if address matches.
6501 * @param pThis Pointer to the state structure.
6502 * @param pvBuf The ethernet packet.
6504 * @thread EMT
6505 */
6506static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6507{
6508 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6509 {
6510 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6511
6512 /* Valid address? */
6513 if (ra->ctl & RA_CTL_AV)
6514 {
6515 Assert((ra->ctl & RA_CTL_AS) < 2);
6516 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6517 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6518 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6519 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6520 /*
6521 * Address Select:
6522 * 00b = Destination address
6523 * 01b = Source address
6524 * 10b = Reserved
6525 * 11b = Reserved
6526 * Since ethernet header is (DA, SA, len) we can use address
6527 * select as index.
6528 */
6529 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6530 ra->addr, sizeof(ra->addr)) == 0)
6531 return true;
6532 }
6533 }
6534
6535 return false;
6536}
6537
6538/**
6539 * Matches the packet addresses against the Multicast Table Array.
6540 *
6541 * @remarks This is an imperfect match since it matches a subset of addresses
6542 * rather than an exact address.
6543 *
6544 * @returns true if address matches.
6545 * @param pThis Pointer to the state structure.
6546 * @param pvBuf The ethernet packet.
6548 * @thread EMT
6549 */
6550static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6551{
6552 /* Get bits 32..47 of destination address */
6553 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6554
6555 unsigned offset = GET_BITS(RCTL, MO);
6556 /*
6557 * offset means:
6558 * 00b = bits 36..47
6559 * 01b = bits 35..46
6560 * 10b = bits 34..45
6561 * 11b = bits 32..43
6562 */
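    /* Shifting by (4 - offset) keeps the 12 bits selected by RCTL.MO; the result,
     * masked to 12 bits, indexes the 4096-bit Multicast Table Array. */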
6563 if (offset < 3)
6564 u16Bit = u16Bit >> (4 - offset);
6565 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6566}
6567
6568/**
6569 * Determines if the packet is to be delivered to the upper layer.
6570 *
6571 * The following filters are supported:
6572 * - Exact Unicast/Multicast
6573 * - Promiscuous Unicast/Multicast
6574 * - Multicast
6575 * - VLAN
6576 *
6577 * @returns true if packet is intended for this node.
6578 * @param pThis Pointer to the state structure.
6579 * @param pvBuf The ethernet packet.
6580 * @param cb Number of bytes available in the packet.
6581 * @param pStatus Bit field to store status bits.
6582 * @thread EMT
6583 */
6584static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6585{
6586 Assert(cb > 14);
6587 /* Assume that we fail to pass exact filter. */
6588 pStatus->fPIF = false;
6589 pStatus->fVP = false;
6590 /* Discard oversized packets */
6591 if (cb > E1K_MAX_RX_PKT_SIZE)
6592 {
6593 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6594 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6595 E1K_INC_CNT32(ROC);
6596 return false;
6597 }
6598 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6599 {
6600 /* When long packet reception is disabled, packets over 1522 bytes are discarded */
6601 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6602 pThis->szPrf, cb));
6603 E1K_INC_CNT32(ROC);
6604 return false;
6605 }
6606
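    /* u16Ptr[6] is the EtherType/TPID word at byte offset 12, right after the
     * destination and source MAC addresses; for an 802.1Q frame it holds VET and
     * u16Ptr[7] carries the tag control information (TCI). */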
6607 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6608 /* Compare TPID with VLAN Ether Type */
6609 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6610 {
6611 pStatus->fVP = true;
6612 /* Is VLAN filtering enabled? */
6613 if (RCTL & RCTL_VFE)
6614 {
6615 /* It is 802.1q packet indeed, let's filter by VID */
6616 if (RCTL & RCTL_CFIEN)
6617 {
6618 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6619 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6620 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6621 !!(RCTL & RCTL_CFI)));
6622 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6623 {
6624 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6625 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6626 return false;
6627 }
6628 }
6629 else
6630 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6631 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6632 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6633 {
6634 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6635 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6636 return false;
6637 }
6638 }
6639 }
6640 /* Broadcast filtering */
6641 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6642 return true;
6643 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6644 if (e1kIsMulticast(pvBuf))
6645 {
6646 /* Is multicast promiscuous enabled? */
6647 if (RCTL & RCTL_MPE)
6648 return true;
6649 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6650 /* Try perfect matches first */
6651 if (e1kPerfectMatch(pThis, pvBuf))
6652 {
6653 pStatus->fPIF = true;
6654 return true;
6655 }
6656 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6657 if (e1kImperfectMatch(pThis, pvBuf))
6658 return true;
6659 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6660 }
6661 else
     {
6662 /* Is unicast promiscuous enabled? */
6663 if (RCTL & RCTL_UPE)
6664 return true;
6665 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6666 if (e1kPerfectMatch(pThis, pvBuf))
6667 {
6668 pStatus->fPIF = true;
6669 return true;
6670 }
6671 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6672 }
6673 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6674 return false;
6675}
6676
6677/**
6678 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6679 */
6680static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6681{
6682 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6683 PE1KSTATE pThis = pThisCC->pShared;
6684 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6685 int rc = VINF_SUCCESS;
6686
6687 /*
6688 * Drop packets if the VM is not running yet/anymore.
6689 */
6690 VMSTATE enmVMState = PDMDevHlpVMState(pDevIns);
6691 if ( enmVMState != VMSTATE_RUNNING
6692 && enmVMState != VMSTATE_RUNNING_LS)
6693 {
6694 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6695 return VINF_SUCCESS;
6696 }
6697
6698 /* Discard incoming packets in locked state */
6699 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6700 {
6701 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6702 return VINF_SUCCESS;
6703 }
6704
6705 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6706
6707 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6708 // return VERR_PERMISSION_DENIED;
6709
6710 e1kPacketDump(pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6711
6712 /* Update stats */
6713 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6714 {
6715 E1K_INC_CNT32(TPR);
6716 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6717 e1kCsLeave(pThis);
6718 }
6719 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6720 E1KRXDST status;
6721 RT_ZERO(status);
6722 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6723 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6724 if (fPassed)
6725 {
6726 rc = e1kHandleRxPacket(pDevIns, pThis, pvBuf, cb, status);
6727 }
6728 //e1kCsLeave(pThis);
6729 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6730
6731 return rc;
6732}
6733
6734
6735/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6736
6737/**
6738 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6739 */
6740static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6741{
6742 if (iLUN == 0)
6743 {
6744 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, ILeds);
6745 *ppLed = &pThisCC->pShared->led;
6746 return VINF_SUCCESS;
6747 }
6748 return VERR_PDM_LUN_NOT_FOUND;
6749}
6750
6751
6752/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6753
6754/**
6755 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6756 */
6757static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6758{
6759 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6760 pThisCC->eeprom.getMac(pMac);
6761 return VINF_SUCCESS;
6762}
6763
6764/**
6765 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6766 */
6767static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6768{
6769 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6770 PE1KSTATE pThis = pThisCC->pShared;
6771 if (STATUS & STATUS_LU)
6772 return PDMNETWORKLINKSTATE_UP;
6773 return PDMNETWORKLINKSTATE_DOWN;
6774}
6775
6776/**
6777 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6778 */
6779static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6780{
6781 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6782 PE1KSTATE pThis = pThisCC->pShared;
6783 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6784
6785 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6786 switch (enmState)
6787 {
6788 case PDMNETWORKLINKSTATE_UP:
6789 pThis->fCableConnected = true;
6790 /* If link was down, bring it up after a while. */
6791 if (!(STATUS & STATUS_LU))
6792 e1kBringLinkUpDelayed(pDevIns, pThis);
6793 break;
6794 case PDMNETWORKLINKSTATE_DOWN:
6795 pThis->fCableConnected = false;
6796 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6797 * We might have to set the link state before the driver initializes us. */
6798 Phy::setLinkStatus(&pThis->phy, false);
6799 /* If link was up, bring it down. */
6800 if (STATUS & STATUS_LU)
6801 e1kR3LinkDown(pDevIns, pThis, pThisCC);
6802 break;
6803 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6804 /*
6805 * There is not much sense in bringing down the link if it has not come up yet.
6806 * If it is up though, we bring it down temporarily, then bring it up again.
6807 */
6808 if (STATUS & STATUS_LU)
6809 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
6810 break;
6811 default:
6812 ;
6813 }
6814 return VINF_SUCCESS;
6815}
6816
6817
6818/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6819
6820/**
6821 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6822 */
6823static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6824{
6825 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, IBase);
6826 Assert(&pThisCC->IBase == pInterface);
6827
6828 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThisCC->IBase);
6829 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThisCC->INetworkDown);
6830 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThisCC->INetworkConfig);
6831 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThisCC->ILeds);
6832 return NULL;
6833}
6834
6835
6836/* -=-=-=-=- Saved State -=-=-=-=- */
6837
6838/**
6839 * Saves the configuration.
6840 *
6841 * @param pThis The E1K state.
6842 * @param pSSM The handle to the saved state.
6843 */
6844static void e1kSaveConfig(PE1KSTATE pThis, PSSMHANDLE pSSM)
6845{
6846 SSMR3PutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
6847 SSMR3PutU32(pSSM, pThis->eChip);
6848}
6849
6850/**
6851 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6852 */
6853static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6854{
6855 RT_NOREF(uPass);
6856 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6857 e1kSaveConfig(pThis, pSSM);
6858 return VINF_SSM_DONT_CALL_AGAIN;
6859}
6860
6861/**
6862 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6863 */
6864static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6865{
6866 RT_NOREF(pSSM);
6867 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6868
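    /* Entering and immediately leaving the critical section makes sure no other
     * thread is in the middle of modifying the device state when saving starts. */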
6869 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6870 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6871 return rc;
6872 e1kCsLeave(pThis);
6873 return VINF_SUCCESS;
6874#if 0
6875 /* 1) Prevent all threads from modifying the state and memory */
6876 //pThis->fLocked = true;
6877 /* 2) Cancel all timers */
6878#ifdef E1K_TX_DELAY
6879 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
6880#endif /* E1K_TX_DELAY */
6881//#ifdef E1K_USE_TX_TIMERS
6882 if (pThis->fTidEnabled)
6883 {
6884 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
6885#ifndef E1K_NO_TAD
6886 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
6887#endif /* E1K_NO_TAD */
6888 }
6889//#endif /* E1K_USE_TX_TIMERS */
6890#ifdef E1K_USE_RX_TIMERS
6891 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
6892 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
6893#endif /* E1K_USE_RX_TIMERS */
6894 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
6895 /* 3) Did I forget anything? */
6896 E1kLog(("%s Locked\n", pThis->szPrf));
6897 return VINF_SUCCESS;
6898#endif
6899}
6900
6901/**
6902 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6903 */
6904static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6905{
6906 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6907 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
6908
6909 e1kSaveConfig(pThis, pSSM);
6910 pThisCC->eeprom.save(pSSM);
6911 e1kDumpState(pThis);
6912 SSMR3PutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
6913 SSMR3PutBool(pSSM, pThis->fIntRaised);
6914 Phy::saveState(pSSM, &pThis->phy);
6915 SSMR3PutU32(pSSM, pThis->uSelectedReg);
6916 SSMR3PutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
6917 SSMR3PutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6918 SSMR3PutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
6919 SSMR3PutU64(pSSM, pThis->u64AckedAt);
6920 SSMR3PutU16(pSSM, pThis->u16RxBSize);
6921 //SSMR3PutBool(pSSM, pThis->fDelayInts);
6922 //SSMR3PutBool(pSSM, pThis->fIntMaskUsed);
6923 SSMR3PutU16(pSSM, pThis->u16TxPktLen);
6924/** @todo State wrt the TSE buffer is incomplete, so little point in
6925 * saving this actually. */
6926 SSMR3PutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
6927 SSMR3PutBool(pSSM, pThis->fIPcsum);
6928 SSMR3PutBool(pSSM, pThis->fTCPcsum);
6929 SSMR3PutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6930 SSMR3PutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6931 SSMR3PutBool(pSSM, pThis->fVTag);
6932 SSMR3PutU16(pSSM, pThis->u16VTagTCI);
6933#ifdef E1K_WITH_TXD_CACHE
6934#if 0
6935 SSMR3PutU8(pSSM, pThis->nTxDFetched);
6936 SSMR3PutMem(pSSM, pThis->aTxDescriptors,
6937 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6938#else
6939 /*
6940 * There is no point in storing TX descriptor cache entries as we can simply
6941 * fetch them again. Moreover, normally the cache is always empty when we
6942 * save the state. Store zero entries for compatibility.
6943 */
6944 SSMR3PutU8(pSSM, 0);
6945#endif
6946#endif /* E1K_WITH_TXD_CACHE */
6947/** @todo GSO requires some more state here. */
6948 E1kLog(("%s State has been saved\n", pThis->szPrf));
6949 return VINF_SUCCESS;
6950}
6951
6952#if 0
6953/**
6954 * @callback_method_impl{FNSSMDEVSAVEDONE}
6955 */
6956static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6957{
6958 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6959
6960 /* If VM is being powered off unlocking will result in assertions in PGM */
6961 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6962 pThis->fLocked = false;
6963 else
6964 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
6965 E1kLog(("%s Unlocked\n", pThis->szPrf));
6966 return VINF_SUCCESS;
6967}
6968#endif
6969
6970/**
6971 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6972 */
6973static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6974{
6975 RT_NOREF(pSSM);
6976 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6977
6978 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6979 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6980 return rc;
6981 e1kCsLeave(pThis);
6982 return VINF_SUCCESS;
6983}
6984
6985/**
6986 * @callback_method_impl{FNSSMDEVLOADEXEC}
6987 */
6988static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6989{
6990 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
6991 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
6992 int rc;
6993
6994 if ( uVersion != E1K_SAVEDSTATE_VERSION
6995#ifdef E1K_WITH_TXD_CACHE
6996 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6997#endif /* E1K_WITH_TXD_CACHE */
6998 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6999 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
7000 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
7001
7002 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
7003 || uPass != SSM_PASS_FINAL)
7004 {
7005 /* config checks */
7006 RTMAC macConfigured;
7007 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
7008 AssertRCReturn(rc, rc);
7009 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
7010 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
7011 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
7012
7013 E1KCHIP eChip;
7014 rc = SSMR3GetU32(pSSM, &eChip);
7015 AssertRCReturn(rc, rc);
7016 if (eChip != pThis->eChip)
7017 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
7018 }
7019
7020 if (uPass == SSM_PASS_FINAL)
7021 {
7022 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
7023 {
7024 rc = pThisCC->eeprom.load(pSSM);
7025 AssertRCReturn(rc, rc);
7026 }
7027 /* the state */
7028 SSMR3GetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
7029 SSMR3GetBool(pSSM, &pThis->fIntRaised);
7030 /** @todo PHY could be made a separate device with its own versioning */
7031 Phy::loadState(pSSM, &pThis->phy);
7032 SSMR3GetU32(pSSM, &pThis->uSelectedReg);
7033 SSMR3GetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
7034 SSMR3GetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
7035 SSMR3GetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
7036 SSMR3GetU64(pSSM, &pThis->u64AckedAt);
7037 SSMR3GetU16(pSSM, &pThis->u16RxBSize);
7038 //SSMR3GetBool(pSSM, pThis->fDelayInts);
7039 //SSMR3GetBool(pSSM, pThis->fIntMaskUsed);
7040 SSMR3GetU16(pSSM, &pThis->u16TxPktLen);
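    /* Clamp the restored length so the read below cannot overflow aTxPacketFallback. */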
7041 if (pThis->u16TxPktLen > sizeof(pThis->aTxPacketFallback))
7042 pThis->u16TxPktLen = sizeof(pThis->aTxPacketFallback);
7043 SSMR3GetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
7044 SSMR3GetBool(pSSM, &pThis->fIPcsum);
7045 SSMR3GetBool(pSSM, &pThis->fTCPcsum);
7046 SSMR3GetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
7047 rc = SSMR3GetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
7048 AssertRCReturn(rc, rc);
7049 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
7050 {
7051 SSMR3GetBool(pSSM, &pThis->fVTag);
7052 rc = SSMR3GetU16(pSSM, &pThis->u16VTagTCI);
7053 AssertRCReturn(rc, rc);
7054 }
7055 else
7056 {
7057 pThis->fVTag = false;
7058 pThis->u16VTagTCI = 0;
7059 }
7060#ifdef E1K_WITH_TXD_CACHE
7061 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
7062 {
7063 rc = SSMR3GetU8(pSSM, &pThis->nTxDFetched);
7064 AssertRCReturn(rc, rc);
7065 if (pThis->nTxDFetched)
7066 SSMR3GetMem(pSSM, pThis->aTxDescriptors,
7067 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
7068 }
7069 else
7070 pThis->nTxDFetched = 0;
7071 /*
7072 * @todo Perhaps we should not store the TXD cache as the entries can be
7073 * simply fetched again from the guest's memory. Or can't they?
7074 */
7075#endif /* E1K_WITH_TXD_CACHE */
7076#ifdef E1K_WITH_RXD_CACHE
7077 /*
7078 * There is no point in storing the RX descriptor cache in the saved
7079 * state, we just need to make sure it is empty.
7080 */
7081 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
7082#endif /* E1K_WITH_RXD_CACHE */
7083 /* derived state */
7084 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
7085
7086 E1kLog(("%s State has been restored\n", pThis->szPrf));
7087 e1kDumpState(pThis);
7088 }
7089 return VINF_SUCCESS;
7090}
7091
7092/**
7093 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
7094 */
7095static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7096{
7097 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7098 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7099 RT_NOREF(pSSM);
7100
7101 /* Update promiscuous mode */
7102 if (pThisCC->pDrvR3)
7103 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, !!(RCTL & (RCTL_UPE | RCTL_MPE)));
7104
7105 /*
7106 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
7107 * passed to us. We go through all this stuff if the link was up and we
7108 * weren't teleported.
7109 */
7110 if ( (STATUS & STATUS_LU)
7111 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
7112 && pThis->cMsLinkUpDelay)
7113 {
7114 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7115 }
7116 return VINF_SUCCESS;
7117}
7118
7119
7120
7121/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
7122
7123/**
7124 * @callback_method_impl{FNRTSTRFORMATTYPE}
7125 */
7126static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
7127 void *pvArgOutput,
7128 const char *pszType,
7129 void const *pvValue,
7130 int cchWidth,
7131 int cchPrecision,
7132 unsigned fFlags,
7133 void *pvUser)
7134{
7135 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7136 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
7137 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
7138 if (!pDesc)
7139 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
7140
7141 size_t cbPrintf = 0;
7142 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
7143 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
7144 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
7145 pDesc->status.fPIF ? "PIF" : "pif",
7146 pDesc->status.fIPCS ? "IPCS" : "ipcs",
7147 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
7148 pDesc->status.fVP ? "VP" : "vp",
7149 pDesc->status.fIXSM ? "IXSM" : "ixsm",
7150 pDesc->status.fEOP ? "EOP" : "eop",
7151 pDesc->status.fDD ? "DD" : "dd",
7152 pDesc->status.fRXE ? "RXE" : "rxe",
7153 pDesc->status.fIPE ? "IPE" : "ipe",
7154 pDesc->status.fTCPE ? "TCPE" : "tcpe",
7155 pDesc->status.fCE ? "CE" : "ce",
7156 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
7157 E1K_SPEC_VLAN(pDesc->status.u16Special),
7158 E1K_SPEC_PRI(pDesc->status.u16Special));
7159 return cbPrintf;
7160}
7161
7162/**
7163 * @callback_method_impl{FNRTSTRFORMATTYPE}
7164 */
7165static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
7166 void *pvArgOutput,
7167 const char *pszType,
7168 void const *pvValue,
7169 int cchWidth,
7170 int cchPrecision,
7171 unsigned fFlags,
7172 void *pvUser)
7173{
7174 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7175 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
7176 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
7177 if (!pDesc)
7178 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
7179
7180 size_t cbPrintf = 0;
7181 switch (e1kGetDescType(pDesc))
7182 {
7183 case E1K_DTYP_CONTEXT:
7184 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
7185 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
7186 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
7187 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
7188 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
7189 pDesc->context.dw2.fIDE ? " IDE":"",
7190 pDesc->context.dw2.fRS ? " RS" :"",
7191 pDesc->context.dw2.fTSE ? " TSE":"",
7192 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
7193 pDesc->context.dw2.fTCP ? "TCP":"UDP",
7194 pDesc->context.dw2.u20PAYLEN,
7195 pDesc->context.dw3.u8HDRLEN,
7196 pDesc->context.dw3.u16MSS,
7197 pDesc->context.dw3.fDD?"DD":"");
7198 break;
7199 case E1K_DTYP_DATA:
7200 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
7201 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
7202 pDesc->data.u64BufAddr,
7203 pDesc->data.cmd.u20DTALEN,
7204 pDesc->data.cmd.fIDE ? " IDE" :"",
7205 pDesc->data.cmd.fVLE ? " VLE" :"",
7206 pDesc->data.cmd.fRPS ? " RPS" :"",
7207 pDesc->data.cmd.fRS ? " RS" :"",
7208 pDesc->data.cmd.fTSE ? " TSE" :"",
7209 pDesc->data.cmd.fIFCS? " IFCS":"",
7210 pDesc->data.cmd.fEOP ? " EOP" :"",
7211 pDesc->data.dw3.fDD ? " DD" :"",
7212 pDesc->data.dw3.fEC ? " EC" :"",
7213 pDesc->data.dw3.fLC ? " LC" :"",
7214 pDesc->data.dw3.fTXSM? " TXSM":"",
7215 pDesc->data.dw3.fIXSM? " IXSM":"",
7216 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
7217 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
7218 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
7219 break;
7220 case E1K_DTYP_LEGACY:
7221 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
7222 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
7223 pDesc->data.u64BufAddr,
7224 pDesc->legacy.cmd.u16Length,
7225 pDesc->legacy.cmd.fIDE ? " IDE" :"",
7226 pDesc->legacy.cmd.fVLE ? " VLE" :"",
7227 pDesc->legacy.cmd.fRPS ? " RPS" :"",
7228 pDesc->legacy.cmd.fRS ? " RS" :"",
7229 pDesc->legacy.cmd.fIC ? " IC" :"",
7230 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7231 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7232 pDesc->legacy.dw3.fDD ? " DD" :"",
7233 pDesc->legacy.dw3.fEC ? " EC" :"",
7234 pDesc->legacy.dw3.fLC ? " LC" :"",
7235 pDesc->legacy.cmd.u8CSO,
7236 pDesc->legacy.dw3.u8CSS,
7237 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7238 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7239 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7240 break;
7241 default:
7242 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7243 break;
7244 }
7245
7246 return cbPrintf;
7247}
7248
7249/** Initializes debug helpers (logging format types). */
7250static int e1kInitDebugHelpers(void)
7251{
7252 int rc = VINF_SUCCESS;
7253 static bool s_fHelpersRegistered = false;
7254 if (!s_fHelpersRegistered)
7255 {
7256 s_fHelpersRegistered = true;
7257 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
7258 AssertRCReturn(rc, rc);
7259 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
7260 AssertRCReturn(rc, rc);
7261 }
7262 return rc;
7263}
7264
7265/**
7266 * Status info callback.
7267 *
7268 * @param pDevIns The device instance.
7269 * @param pHlp The output helpers.
7270 * @param pszArgs The arguments.
7271 */
7272static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7273{
7274 RT_NOREF(pszArgs);
7275 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7276 unsigned i;
7277 // bool fRcvRing = false;
7278 // bool fXmtRing = false;
7279
7280 /*
7281 * Parse args.
7282 if (pszArgs)
7283 {
7284 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7285 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7286 }
7287 */
7288
7289 /*
7290 * Show info.
7291 */
7292 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7293 pDevIns->iInstance, pThis->IOPortBase, pThis->addrMMReg,
7294 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7295 pDevIns->fRCEnabled ? " RC" : "", pDevIns->fR0Enabled ? " R0" : "");
7296
7297 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7298
7299 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7300 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7301
7302 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7303 {
7304 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7305 if (ra->ctl & RA_CTL_AV)
7306 {
7307 const char *pcszTmp;
7308 switch (ra->ctl & RA_CTL_AS)
7309 {
7310 case 0: pcszTmp = "DST"; break;
7311 case 1: pcszTmp = "SRC"; break;
7312 default: pcszTmp = "reserved";
7313 }
7314 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7315 }
7316 }
7317 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7318 uint32_t rdh = RDH;
7319 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7320 for (i = 0; i < cDescs; ++i)
7321 {
7322 E1KRXDESC desc;
7323 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7324 &desc, sizeof(desc));
7325 if (i == rdh)
7326 pHlp->pfnPrintf(pHlp, ">>> ");
7327 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7328 }
7329#ifdef E1K_WITH_RXD_CACHE
7330 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7331 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
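    /* Translate the cache position back to a ring index: the first cached descriptor
     * was fetched at RDH - iRxDCurrent (modulo the ring size). */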
7332 if (rdh > pThis->iRxDCurrent)
7333 rdh -= pThis->iRxDCurrent;
7334 else
7335 rdh = cDescs + rdh - pThis->iRxDCurrent;
7336 for (i = 0; i < pThis->nRxDFetched; ++i)
7337 {
7338 if (i == pThis->iRxDCurrent)
7339 pHlp->pfnPrintf(pHlp, ">>> ");
7340 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7341 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7342 &pThis->aRxDescriptors[i]);
7343 }
7344#endif /* E1K_WITH_RXD_CACHE */
7345
7346 cDescs = TDLEN / sizeof(E1KTXDESC);
7347 uint32_t tdh = TDH;
7348 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7349 for (i = 0; i < cDescs; ++i)
7350 {
7351 E1KTXDESC desc;
7352 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7353 &desc, sizeof(desc));
7354 if (i == tdh)
7355 pHlp->pfnPrintf(pHlp, ">>> ");
7356 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7357 }
7358#ifdef E1K_WITH_TXD_CACHE
7359 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7360 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7361 if (tdh > pThis->iTxDCurrent)
7362 tdh -= pThis->iTxDCurrent;
7363 else
7364 tdh = cDescs + tdh - pThis->iTxDCurrent;
7365 for (i = 0; i < pThis->nTxDFetched; ++i)
7366 {
7367 if (i == pThis->iTxDCurrent)
7368 pHlp->pfnPrintf(pHlp, ">>> ");
7369 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7370 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7371 &pThis->aTxDescriptors[i]);
7372 }
7373#endif /* E1K_WITH_TXD_CACHE */
7374
7375
7376#ifdef E1K_INT_STATS
7377 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7378 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7379 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7380 pHlp->pfnPrintf(pHlp, "ICR outside ISR : %d\n", pThis->uStatNoIntICR);
7381 pHlp->pfnPrintf(pHlp, "IMS raised ints : %d\n", pThis->uStatIntIMS);
7382 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7383 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7384 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7385 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7386 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7387 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7388 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7389 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7390 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7391 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7392 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7393 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7394 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7395 pHlp->pfnPrintf(pHlp, "TX delay expired: %d\n", pThis->uStatTxDelayExp);
7396 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7397 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7398 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7399 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7400 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7401 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7402 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7403 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7404 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7405 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7406 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7407 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7408 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7409 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7410 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7411 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7412 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7413 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7414 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7415#endif /* E1K_INT_STATS */
7416
7417 e1kCsLeave(pThis);
7418}
7419
7420
7421
7422/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7423
7424/**
7425 * Detach notification.
7426 *
7427 * One port on the network card has been disconnected from the network.
7428 *
7429 * @param pDevIns The device instance.
7430 * @param iLUN The logical unit which is being detached.
7431 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7432 */
7433static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7434{
7435 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7436 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7437 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7438 RT_NOREF(fFlags);
7439
7440 AssertLogRelReturnVoid(iLUN == 0);
7441
7442 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7443
7444 /** @todo r=pritesh still need to check whether I missed
7445 * cleaning something up in this function
7446 */
7447
7448 /*
7449 * Zero some important members.
7450 */
7451 pThisCC->pDrvBase = NULL;
7452 pThisCC->pDrvR3 = NULL;
7453#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7454 pThisR0->pDrvR0 = NIL_RTR0PTR;
7455 pThisRC->pDrvRC = NIL_RTRCPTR;
7456#endif
7457
7458 PDMCritSectLeave(&pThis->cs);
7459}
7460
7461/**
7462 * Attach the Network attachment.
7463 *
7464 * One port on the network card has been connected to a network.
7465 *
7466 * @returns VBox status code.
7467 * @param pDevIns The device instance.
7468 * @param iLUN The logical unit which is being attached.
7469 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7470 *
7471 * @remarks This code path is not used during construction.
7472 */
7473static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7474{
7475 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7476 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7477 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7478 RT_NOREF(fFlags);
7479
7480 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7481
7482 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7483
7484 /*
7485 * Attach the driver.
7486 */
7487 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
7488 if (RT_SUCCESS(rc))
7489 {
7490 if (rc == VINF_NAT_DNS)
7491 {
7492#ifdef RT_OS_LINUX
7493 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7494 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7495#else
7496 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7497 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7498#endif
7499 }
7500 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
7501 AssertMsgStmt(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7502 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7503 if (RT_SUCCESS(rc))
7504 {
7505#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7506 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7507 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7508#endif
7509 }
7510 }
7511 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7512 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7513 {
7514 /* This should never happen because this function is not called
7515 * if there is no driver to attach! */
7516 Log(("%s No attached driver!\n", pThis->szPrf));
7517 }
7518
7519 /*
7520 * Temporarily set the link down if it was up so that the guest will know
7521 * that we have changed the configuration of the network card.
7522 */
7523 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7524 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7525
7526 PDMCritSectLeave(&pThis->cs);
7527 return rc;
7528
7529}
7530
7531/**
7532 * @copydoc FNPDMDEVPOWEROFF
7533 */
7534static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7535{
7536 /* Poke thread waiting for buffer space. */
7537 e1kWakeupReceive(pDevIns, PDMINS_2_DATA(pDevIns, PE1KSTATE));
7538}
7539
7540/**
7541 * @copydoc FNPDMDEVRESET
7542 */
7543static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7544{
7545 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7546 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7547#ifdef E1K_TX_DELAY
7548 e1kCancelTimer(pDevIns, pThis, pThis->hTXDTimer);
7549#endif /* E1K_TX_DELAY */
7550 e1kCancelTimer(pDevIns, pThis, pThis->hIntTimer);
7551 e1kCancelTimer(pDevIns, pThis, pThis->hLUTimer);
7552 e1kXmitFreeBuf(pThis, pThisCC);
7553 pThis->u16TxPktLen = 0;
7554 pThis->fIPcsum = false;
7555 pThis->fTCPcsum = false;
7556 pThis->fIntMaskUsed = false;
7557 pThis->fDelayInts = false;
7558 pThis->fLocked = false;
7559 pThis->u64AckedAt = 0;
7560 e1kR3HardReset(pDevIns, pThis, pThisCC);
7561}
7562
7563/**
7564 * @copydoc FNPDMDEVSUSPEND
7565 */
7566static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7567{
7568 /* Poke thread waiting for buffer space. */
7569 e1kWakeupReceive(pDevIns, PDMINS_2_DATA(pDevIns, PE1KSTATE));
7570}
7571
7572/**
7573 * Device relocation callback.
7574 *
7575 * When this callback is called, the device instance data is being
7576 * relocated (if the device has a GC component) and/or the selectors
7577 * have been changed. The device must use the chance to perform the
7578 * necessary pointer relocations and data updates.
7579 *
7580 * Before the GC code is executed the first time, this function will be
7581 * called with a 0 delta so GC pointer calculations can be done in one place.
7582 *
7583 * @param pDevIns Pointer to the device instance.
7584 * @param offDelta The relocation delta relative to the old location.
7585 *
7586 * @remark A relocation CANNOT fail.
7587 */
7588static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7589{
7590 PE1KSTATERC pThisRC = PDMINS_2_DATA_RC(pDevIns, PE1KSTATERC);
7591 if (pThisRC)
7592 pThisRC->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7593 RT_NOREF(offDelta);
7594}
7595
7596/**
7597 * Destruct a device instance.
7598 *
7599 * We need to free non-VM resources only.
7600 *
7601 * @returns VBox status code.
7602 * @param pDevIns The device instance data.
7603 * @thread EMT
7604 */
7605static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7606{
7607 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7608 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7609
7610 e1kDumpState(pThis);
7611 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7612 if (PDMCritSectIsInitialized(&pThis->cs))
7613 {
7614 if (pThis->hEventMoreRxDescAvail != NIL_SUPSEMEVENT)
7615 {
7616 PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventMoreRxDescAvail);
7617 RTThreadYield();
7618 PDMDevHlpSUPSemEventClose(pDevIns, pThis->hEventMoreRxDescAvail);
7619 pThis->hEventMoreRxDescAvail = NIL_SUPSEMEVENT;
7620 }
7621#ifdef E1K_WITH_TX_CS
7622 PDMR3CritSectDelete(&pThis->csTx);
7623#endif /* E1K_WITH_TX_CS */
7624 PDMR3CritSectDelete(&pThis->csRx);
7625 PDMR3CritSectDelete(&pThis->cs);
7626 }
7627 return VINF_SUCCESS;
7628}
7629
7630
7631/**
7632 * Set PCI configuration space registers.
7633 *
7634 * @param pPciDev The PCI device structure.
 * @param eChip Index into g_aChips selecting which adapter variant to configure.
7635 * @thread EMT
7636 */
7637static void e1kR3ConfigurePciDev(PPDMPCIDEV pPciDev, E1KCHIP eChip)
7638{
7639 Assert(eChip < RT_ELEMENTS(g_aChips));
7640 /* Configure PCI Device, assume 32-bit mode ******************************/
7641 PDMPciDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7642 PDMPciDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7643 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7644 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7645
7646 PDMPciDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7647 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7648 PDMPciDevSetWord( pPciDev, VBOX_PCI_STATUS,
7649 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7650 /* Stepping A2 */
7651 PDMPciDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7652 /* Ethernet adapter */
7653 PDMPciDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7654 PDMPciDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7655 /* normal single function Ethernet controller */
7656 PDMPciDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7657 /* Memory Register Base Address */
7658 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7659 /* Memory Flash Base Address */
7660 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7661 /* IO Register Base Address */
7662 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7663 /* Expansion ROM Base Address */
7664 PDMPciDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7665 /* Capabilities Pointer */
7666 PDMPciDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7667 /* Interrupt Pin: INTA# */
7668 PDMPciDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7669 /* Max_Lat/Min_Gnt: very high priority and time slice */
7670 PDMPciDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7671 PDMPciDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7672
7673 /* PCI Power Management Registers ****************************************/
7674 /* Capability ID: PCI Power Management Registers */
7675 PDMPciDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7676 /* Next Item Pointer: PCI-X */
7677 PDMPciDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7678 /* Power Management Capabilities: PM disabled, DSI */
7679 PDMPciDevSetWord( pPciDev, 0xDC + 2,
7680 0x0002 | VBOX_PCI_PM_CAP_DSI);
7681 /* Power Management Control / Status Register: PM disabled */
7682 PDMPciDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7683 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7684 PDMPciDevSetByte( pPciDev, 0xDC + 6, 0x00);
7685 /* Data Register: PM disabled, always 0 */
7686 PDMPciDevSetByte( pPciDev, 0xDC + 7, 0x00);
7687
7688 /* PCI-X Configuration Registers *****************************************/
7689 /* Capability ID: PCI-X Configuration Registers */
7690 PDMPciDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7691#ifdef E1K_WITH_MSI
7692 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7693#else
7694 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7695 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7696#endif
7697 /* PCI-X Command: Enable Relaxed Ordering */
7698 PDMPciDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7699 /* PCI-X Status: 32-bit, 66MHz */
7700 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7701 PDMPciDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7702}
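
/**
 * @note Illustrative sketch only, not used by the device code: one way the
 *       capability chain set up above could be walked, assuming the generic
 *       PDMPciDevGetByte accessor. In the default (non-MSI) build the chain is
 *       PM at 0xDC -> PCI-X at 0xE4 -> end (next pointer 0).
 * @code
 * static void e1kR3DumpPciCapChain(PPDMPCIDEV pPciDev)   // hypothetical helper
 * {
 *     uint8_t offCap = PDMPciDevGetByte(pPciDev, VBOX_PCI_CAPABILITY_LIST);  // 0xDC
 *     while (offCap != 0)
 *     {
 *         uint8_t const idCap   = PDMPciDevGetByte(pPciDev, offCap);      // capability ID
 *         uint8_t const offNext = PDMPciDevGetByte(pPciDev, offCap + 1);  // next item pointer
 *         LogRel(("cap %#x at %#x, next %#x\n", idCap, offCap, offNext));
 *         offCap = offNext;
 *     }
 * }
 * @endcode
 */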
7703
7704/**
7705 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7706 */
7707static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7708{
7709 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7710 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
7711 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7712 int rc;
7713
7714 /*
7715 * Initialize the instance data (state).
7716 * Note! Caller has initialized it to ZERO already.
7717 */
7718 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7719 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7720 pThis->hEventMoreRxDescAvail = NIL_SUPSEMEVENT;
7721 pThis->u16TxPktLen = 0;
7722 pThis->fIPcsum = false;
7723 pThis->fTCPcsum = false;
7724 pThis->fIntMaskUsed = false;
7725 pThis->fDelayInts = false;
7726 pThis->fLocked = false;
7727 pThis->u64AckedAt = 0;
7728 pThis->led.u32Magic = PDMLED_MAGIC;
7729 pThis->u32PktNo = 1;
7730
7731 pThisCC->pDevInsR3 = pDevIns;
7732 pThisCC->pShared = pThis;
7733
7734 /* Interfaces */
7735 pThisCC->IBase.pfnQueryInterface = e1kR3QueryInterface;
7736
7737 pThisCC->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7738 pThisCC->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7739 pThisCC->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7740
7741 pThisCC->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7742
7743 pThisCC->INetworkConfig.pfnGetMac = e1kR3GetMac;
7744 pThisCC->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7745 pThisCC->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7746
7747 /*
7748 * Internal validations.
7749 */
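    /* The table must be sorted by strictly increasing offset, and each entry must
       end no earlier than its predecessor, so that a binary search over the first
       E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap works; the assertion
       below catches table edits that break this ordering. */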
7750 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7751 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7752 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7753 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7754 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7755 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7756 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7757 VERR_INTERNAL_ERROR_4);
7758
7759 /*
7760 * Validate configuration.
7761 */
7762 PDMDEV_VALIDATE_CONFIG_RETURN(pDevIns,
7763 "MAC|"
7764 "CableConnected|"
7765 "AdapterType|"
7766 "LineSpeed|"
7767 "ItrEnabled|"
7768 "ItrRxEnabled|"
7769 "EthernetCRC|"
7770 "GSOEnabled|"
7771 "LinkUpDelay", "");
7772
7773 /** @todo LineSpeed unused! */
7774
7775 /*
7776 * Get config params
7777 */
7778 rc = CFGMR3QueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7779 if (RT_FAILURE(rc))
7780 return PDMDEV_SET_ERROR(pDevIns, rc,
7781 N_("Configuration error: Failed to get MAC address"));
7782 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7783 if (RT_FAILURE(rc))
7784 return PDMDEV_SET_ERROR(pDevIns, rc,
7785 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7786 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7787 if (RT_FAILURE(rc))
7788 return PDMDEV_SET_ERROR(pDevIns, rc,
7789 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7790 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7791
7792 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7793 if (RT_FAILURE(rc))
7794 return PDMDEV_SET_ERROR(pDevIns, rc,
7795 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7796
7797 rc = CFGMR3QueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7798 if (RT_FAILURE(rc))
7799 return PDMDEV_SET_ERROR(pDevIns, rc,
7800 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7801
7802 rc = CFGMR3QueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, false);
7803 if (RT_FAILURE(rc))
7804 return PDMDEV_SET_ERROR(pDevIns, rc,
7805 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7806
7807 rc = CFGMR3QueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7808 if (RT_FAILURE(rc))
7809 return PDMDEV_SET_ERROR(pDevIns, rc,
7810 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7811
7812 rc = CFGMR3QueryBoolDef(pCfg, "TidEnabled", &pThis->fTidEnabled, false);
7813 if (RT_FAILURE(rc))
7814 return PDMDEV_SET_ERROR(pDevIns, rc,
7815 N_("Configuration error: Failed to get the value of 'TidEnabled'"));
7816
7817 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 3000); /* ms */
7818 if (RT_FAILURE(rc))
7819 return PDMDEV_SET_ERROR(pDevIns, rc,
7820 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7821 Assert(pThis->cMsLinkUpDelay <= 300000); /* at most 5 minutes */
7822 if (pThis->cMsLinkUpDelay > 5000)
7823 LogRel(("%s: WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7824 else if (pThis->cMsLinkUpDelay == 0)
7825 LogRel(("%s: WARNING! Link up delay is disabled!\n", pThis->szPrf));
7826
7827 LogRel(("%s: Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s TID=%s R0=%s RC=%s\n", pThis->szPrf,
7828 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7829 pThis->fEthernetCRC ? "on" : "off",
7830 pThis->fGSOEnabled ? "enabled" : "disabled",
7831 pThis->fItrEnabled ? "enabled" : "disabled",
7832 pThis->fItrRxEnabled ? "enabled" : "disabled",
7833 pThis->fTidEnabled ? "enabled" : "disabled",
7834 pDevIns->fR0Enabled ? "enabled" : "disabled",
7835 pDevIns->fRCEnabled ? "enabled" : "disabled"));
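
    /*
     * Illustrative only: these keys are normally populated by the VM configuration
     * constructor in Main. A minimal hand-rolled setup could look roughly like the
     * following, assuming the usual CFGMR3Insert* helpers and a hypothetical pCfg
     * node for this device instance (boolean keys are stored as integers):
     *
     *     RTMAC Mac = { { 0x08, 0x00, 0x27, 0x12, 0x34, 0x56 } };
     *     CFGMR3InsertBytes(pCfg,   "MAC", &Mac, sizeof(Mac));
     *     CFGMR3InsertInteger(pCfg, "CableConnected", 1);
     *     CFGMR3InsertInteger(pCfg, "AdapterType", E1K_CHIP_82540EM);
     *     CFGMR3InsertInteger(pCfg, "LinkUpDelay", 3000);   // milliseconds
     */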
7836
7837 /*
7838 * Initialize sub-components and register everything with the VMM.
7839 */
7840
7841 /* Initialize the EEPROM. */
7842 pThisCC->eeprom.init(pThis->macConfigured);
7843
7844 /* Initialize internal PHY. */
7845 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
7846
7847 /* Initialize critical sections. We do our own locking. */
7848 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7849 AssertRCReturn(rc, rc);
7850
7851 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
7852 AssertRCReturn(rc, rc);
7853 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
7854 AssertRCReturn(rc, rc);
7855#ifdef E1K_WITH_TX_CS
7856 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
7857 AssertRCReturn(rc, rc);
7858#endif
7859
7860 /* Saved state registration. */
7861 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7862 NULL, e1kLiveExec, NULL,
7863 e1kSavePrep, e1kSaveExec, NULL,
7864 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7865 AssertRCReturn(rc, rc);
7866
7867 /* Set PCI config registers and register ourselves with the PCI bus. */
7868 PDMPCIDEV_ASSERT_VALID(pDevIns, pDevIns->apPciDevs[0]);
7869 e1kR3ConfigurePciDev(pDevIns->apPciDevs[0], pThis->eChip);
7870 rc = PDMDevHlpPCIRegister(pDevIns, pDevIns->apPciDevs[0]);
7871 AssertRCReturn(rc, rc);
7872
7873#ifdef E1K_WITH_MSI
7874 PDMMSIREG MsiReg;
7875 RT_ZERO(MsiReg);
7876 MsiReg.cMsiVectors = 1;
7877 MsiReg.iMsiCapOffset = 0x80;
7878 MsiReg.iMsiNextOffset = 0x0;
7879 MsiReg.fMsi64bit = false;
7880 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
7881 AssertRCReturn(rc, rc);
7882#endif
7883
7884 /*
7885 * Map our registers to memory space (region 0, see e1kR3ConfigurePciDev)
7886 * From the spec (regarding flags):
7887 * For registers that should be accessed as 32-bit double words,
7888 * partial writes (less than a 32-bit double word) are ignored.
7889 * Partial reads return all 32 bits of data regardless of the
7890 * byte enables.
7891 */
7892 rc = PDMDevHlpMmioCreateEx(pDevIns, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
7893 pDevIns->apPciDevs[0], 0 /*iPciRegion*/,
7894 e1kMMIOWrite, e1kMMIORead, NULL /*pfnFill*/, NULL /*pvUser*/, "E1000", &pThis->hMmioRegion);
7895 AssertRCReturn(rc, rc);
7896 rc = PDMDevHlpPCIIORegionRegisterMmio(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, pThis->hMmioRegion, e1kR3Map);
7897 AssertRCReturn(rc, rc);
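    /* With these flags IOM presents only aligned 4-byte accesses to e1kMMIOWrite
       and e1kMMIORead: narrower guest writes are dropped, and narrower reads are
       served from a full 32-bit register read, matching the spec text quoted
       above. */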
7898
7899 /* Map our registers to IO space (region 2, see e1kR3ConfigurePciDev) */
7900 static IOMIOPORTDESC const s_aExtDescs[] =
7901 {
7902 { "IOADDR", "IOADDR", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
7903 { "IODATA", "IODATA", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
7904 { NULL, NULL, NULL, NULL }
7905 };
7906 rc = PDMDevHlpIoPortCreate(pDevIns, E1K_IOPORT_SIZE, pDevIns->apPciDevs[0], 2 /*iPciRegion*/,
7907 e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/, "E1000", s_aExtDescs, &pThis->hIoPorts);
7908 AssertRCReturn(rc, rc);
7909 rc = PDMDevHlpPCIIORegionRegisterIo(pDevIns, 2, E1K_IOPORT_SIZE, pThis->hIoPorts, e1kR3Map);
7910 AssertRCReturn(rc, rc);
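    /* The 8-byte I/O window follows the usual e1000 indirect access scheme: the
       guest writes a register offset to IOADDR (bytes 0..3) and then accesses that
       register through IODATA (bytes 4..7), as reflected in the port descriptors
       above. */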
7911
7912 /* Create transmit queue */
7913 rc = PDMDevHlpTaskCreate(pDevIns, PDMTASK_F_RZ, "E1000-Xmit", e1kR3TxTaskCallback, NULL, &pThis->hTxTask);
7914 AssertRCReturn(rc, rc);
7915
7916#ifdef E1K_TX_DELAY
7917 /* Create Transmit Delay Timer */
7918 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7919 "E1000 Transmit Delay Timer", &pThis->hTXDTimer);
7920 AssertRCReturn(rc, rc);
7921 rc = PDMDevHlpTimerSetCritSect(pDevIns, pThis->hTXDTimer, &pThis->csTx);
7922 AssertRCReturn(rc, rc);
7923#endif /* E1K_TX_DELAY */
7924
7925//#ifdef E1K_USE_TX_TIMERS
7926 if (pThis->fTidEnabled)
7927 {
7928 /* Create Transmit Interrupt Delay Timer */
7929 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxIntDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7930 "E1000 Transmit Interrupt Delay Timer", &pThis->hTIDTimer);
7931 AssertRCReturn(rc, rc);
7932
7933# ifndef E1K_NO_TAD
7934 /* Create Transmit Absolute Delay Timer */
7935 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxAbsDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7936 "E1000 Transmit Absolute Delay Timer", &pThis->hTADTimer);
7937 AssertRCReturn(rc, rc);
7938# endif /* E1K_NO_TAD */
7939 }
7940//#endif /* E1K_USE_TX_TIMERS */
7941
7942#ifdef E1K_USE_RX_TIMERS
7943 /* Create Receive Interrupt Delay Timer */
7944 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3RxIntDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7945 "E1000 Receive Interrupt Delay Timer", &pThis->hRIDTimer);
7946 AssertRCReturn(rc, rc);
7947
7948 /* Create Receive Absolute Delay Timer */
7949 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3RxAbsDelayTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7950 "E1000 Receive Absolute Delay Timer", &pThis->hRADTimer);
7951 AssertRCReturn(rc, rc);
7952#endif /* E1K_USE_RX_TIMERS */
7953
7954 /* Create Late Interrupt Timer */
7955 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3LateIntTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7956 "E1000 Late Interrupt Timer", &pThis->hIntTimer);
7957 AssertRCReturn(rc, rc);
7958
7959 /* Create Link Up Timer */
7960 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3LinkUpTimer, pThis, TMTIMER_FLAGS_NO_CRIT_SECT,
7961 "E1000 Link Up Timer", &pThis->hLUTimer);
7962 AssertRCReturn(rc, rc);
7963
7964 /* Register the info item */
7965 char szTmp[20];
7966 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7967 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
7968
7969 /* Status driver */
7970 PPDMIBASE pBase;
7971 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThisCC->IBase, &pBase, "Status Port");
7972 if (RT_FAILURE(rc))
7973 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7974 pThisCC->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7975
7976 /* Network driver */
7977 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
7978 if (RT_SUCCESS(rc))
7979 {
7980 if (rc == VINF_NAT_DNS)
7981 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7982 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays when trying to do so"));
7983 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
7984 AssertMsgReturn(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
7985
7986#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7987 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7988 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7989#endif
7990 }
7991 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7992 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7993 {
7994 /* No error! */
7995 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
7996 }
7997 else
7998 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
7999
8000 rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->hEventMoreRxDescAvail);
8001 AssertRCReturn(rc, rc);
8002
8003 rc = e1kInitDebugHelpers();
8004 AssertRCReturn(rc, rc);
8005
8006 e1kR3HardReset(pDevIns, pThis, pThisCC);
8007
8008 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Public/Net/E1k%u/BytesReceived", iInstance);
8009 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Public/Net/E1k%u/BytesTransmitted", iInstance);
8010
8011 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
8012 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
8013
8014#if defined(VBOX_WITH_STATISTICS)
8015 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
8016 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
8017 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
8018 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
8019 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
8020 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
8021 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
8022 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
8023 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
8024 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
8025 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
8026 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
8027 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
8028 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
8029 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
8030 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
8031 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
8032 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
8033 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
8034 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeupRZ, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups in RZ", "/Devices/E1k%d/RxOverflowWakeupRZ", iInstance);
8035 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeupR3, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups in R3", "/Devices/E1k%d/RxOverflowWakeupR3", iInstance);
8036 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
8037 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
8038 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
8039 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
8040
8041 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContexNormal", iInstance);
8042 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
8043 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
8044 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
8045 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
8046 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
8047 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
8048 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
8049 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
8050 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
8051 {
8052 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8053 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Reads", iInstance, g_aE1kRegMap[iReg].abbrev);
8054 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8055 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Writes", iInstance, g_aE1kRegMap[iReg].abbrev);
8056 }
8057#endif /* VBOX_WITH_STATISTICS */
8058
8059#ifdef E1K_INT_STATS
8060 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
8061 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
8062 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
8063 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
8064 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
8065 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatNoIntICR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatNoIntICR", "/Devices/E1k%d/uStatNoIntICR", iInstance);
8066 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
8067 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
8068 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntIMS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntIMS", "/Devices/E1k%d/uStatIntIMS", iInstance);
8069 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
8070 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
8071 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
8072 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
8073 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
8074 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
8075 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
8076 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
8077 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
8078 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
8079 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
8080 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
8081 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
8082 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
8083 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
8084 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
8085 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
8086 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
8087 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
8088 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
8089 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
8090 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
8091 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
8092 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
8093 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
8094 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
8095 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
8096 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
8097 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
8098 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
8099 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
8100 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
8101#endif /* E1K_INT_STATS */
8102
8103 return VINF_SUCCESS;
8104}
8105
8106#else /* !IN_RING3 */
8107
8108/**
8109 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
8110 */
8111static DECLCALLBACK(int) e1kRZConstruct(PPDMDEVINS pDevIns)
8112{
8113 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
8114 PE1KSTATECC pThisCC = PDMINS_2_DATA_CC(pDevIns, PE1KSTATECC);
8115
8116 pThisCC->CTX_SUFF(pDevIns) = pDevIns;
8117 /** @todo @bugref{9218} ring-0 driver stuff */
8118 pThisCC->CTX_SUFF(pDrv) = NULL;
8119 pThisCC->CTX_SUFF(pTxSg) = NULL;
8120
8121 int rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmioRegion, e1kMMIOWrite, e1kMMIORead, NULL /*pvUser*/);
8122 AssertRCReturn(rc, rc);
8123
8124 rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPorts, e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/);
8125 AssertRCReturn(rc, rc);
8126
8127 return VINF_SUCCESS;
8128}
8129
8130#endif /* !IN_RING3 */
8131
8132/**
8133 * The device registration structure.
8134 */
8135const PDMDEVREG g_DeviceE1000 =
8136{
8137 /* .u32Version = */ PDM_DEVREG_VERSION,
8138 /* .uReserved0 = */ 0,
8139 /* .szName = */ "e1000",
8140 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
8141 /* .fClass = */ PDM_DEVREG_CLASS_NETWORK,
8142 /* .cMaxInstances = */ ~0U,
8143 /* .uSharedVersion = */ 42,
8144 /* .cbInstanceShared = */ sizeof(E1KSTATE),
8145 /* .cbInstanceCC = */ sizeof(E1KSTATECC),
8146 /* .cbInstanceRC = */ sizeof(E1KSTATERC),
8147 /* .cMaxPciDevices = */ 1,
8148 /* .cMaxMsixVectors = */ 0,
8149 /* .pszDescription = */ "Intel PRO/1000 MT Desktop Ethernet.",
8150#if defined(IN_RING3)
8151 /* .pszRCMod = */ "VBoxDDRC.rc",
8152 /* .pszR0Mod = */ "VBoxDDR0.r0",
8153 /* .pfnConstruct = */ e1kR3Construct,
8154 /* .pfnDestruct = */ e1kR3Destruct,
8155 /* .pfnRelocate = */ e1kR3Relocate,
8156 /* .pfnMemSetup = */ NULL,
8157 /* .pfnPowerOn = */ NULL,
8158 /* .pfnReset = */ e1kR3Reset,
8159 /* .pfnSuspend = */ e1kR3Suspend,
8160 /* .pfnResume = */ NULL,
8161 /* .pfnAttach = */ e1kR3Attach,
8162 /* .pfnDetach = */ e1kR3Detach,
8163 /* .pfnQueryInterface = */ NULL,
8164 /* .pfnInitComplete = */ NULL,
8165 /* .pfnPowerOff = */ e1kR3PowerOff,
8166 /* .pfnSoftReset = */ NULL,
8167 /* .pfnReserved0 = */ NULL,
8168 /* .pfnReserved1 = */ NULL,
8169 /* .pfnReserved2 = */ NULL,
8170 /* .pfnReserved3 = */ NULL,
8171 /* .pfnReserved4 = */ NULL,
8172 /* .pfnReserved5 = */ NULL,
8173 /* .pfnReserved6 = */ NULL,
8174 /* .pfnReserved7 = */ NULL,
8175#elif defined(IN_RING0)
8176 /* .pfnEarlyConstruct = */ NULL,
8177 /* .pfnConstruct = */ e1kRZConstruct,
8178 /* .pfnDestruct = */ NULL,
8179 /* .pfnFinalDestruct = */ NULL,
8180 /* .pfnRequest = */ NULL,
8181 /* .pfnReserved0 = */ NULL,
8182 /* .pfnReserved1 = */ NULL,
8183 /* .pfnReserved2 = */ NULL,
8184 /* .pfnReserved3 = */ NULL,
8185 /* .pfnReserved4 = */ NULL,
8186 /* .pfnReserved5 = */ NULL,
8187 /* .pfnReserved6 = */ NULL,
8188 /* .pfnReserved7 = */ NULL,
8189#elif defined(IN_RC)
8190 /* .pfnConstruct = */ e1kRZConstruct,
8191 /* .pfnReserved0 = */ NULL,
8192 /* .pfnReserved1 = */ NULL,
8193 /* .pfnReserved2 = */ NULL,
8194 /* .pfnReserved3 = */ NULL,
8195 /* .pfnReserved4 = */ NULL,
8196 /* .pfnReserved5 = */ NULL,
8197 /* .pfnReserved6 = */ NULL,
8198 /* .pfnReserved7 = */ NULL,
8199#else
8200# error "Not in IN_RING3, IN_RING0 or IN_RC!"
8201#endif
8202 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
8203};
8204
8205#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */