VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@93115

Last change on this file since 93115 was 93115, checked in by vboxsync, 3 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 335.1 KB
1/* $Id: DevE1000.cpp 93115 2022-01-01 11:31:46Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2022 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** @name E1000 Build Options
54 * @{ */
55/** @def E1K_INIT_RA0
56 * E1K_INIT_RA0 forces E1000 to set the first entry in the Receive Address filter
57 * table to the MAC address obtained from CFGM. Most guests read the MAC address
58 * from the EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend
59 * on it being set already (see @bugref{4657}).
60 */
61#define E1K_INIT_RA0
62/** @def E1K_LSC_ON_RESET
63 * E1K_LSC_ON_RESET causes the E1000 to generate a Link Status Change
64 * interrupt after a hard reset. This makes the E1K_LSC_ON_SLU option unnecessary.
65 * With the cable unplugged, LSC is triggered for the 82543GC only.
66 */
67#define E1K_LSC_ON_RESET
68/** @def E1K_LSC_ON_SLU
69 * E1K_LSC_ON_SLU causes E1000 to generate a Link Status Change interrupt when
70 * the guest driver brings up the link via the STATUS.LU bit. Again, the only guest
71 * that requires it is Mac OS X (see @bugref{4657}).
72 */
73//#define E1K_LSC_ON_SLU
74/** @def E1K_INIT_LINKUP_DELAY
75 * E1K_INIT_LINKUP_DELAY prevents the link from coming up while the driver is
76 * still initializing (see @bugref{8624}).
77 */
78#define E1K_INIT_LINKUP_DELAY_US (2000 * 1000)
79/** @def E1K_IMS_INT_DELAY_NS
80 * E1K_IMS_INT_DELAY_NS prevents interrupt storms in Windows guests when they
81 * enable interrupts (see @bugref{8624}).
82 */
83#define E1K_IMS_INT_DELAY_NS 100
84/** @def E1K_TX_DELAY
85 * E1K_TX_DELAY aims to improve the guest-to-host transfer rate for TCP streams
86 * by preventing packets from being sent immediately. It allows several packets
87 * to be sent in one batch, reducing the number of acknowledgments. Note that it
88 * effectively disables the R0 TX path, forcing transmission in R3.
89 */
90//#define E1K_TX_DELAY 150
91/** @def E1K_USE_TX_TIMERS
92 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
93 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
94 * register. Enabling it showed no positive effect on existing guests, so it
95 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
96 * Ethernet Controllers Software Developer’s Manual" for more detailed
97 * explanation.
98 */
99//#define E1K_USE_TX_TIMERS
100/** @def E1K_NO_TAD
101 * E1K_NO_TAD disables one of the two timers enabled by E1K_USE_TX_TIMERS, the
102 * Transmit Absolute Delay timer. This timer sets the maximum time interval
103 * during which TX interrupts can be postponed (delayed). It has no effect
104 * if E1K_USE_TX_TIMERS is not defined.
105 */
106//#define E1K_NO_TAD
107/** @def E1K_REL_DEBUG
108 * E1K_REL_DEBUG enables debug logging of l1, l2, l3 in release builds.
109 */
110//#define E1K_REL_DEBUG
111/** @def E1K_INT_STATS
112 * E1K_INT_STATS enables collection of internal statistics used for
113 * debugging delayed interrupts, etc.
114 */
115#define E1K_INT_STATS
116/** @def E1K_WITH_MSI
117 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
118 */
119//#define E1K_WITH_MSI
120/** @def E1K_WITH_TX_CS
121 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
122 */
123#define E1K_WITH_TX_CS
124/** @def E1K_WITH_TXD_CACHE
125 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
126 * single physical memory read (or two if the batch wraps around the end of the
127 * TX descriptor ring). It is required for proper functioning of bandwidth
128 * resource control, as it allows the exact sizes of packets to be computed
129 * before their buffers are allocated (see @bugref{5582}).
130 */
131#define E1K_WITH_TXD_CACHE
132/** @def E1K_WITH_RXD_CACHE
133 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
134 * single physical memory read (or two if the batch wraps around the end of the
135 * RX descriptor ring). Intel's packet driver for DOS needs this option in
136 * order to work properly (see @bugref{6217}).
137 */
138#define E1K_WITH_RXD_CACHE
139/** @def E1K_WITH_PREREG_MMIO
140 * E1K_WITH_PREREG_MMIO enables a new-style MMIO registration and is
141 * currently only used for testing the related PDM, IOM and PGM code. */
142//#define E1K_WITH_PREREG_MMIO
143/** @} */
144/* End of Options ************************************************************/
145
146#ifdef E1K_WITH_TXD_CACHE
147/**
148 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
149 * in the state structure. It limits the number of descriptors loaded in one
150 * batch read. For example, a Linux guest may use up to 20 descriptors per
151 * TSE packet. The largest TSE packet seen (from a Windows guest) used 45 descriptors.
152 */
153# define E1K_TXD_CACHE_SIZE 64u
154#endif /* E1K_WITH_TXD_CACHE */
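/*
 * Illustrative sketch (not part of the device code): how a batched descriptor
 * fetch could be split into at most two physical reads when it wraps around
 * the end of the descriptor ring, as described for E1K_WITH_TXD_CACHE above.
 * The parameter names (iFirst, cAvail, cRingSize) are hypothetical.
 */
#if 0
static void e1kExampleBatchFetchSplit(uint32_t iFirst, uint32_t cAvail, uint32_t cRingSize)
{
    uint32_t cToFetch    = RT_MIN(cAvail, E1K_TXD_CACHE_SIZE);   /* never exceed the cache */
    uint32_t cFirstRead  = RT_MIN(cToFetch, cRingSize - iFirst); /* up to the end of the ring */
    uint32_t cSecondRead = cToFetch - cFirstRead;                /* non-zero only if it wraps */
    NOREF(cSecondRead);
}
#endif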
155
156#ifdef E1K_WITH_RXD_CACHE
157/**
158 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
159 * in the state structure. It limits the number of descriptors loaded in one
160 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
161 */
162# define E1K_RXD_CACHE_SIZE 16u
163#endif /* E1K_WITH_RXD_CACHE */
164
165
166/* Little helpers ************************************************************/
167#undef htons
168#undef ntohs
169#undef htonl
170#undef ntohl
171#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
172#define ntohs(x) htons(x)
173#define htonl(x) ASMByteSwapU32(x)
174#define ntohl(x) htonl(x)
175
176#ifndef DEBUG
177# ifdef E1K_REL_DEBUG
178# define DEBUG
179# define E1kLog(a) LogRel(a)
180# define E1kLog2(a) LogRel(a)
181# define E1kLog3(a) LogRel(a)
182# define E1kLogX(x, a) LogRel(a)
183//# define E1kLog3(a) do {} while (0)
184# else
185# define E1kLog(a) do {} while (0)
186# define E1kLog2(a) do {} while (0)
187# define E1kLog3(a) do {} while (0)
188# define E1kLogX(x, a) do {} while (0)
189# endif
190#else
191# define E1kLog(a) Log(a)
192# define E1kLog2(a) Log2(a)
193# define E1kLog3(a) Log3(a)
194# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
195//# define E1kLog(a) do {} while (0)
196//# define E1kLog2(a) do {} while (0)
197//# define E1kLog3(a) do {} while (0)
198#endif
199
200#if 0
201# define LOG_ENABLED
202# define E1kLogRel(a) LogRel(a)
203# undef Log6
204# define Log6(a) LogRel(a)
205#else
206# define E1kLogRel(a) do { } while (0)
207#endif
208
209//#undef DEBUG
210
211#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
212
213#define E1K_INC_CNT32(cnt) \
214do { \
215 if (cnt < UINT32_MAX) \
216 cnt++; \
217} while (0)
218
219#define E1K_ADD_CNT64(cntLo, cntHi, val) \
220do { \
221 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
222 uint64_t tmp = u64Cnt; \
223 u64Cnt += val; \
224 if (tmp > u64Cnt ) \
225 u64Cnt = UINT64_MAX; \
226 cntLo = (uint32_t)u64Cnt; \
227 cntHi = (uint32_t)(u64Cnt >> 32); \
228} while (0)
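/*
 * Usage sketch (illustrative): the 64-bit statistics counters are kept as
 * Low/High register pairs, e.g. Good Octets Received (GORCL/GORCH). A frame's
 * byte count would typically be accounted like this; cbFrame is a
 * hypothetical variable.
 */
#if 0
    E1K_ADD_CNT64(GORCL, GORCH, cbFrame);   /* saturates at UINT64_MAX */
    E1K_INC_CNT32(GPRC);                    /* Good Packets Received, saturates at UINT32_MAX */
#endif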
229
230#ifdef E1K_INT_STATS
231# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
232#else /* E1K_INT_STATS */
233# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
234#endif /* E1K_INT_STATS */
235
236
237/*****************************************************************************/
238
239typedef uint32_t E1KCHIP;
240#define E1K_CHIP_82540EM 0
241#define E1K_CHIP_82543GC 1
242#define E1K_CHIP_82545EM 2
243
244#ifdef IN_RING3
245/** Different E1000 chips. */
246static const struct E1kChips
247{
248 uint16_t uPCIVendorId;
249 uint16_t uPCIDeviceId;
250 uint16_t uPCISubsystemVendorId;
251 uint16_t uPCISubsystemId;
252 const char *pcszName;
253} g_aChips[] =
254{
255 /* Vendor Device SSVendor SubSys Name */
256 { 0x8086,
257 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
258# ifdef E1K_WITH_MSI
259 0x105E,
260# else
261 0x100E,
262# endif
263 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
264 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
265 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
266};
267#endif /* IN_RING3 */
268
269
270/* The size of register area mapped to I/O space */
271#define E1K_IOPORT_SIZE 0x8
272/* The size of memory-mapped register area */
273#define E1K_MM_SIZE 0x20000
274
275#define E1K_MAX_TX_PKT_SIZE 16288
276#define E1K_MAX_RX_PKT_SIZE 16384
277
278/*****************************************************************************/
279
280#ifndef VBOX_DEVICE_STRUCT_TESTCASE
281/** Gets the specified bits from the register. */
282#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
283#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
284#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
285#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
286#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
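/*
 * Usage sketch (illustrative): these macros rely on the <REG>_<FIELD>_MASK /
 * <REG>_<FIELD>_SHIFT naming convention. With the RCTL definitions below, the
 * receive buffer size field could be accessed like this:
 */
#if 0
    uint32_t uBSize = GET_BITS(RCTL, BSIZE); /* (RCTL & RCTL_BSIZE_MASK) >> RCTL_BSIZE_SHIFT */
    SET_BITS(RCTL, BSIZE, 1);                /* selects 1024-byte buffers while BSEX is clear */
#endif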
287
288#define CTRL_SLU UINT32_C(0x00000040)
289#define CTRL_MDIO UINT32_C(0x00100000)
290#define CTRL_MDC UINT32_C(0x00200000)
291#define CTRL_MDIO_DIR UINT32_C(0x01000000)
292#define CTRL_MDC_DIR UINT32_C(0x02000000)
293#define CTRL_RESET UINT32_C(0x04000000)
294#define CTRL_VME UINT32_C(0x40000000)
295
296#define STATUS_LU UINT32_C(0x00000002)
297#define STATUS_TXOFF UINT32_C(0x00000010)
298
299#define EECD_EE_WIRES UINT32_C(0x0F)
300#define EECD_EE_REQ UINT32_C(0x40)
301#define EECD_EE_GNT UINT32_C(0x80)
302
303#define EERD_START UINT32_C(0x00000001)
304#define EERD_DONE UINT32_C(0x00000010)
305#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
306#define EERD_DATA_SHIFT 16
307#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
308#define EERD_ADDR_SHIFT 8
309
310#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
311#define MDIC_DATA_SHIFT 0
312#define MDIC_REG_MASK UINT32_C(0x001F0000)
313#define MDIC_REG_SHIFT 16
314#define MDIC_PHY_MASK UINT32_C(0x03E00000)
315#define MDIC_PHY_SHIFT 21
316#define MDIC_OP_WRITE UINT32_C(0x04000000)
317#define MDIC_OP_READ UINT32_C(0x08000000)
318#define MDIC_READY UINT32_C(0x10000000)
319#define MDIC_INT_EN UINT32_C(0x20000000)
320#define MDIC_ERROR UINT32_C(0x40000000)
321
322#define TCTL_EN UINT32_C(0x00000002)
323#define TCTL_PSP UINT32_C(0x00000008)
324
325#define RCTL_EN UINT32_C(0x00000002)
326#define RCTL_UPE UINT32_C(0x00000008)
327#define RCTL_MPE UINT32_C(0x00000010)
328#define RCTL_LPE UINT32_C(0x00000020)
329#define RCTL_LBM_MASK UINT32_C(0x000000C0)
330#define RCTL_LBM_SHIFT 6
331#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
332#define RCTL_RDMTS_SHIFT 8
333#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
334#define RCTL_MO_MASK UINT32_C(0x00003000)
335#define RCTL_MO_SHIFT 12
336#define RCTL_BAM UINT32_C(0x00008000)
337#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
338#define RCTL_BSIZE_SHIFT 16
339#define RCTL_VFE UINT32_C(0x00040000)
340#define RCTL_CFIEN UINT32_C(0x00080000)
341#define RCTL_CFI UINT32_C(0x00100000)
342#define RCTL_BSEX UINT32_C(0x02000000)
343#define RCTL_SECRC UINT32_C(0x04000000)
344
345#define ICR_TXDW UINT32_C(0x00000001)
346#define ICR_TXQE UINT32_C(0x00000002)
347#define ICR_LSC UINT32_C(0x00000004)
348#define ICR_RXDMT0 UINT32_C(0x00000010)
349#define ICR_RXT0 UINT32_C(0x00000080)
350#define ICR_TXD_LOW UINT32_C(0x00008000)
351#define RDTR_FPD UINT32_C(0x80000000)
352
353#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
354typedef struct
355{
356 unsigned rxa : 7;
357 unsigned rxa_r : 9;
358 unsigned txa : 16;
359} PBAST;
360AssertCompileSize(PBAST, 4);
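/*
 * Worked example (illustrative, assuming the 64 KB packet buffer of the
 * 82540EM): writing 0x30 to PBA gives rxa = 48, i.e. 48 KB of the buffer for
 * receive, leaving 16 KB for transmit. The PBA_st macro provides field access:
 */
#if 0
    E1kLog(("RX allocation: %u KB\n", PBA_st->rxa));
#endif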
361
362#define TXDCTL_WTHRESH_MASK 0x003F0000
363#define TXDCTL_WTHRESH_SHIFT 16
364#define TXDCTL_LWTHRESH_MASK 0xFE000000
365#define TXDCTL_LWTHRESH_SHIFT 25
366
367#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
368#define RXCSUM_PCSS_SHIFT 0
369
370/** @name Register access macros
371 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
372 * @{ */
373#define CTRL pThis->auRegs[CTRL_IDX]
374#define STATUS pThis->auRegs[STATUS_IDX]
375#define EECD pThis->auRegs[EECD_IDX]
376#define EERD pThis->auRegs[EERD_IDX]
377#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
378#define FLA pThis->auRegs[FLA_IDX]
379#define MDIC pThis->auRegs[MDIC_IDX]
380#define FCAL pThis->auRegs[FCAL_IDX]
381#define FCAH pThis->auRegs[FCAH_IDX]
382#define FCT pThis->auRegs[FCT_IDX]
383#define VET pThis->auRegs[VET_IDX]
384#define ICR pThis->auRegs[ICR_IDX]
385#define ITR pThis->auRegs[ITR_IDX]
386#define ICS pThis->auRegs[ICS_IDX]
387#define IMS pThis->auRegs[IMS_IDX]
388#define IMC pThis->auRegs[IMC_IDX]
389#define RCTL pThis->auRegs[RCTL_IDX]
390#define FCTTV pThis->auRegs[FCTTV_IDX]
391#define TXCW pThis->auRegs[TXCW_IDX]
392#define RXCW pThis->auRegs[RXCW_IDX]
393#define TCTL pThis->auRegs[TCTL_IDX]
394#define TIPG pThis->auRegs[TIPG_IDX]
395#define AIFS pThis->auRegs[AIFS_IDX]
396#define LEDCTL pThis->auRegs[LEDCTL_IDX]
397#define PBA pThis->auRegs[PBA_IDX]
398#define FCRTL pThis->auRegs[FCRTL_IDX]
399#define FCRTH pThis->auRegs[FCRTH_IDX]
400#define RDFH pThis->auRegs[RDFH_IDX]
401#define RDFT pThis->auRegs[RDFT_IDX]
402#define RDFHS pThis->auRegs[RDFHS_IDX]
403#define RDFTS pThis->auRegs[RDFTS_IDX]
404#define RDFPC pThis->auRegs[RDFPC_IDX]
405#define RDBAL pThis->auRegs[RDBAL_IDX]
406#define RDBAH pThis->auRegs[RDBAH_IDX]
407#define RDLEN pThis->auRegs[RDLEN_IDX]
408#define RDH pThis->auRegs[RDH_IDX]
409#define RDT pThis->auRegs[RDT_IDX]
410#define RDTR pThis->auRegs[RDTR_IDX]
411#define RXDCTL pThis->auRegs[RXDCTL_IDX]
412#define RADV pThis->auRegs[RADV_IDX]
413#define RSRPD pThis->auRegs[RSRPD_IDX]
414#define TXDMAC pThis->auRegs[TXDMAC_IDX]
415#define TDFH pThis->auRegs[TDFH_IDX]
416#define TDFT pThis->auRegs[TDFT_IDX]
417#define TDFHS pThis->auRegs[TDFHS_IDX]
418#define TDFTS pThis->auRegs[TDFTS_IDX]
419#define TDFPC pThis->auRegs[TDFPC_IDX]
420#define TDBAL pThis->auRegs[TDBAL_IDX]
421#define TDBAH pThis->auRegs[TDBAH_IDX]
422#define TDLEN pThis->auRegs[TDLEN_IDX]
423#define TDH pThis->auRegs[TDH_IDX]
424#define TDT pThis->auRegs[TDT_IDX]
425#define TIDV pThis->auRegs[TIDV_IDX]
426#define TXDCTL pThis->auRegs[TXDCTL_IDX]
427#define TADV pThis->auRegs[TADV_IDX]
428#define TSPMT pThis->auRegs[TSPMT_IDX]
429#define CRCERRS pThis->auRegs[CRCERRS_IDX]
430#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
431#define SYMERRS pThis->auRegs[SYMERRS_IDX]
432#define RXERRC pThis->auRegs[RXERRC_IDX]
433#define MPC pThis->auRegs[MPC_IDX]
434#define SCC pThis->auRegs[SCC_IDX]
435#define ECOL pThis->auRegs[ECOL_IDX]
436#define MCC pThis->auRegs[MCC_IDX]
437#define LATECOL pThis->auRegs[LATECOL_IDX]
438#define COLC pThis->auRegs[COLC_IDX]
439#define DC pThis->auRegs[DC_IDX]
440#define TNCRS pThis->auRegs[TNCRS_IDX]
441/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
442#define CEXTERR pThis->auRegs[CEXTERR_IDX]
443#define RLEC pThis->auRegs[RLEC_IDX]
444#define XONRXC pThis->auRegs[XONRXC_IDX]
445#define XONTXC pThis->auRegs[XONTXC_IDX]
446#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
447#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
448#define FCRUC pThis->auRegs[FCRUC_IDX]
449#define PRC64 pThis->auRegs[PRC64_IDX]
450#define PRC127 pThis->auRegs[PRC127_IDX]
451#define PRC255 pThis->auRegs[PRC255_IDX]
452#define PRC511 pThis->auRegs[PRC511_IDX]
453#define PRC1023 pThis->auRegs[PRC1023_IDX]
454#define PRC1522 pThis->auRegs[PRC1522_IDX]
455#define GPRC pThis->auRegs[GPRC_IDX]
456#define BPRC pThis->auRegs[BPRC_IDX]
457#define MPRC pThis->auRegs[MPRC_IDX]
458#define GPTC pThis->auRegs[GPTC_IDX]
459#define GORCL pThis->auRegs[GORCL_IDX]
460#define GORCH pThis->auRegs[GORCH_IDX]
461#define GOTCL pThis->auRegs[GOTCL_IDX]
462#define GOTCH pThis->auRegs[GOTCH_IDX]
463#define RNBC pThis->auRegs[RNBC_IDX]
464#define RUC pThis->auRegs[RUC_IDX]
465#define RFC pThis->auRegs[RFC_IDX]
466#define ROC pThis->auRegs[ROC_IDX]
467#define RJC pThis->auRegs[RJC_IDX]
468#define MGTPRC pThis->auRegs[MGTPRC_IDX]
469#define MGTPDC pThis->auRegs[MGTPDC_IDX]
470#define MGTPTC pThis->auRegs[MGTPTC_IDX]
471#define TORL pThis->auRegs[TORL_IDX]
472#define TORH pThis->auRegs[TORH_IDX]
473#define TOTL pThis->auRegs[TOTL_IDX]
474#define TOTH pThis->auRegs[TOTH_IDX]
475#define TPR pThis->auRegs[TPR_IDX]
476#define TPT pThis->auRegs[TPT_IDX]
477#define PTC64 pThis->auRegs[PTC64_IDX]
478#define PTC127 pThis->auRegs[PTC127_IDX]
479#define PTC255 pThis->auRegs[PTC255_IDX]
480#define PTC511 pThis->auRegs[PTC511_IDX]
481#define PTC1023 pThis->auRegs[PTC1023_IDX]
482#define PTC1522 pThis->auRegs[PTC1522_IDX]
483#define MPTC pThis->auRegs[MPTC_IDX]
484#define BPTC pThis->auRegs[BPTC_IDX]
485#define TSCTC pThis->auRegs[TSCTC_IDX]
486#define TSCTFC pThis->auRegs[TSCTFC_IDX]
487#define RXCSUM pThis->auRegs[RXCSUM_IDX]
488#define WUC pThis->auRegs[WUC_IDX]
489#define WUFC pThis->auRegs[WUFC_IDX]
490#define WUS pThis->auRegs[WUS_IDX]
491#define MANC pThis->auRegs[MANC_IDX]
492#define IPAV pThis->auRegs[IPAV_IDX]
493#define WUPL pThis->auRegs[WUPL_IDX]
494/** @} */
495#endif /* VBOX_DEVICE_STRUCT_TESTCASE */
496
497/**
498 * Indices of memory-mapped registers in register table.
499 */
500typedef enum
501{
502 CTRL_IDX,
503 STATUS_IDX,
504 EECD_IDX,
505 EERD_IDX,
506 CTRL_EXT_IDX,
507 FLA_IDX,
508 MDIC_IDX,
509 FCAL_IDX,
510 FCAH_IDX,
511 FCT_IDX,
512 VET_IDX,
513 ICR_IDX,
514 ITR_IDX,
515 ICS_IDX,
516 IMS_IDX,
517 IMC_IDX,
518 RCTL_IDX,
519 FCTTV_IDX,
520 TXCW_IDX,
521 RXCW_IDX,
522 TCTL_IDX,
523 TIPG_IDX,
524 AIFS_IDX,
525 LEDCTL_IDX,
526 PBA_IDX,
527 FCRTL_IDX,
528 FCRTH_IDX,
529 RDFH_IDX,
530 RDFT_IDX,
531 RDFHS_IDX,
532 RDFTS_IDX,
533 RDFPC_IDX,
534 RDBAL_IDX,
535 RDBAH_IDX,
536 RDLEN_IDX,
537 RDH_IDX,
538 RDT_IDX,
539 RDTR_IDX,
540 RXDCTL_IDX,
541 RADV_IDX,
542 RSRPD_IDX,
543 TXDMAC_IDX,
544 TDFH_IDX,
545 TDFT_IDX,
546 TDFHS_IDX,
547 TDFTS_IDX,
548 TDFPC_IDX,
549 TDBAL_IDX,
550 TDBAH_IDX,
551 TDLEN_IDX,
552 TDH_IDX,
553 TDT_IDX,
554 TIDV_IDX,
555 TXDCTL_IDX,
556 TADV_IDX,
557 TSPMT_IDX,
558 CRCERRS_IDX,
559 ALGNERRC_IDX,
560 SYMERRS_IDX,
561 RXERRC_IDX,
562 MPC_IDX,
563 SCC_IDX,
564 ECOL_IDX,
565 MCC_IDX,
566 LATECOL_IDX,
567 COLC_IDX,
568 DC_IDX,
569 TNCRS_IDX,
570 SEC_IDX,
571 CEXTERR_IDX,
572 RLEC_IDX,
573 XONRXC_IDX,
574 XONTXC_IDX,
575 XOFFRXC_IDX,
576 XOFFTXC_IDX,
577 FCRUC_IDX,
578 PRC64_IDX,
579 PRC127_IDX,
580 PRC255_IDX,
581 PRC511_IDX,
582 PRC1023_IDX,
583 PRC1522_IDX,
584 GPRC_IDX,
585 BPRC_IDX,
586 MPRC_IDX,
587 GPTC_IDX,
588 GORCL_IDX,
589 GORCH_IDX,
590 GOTCL_IDX,
591 GOTCH_IDX,
592 RNBC_IDX,
593 RUC_IDX,
594 RFC_IDX,
595 ROC_IDX,
596 RJC_IDX,
597 MGTPRC_IDX,
598 MGTPDC_IDX,
599 MGTPTC_IDX,
600 TORL_IDX,
601 TORH_IDX,
602 TOTL_IDX,
603 TOTH_IDX,
604 TPR_IDX,
605 TPT_IDX,
606 PTC64_IDX,
607 PTC127_IDX,
608 PTC255_IDX,
609 PTC511_IDX,
610 PTC1023_IDX,
611 PTC1522_IDX,
612 MPTC_IDX,
613 BPTC_IDX,
614 TSCTC_IDX,
615 TSCTFC_IDX,
616 RXCSUM_IDX,
617 WUC_IDX,
618 WUFC_IDX,
619 WUS_IDX,
620 MANC_IDX,
621 IPAV_IDX,
622 WUPL_IDX,
623 MTA_IDX,
624 RA_IDX,
625 VFTA_IDX,
626 IP4AT_IDX,
627 IP6AT_IDX,
628 WUPM_IDX,
629 FFLT_IDX,
630 FFMT_IDX,
631 FFVT_IDX,
632 PBM_IDX,
633 RA_82542_IDX,
634 MTA_82542_IDX,
635 VFTA_82542_IDX,
636 E1K_NUM_OF_REGS
637} E1kRegIndex;
638
639#define E1K_NUM_OF_32BIT_REGS MTA_IDX
640/** The number of registers with strictly increasing offset. */
641#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
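/*
 * Illustrative sketch (not the actual lookup code): registers up to WUPL have
 * strictly increasing offsets in g_aE1kRegMap (defined further down), so an
 * MMIO offset can be resolved with a binary search over that prefix of the
 * table; the table-like registers (MTA, RA, VFTA, ...) need range checks instead.
 */
#if 0
static int e1kExampleLookupReg(uint32_t offReg)
{
    int iLo = 0;
    int iHi = E1K_NUM_OF_BINARY_SEARCHABLE - 1;
    while (iLo <= iHi)
    {
        int iMid = (iLo + iHi) / 2;
        if (offReg < g_aE1kRegMap[iMid].offset)
            iHi = iMid - 1;
        else if (offReg >= g_aE1kRegMap[iMid].offset + g_aE1kRegMap[iMid].size)
            iLo = iMid + 1;
        else
            return iMid;    /* the offset falls within this register */
    }
    return -1;              /* not one of the binary-searchable registers */
}
#endif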
642
643
644/**
645 * Define E1000-specific EEPROM layout.
646 */
647struct E1kEEPROM
648{
649 public:
650 EEPROM93C46 eeprom;
651
652#ifdef IN_RING3
653 /**
654 * Initialize EEPROM content.
655 *
656 * @param macAddr MAC address of E1000.
657 */
658 void init(RTMAC &macAddr)
659 {
660 eeprom.init();
661 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
662 eeprom.m_au16Data[0x04] = 0xFFFF;
663 /*
664 * bit 3 - full support for power management
665 * bit 10 - full duplex
666 */
667 eeprom.m_au16Data[0x0A] = 0x4408;
668 eeprom.m_au16Data[0x0B] = 0x001E;
669 eeprom.m_au16Data[0x0C] = 0x8086;
670 eeprom.m_au16Data[0x0D] = 0x100E;
671 eeprom.m_au16Data[0x0E] = 0x8086;
672 eeprom.m_au16Data[0x0F] = 0x3040;
673 eeprom.m_au16Data[0x21] = 0x7061;
674 eeprom.m_au16Data[0x22] = 0x280C;
675 eeprom.m_au16Data[0x23] = 0x00C8;
676 eeprom.m_au16Data[0x24] = 0x00C8;
677 eeprom.m_au16Data[0x2F] = 0x0602;
678 updateChecksum();
679 };
680
681 /**
682 * Compute the checksum as required by E1000 and store it
683 * in the last word.
684 */
685 void updateChecksum()
686 {
687 uint16_t u16Checksum = 0;
688
689 for (int i = 0; i < eeprom.SIZE-1; i++)
690 u16Checksum += eeprom.m_au16Data[i];
691 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
692 };
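    /*
     * Illustrative counterpart (hypothetical helper, not used by the device):
     * an EEPROM image is valid when the 16-bit sum of all words, including
     * the checksum word written above, equals 0xBABA.
     */
#if 0
    bool isChecksumValid()
    {
        uint16_t u16Sum = 0;
        for (int i = 0; i < eeprom.SIZE; i++)
            u16Sum += eeprom.m_au16Data[i];
        return u16Sum == 0xBABA;
    };
#endif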
693
694 /**
695 * The first 6 bytes of the EEPROM contain the MAC address.
696 *
697 * @param pMac Where to store the MAC address of the E1000.
698 */
699 void getMac(PRTMAC pMac)
700 {
701 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
702 };
703
704 uint32_t read()
705 {
706 return eeprom.read();
707 }
708
709 void write(uint32_t u32Wires)
710 {
711 eeprom.write(u32Wires);
712 }
713
714 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
715 {
716 return eeprom.readWord(u32Addr, pu16Value);
717 }
718
719 int load(PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
720 {
721 return eeprom.load(pHlp, pSSM);
722 }
723
724 void save(PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
725 {
726 eeprom.save(pHlp, pSSM);
727 }
728#endif /* IN_RING3 */
729};
730
731
732#define E1K_SPEC_VLAN(s) (s & 0xFFF)
733#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
734#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
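/*
 * Worked example (illustrative): for a "special" field value of 0xA07B the
 * macros above decompose the VLAN tag as follows:
 *   E1K_SPEC_PRI(0xA07B)  = 5      (priority, bits 15:13)
 *   E1K_SPEC_CFI(0xA07B)  = 0      (canonical format indicator, bit 12)
 *   E1K_SPEC_VLAN(0xA07B) = 0x07B  (VLAN id, bits 11:0)
 */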
735
736struct E1kRxDStatus
737{
738 /** @name Descriptor Status field (3.2.3.1)
739 * @{ */
740 unsigned fDD : 1; /**< Descriptor Done. */
741 unsigned fEOP : 1; /**< End of packet. */
742 unsigned fIXSM : 1; /**< Ignore checksum indication. */
743 unsigned fVP : 1; /**< VLAN, matches VET. */
744 unsigned : 1;
745 unsigned fTCPCS : 1; /**< TCP checksum calculated on the packet. */
746 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
747 unsigned fPIF : 1; /**< Passed in-exact filter */
748 /** @} */
749 /** @name Descriptor Errors field (3.2.3.2)
750 * (Only valid when fEOP and fDD are set.)
751 * @{ */
752 unsigned fCE : 1; /**< CRC or alignment error. */
753 unsigned : 4; /**< Reserved, varies with different models... */
754 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
755 unsigned fIPE : 1; /**< IP Checksum error. */
756 unsigned fRXE : 1; /**< RX Data error. */
757 /** @} */
758 /** @name Descriptor Special field (3.2.3.3)
759 * @{ */
760 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
761 /** @} */
762};
763typedef struct E1kRxDStatus E1KRXDST;
764
765struct E1kRxDesc_st
766{
767 uint64_t u64BufAddr; /**< Address of data buffer */
768 uint16_t u16Length; /**< Length of data in buffer */
769 uint16_t u16Checksum; /**< Packet checksum */
770 E1KRXDST status;
771};
772typedef struct E1kRxDesc_st E1KRXDESC;
773AssertCompileSize(E1KRXDESC, 16);
774
775#define E1K_DTYP_LEGACY -1
776#define E1K_DTYP_CONTEXT 0
777#define E1K_DTYP_DATA 1
778
779struct E1kTDLegacy
780{
781 uint64_t u64BufAddr; /**< Address of data buffer */
782 struct TDLCmd_st
783 {
784 unsigned u16Length : 16;
785 unsigned u8CSO : 8;
786 /* CMD field : 8 */
787 unsigned fEOP : 1;
788 unsigned fIFCS : 1;
789 unsigned fIC : 1;
790 unsigned fRS : 1;
791 unsigned fRPS : 1;
792 unsigned fDEXT : 1;
793 unsigned fVLE : 1;
794 unsigned fIDE : 1;
795 } cmd;
796 struct TDLDw3_st
797 {
798 /* STA field */
799 unsigned fDD : 1;
800 unsigned fEC : 1;
801 unsigned fLC : 1;
802 unsigned fTURSV : 1;
803 /* RSV field */
804 unsigned u4RSV : 4;
805 /* CSS field */
806 unsigned u8CSS : 8;
807 /* Special field*/
808 unsigned u16Special: 16;
809 } dw3;
810};
811
812/**
813 * TCP/IP Context Transmit Descriptor, section 3.3.6.
814 */
815struct E1kTDContext
816{
817 struct CheckSum_st
818 {
819 /** TSE: Header start. !TSE: Checksum start. */
820 unsigned u8CSS : 8;
821 /** Checksum offset - where to store it. */
822 unsigned u8CSO : 8;
823 /** Checksum ending (inclusive) offset, 0 = end of packet. */
824 unsigned u16CSE : 16;
825 } ip;
826 struct CheckSum_st tu;
827 struct TDCDw2_st
828 {
829 /** TSE: The total number of payload bytes for this context. Sans header. */
830 unsigned u20PAYLEN : 20;
831 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
832 unsigned u4DTYP : 4;
833 /** TUCMD field, 8 bits
834 * @{ */
835 /** TSE: TCP (set) or UDP (clear). */
836 unsigned fTCP : 1;
837 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
838 * the IP header. Does not affect the checksumming.
839 * @remarks 82544GC/EI interprets a cleared field differently. */
840 unsigned fIP : 1;
841 /** TSE: TCP segmentation enable. When clear, the context only sets up checksum offloading. */
842 unsigned fTSE : 1;
843 /** Report status (only applies to dw3.fDD here). */
844 unsigned fRS : 1;
845 /** Reserved, MBZ. */
846 unsigned fRSV1 : 1;
847 /** Descriptor extension, must be set for this descriptor type. */
848 unsigned fDEXT : 1;
849 /** Reserved, MBZ. */
850 unsigned fRSV2 : 1;
851 /** Interrupt delay enable. */
852 unsigned fIDE : 1;
853 /** @} */
854 } dw2;
855 struct TDCDw3_st
856 {
857 /** Descriptor Done. */
858 unsigned fDD : 1;
859 /** Reserved, MBZ. */
860 unsigned u7RSV : 7;
861 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
862 unsigned u8HDRLEN : 8;
863 /** TSO: Maximum segment size. */
864 unsigned u16MSS : 16;
865 } dw3;
866};
867typedef struct E1kTDContext E1KTXCTX;
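/*
 * Worked example (illustrative): with a TSE context of u16MSS = 1460,
 * u8HDRLEN = 54 (Ethernet 14 + IP 20 + TCP 20) and u20PAYLEN = 8000, the
 * payload is cut into 6 frames: five carrying 1460 payload bytes each and a
 * final one carrying the remaining 8000 - 5 * 1460 = 700 bytes, each frame
 * prefixed with the 54-byte prototype header.
 */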
868
869/**
870 * TCP/IP Data Transmit Descriptor, section 3.3.7.
871 */
872struct E1kTDData
873{
874 uint64_t u64BufAddr; /**< Address of data buffer */
875 struct TDDCmd_st
876 {
877 /** The total length of data pointed to by this descriptor. */
878 unsigned u20DTALEN : 20;
879 /** The descriptor type - E1K_DTYP_DATA (1). */
880 unsigned u4DTYP : 4;
881 /** @name DCMD field, 8 bits (3.3.7.1).
882 * @{ */
883 /** End of packet. Note TSCTFC update. */
884 unsigned fEOP : 1;
885 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
886 unsigned fIFCS : 1;
887 /** Use the TSE context when set and the normal when clear. */
888 unsigned fTSE : 1;
889 /** Report status (dw3.STA). */
890 unsigned fRS : 1;
891 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
892 unsigned fRPS : 1;
893 /** Descriptor extension, must be set for this descriptor type. */
894 unsigned fDEXT : 1;
895 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
896 * Insert dw3.SPECIAL after ethernet header. */
897 unsigned fVLE : 1;
898 /** Interrupt delay enable. */
899 unsigned fIDE : 1;
900 /** @} */
901 } cmd;
902 struct TDDDw3_st
903 {
904 /** @name STA field (3.3.7.2)
905 * @{ */
906 unsigned fDD : 1; /**< Descriptor done. */
907 unsigned fEC : 1; /**< Excess collision. */
908 unsigned fLC : 1; /**< Late collision. */
909 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
910 unsigned fTURSV : 1;
911 /** @} */
912 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
913 /** @name POPTS (Packet Option) field (3.3.7.3)
914 * @{ */
915 unsigned fIXSM : 1; /**< Insert IP checksum. */
916 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
917 unsigned u6RSV : 6; /**< Reserved, MBZ. */
918 /** @} */
919 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
920 * Requires fEOP, fVLE and CTRL.VME to be set.
921 * @{ */
922 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
923 /** @} */
924 } dw3;
925};
926typedef struct E1kTDData E1KTXDAT;
927
928union E1kTxDesc
929{
930 struct E1kTDLegacy legacy;
931 struct E1kTDContext context;
932 struct E1kTDData data;
933};
934typedef union E1kTxDesc E1KTXDESC;
935AssertCompileSize(E1KTXDESC, 16);
936
937#define RA_CTL_AS 0x0003
938#define RA_CTL_AV 0x8000
939
940union E1kRecAddr
941{
942 uint32_t au32[32];
943 struct RAArray
944 {
945 uint8_t addr[6];
946 uint16_t ctl;
947 } array[16];
948};
949typedef struct E1kRecAddr::RAArray E1KRAELEM;
950typedef union E1kRecAddr E1KRA;
951AssertCompileSize(E1KRA, 8*16);
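/*
 * Illustrative sketch (hypothetical helper): a unicast destination address is
 * accepted when it matches a Receive Address entry whose control word has the
 * Address Valid bit set.
 */
#if 0
static bool e1kExampleMatchRA(const E1KRA *pRA, const uint8_t *pbDstAddr)
{
    for (unsigned i = 0; i < RT_ELEMENTS(pRA->array); i++)
        if (   (pRA->array[i].ctl & RA_CTL_AV)
            && memcmp(pRA->array[i].addr, pbDstAddr, sizeof(pRA->array[i].addr)) == 0)
            return true;
    return false;
}
#endif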
952
953#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
954#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
955#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
956#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
957
958/** @todo use+extend RTNETIPV4 */
959struct E1kIpHeader
960{
961 /* type of service / version / header length */
962 uint16_t tos_ver_hl;
963 /* total length */
964 uint16_t total_len;
965 /* identification */
966 uint16_t ident;
967 /* fragment offset field */
968 uint16_t offset;
969 /* time to live / protocol*/
970 uint16_t ttl_proto;
971 /* checksum */
972 uint16_t chksum;
973 /* source IP address */
974 uint32_t src;
975 /* destination IP address */
976 uint32_t dest;
977};
978AssertCompileSize(struct E1kIpHeader, 20);
979
980#define E1K_TCP_FIN UINT16_C(0x01)
981#define E1K_TCP_SYN UINT16_C(0x02)
982#define E1K_TCP_RST UINT16_C(0x04)
983#define E1K_TCP_PSH UINT16_C(0x08)
984#define E1K_TCP_ACK UINT16_C(0x10)
985#define E1K_TCP_URG UINT16_C(0x20)
986#define E1K_TCP_ECE UINT16_C(0x40)
987#define E1K_TCP_CWR UINT16_C(0x80)
988#define E1K_TCP_FLAGS UINT16_C(0x3f)
989
990/** @todo use+extend RTNETTCP */
991struct E1kTcpHeader
992{
993 uint16_t src;
994 uint16_t dest;
995 uint32_t seqno;
996 uint32_t ackno;
997 uint16_t hdrlen_flags;
998 uint16_t wnd;
999 uint16_t chksum;
1000 uint16_t urgp;
1001};
1002AssertCompileSize(struct E1kTcpHeader, 20);
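/*
 * Illustrative sketch (hypothetical helpers): the combined 16-bit fields keep
 * the wire (big-endian) byte order, so the htons/ntohs helpers defined above
 * can be used to extract sub-fields such as the IP header length and the TCP
 * data offset, both stored in 32-bit words.
 */
#if 0
DECLINLINE(unsigned) e1kExampleIpHdrLen(const struct E1kIpHeader *pIp)
{
    return ((ntohs(pIp->tos_ver_hl) >> 8) & 0xF) * 4;
}

DECLINLINE(unsigned) e1kExampleTcpHdrLen(const struct E1kTcpHeader *pTcp)
{
    return (ntohs(pTcp->hdrlen_flags) >> 12) * 4;
}
#endif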
1003
1004
1005#ifdef E1K_WITH_TXD_CACHE
1006/** The current Saved state version. */
1007# define E1K_SAVEDSTATE_VERSION 4
1008/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
1009# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
1010#else /* !E1K_WITH_TXD_CACHE */
1011/** The current Saved state version. */
1012# define E1K_SAVEDSTATE_VERSION 3
1013#endif /* !E1K_WITH_TXD_CACHE */
1014/** Saved state version for VirtualBox 4.1 and earlier.
1015 * These did not include VLAN tag fields. */
1016#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
1017/** Saved state version for VirtualBox 3.0 and earlier.
1018 * This did not include the configuration part nor the E1kEEPROM. */
1019#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
1020
1021/**
1022 * E1000 shared device state.
1023 *
1024 * This is shared between ring-0 and ring-3.
1025 */
1026typedef struct E1KSTATE
1027{
1028 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1029
1030 /** Handle to PCI region \#0, the MMIO region. */
1031 IOMMMIOHANDLE hMmioRegion;
1032 /** Handle to PCI region \#2, the I/O ports. */
1033 IOMIOPORTHANDLE hIoPorts;
1034
1035 /** Receive Interrupt Delay Timer. */
1036 TMTIMERHANDLE hRIDTimer;
1037 /** Receive Absolute Delay Timer. */
1038 TMTIMERHANDLE hRADTimer;
1039 /** Transmit Interrupt Delay Timer. */
1040 TMTIMERHANDLE hTIDTimer;
1041 /** Transmit Absolute Delay Timer. */
1042 TMTIMERHANDLE hTADTimer;
1043 /** Transmit Delay Timer. */
1044 TMTIMERHANDLE hTXDTimer;
1045 /** Late Interrupt Timer. */
1046 TMTIMERHANDLE hIntTimer;
1047 /** Link Up(/Restore) Timer. */
1048 TMTIMERHANDLE hLUTimer;
1049
1050 /** Transmit task. */
1051 PDMTASKHANDLE hTxTask;
1052
1053 /** Critical section - what is it protecting? */
1054 PDMCRITSECT cs;
1055 /** RX Critical section. */
1056 PDMCRITSECT csRx;
1057#ifdef E1K_WITH_TX_CS
1058 /** TX Critical section. */
1059 PDMCRITSECT csTx;
1060#endif /* E1K_WITH_TX_CS */
1061 /** MAC address obtained from the configuration. */
1062 RTMAC macConfigured;
1063 uint16_t u16Padding0;
1064 /** EMT: Last time the interrupt was acknowledged. */
1065 uint64_t u64AckedAt;
1066 /** All: Used for eliminating spurious interrupts. */
1067 bool fIntRaised;
1068 /** EMT: false if the cable is disconnected by the GUI. */
1069 bool fCableConnected;
1070 /** true if the device is attached to a driver. */
1071 bool fIsAttached;
1072 /** EMT: Compute Ethernet CRC for RX packets. */
1073 bool fEthernetCRC;
1074 /** All: throttle interrupts. */
1075 bool fItrEnabled;
1076 /** All: throttle RX interrupts. */
1077 bool fItrRxEnabled;
1078 /** All: Delay TX interrupts using TIDV/TADV. */
1079 bool fTidEnabled;
1080 bool afPadding[2];
1081 /** Link up delay (in milliseconds). */
1082 uint32_t cMsLinkUpDelay;
1083
1084 /** All: Device register storage. */
1085 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1086 /** TX/RX: Status LED. */
1087 PDMLED led;
1088 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1089 uint32_t u32PktNo;
1090
1091 /** EMT: Offset of the register to be read via IO. */
1092 uint32_t uSelectedReg;
1093 /** EMT: Multicast Table Array. */
1094 uint32_t auMTA[128];
1095 /** EMT: Receive Address registers. */
1096 E1KRA aRecAddr;
1097 /** EMT: VLAN filter table array. */
1098 uint32_t auVFTA[128];
1099 /** EMT: Receive buffer size. */
1100 uint16_t u16RxBSize;
1101 /** EMT: Locked state -- no state alteration possible. */
1102 bool fLocked;
1103 /** EMT: */
1104 bool fDelayInts;
1105 /** All: */
1106 bool fIntMaskUsed;
1107
1108 /** N/A: */
1109 bool volatile fMaybeOutOfSpace;
1110 /** EMT: Gets signalled when more RX descriptors become available. */
1111 SUPSEMEVENT hEventMoreRxDescAvail;
1112#ifdef E1K_WITH_RXD_CACHE
1113 /** RX: Fetched RX descriptors. */
1114 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1115 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1116 /** RX: Actual number of fetched RX descriptors. */
1117 uint32_t nRxDFetched;
1118 /** RX: Index in cache of RX descriptor being processed. */
1119 uint32_t iRxDCurrent;
1120#endif /* E1K_WITH_RXD_CACHE */
1121
1122 /** TX: Context used for TCP segmentation packets. */
1123 E1KTXCTX contextTSE;
1124 /** TX: Context used for ordinary packets. */
1125 E1KTXCTX contextNormal;
1126#ifdef E1K_WITH_TXD_CACHE
1127 /** TX: Fetched TX descriptors. */
1128 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1129 /** TX: Validity of TX descriptors. Set by e1kLocateTxPacket, used by e1kXmitPacket. */
1130 bool afTxDValid[E1K_TXD_CACHE_SIZE];
1131 /** TX: Actual number of fetched TX descriptors. */
1132 uint8_t nTxDFetched;
1133 /** TX: Index in cache of TX descriptor being processed. */
1134 uint8_t iTxDCurrent;
1135 /** TX: Will this frame be sent as GSO. */
1136 bool fGSO;
1137 /** Alignment padding. */
1138 bool fReserved;
1139 /** TX: Number of bytes in next packet. */
1140 uint32_t cbTxAlloc;
1141
1142#endif /* E1K_WITH_TXD_CACHE */
1143 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1144 * applicable to the current TSE mode. */
1145 PDMNETWORKGSO GsoCtx;
1146 /** Scratch space for holding the loopback / fallback scatter / gather
1147 * descriptor. */
1148 union
1149 {
1150 PDMSCATTERGATHER Sg;
1151 uint8_t padding[8 * sizeof(RTUINTPTR)];
1152 } uTxFallback;
1153 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1154 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1155 /** TX: Number of bytes assembled in TX packet buffer. */
1156 uint16_t u16TxPktLen;
1157 /** TX: When false, forces segmentation in the E1000 instead of sending frames as GSO. */
1158 bool fGSOEnabled;
1159 /** TX: IP checksum has to be inserted if true. */
1160 bool fIPcsum;
1161 /** TX: TCP/UDP checksum has to be inserted if true. */
1162 bool fTCPcsum;
1163 /** TX: VLAN tag has to be inserted if true. */
1164 bool fVTag;
1165 /** TX: TCI part of VLAN tag to be inserted. */
1166 uint16_t u16VTagTCI;
1167 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1168 uint32_t u32PayRemain;
1169 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1170 uint16_t u16HdrRemain;
1171 /** TX TSE fallback: Flags from template header. */
1172 uint16_t u16SavedFlags;
1173 /** TX TSE fallback: Partial checksum from template header. */
1174 uint32_t u32SavedCsum;
1175 /** ?: Emulated controller type. */
1176 E1KCHIP eChip;
1177
1178 /** EMT: Physical interface emulation. */
1179 PHY phy;
1180
1181#if 0
1182 /** Alignment padding. */
1183 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1184#endif
1185
1186 STAMCOUNTER StatReceiveBytes;
1187 STAMCOUNTER StatTransmitBytes;
1188#if defined(VBOX_WITH_STATISTICS)
1189 STAMPROFILEADV StatMMIOReadRZ;
1190 STAMPROFILEADV StatMMIOReadR3;
1191 STAMPROFILEADV StatMMIOWriteRZ;
1192 STAMPROFILEADV StatMMIOWriteR3;
1193 STAMPROFILEADV StatEEPROMRead;
1194 STAMPROFILEADV StatEEPROMWrite;
1195 STAMPROFILEADV StatIOReadRZ;
1196 STAMPROFILEADV StatIOReadR3;
1197 STAMPROFILEADV StatIOWriteRZ;
1198 STAMPROFILEADV StatIOWriteR3;
1199 STAMPROFILEADV StatLateIntTimer;
1200 STAMCOUNTER StatLateInts;
1201 STAMCOUNTER StatIntsRaised;
1202 STAMCOUNTER StatIntsPrevented;
1203 STAMPROFILEADV StatReceive;
1204 STAMPROFILEADV StatReceiveCRC;
1205 STAMPROFILEADV StatReceiveFilter;
1206 STAMPROFILEADV StatReceiveStore;
1207 STAMPROFILEADV StatTransmitRZ;
1208 STAMPROFILEADV StatTransmitR3;
1209 STAMPROFILE StatTransmitSendRZ;
1210 STAMPROFILE StatTransmitSendR3;
1211 STAMPROFILE StatRxOverflow;
1212 STAMCOUNTER StatRxOverflowWakeupRZ;
1213 STAMCOUNTER StatRxOverflowWakeupR3;
1214 STAMCOUNTER StatTxDescCtxNormal;
1215 STAMCOUNTER StatTxDescCtxTSE;
1216 STAMCOUNTER StatTxDescLegacy;
1217 STAMCOUNTER StatTxDescData;
1218 STAMCOUNTER StatTxDescTSEData;
1219 STAMCOUNTER StatTxPathFallback;
1220 STAMCOUNTER StatTxPathGSO;
1221 STAMCOUNTER StatTxPathRegular;
1222 STAMCOUNTER StatPHYAccesses;
1223 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1224 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1225#endif /* VBOX_WITH_STATISTICS */
1226
1227#ifdef E1K_INT_STATS
1228 /* Internal stats */
1229 uint64_t u64ArmedAt;
1230 uint64_t uStatMaxTxDelay;
1231 uint32_t uStatInt;
1232 uint32_t uStatIntTry;
1233 uint32_t uStatIntLower;
1234 uint32_t uStatNoIntICR;
1235 int32_t iStatIntLost;
1236 int32_t iStatIntLostOne;
1237 uint32_t uStatIntIMS;
1238 uint32_t uStatIntSkip;
1239 uint32_t uStatIntLate;
1240 uint32_t uStatIntMasked;
1241 uint32_t uStatIntEarly;
1242 uint32_t uStatIntRx;
1243 uint32_t uStatIntTx;
1244 uint32_t uStatIntICS;
1245 uint32_t uStatIntRDTR;
1246 uint32_t uStatIntRXDMT0;
1247 uint32_t uStatIntTXQE;
1248 uint32_t uStatTxNoRS;
1249 uint32_t uStatTxIDE;
1250 uint32_t uStatTxDelayed;
1251 uint32_t uStatTxDelayExp;
1252 uint32_t uStatTAD;
1253 uint32_t uStatTID;
1254 uint32_t uStatRAD;
1255 uint32_t uStatRID;
1256 uint32_t uStatRxFrm;
1257 uint32_t uStatTxFrm;
1258 uint32_t uStatDescCtx;
1259 uint32_t uStatDescDat;
1260 uint32_t uStatDescLeg;
1261 uint32_t uStatTx1514;
1262 uint32_t uStatTx2962;
1263 uint32_t uStatTx4410;
1264 uint32_t uStatTx5858;
1265 uint32_t uStatTx7306;
1266 uint32_t uStatTx8754;
1267 uint32_t uStatTx16384;
1268 uint32_t uStatTx32768;
1269 uint32_t uStatTxLarge;
1270 uint32_t uStatAlign;
1271#endif /* E1K_INT_STATS */
1272} E1KSTATE;
1273/** Pointer to the E1000 device state. */
1274typedef E1KSTATE *PE1KSTATE;
1275
1276/**
1277 * E1000 ring-3 device state
1278 *
1279 * @implements PDMINETWORKDOWN
1280 * @implements PDMINETWORKCONFIG
1281 * @implements PDMILEDPORTS
1282 */
1283typedef struct E1KSTATER3
1284{
1285 PDMIBASE IBase;
1286 PDMINETWORKDOWN INetworkDown;
1287 PDMINETWORKCONFIG INetworkConfig;
1288 /** LED interface */
1289 PDMILEDPORTS ILeds;
1290 /** Attached network driver. */
1291 R3PTRTYPE(PPDMIBASE) pDrvBase;
1292 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1293
1294 /** Pointer to the shared state. */
1295 R3PTRTYPE(PE1KSTATE) pShared;
1296
1297 /** Device instance. */
1298 PPDMDEVINSR3 pDevInsR3;
1299 /** Attached network driver. */
1300 PPDMINETWORKUPR3 pDrvR3;
1301 /** The scatter / gather buffer used for the current outgoing packet. */
1302 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1303
1304 /** EMT: EEPROM emulation */
1305 E1kEEPROM eeprom;
1306} E1KSTATER3;
1307/** Pointer to the E1000 ring-3 device state. */
1308typedef E1KSTATER3 *PE1KSTATER3;
1309
1310
1311/**
1312 * E1000 ring-0 device state
1313 */
1314typedef struct E1KSTATER0
1315{
1316 /** Device instance. */
1317 PPDMDEVINSR0 pDevInsR0;
1318 /** Attached network driver. */
1319 PPDMINETWORKUPR0 pDrvR0;
1320 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1321 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1322} E1KSTATER0;
1323/** Pointer to the E1000 ring-0 device state. */
1324typedef E1KSTATER0 *PE1KSTATER0;
1325
1326
1327/**
1328 * E1000 raw-mode device state
1329 */
1330typedef struct E1KSTATERC
1331{
1332 /** Device instance. */
1333 PPDMDEVINSRC pDevInsRC;
1334 /** Attached network driver. */
1335 PPDMINETWORKUPRC pDrvRC;
1336 /** The scatter / gather buffer used for the current outgoing packet. */
1337 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1338} E1KSTATERC;
1339/** Pointer to the E1000 raw-mode device state. */
1340typedef E1KSTATERC *PE1KSTATERC;
1341
1342
1343/** @def PE1KSTATECC
1344 * Pointer to the instance data for the current context. */
1345#ifdef IN_RING3
1346typedef E1KSTATER3 E1KSTATECC;
1347typedef PE1KSTATER3 PE1KSTATECC;
1348#elif defined(IN_RING0)
1349typedef E1KSTATER0 E1KSTATECC;
1350typedef PE1KSTATER0 PE1KSTATECC;
1351#elif defined(IN_RC)
1352typedef E1KSTATERC E1KSTATECC;
1353typedef PE1KSTATERC PE1KSTATECC;
1354#else
1355# error "Not IN_RING3, IN_RING0 or IN_RC"
1356#endif
1357
1358
1359#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1360
1361/* Forward declarations ******************************************************/
1362static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread);
1363
1364/**
1365 * E1000 register read handler.
1366 */
1367typedef int (FNE1KREGREAD)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1368/**
1369 * E1000 register write handler.
1370 */
1371typedef int (FNE1KREGWRITE)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
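/*
 * Illustrative sketch (not the actual handler): a default-style read handler
 * matching the FNE1KREGREAD signature would return the stored register value
 * masked with the bits declared readable in the register map below.
 */
#if 0
static int e1kExampleRegReadDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
{
    RT_NOREF(pDevIns, offset);
    *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
    return VINF_SUCCESS;
}
#endif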
1372
1373static FNE1KREGREAD e1kRegReadUnimplemented;
1374static FNE1KREGWRITE e1kRegWriteUnimplemented;
1375static FNE1KREGREAD e1kRegReadAutoClear;
1376static FNE1KREGREAD e1kRegReadDefault;
1377static FNE1KREGWRITE e1kRegWriteDefault;
1378#if 0 /* unused */
1379static FNE1KREGREAD e1kRegReadCTRL;
1380#endif
1381static FNE1KREGWRITE e1kRegWriteCTRL;
1382static FNE1KREGREAD e1kRegReadEECD;
1383static FNE1KREGWRITE e1kRegWriteEECD;
1384static FNE1KREGWRITE e1kRegWriteEERD;
1385static FNE1KREGWRITE e1kRegWriteMDIC;
1386static FNE1KREGREAD e1kRegReadICR;
1387static FNE1KREGWRITE e1kRegWriteICR;
1388static FNE1KREGREAD e1kRegReadICS;
1389static FNE1KREGWRITE e1kRegWriteICS;
1390static FNE1KREGWRITE e1kRegWriteIMS;
1391static FNE1KREGWRITE e1kRegWriteIMC;
1392static FNE1KREGWRITE e1kRegWriteRCTL;
1393static FNE1KREGWRITE e1kRegWritePBA;
1394static FNE1KREGWRITE e1kRegWriteRDT;
1395static FNE1KREGWRITE e1kRegWriteRDTR;
1396static FNE1KREGWRITE e1kRegWriteTDT;
1397static FNE1KREGREAD e1kRegReadMTA;
1398static FNE1KREGWRITE e1kRegWriteMTA;
1399static FNE1KREGREAD e1kRegReadRA;
1400static FNE1KREGWRITE e1kRegWriteRA;
1401static FNE1KREGREAD e1kRegReadVFTA;
1402static FNE1KREGWRITE e1kRegWriteVFTA;
1403
1404/**
1405 * Register map table.
1406 *
1407 * Override pfnRead and pfnWrite to get register-specific behavior.
1408 */
1409static const struct E1kRegMap_st
1410{
1411 /** Register offset in the register space. */
1412 uint32_t offset;
1413 /** Size in bytes. Registers of size > 4 are in fact tables. */
1414 uint32_t size;
1415 /** Readable bits. */
1416 uint32_t readable;
1417 /** Writable bits. */
1418 uint32_t writable;
1419 /** Read callback. */
1420 FNE1KREGREAD *pfnRead;
1421 /** Write callback. */
1422 FNE1KREGWRITE *pfnWrite;
1423 /** Abbreviated name. */
1424 const char *abbrev;
1425 /** Full name. */
1426 const char *name;
1427} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1428{
1429 /* offset size read mask write mask read callback write callback abbrev full name */
1430 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1431 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1432 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1433 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1434 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1435 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1436 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1437 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1438 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1439 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1440 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1441 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1442 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1443 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1444 { 0x000c8, 0x00004, 0x0001F6DF, 0xFFFFFFFF, e1kRegReadICS , e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1445 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1446 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1447 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1448 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1449 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1450 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1451 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1452 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1453 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1454 { 0x00e00, 0x00004, 0xCFCFCFCF, 0xCFCFCFCF, e1kRegReadDefault , e1kRegWriteDefault , "LEDCTL" , "LED Control" },
1455 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1456 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1457 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1458 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1459 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1460 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1461 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1462 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1463 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1464 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1465 { 0x02808, 0x00004, 0x000FFF80, 0x000FFF80, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1466 { 0x02810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1467 { 0x02818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1468 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1469 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1470 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1471 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1472 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1473 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1474 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1475 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1476 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1477 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1478 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1479 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1480 { 0x03808, 0x00004, 0x000FFF80, 0x000FFF80, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1481 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1482 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1483 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1484 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1485 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1486 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1487 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1488 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1489 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1490 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1491 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1492 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1493 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1494 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1495 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1496 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1497 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1498 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1499 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1500 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1501 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1502 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1503 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1504 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1505 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1506 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1507 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1508 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1509 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1510 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1511 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1512 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1513 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1514 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1515 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1516 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1517 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1518 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1519 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1520 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1521 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1522 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1523 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1524 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1525 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1526 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1527 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1528 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1529 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1530 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1531 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1532 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1533 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1534 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1535 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1536 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1537 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1538 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1539 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1540 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1541 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1542 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1543 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1544 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1545 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1546 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1547 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1548 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1549 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1550 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1551 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1552 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1553 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1554 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1555 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1556 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1557 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1558 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1559 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1560 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1561 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1562 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1563 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1564 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1565};
1566
1567#ifdef LOG_ENABLED
1568
1569/**
1570 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1571 *
1572 * @remarks The mask has half-byte (nibble, not bit) granularity (e.g. 0000000F selects only the lowest nibble).
1573 *
1574 * @returns The buffer.
1575 *
1576 * @param u32 The word to convert into string.
1577 * @param mask Selects which nibbles to convert; nibbles with a zero mask are rendered as dots.
1578 * @param buf Where to put the result.
1579 */
1580static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1581{
1582 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1583 {
1584 if (mask & 0xF)
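            /* Nibble to hex digit: for values above 9, '7' + value yields 'A'..'F' ('7' == 'A' - 10). */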
1585 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1586 else
1587 *ptr = '.';
1588 }
1589 buf[8] = 0;
1590 return buf;
1591}
1592
1593/**
1594 * Returns timer name for debug purposes.
1595 *
1596 * @returns The timer name.
1597 *
1598 * @param pThis The device state structure.
1599 * @param hTimer The timer to name.
1600 */
1601DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1602{
1603 if (hTimer == pThis->hTIDTimer)
1604 return "TID";
1605 if (hTimer == pThis->hTADTimer)
1606 return "TAD";
1607 if (hTimer == pThis->hRIDTimer)
1608 return "RID";
1609 if (hTimer == pThis->hRADTimer)
1610 return "RAD";
1611 if (hTimer == pThis->hIntTimer)
1612 return "Int";
1613 if (hTimer == pThis->hTXDTimer)
1614 return "TXD";
1615 if (hTimer == pThis->hLUTimer)
1616 return "LinkUp";
1617 return "unknown";
1618}
1619
1620#endif /* LOG_ENABLED */
1621
1622/**
1623 * Arm a timer.
1624 *
1625 * @param pDevIns The device instance.
1626 * @param pThis Pointer to the device state structure.
1627 * @param hTimer The timer to arm.
1628 * @param uExpireIn Expiration interval in microseconds.
1629 */
1630DECLINLINE(void) e1kArmTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer, uint32_t uExpireIn)
1631{
1632 if (pThis->fLocked)
1633 return;
1634
1635 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1636 pThis->szPrf, e1kGetTimerName(pThis, hTimer), uExpireIn));
1637 int rc = PDMDevHlpTimerSetMicro(pDevIns, hTimer, uExpireIn);
1638 AssertRC(rc);
1639}
1640
1641#ifdef IN_RING3
1642/**
1643 * Cancel a timer.
1644 *
1645 * @param pDevIns The device instance.
1646 * @param pThis Pointer to the device state structure.
1647 * @param hTimer The handle of the timer to cancel.
1648 */
1649DECLINLINE(void) e1kCancelTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1650{
1651 E1kLog2(("%s Stopping %s timer...\n",
1652 pThis->szPrf, e1kGetTimerName(pThis, hTimer)));
1653 int rc = PDMDevHlpTimerStop(pDevIns, hTimer);
1654 if (RT_FAILURE(rc))
1655 E1kLog2(("%s e1kCancelTimer: TMTimerStop(%s) failed with %Rrc\n",
1656 pThis->szPrf, e1kGetTimerName(pThis, hTimer), rc));
1657 RT_NOREF_PV(pThis);
1658}
1659#endif /* IN_RING3 */
1660
1661
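/* Helper macros for entering/leaving the device's main (cs), receive (csRx) and transmit (csTx) critical sections. */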
1662#define e1kCsEnter(ps, rcBusy) PDMDevHlpCritSectEnter(pDevIns, &(ps)->cs, (rcBusy))
1663#define e1kCsEnterReturn(ps, rcBusy) do { \
1664 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &(ps)->cs, (rcBusy)); \
1665 if (rcLock == VINF_SUCCESS) { /* likely */ } \
1666 else return rcLock; \
1667 } while (0)
1668#define e1kR3CsEnterAsserted(ps) do { \
1669 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &(ps)->cs, VERR_SEM_BUSY); \
1670 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &(ps)->cs, rcLock); \
1671 } while (0)
1672#define e1kCsLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &(ps)->cs)
1673
1674
1675#define e1kCsRxEnter(ps, rcBusy) PDMDevHlpCritSectEnter(pDevIns, &(ps)->csRx, (rcBusy))
1676#define e1kCsRxEnterReturn(ps) do { \
1677 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &(ps)->csRx, VERR_SEM_BUSY); \
1678 AssertRCReturn(rcLock, rcLock); \
1679 } while (0)
1680#define e1kR3CsRxEnterAsserted(ps) do { \
1681 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &(ps)->csRx, VERR_SEM_BUSY); \
1682 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &(ps)->csRx, rcLock); \
1683 } while (0)
1684#define e1kCsRxLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &(ps)->csRx)
1685#define e1kCsRxIsOwner(ps) PDMDevHlpCritSectIsOwner(pDevIns, &(ps)->csRx)
1686
1687
1688#ifndef E1K_WITH_TX_CS
1689# define e1kCsTxEnter(ps, rcBusy) VINF_SUCCESS
1690# define e1kR3CsTxEnterAsserted(ps) do { } while (0)
1691# define e1kCsTxLeave(ps) do { } while (0)
1692#else /* E1K_WITH_TX_CS */
1693# define e1kCsTxEnter(ps, rcBusy) PDMDevHlpCritSectEnter(pDevIns, &(ps)->csTx, (rcBusy))
1694# define e1kR3CsTxEnterAsserted(ps) do { \
1695 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &(ps)->csTx, VERR_SEM_BUSY); \
1696 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &(ps)->csTx, rcLock); \
1697 } while (0)
1698# define e1kCsTxLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &(ps)->csTx)
1699# define e1kCsTxIsOwner(ps) PDMDevHlpCritSectIsOwner(pDevIns, &(ps)->csTx)
1700#endif /* E1K_WITH_TX_CS */
1701
1702
1703#ifdef E1K_WITH_TXD_CACHE
1704/*
1705 * Transmit Descriptor Register Context
1706 */
1707struct E1kTxDContext
1708{
1709 uint32_t tdlen;
1710 uint32_t tdh;
1711 uint32_t tdt;
1712};
1713typedef struct E1kTxDContext E1KTXDC, *PE1KTXDC;
1714
1715DECLINLINE(bool) e1kUpdateTxDContext(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pContext)
1716{
1717 Assert(e1kCsTxIsOwner(pThis));
1718 if (!e1kCsTxIsOwner(pThis))
1719 {
1720 memset(pContext, 0, sizeof(E1KTXDC));
1721 return false;
1722 }
1723 pContext->tdlen = TDLEN;
1724 pContext->tdh = TDH;
1725 pContext->tdt = TDT;
1726 uint32_t cTxRingSize = pContext->tdlen / sizeof(E1KTXDESC);
1727#ifdef DEBUG
1728 if (pContext->tdh >= cTxRingSize)
1729 {
1730 Log(("%s e1kUpdateTxDContext: will return false because TDH too big (%u >= %u)\n",
1731 pThis->szPrf, pContext->tdh, cTxRingSize));
1732 return false;
1733 }
1734 if (pContext->tdt >= cTxRingSize)
1735 {
1736 Log(("%s e1kUpdateTxDContext: will return false because TDT too big (%u >= %u)\n",
1737 pThis->szPrf, pContext->tdt, cTxRingSize));
1738 return false;
1739 }
1740#endif /* DEBUG */
1741 return pContext->tdh < cTxRingSize && pContext->tdt < cTxRingSize;
1742}
1743#endif /* E1K_WITH_TXD_CACHE */
1744#ifdef E1K_WITH_RXD_CACHE
1745/*
1746 * Receive Descriptor Register Context
1747 */
1748struct E1kRxDContext
1749{
1750 uint32_t rdlen;
1751 uint32_t rdh;
1752 uint32_t rdt;
1753};
1754typedef struct E1kRxDContext E1KRXDC, *PE1KRXDC;
1755
1756DECLINLINE(bool) e1kUpdateRxDContext(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pContext, const char *pcszCallee)
1757{
1758 Assert(e1kCsRxIsOwner(pThis));
1759 if (!e1kCsRxIsOwner(pThis))
1760 return false;
1761 pContext->rdlen = RDLEN;
1762 pContext->rdh = RDH;
1763 pContext->rdt = RDT;
1764 uint32_t cRxRingSize = pContext->rdlen / sizeof(E1KRXDESC);
1765 /*
1766 * Note that the checks for RDT are a bit different. Some guests, OS/2 for
1767 * example, intend to use all descriptors in RX ring, so they point RDT
1768 * right beyond the last descriptor in the ring. While this is not
1769 * acceptable for other registers, it works out fine for RDT.
1770 */
1771#ifdef DEBUG
1772 if (pContext->rdh >= cRxRingSize)
1773 {
1774 Log(("%s e1kUpdateRxDContext: called from %s, will return false because RDH too big (%u >= %u)\n",
1775 pThis->szPrf, pcszCallee, pContext->rdh, cRxRingSize));
1776 return false;
1777 }
1778 if (pContext->rdt > cRxRingSize)
1779 {
1780 Log(("%s e1kUpdateRxDContext: called from %s, will return false because RDT too big (%u > %u)\n",
1781 pThis->szPrf, pcszCallee, pContext->rdt, cRxRingSize));
1782 return false;
1783 }
1784#else /* !DEBUG */
1785 RT_NOREF(pcszCallee);
1786#endif /* !DEBUG */
1787 return pContext->rdh < cRxRingSize && pContext->rdt <= cRxRingSize; // && (RCTL & RCTL_EN);
1788}
1789#endif /* E1K_WITH_RXD_CACHE */
1790
1791/**
1792 * Wake up the RX thread.
1793 */
1794static void e1kWakeupReceive(PPDMDEVINS pDevIns, PE1KSTATE pThis)
1795{
1796 if ( pThis->fMaybeOutOfSpace
1797 && pThis->hEventMoreRxDescAvail != NIL_SUPSEMEVENT)
1798 {
1799 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatRxOverflowWakeup));
1800 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1801 int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventMoreRxDescAvail);
1802 AssertRC(rc);
1803 }
1804}
1805
1806#ifdef IN_RING3
1807
1808/**
1809 * Hardware reset. Revert all registers to initial values.
1810 *
1811 * @param pDevIns The device instance.
1812 * @param pThis The device state structure.
1813 * @param pThisCC The current context instance data.
1814 */
1815static void e1kR3HardReset(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
1816{
1817 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1818 /* No interrupts should survive device reset, see @bugref{9556}. */
1819 if (pThis->fIntRaised)
1820 {
1821 /* Lower(0) INTA(0) */
1822 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
1823 pThis->fIntRaised = false;
1824 E1kLog(("%s e1kR3HardReset: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
1825 }
1826 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1827 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1828# ifdef E1K_INIT_RA0
1829 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1830 sizeof(pThis->macConfigured.au8));
1831 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1832# endif /* E1K_INIT_RA0 */
1833 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1834 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1835 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1836 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1837 Assert(GET_BITS(RCTL, BSIZE) == 0);
1838 pThis->u16RxBSize = 2048;
1839
1840 uint16_t u16LedCtl = 0x0602; /* LED0/LINK_UP#, LED2/LINK100# */
1841 pThisCC->eeprom.readWord(0x2F, &u16LedCtl); /* Read LEDCTL defaults from EEPROM */
1842 LEDCTL = 0x07008300 | (((uint32_t)u16LedCtl & 0xCF00) << 8) | (u16LedCtl & 0xCF); /* Only LED0 and LED2 defaults come from EEPROM */
1843
1844 /* Reset promiscuous mode */
1845 if (pThisCC->pDrvR3)
1846 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, false);
1847
1848# ifdef E1K_WITH_TXD_CACHE
1849 e1kR3CsTxEnterAsserted(pThis);
1850 pThis->nTxDFetched = 0;
1851 pThis->iTxDCurrent = 0;
1852 pThis->fGSO = false;
1853 pThis->cbTxAlloc = 0;
1854 e1kCsTxLeave(pThis);
1855# endif /* E1K_WITH_TXD_CACHE */
1856# ifdef E1K_WITH_RXD_CACHE
1857 e1kR3CsRxEnterAsserted(pThis);
1858 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1859 e1kCsRxLeave(pThis);
1860# endif /* E1K_WITH_RXD_CACHE */
1861# ifdef E1K_LSC_ON_RESET
1862 E1kLog(("%s Will trigger LSC in %d seconds...\n",
1863 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
1864 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
1865# endif /* E1K_LSC_ON_RESET */
1866}
1867
1868#endif /* IN_RING3 */
1869
1870/**
1871 * Compute Internet checksum.
1872 *
1873 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1874 *
1875 * @param pvBuf The buffer to compute the checksum of.
1876 * @param cb The size of the buffer in bytes.
1879 *
1880 * @return The 1's complement of the 1's complement sum.
1881 *
1882 * @thread E1000_TX
1883 */
1884static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1885{
1886 uint32_t csum = 0;
1887 uint16_t *pu16 = (uint16_t *)pvBuf;
1888
1889 while (cb > 1)
1890 {
1891 csum += *pu16++;
1892 cb -= 2;
1893 }
1894 if (cb)
1895 csum += *(uint8_t*)pu16;
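     /* Fold the 32-bit sum into 16 bits, adding the carries back in until none remain. */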
1896 while (csum >> 16)
1897 csum = (csum >> 16) + (csum & 0xFFFF);
1898 Assert(csum < 65536);
1899 return (uint16_t)~csum;
1900}
1901
1902/**
1903 * Dump a packet to debug log.
1904 *
1905 * @param pDevIns The device instance.
1906 * @param pThis The device state structure.
1907 * @param cpPacket The packet.
1908 * @param cb The size of the packet.
1909 * @param pszText A string denoting direction of packet transfer.
1910 * @thread E1000_TX
1911 */
1912DECLINLINE(void) e1kPacketDump(PPDMDEVINS pDevIns, PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1913{
1914#ifdef DEBUG
1915 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1916 {
1917 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1918 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1919 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1920 {
1921 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1922 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1923 if (*(cpPacket+14+6) == 0x6)
1924 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1925 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1926 }
1927 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1928 {
1929 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1930 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1931 if (*(cpPacket+14+6) == 0x6)
1932 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1933 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1934 }
1935 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1936 e1kCsLeave(pThis);
1937 }
1938#else
1939 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1940 {
1941 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1942 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1943 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1944 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1945 else
1946 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1947 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1948 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1949 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1950 e1kCsLeave(pThis);
1951 }
1952 RT_NOREF2(cb, pszText);
1953#endif
1954}
1955
1956/**
1957 * Determine the type of transmit descriptor.
1958 *
1959 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1960 *
1961 * @param pDesc Pointer to descriptor union.
1962 * @thread E1000_TX
1963 */
1964DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1965{
1966 if (pDesc->legacy.cmd.fDEXT)
1967 return pDesc->context.dw2.u4DTYP;
1968 return E1K_DTYP_LEGACY;
1969}
1970
1971
1972#ifdef E1K_WITH_RXD_CACHE
1973/**
1974 * Return the number of RX descriptors that belong to the hardware.
1975 *
1976 * @returns the number of available descriptors in RX ring.
1977 * @param pRxdc The receive descriptor register context.
1978 * @thread ???
1979 */
1980DECLINLINE(uint32_t) e1kGetRxLen(PE1KRXDC pRxdc)
1981{
1982 /*
1983 * Make sure RDT won't change during computation. EMT may modify RDT at
1984 * any moment.
1985 */
1986 uint32_t rdt = pRxdc->rdt;
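     /* When RDT has wrapped around and is below RDH, add the ring size so the subtraction yields the correct count. */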
1987 return (pRxdc->rdh > rdt ? pRxdc->rdlen/sizeof(E1KRXDESC) : 0) + rdt - pRxdc->rdh;
1988}
1989
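/**
 * Return the number of fetched RX descriptors in the cache that have not been processed yet.
 *
 * @param pThis The device state structure.
 */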
1990DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
1991{
1992 return pThis->nRxDFetched > pThis->iRxDCurrent ?
1993 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
1994}
1995
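/**
 * Check whether the RX descriptor cache contains no unprocessed descriptors.
 *
 * @param pThis The device state structure.
 */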
1996DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
1997{
1998 return pThis->iRxDCurrent >= pThis->nRxDFetched;
1999}
2000
2001/**
2002 * Load receive descriptors from guest memory. The caller needs to be in Rx
2003 * critical section.
2004 *
2005 * We need two physical reads in case the tail wrapped around the end of RX
2006 * descriptor ring.
2007 *
2008 * @returns the actual number of descriptors fetched.
2009 * @param pDevIns The device instance.
2010 * @param pThis The device state structure.
2011 * @thread EMT, RX
2012 */
2013DECLINLINE(unsigned) e1kRxDPrefetch(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pRxdc)
2014{
2015 E1kLog3(("%s e1kRxDPrefetch: RDH=%x RDT=%x RDLEN=%x "
2016 "iRxDCurrent=%x nRxDFetched=%x\n",
2017 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, pRxdc->rdlen, pThis->iRxDCurrent, pThis->nRxDFetched));
2018 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
2019 unsigned nDescsAvailable = e1kGetRxLen(pRxdc) - e1kRxDInCache(pThis);
2020 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
2021 unsigned nDescsTotal = pRxdc->rdlen / sizeof(E1KRXDESC);
2022 Assert(nDescsTotal != 0);
2023 if (nDescsTotal == 0)
2024 return 0;
2025 unsigned nFirstNotLoaded = (pRxdc->rdh + e1kRxDInCache(pThis)) % nDescsTotal;
2026 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
2027 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
2028 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
2029 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
2030 nFirstNotLoaded, nDescsInSingleRead));
2031 if (nDescsToFetch == 0)
2032 return 0;
2033 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
2034 PDMDevHlpPCIPhysRead(pDevIns,
2035 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
2036 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
2037 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
2038 // unsigned i, j;
2039 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
2040 // {
2041 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
2042 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2043 // }
2044 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
2045 pThis->szPrf, nDescsInSingleRead,
2046 RDBAH, RDBAL + pRxdc->rdh * sizeof(E1KRXDESC),
2047 nFirstNotLoaded, pRxdc->rdlen, pRxdc->rdh, pRxdc->rdt));
2048 if (nDescsToFetch > nDescsInSingleRead)
2049 {
2050 PDMDevHlpPCIPhysRead(pDevIns,
2051 ((uint64_t)RDBAH << 32) + RDBAL,
2052 pFirstEmptyDesc + nDescsInSingleRead,
2053 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
2054 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
2055 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
2056 // {
2057 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
2058 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2059 // }
2060 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
2061 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
2062 RDBAH, RDBAL));
2063 }
2064 pThis->nRxDFetched += nDescsToFetch;
2065 return nDescsToFetch;
2066}
2067
2068# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2069/**
2070 * Dump receive descriptor to debug log.
2071 *
2072 * @param pThis The device state structure.
2073 * @param pDesc Pointer to the descriptor.
2074 * @thread E1000_RX
2075 */
2076static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
2077{
2078 RT_NOREF2(pThis, pDesc);
2079 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
2080 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
2081 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
2082 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
2083 pDesc->status.fPIF ? "PIF" : "pif",
2084 pDesc->status.fIPCS ? "IPCS" : "ipcs",
2085 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
2086 pDesc->status.fVP ? "VP" : "vp",
2087 pDesc->status.fIXSM ? "IXSM" : "ixsm",
2088 pDesc->status.fEOP ? "EOP" : "eop",
2089 pDesc->status.fDD ? "DD" : "dd",
2090 pDesc->status.fRXE ? "RXE" : "rxe",
2091 pDesc->status.fIPE ? "IPE" : "ipe",
2092 pDesc->status.fTCPE ? "TCPE" : "tcpe",
2093 pDesc->status.fCE ? "CE" : "ce",
2094 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
2095 E1K_SPEC_VLAN(pDesc->status.u16Special),
2096 E1K_SPEC_PRI(pDesc->status.u16Special)));
2097}
2098# endif /* IN_RING3 */
2099#endif /* E1K_WITH_RXD_CACHE */
2100
2101/**
2102 * Dump transmit descriptor to debug log.
2103 *
2104 * @param pThis The device state structure.
2105 * @param pDesc Pointer to descriptor union.
2106 * @param pszDir A string denoting direction of descriptor transfer
2107 * @thread E1000_TX
2108 */
2109static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
2110 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
2111{
2112 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
2113
2114 /*
2115 * Unfortunately we cannot use our format handler here, we want R0 logging
2116 * as well.
2117 */
2118 switch (e1kGetDescType(pDesc))
2119 {
2120 case E1K_DTYP_CONTEXT:
2121 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
2122 pThis->szPrf, pszDir, pszDir));
2123 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
2124 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
2125 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
2126 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
2127 pDesc->context.dw2.fIDE ? " IDE":"",
2128 pDesc->context.dw2.fRS ? " RS" :"",
2129 pDesc->context.dw2.fTSE ? " TSE":"",
2130 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
2131 pDesc->context.dw2.fTCP ? "TCP":"UDP",
2132 pDesc->context.dw2.u20PAYLEN,
2133 pDesc->context.dw3.u8HDRLEN,
2134 pDesc->context.dw3.u16MSS,
2135 pDesc->context.dw3.fDD?"DD":""));
2136 break;
2137 case E1K_DTYP_DATA:
2138 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
2139 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
2140 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2141 pDesc->data.u64BufAddr,
2142 pDesc->data.cmd.u20DTALEN));
2143 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
2144 pDesc->data.cmd.fIDE ? " IDE" :"",
2145 pDesc->data.cmd.fVLE ? " VLE" :"",
2146 pDesc->data.cmd.fRPS ? " RPS" :"",
2147 pDesc->data.cmd.fRS ? " RS" :"",
2148 pDesc->data.cmd.fTSE ? " TSE" :"",
2149 pDesc->data.cmd.fIFCS? " IFCS":"",
2150 pDesc->data.cmd.fEOP ? " EOP" :"",
2151 pDesc->data.dw3.fDD ? " DD" :"",
2152 pDesc->data.dw3.fEC ? " EC" :"",
2153 pDesc->data.dw3.fLC ? " LC" :"",
2154 pDesc->data.dw3.fTXSM? " TXSM":"",
2155 pDesc->data.dw3.fIXSM? " IXSM":"",
2156 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
2157 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
2158 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
2159 break;
2160 case E1K_DTYP_LEGACY:
2161 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
2162 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
2163 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2164 pDesc->data.u64BufAddr,
2165 pDesc->legacy.cmd.u16Length));
2166 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
2167 pDesc->legacy.cmd.fIDE ? " IDE" :"",
2168 pDesc->legacy.cmd.fVLE ? " VLE" :"",
2169 pDesc->legacy.cmd.fRPS ? " RPS" :"",
2170 pDesc->legacy.cmd.fRS ? " RS" :"",
2171 pDesc->legacy.cmd.fIC ? " IC" :"",
2172 pDesc->legacy.cmd.fIFCS? " IFCS":"",
2173 pDesc->legacy.cmd.fEOP ? " EOP" :"",
2174 pDesc->legacy.dw3.fDD ? " DD" :"",
2175 pDesc->legacy.dw3.fEC ? " EC" :"",
2176 pDesc->legacy.dw3.fLC ? " LC" :"",
2177 pDesc->legacy.cmd.u8CSO,
2178 pDesc->legacy.dw3.u8CSS,
2179 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
2180 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
2181 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
2182 break;
2183 default:
2184 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
2185 pThis->szPrf, pszDir, pszDir));
2186 break;
2187 }
2188}
2189
2190/**
2191 * Raise an interrupt later.
2192 *
2193 * @param pDevIns The device instance.
 * @param pThis The device state structure.
 * @param nsDeadline The delay in nanoseconds before the postponed interrupt is raised.
2194 */
2195DECLINLINE(void) e1kPostponeInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint64_t nsDeadline)
2196{
2197 if (!PDMDevHlpTimerIsActive(pDevIns, pThis->hIntTimer))
2198 PDMDevHlpTimerSetNano(pDevIns, pThis->hIntTimer, nsDeadline);
2199}
2200
2201/**
2202 * Raise interrupt if not masked.
2203 *
2204 * @param pDevIns The device instance.
 * @param pThis The device state structure.
 * @param rcBusy Status code to return when the critical section is busy.
 * @param u32IntCause The interrupt cause bits to set in ICR.
2205 */
2206static int e1kRaiseInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause)
2207{
2208 /* Do NOT use e1kCsEnterReturn here as most callers doesn't check the
2209 status code. They'll pass a negative rcBusy. */
2210 int rc = e1kCsEnter(pThis, rcBusy);
2211 if (RT_LIKELY(rc == VINF_SUCCESS))
2212 { /* likely */ }
2213 else
2214 {
2215 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &pThis->cs, rc);
2216 return rc;
2217 }
2218
2219 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
2220 ICR |= u32IntCause;
2221 if (ICR & IMS)
2222 {
2223 if (pThis->fIntRaised)
2224 {
2225 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
2226 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
2227 pThis->szPrf, ICR & IMS));
2228 }
2229 else
2230 {
2231 uint64_t tsNow = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
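 /* The ITR interval is programmed in 256 ns increments, hence the multiplication by 256 below. */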
2232 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
2233 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
2234 {
2235 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
2236 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
2237 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
2238 e1kPostponeInterrupt(pDevIns, pThis, ITR * 256);
2239 }
2240 else
2241 {
2242
2243 /* Since we are delivering the interrupt now
2244 * there is no need to do it later -- stop the timer.
2245 */
2246 PDMDevHlpTimerStop(pDevIns, pThis->hIntTimer);
2247 E1K_INC_ISTAT_CNT(pThis->uStatInt);
2248 STAM_COUNTER_INC(&pThis->StatIntsRaised);
2249 /* Got at least one unmasked interrupt cause */
2250 pThis->fIntRaised = true;
2251 /* Raise(1) INTA(0) */
2252 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
2253 PDMDevHlpPCISetIrq(pDevIns, 0, 1);
2254 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
2255 pThis->szPrf, ICR & IMS));
2256 }
2257 }
2258 }
2259 else
2260 {
2261 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
2262 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
2263 pThis->szPrf, ICR, IMS));
2264 }
2265 e1kCsLeave(pThis);
2266 return VINF_SUCCESS;
2267}
2268
2269/**
2270 * Compute the physical address of the descriptor.
2271 *
2272 * @returns the physical address of the descriptor.
2273 *
2274 * @param baseHigh High-order 32 bits of descriptor table address.
2275 * @param baseLow Low-order 32 bits of descriptor table address.
2276 * @param idxDesc The descriptor index in the table.
2277 */
2278DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
2279{
2280 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
2281 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
2282}
2283
2284#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2285/**
2286 * Advance the head pointer of the receive descriptor queue.
2287 *
2288 * @remarks RDH always points to the next available RX descriptor.
2289 *
2290 * @param pDevIns The device instance.
2291 * @param pThis The device state structure.
2292 */
2293DECLINLINE(void) e1kAdvanceRDH(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pRxdc)
2294{
2295 Assert(e1kCsRxIsOwner(pThis));
2296 //e1kR3CsEnterAsserted(pThis);
2297 if (++pRxdc->rdh * sizeof(E1KRXDESC) >= pRxdc->rdlen)
2298 pRxdc->rdh = 0;
2299 RDH = pRxdc->rdh; /* Sync the actual register and RXDC */
2300#ifdef E1K_WITH_RXD_CACHE
2301 /*
2302 * We need to fetch descriptors now as the guest may advance RDT all the way
2303 * to RDH as soon as we generate RXDMT0 interrupt. This is mostly to provide
2304 * compatibility with Phar Lap ETS, see @bugref{7346}. Note that we do not
2305 * check if the receiver is enabled. It must be, otherwise we won't get here
2306 * in the first place.
2307 *
2308 * Note that we should have moved both RDH and iRxDCurrent by now.
2309 */
2310 if (e1kRxDIsCacheEmpty(pThis))
2311 {
2312 /* Cache is empty, reset it and check if we can fetch more. */
2313 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2314 E1kLog3(("%s e1kAdvanceRDH: Rx cache is empty, RDH=%x RDT=%x "
2315 "iRxDCurrent=%x nRxDFetched=%x\n",
2316 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, pThis->iRxDCurrent, pThis->nRxDFetched));
2317 e1kRxDPrefetch(pDevIns, pThis, pRxdc);
2318 }
2319#endif /* E1K_WITH_RXD_CACHE */
2320 /*
2321 * Compute current receive queue length and fire RXDMT0 interrupt
2322 * if we are low on receive buffers
2323 */
2324 uint32_t uRQueueLen = pRxdc->rdh>pRxdc->rdt ? pRxdc->rdlen/sizeof(E1KRXDESC)-pRxdc->rdh+pRxdc->rdt : pRxdc->rdt-pRxdc->rdh;
2325 /*
2326 * The minimum threshold is controlled by RDMTS bits of RCTL:
2327 * 00 = 1/2 of RDLEN
2328 * 01 = 1/4 of RDLEN
2329 * 10 = 1/8 of RDLEN
2330 * 11 = reserved
2331 */
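 /* (2 << RDMTS) yields the divisor 2, 4, or 8 for the thresholds listed above. */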
2332 uint32_t uMinRQThreshold = pRxdc->rdlen / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
2333 if (uRQueueLen <= uMinRQThreshold)
2334 {
2335 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", pRxdc->rdh, pRxdc->rdt, uRQueueLen, uMinRQThreshold));
2336 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2337 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, uRQueueLen, uMinRQThreshold));
2338 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
2339 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXDMT0);
2340 }
2341 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2342 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, uRQueueLen));
2343 //e1kCsLeave(pThis);
2344}
2345#endif /* IN_RING3 */
2346
2347#ifdef E1K_WITH_RXD_CACHE
2348
2349# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2350
2351/**
2352 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2353 * RX ring if the cache is empty.
2354 *
2355 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2356 * go out of sync with RDH which will cause trouble when EMT checks if the
2357 * cache is empty to do pre-fetch (see @bugref{6217}).
2358 *
2359 * @param pDevIns The device instance.
2360 * @param pThis The device state structure.
2361 * @thread RX
2362 */
2363DECLINLINE(E1KRXDESC *) e1kRxDGet(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pRxdc)
2364{
2365 Assert(e1kCsRxIsOwner(pThis));
2366 /* Check the cache first. */
2367 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2368 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2369 /* Cache is empty, reset it and check if we can fetch more. */
2370 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2371 if (e1kRxDPrefetch(pDevIns, pThis, pRxdc))
2372 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2373 /* Out of Rx descriptors. */
2374 return NULL;
2375}
2376
2377
2378/**
2379 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2380 * pointer. The descriptor gets written back to the RXD ring.
2381 *
2382 * @param pDevIns The device instance.
2383 * @param pThis The device state structure.
2384 * @param pDesc The descriptor being "returned" to the RX ring.
2385 * @thread RX
2386 */
2387DECLINLINE(void) e1kRxDPut(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC* pDesc, PE1KRXDC pRxdc)
2388{
2389 Assert(e1kCsRxIsOwner(pThis));
2390 pThis->iRxDCurrent++;
2391 // Assert(pDesc >= pThis->aRxDescriptors);
2392 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2393 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2394 // uint32_t rdh = RDH;
2395 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2396 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, pRxdc->rdh), pDesc, sizeof(E1KRXDESC));
2397 /*
2398 * We need to print the descriptor before advancing RDH as it may fetch new
2399 * descriptors into the cache.
2400 */
2401 e1kPrintRDesc(pThis, pDesc);
2402 e1kAdvanceRDH(pDevIns, pThis, pRxdc);
2403}
2404
2405/**
2406 * Store a fragment of a received packet at the specified address.
2407 *
2408 * @param pDevIns The device instance.
2409 * @param pThis The device state structure.
2410 * @param pDesc The next available RX descriptor.
2411 * @param pvBuf The fragment.
2412 * @param cb The size of the fragment.
2413 */
2414static void e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2415{
2416 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2417 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2418 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2419 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2420 pDesc->u16Length = (uint16_t)cb;
2421 Assert(pDesc->u16Length == cb);
2422 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2423 RT_NOREF(pThis);
2424}
2425
2426# endif /* IN_RING3 */
2427
2428#else /* !E1K_WITH_RXD_CACHE */
2429
2430/**
2431 * Store a fragment of received packet that fits into the next available RX
2432 * buffer.
2433 *
2434 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2435 *
2436 * @param pDevIns The device instance.
2437 * @param pThis The device state structure.
2438 * @param pDesc The next available RX descriptor.
2439 * @param pvBuf The fragment.
2440 * @param cb The size of the fragment.
2441 */
2442static void e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2443{
2444 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2445 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2446 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2447 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2448 /* Write back the descriptor */
2449 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2450 e1kPrintRDesc(pThis, pDesc);
2451 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2452 /* Advance head */
2453 e1kAdvanceRDH(pDevIns, pThis);
2454 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2455 if (pDesc->status.fEOP)
2456 {
2457 /* Complete packet has been stored -- it is time to let the guest know. */
2458#ifdef E1K_USE_RX_TIMERS
2459 if (RDTR)
2460 {
2461 /* Arm the timer to fire in RDTR usec (discard .024) */
2462 e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2463 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2464 if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2465 e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2466 }
2467 else
2468 {
2469#endif
2470 /* 0 delay means immediate interrupt */
2471 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2472 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2473#ifdef E1K_USE_RX_TIMERS
2474 }
2475#endif
2476 }
2477 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2478}
2479
2480#endif /* !E1K_WITH_RXD_CACHE */
2481
2482/**
2483 * Returns true if it is a broadcast packet.
2484 *
2485 * @returns true if destination address indicates broadcast.
2486 * @param pvBuf The ethernet packet.
2487 */
2488DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2489{
2490 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2491 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2492}
2493
2494/**
2495 * Returns true if it is a multicast packet.
2496 *
2497 * @remarks returns true for broadcast packets as well.
2498 * @returns true if destination address indicates multicast.
2499 * @param pvBuf The ethernet packet.
2500 */
2501DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2502{
2503 return (*(char*)pvBuf) & 1;
2504}
2505
2506#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2507/**
2508 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2509 *
2510 * @remarks We emulate checksum offloading for major packet types only.
2511 *
2512 * @returns VBox status code.
2513 * @param pThis The device state structure.
2514 * @param pFrame The available data.
2515 * @param cb Number of bytes available in the buffer.
2516 * @param pStatus Pointer to the bit fields containing status info.
2517 */
2518static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2519{
2520 /** @todo
2521 * It is not safe to bypass checksum verification for packets coming
2522 * from the real wire. We are currently unable to tell where packets are
2523 * coming from, so we tell the driver to ignore our checksum flags
2524 * and do the verification in software.
2525 */
2526# if 0
2527 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2528
2529 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2530
2531 switch (uEtherType)
2532 {
2533 case 0x800: /* IPv4 */
2534 {
2535 pStatus->fIXSM = false;
2536 pStatus->fIPCS = true;
2537 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2538 /* TCP/UDP checksum offloading works with TCP and UDP only */
2539 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2540 break;
2541 }
2542 case 0x86DD: /* IPv6 */
2543 pStatus->fIXSM = false;
2544 pStatus->fIPCS = false;
2545 pStatus->fTCPCS = true;
2546 break;
2547 default: /* ARP, VLAN, etc. */
2548 pStatus->fIXSM = true;
2549 break;
2550 }
2551# else
2552 pStatus->fIXSM = true;
2553 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2554# endif
2555 return VINF_SUCCESS;
2556}
2557#endif /* IN_RING3 */
2558
2559/**
2560 * Pad and store received packet.
2561 *
2562 * @remarks Make sure that the packet appears to the upper layer as one coming
2563 * from real Ethernet: pad it and insert FCS.
2564 *
2565 * @returns VBox status code.
2566 * @param pDevIns The device instance.
2567 * @param pThis The device state structure.
2568 * @param pvBuf The available data.
2569 * @param cb Number of bytes available in the buffer.
2570 * @param status Bit fields containing status info.
2571 */
2572static int e1kHandleRxPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2573{
2574#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2575 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2576 uint8_t *ptr = rxPacket;
2577# ifdef E1K_WITH_RXD_CACHE
2578 E1KRXDC rxdc;
2579# endif /* E1K_WITH_RXD_CACHE */
2580
2581 e1kCsRxEnterReturn(pThis);
2582# ifdef E1K_WITH_RXD_CACHE
2583 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kHandleRxPacket")))
2584 {
2585 e1kCsRxLeave(pThis);
2586 E1kLog(("%s e1kHandleRxPacket: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
2587 return VINF_SUCCESS;
2588 }
2589# endif /* E1K_WITH_RXD_CACHE */
2590
2591 if (cb > 70) /* unqualified guess */
2592 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2593
2594 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2595 Assert(cb > 16);
2596 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2597 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2598 if (status.fVP)
2599 {
2600 /* VLAN packet -- strip VLAN tag in VLAN mode */
2601 if ((CTRL & CTRL_VME) && cb > 16)
2602 {
2603 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2604 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2605 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2606 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2607 cb -= 4;
2608 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2609 pThis->szPrf, status.u16Special, cb));
2610 }
2611 else
2612 {
2613 status.fVP = false; /* Set VP only if we stripped the tag */
2614 memcpy(rxPacket, pvBuf, cb);
2615 }
2616 }
2617 else
2618 memcpy(rxPacket, pvBuf, cb);
2619 /* Pad short packets */
2620 if (cb < 60)
2621 {
2622 memset(rxPacket + cb, 0, 60 - cb);
2623 cb = 60;
2624 }
2625 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2626 {
2627 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2628 /*
2629 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2630 * is ignored by most of drivers we may as well save us the trouble
2631 * of calculating it (see EthernetCRC CFGM parameter).
2632 */
2633 if (pThis->fEthernetCRC)
2634 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2635 cb += sizeof(uint32_t);
2636 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2637 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2638 }
2639 /* Compute checksum of complete packet */
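 /* RXCSUM.PCSS holds the byte offset at which packet checksum computation starts. */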
2640 size_t cbCSumStart = RT_MIN(GET_BITS(RXCSUM, PCSS), cb);
2641 uint16_t checksum = e1kCSum16(rxPacket + cbCSumStart, cb - cbCSumStart);
2642 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2643
2644 /* Update stats */
2645 E1K_INC_CNT32(GPRC);
2646 if (e1kIsBroadcast(pvBuf))
2647 E1K_INC_CNT32(BPRC);
2648 else if (e1kIsMulticast(pvBuf))
2649 E1K_INC_CNT32(MPRC);
2650 /* Update octet receive counter */
2651 E1K_ADD_CNT64(GORCL, GORCH, cb);
2652 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2653 if (cb == 64)
2654 E1K_INC_CNT32(PRC64);
2655 else if (cb < 128)
2656 E1K_INC_CNT32(PRC127);
2657 else if (cb < 256)
2658 E1K_INC_CNT32(PRC255);
2659 else if (cb < 512)
2660 E1K_INC_CNT32(PRC511);
2661 else if (cb < 1024)
2662 E1K_INC_CNT32(PRC1023);
2663 else
2664 E1K_INC_CNT32(PRC1522);
2665
2666 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2667
2668# ifdef E1K_WITH_RXD_CACHE
2669 while (cb > 0)
2670 {
2671 E1KRXDESC *pDesc = e1kRxDGet(pDevIns, pThis, &rxdc);
2672
2673 if (pDesc == NULL)
2674 {
2675 E1kLog(("%s Out of receive buffers, dropping the packet "
2676 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2677 pThis->szPrf, cb, e1kRxDInCache(pThis), rxdc.rdh, rxdc.rdt));
2678 break;
2679 }
2680# else /* !E1K_WITH_RXD_CACHE */
2681 if (RDH == RDT)
2682 {
2683 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2684 pThis->szPrf));
2685 }
2686 /* Store the packet to receive buffers */
2687 while (RDH != RDT)
2688 {
2689 /* Load the descriptor pointed by head */
2690 E1KRXDESC desc, *pDesc = &desc;
2691 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
2692# endif /* !E1K_WITH_RXD_CACHE */
2693 if (pDesc->u64BufAddr)
2694 {
2695 uint16_t u16RxBufferSize = pThis->u16RxBSize; /* see @bugref{9427} */
2696
2697 /* Update descriptor */
2698 pDesc->status = status;
2699 pDesc->u16Checksum = checksum;
2700 pDesc->status.fDD = true;
2701
2702 /*
2703 * We need to leave Rx critical section here or we risk deadlocking
2704 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2705 * page or has an access handler associated with it.
2706 * Note that it is safe to leave the critical section here since
2707 * e1kRegWriteRDT() never modifies RDH. It never touches already
2708 * fetched RxD cache entries either.
2709 */
2710 if (cb > u16RxBufferSize)
2711 {
2712 pDesc->status.fEOP = false;
2713 e1kCsRxLeave(pThis);
2714 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, u16RxBufferSize);
2715 e1kCsRxEnterReturn(pThis);
2716# ifdef E1K_WITH_RXD_CACHE
2717 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kHandleRxPacket")))
2718 {
2719 e1kCsRxLeave(pThis);
2720 E1kLog(("%s e1kHandleRxPacket: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
2721 return VINF_SUCCESS;
2722 }
2723# endif /* E1K_WITH_RXD_CACHE */
2724 ptr += u16RxBufferSize;
2725 cb -= u16RxBufferSize;
2726 }
2727 else
2728 {
2729 pDesc->status.fEOP = true;
2730 e1kCsRxLeave(pThis);
2731 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, cb);
2732# ifdef E1K_WITH_RXD_CACHE
2733 e1kCsRxEnterReturn(pThis);
2734 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kHandleRxPacket")))
2735 {
2736 e1kCsRxLeave(pThis);
2737 E1kLog(("%s e1kHandleRxPacket: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
2738 return VINF_SUCCESS;
2739 }
2740 cb = 0;
2741# else /* !E1K_WITH_RXD_CACHE */
2742 pThis->led.Actual.s.fReading = 0;
2743 return VINF_SUCCESS;
2744# endif /* !E1K_WITH_RXD_CACHE */
2745 }
2746 /*
2747 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2748 * is not defined.
2749 */
2750 }
2751# ifdef E1K_WITH_RXD_CACHE
2752 /* Write back the descriptor. */
2753 pDesc->status.fDD = true;
2754 e1kRxDPut(pDevIns, pThis, pDesc, &rxdc);
2755# else /* !E1K_WITH_RXD_CACHE */
2756 else
2757 {
2758 /* Write back the descriptor. */
2759 pDesc->status.fDD = true;
2760 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2761 e1kAdvanceRDH(pDevIns, pThis);
2762 }
2763# endif /* !E1K_WITH_RXD_CACHE */
2764 }
2765
2766 if (cb > 0)
2767 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2768
2769 pThis->led.Actual.s.fReading = 0;
2770
2771 e1kCsRxLeave(pThis);
2772# ifdef E1K_WITH_RXD_CACHE
2773 /* Complete packet has been stored -- it is time to let the guest know. */
2774# ifdef E1K_USE_RX_TIMERS
2775 if (RDTR)
2776 {
2777 /* Arm the timer to fire in RDTR usec (discard .024) */
2778 e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2779 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2780 if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2781 e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2782 }
2783 else
2784 {
2785# endif /* E1K_USE_RX_TIMERS */
2786 /* 0 delay means immediate interrupt */
2787 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2788 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2789# ifdef E1K_USE_RX_TIMERS
2790 }
2791# endif /* E1K_USE_RX_TIMERS */
2792# endif /* E1K_WITH_RXD_CACHE */
2793
2794 return VINF_SUCCESS;
2795#else /* !IN_RING3 */
2796 RT_NOREF(pDevIns, pThis, pvBuf, cb, status);
2797 return VERR_INTERNAL_ERROR_2;
2798#endif /* !IN_RING3 */
2799}
2800
2801
2802#ifdef IN_RING3
2803/**
2804 * Bring the link up after the configured delay, 5 seconds by default.
2805 *
2806 * @param pDevIns The device instance.
2807 * @param pThis The device state structure.
2808 * @thread any
2809 */
2810DECLINLINE(void) e1kBringLinkUpDelayed(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2811{
2812 E1kLog(("%s Will bring up the link in %d seconds...\n",
2813 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2814 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
2815}
2816
2817/**
2818 * Bring up the link immediately.
2819 *
2820 * @param pDevIns The device instance.
2821 * @param pThis The device state structure.
2822 * @param pThisCC The current context instance data.
2823 */
2824DECLINLINE(void) e1kR3LinkUp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2825{
2826 E1kLog(("%s Link is up\n", pThis->szPrf));
2827 STATUS |= STATUS_LU;
2828 Phy::setLinkStatus(&pThis->phy, true);
2829 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2830 if (pThisCC->pDrvR3)
2831 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_UP);
2832 /* Trigger processing of pending TX descriptors (see @bugref{8942}). */
2833 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
2834}
2835
2836/**
2837 * Bring down the link immediately.
2838 *
2839 * @param pDevIns The device instance.
2840 * @param pThis The device state structure.
2841 * @param pThisCC The current context instance data.
2842 */
2843DECLINLINE(void) e1kR3LinkDown(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2844{
2845 E1kLog(("%s Link is down\n", pThis->szPrf));
2846 STATUS &= ~STATUS_LU;
2847#ifdef E1K_LSC_ON_RESET
2848 Phy::setLinkStatus(&pThis->phy, false);
2849#endif /* E1K_LSC_ON_RESET */
2850 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2851 if (pThisCC->pDrvR3)
2852 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2853}
2854
2855/**
2856 * Bring down the link temporarily.
2857 *
2858 * @param pDevIns The device instance.
2859 * @param pThis The device state structure.
2860 * @param pThisCC The current context instance data.
2861 */
2862DECLINLINE(void) e1kR3LinkDownTemp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2863{
2864 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2865 STATUS &= ~STATUS_LU;
2866 Phy::setLinkStatus(&pThis->phy, false);
2867 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2868 /*
2869 * Notifying the associated driver that the link went down (even temporarily)
2870 * seems to be the right thing, but it was not done before. This may cause
2871 * a regression if the driver does not expect the link to go down as a result
2872 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2873 * of code notified the driver that the link was up! See @bugref{7057}.
2874 */
2875 if (pThisCC->pDrvR3)
2876 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2877 e1kBringLinkUpDelayed(pDevIns, pThis);
2878}
2879#endif /* IN_RING3 */
2880
2881#if 0 /* unused */
2882/**
2883 * Read handler for Device Control register.
2884 *
2885 * Get the link status from PHY.
2886 *
2887 * @returns VBox status code.
2888 *
2889 * @param pThis The device state structure.
2890 * @param offset Register offset in memory-mapped frame.
2891 * @param index Register index in register array.
2892 * @param pu32Value Where to store the read value.
2893 */
2894static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2895{
2896 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2897 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2898 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2899 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2900 {
2901 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2902 if (Phy::readMDIO(&pThis->phy))
2903 *pu32Value = CTRL | CTRL_MDIO;
2904 else
2905 *pu32Value = CTRL & ~CTRL_MDIO;
2906 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2907 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2908 }
2909 else
2910 {
2911 /* MDIO pin is used for output, ignore it */
2912 *pu32Value = CTRL;
2913 }
2914 return VINF_SUCCESS;
2915}
2916#endif /* unused */
2917
2918/**
2919 * A helper function to detect the link state to the other side of "the wire".
2920 *
2921 * When deciding to bring up the link we need to take into account both if the
2922 * cable is connected and if our device is actually connected to the outside
2923 * world. If no driver is attached we won't be able to allocate TX buffers,
2924 * which will prevent us from processing TX descriptors and will result in
2925 * a "TX unit hang" in the guest.
2926 *
2927 * @returns true if the device is connected to something.
2928 *
2929 * @param pDevIns The device instance.
2930 */
2931DECLINLINE(bool) e1kIsConnected(PPDMDEVINS pDevIns)
2932{
2933 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
2934 return pThis->fCableConnected && pThis->fIsAttached;
2935}
2936
2937/**
2938 * A callback used by the PHY to indicate that the link needs to be updated due to
2939 * a PHY reset.
2940 *
2941 * @param pDevIns The device instance.
2942 * @thread any
2943 */
2944void e1kPhyLinkResetCallback(PPDMDEVINS pDevIns)
2945{
2946 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
2947
2948 /* Make sure we have cable connected and MAC can talk to PHY */
2949 if (e1kIsConnected(pDevIns) && (CTRL & CTRL_SLU))
2950 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
2951 else
2952 Log(("%s PHY link reset callback ignored (cable %sconnected, driver %stached, CTRL_SLU=%u)\n", pThis->szPrf,
2953 pThis->fCableConnected ? "" : "dis", pThis->fIsAttached ? "at" : "de", CTRL & CTRL_SLU ? 1 : 0));
2954}
2955
2956/**
2957 * Write handler for Device Control register.
2958 *
2959 * Handles reset.
2960 *
2961 * @param pThis The device state structure.
2962 * @param offset Register offset in memory-mapped frame.
2963 * @param index Register index in register array.
2964 * @param value The value to store.
2965 * @param mask Used to implement partial writes (8 and 16-bit).
2966 * @thread EMT
2967 */
2968static int e1kRegWriteCTRL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2969{
2970 int rc = VINF_SUCCESS;
2971
2972 if (value & CTRL_RESET)
2973 { /* RST */
2974#ifndef IN_RING3
2975 return VINF_IOM_R3_MMIO_WRITE;
2976#else
2977 e1kR3HardReset(pDevIns, pThis, PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC));
2978#endif
2979 }
2980 else
2981 {
2982#ifdef E1K_LSC_ON_SLU
2983 /*
2984 * When the guest changes 'Set Link Up' bit from 0 to 1 we check if
2985 * the link is down and the cable is connected, and if they are we
2986 * bring the link up, see @bugref{8624}.
2987 */
2988 if ( (value & CTRL_SLU)
2989 && !(CTRL & CTRL_SLU)
2990 && pThis->fCableConnected
2991 && !(STATUS & STATUS_LU))
2992 {
2993 /* It should take about 2 seconds for the link to come up */
2994 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
2995 }
2996#else /* !E1K_LSC_ON_SLU */
2997 if ( (value & CTRL_SLU)
2998 && !(CTRL & CTRL_SLU)
2999 && e1kIsConnected(pDevIns)
3000 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hLUTimer))
3001 {
3002 /* PXE does not use LSC interrupts, see @bugref{9113}. */
3003 STATUS |= STATUS_LU;
3004 }
3005#endif /* !E1K_LSC_ON_SLU */
3006 if ((value & CTRL_VME) != (CTRL & CTRL_VME))
3007 {
3008 E1kLog(("%s VLAN Mode %s\n", pThis->szPrf, (value & CTRL_VME) ? "Enabled" : "Disabled"));
3009 }
3010 Log7(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
3011 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
3012 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
3013 if (value & CTRL_MDC)
3014 {
3015 if (value & CTRL_MDIO_DIR)
3016 {
3017 Log7(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
3018 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
3019 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO), pDevIns);
3020 }
3021 else
3022 {
3023 if (Phy::readMDIO(&pThis->phy))
3024 value |= CTRL_MDIO;
3025 else
3026 value &= ~CTRL_MDIO;
3027 Log7(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
3028 }
3029 }
3030 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3031 }
3032
3033 return rc;
3034}
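/*
 * Note on the bit-banged MDIO path handled above: besides the MDIC register, a
 * driver may toggle CTRL.MDC/MDIO directly. In this emulation a bit is passed
 * to the PHY on a CTRL write with MDC high and MDIO_DIR set to output, and the
 * PHY's output bit is merged into CTRL.MDIO on reads when MDIO_DIR selects
 * input. A rough, illustrative sketch of one guest-driven bit (pseudocode only;
 * READ32/WRITE32 are hypothetical register accessors, not part of this device):
 *
 *   ctrl = READ32(CTRL) & ~(CTRL_MDC | CTRL_MDIO);
 *   if (bit) ctrl |= CTRL_MDIO;                        // present the data bit
 *   WRITE32(CTRL, ctrl | CTRL_MDIO_DIR);               // MDC low, data valid
 *   WRITE32(CTRL, ctrl | CTRL_MDIO_DIR | CTRL_MDC);    // MDC high: bit is latched here
 */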
3035
3036/**
3037 * Write handler for EEPROM/Flash Control/Data register.
3038 *
3039 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
3040 *
3041 * @param pThis The device state structure.
3042 * @param offset Register offset in memory-mapped frame.
3043 * @param index Register index in register array.
3044 * @param value The value to store.
3045 * @param mask Used to implement partial writes (8 and 16-bit).
3046 * @thread EMT
3047 */
3048static int e1kRegWriteEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3049{
3050 RT_NOREF(pDevIns, offset, index);
3051#ifdef IN_RING3
3052 /* So far we are concerned with lower byte only */
3053 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
3054 {
3055 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
3056 /* Note: 82543GC does not need to request EEPROM access */
3057 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
3058 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3059 pThisCC->eeprom.write(value & EECD_EE_WIRES);
3060 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
3061 }
3062 if (value & EECD_EE_REQ)
3063 EECD |= EECD_EE_REQ|EECD_EE_GNT;
3064 else
3065 EECD &= ~EECD_EE_GNT;
3066 //e1kRegWriteDefault(pThis, offset, index, value );
3067
3068 return VINF_SUCCESS;
3069#else /* !IN_RING3 */
3070 RT_NOREF(pThis, value);
3071 return VINF_IOM_R3_MMIO_WRITE;
3072#endif /* !IN_RING3 */
3073}
3074
3075/**
3076 * Read handler for EEPROM/Flash Control/Data register.
3077 *
3078 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
3079 *
3080 * @returns VBox status code.
3081 *
3082 * @param pThis The device state structure.
3083 * @param offset Register offset in memory-mapped frame.
3084 * @param index Register index in register array.
3085 * @param mask Used to implement partial reads (8 and 16-bit).
3086 * @thread EMT
3087 */
3088static int e1kRegReadEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3089{
3090#ifdef IN_RING3
3091 uint32_t value = 0; /* Get rid of false positive in parfait. */
3092 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
3093 if (RT_SUCCESS(rc))
3094 {
3095 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
3096 {
3097 /* Note: 82543GC does not need to request EEPROM access */
3098 /* Access to EEPROM granted -- get 4-wire bits from EEPROM device */
3099 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
3100 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3101 value |= pThisCC->eeprom.read();
3102 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
3103 }
3104 *pu32Value = value;
3105 }
3106
3107 return rc;
3108#else /* !IN_RING3 */
3109 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
3110 return VINF_IOM_R3_MMIO_READ;
3111#endif /* !IN_RING3 */
3112}
3113
3114/**
3115 * Write handler for EEPROM Read register.
3116 *
3117 * Handles EEPROM word access requests, reads EEPROM and stores the result
3118 * into DATA field.
3119 *
3120 * @param pThis The device state structure.
3121 * @param offset Register offset in memory-mapped frame.
3122 * @param index Register index in register array.
3123 * @param value The value to store.
3124 * @param mask Used to implement partial writes (8 and 16-bit).
3125 * @thread EMT
3126 */
3127static int e1kRegWriteEERD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3128{
3129#ifdef IN_RING3
3130 /* Make use of 'writable' and 'readable' masks. */
3131 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3132 /* DONE and DATA are set only if read was triggered by START. */
3133 if (value & EERD_START)
3134 {
3135 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
3136 uint16_t tmp;
3137 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3138 if (pThisCC->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
3139 SET_BITS(EERD, DATA, tmp);
3140 EERD |= EERD_DONE;
3141 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
3142 }
3143
3144 return VINF_SUCCESS;
3145#else /* !IN_RING3 */
3146 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
3147 return VINF_IOM_R3_MMIO_WRITE;
3148#endif /* !IN_RING3 */
3149}
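/*
 * Illustrative EERD usage from the guest's point of view (a schematic sketch
 * of the sequence this handler serves; ADDR_shift/DATA_shift stand in for the
 * real field positions encoded by the EERD register description in this file):
 *
 *   EERD = EERD_START | (word_index << ADDR_shift);   // request an EEPROM word
 *   while (!(EERD & EERD_DONE))                       // poll for completion
 *       ;                                             // (instant in this emulation)
 *   data = (uint16_t)(EERD >> DATA_shift);            // the 16-bit EEPROM word
 *
 * Because eeprom.readWord() above completes synchronously, DONE and DATA are
 * already set by the time the guest's write to EERD returns.
 */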
3150
3151
3152/**
3153 * Write handler for MDI Control register.
3154 *
3155 * Handles PHY read/write requests; forwards requests to internal PHY device.
3156 *
3157 * @param pThis The device state structure.
3158 * @param offset Register offset in memory-mapped frame.
3159 * @param index Register index in register array.
3160 * @param value The value to store.
3161 * @param mask Used to implement partial writes (8 and 16-bit).
3162 * @thread EMT
3163 */
3164static int e1kRegWriteMDIC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3165{
3166 if (value & MDIC_INT_EN)
3167 {
3168 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
3169 pThis->szPrf));
3170 }
3171 else if (value & MDIC_READY)
3172 {
3173 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
3174 pThis->szPrf));
3175 }
3176 else if (GET_BITS_V(value, MDIC, PHY) != 1)
3177 {
3178 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
3179 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
3180 /*
3181 * Some drivers scan the MDIO bus for a PHY. We can work with these
3182 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
3183 * at the requested address, see @bugref{7346}.
3184 */
3185 MDIC = MDIC_READY | MDIC_ERROR;
3186 }
3187 else
3188 {
3189 /* Store the value */
3190 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3191 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
3192 /* Forward op to PHY */
3193 if (value & MDIC_OP_READ)
3194 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), pDevIns));
3195 else
3196 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK, pDevIns);
3197 /* Let software know that we are done */
3198 MDIC |= MDIC_READY;
3199 }
3200
3201 return VINF_SUCCESS;
3202}
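/*
 * Illustrative MDIC read cycle as seen by a guest driver (schematic; PHY_shift
 * and REG_shift stand in for the real field positions used via the MDIC macros
 * above):
 *
 *   MDIC = MDIC_OP_READ | (1 << PHY_shift) | (reg << REG_shift); // start read
 *   while (!(MDIC & MDIC_READY))                                  // poll READY
 *       ;                                                         // instant here
 *   val = MDIC & MDIC_DATA_MASK;                                  // PHY register value
 *
 * Only PHY address 1 is backed by the emulated PHY; accesses to any other
 * address complete immediately with READY|ERROR so that bus scans terminate
 * (see the @bugref{7346} note above).
 */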
3203
3204/**
3205 * Write handler for Interrupt Cause Read register.
3206 *
3207 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
3208 *
3209 * @param pThis The device state structure.
3210 * @param offset Register offset in memory-mapped frame.
3211 * @param index Register index in register array.
3212 * @param value The value to store.
3213 * @param mask Used to implement partial writes (8 and 16-bit).
3214 * @thread EMT
3215 */
3216static int e1kRegWriteICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3217{
3218 ICR &= ~value;
3219
3220 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
3221 return VINF_SUCCESS;
3222}
3223
3224/**
3225 * Read handler for Interrupt Cause Read register.
3226 *
3227 * Reading this register acknowledges all interrupts.
3228 *
3229 * @returns VBox status code.
3230 *
3231 * @param pThis The device state structure.
3232 * @param offset Register offset in memory-mapped frame.
3233 * @param index Register index in register array.
3234 * @param mask Not used.
3235 * @thread EMT
3236 */
3237static int e1kRegReadICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3238{
3239 e1kCsEnterReturn(pThis, VINF_IOM_R3_MMIO_READ);
3240
3241 uint32_t value = 0;
3242 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
3243 if (RT_SUCCESS(rc))
3244 {
3245 if (value)
3246 {
3247 if (!pThis->fIntRaised)
3248 E1K_INC_ISTAT_CNT(pThis->uStatNoIntICR);
3249 /*
3250 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
3251 * with disabled interrupts.
3252 */
3253 //if (IMS)
3254 if (1)
3255 {
3256 /*
3257 * Interrupts were enabled -- we are supposedly at the very
3258 * beginning of the interrupt handler
3259 */
3260 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
3261 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
3262 /* Clear all pending interrupts */
3263 ICR = 0;
3264 pThis->fIntRaised = false;
3265 /* Lower(0) INTA(0) */
3266 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3267
3268 pThis->u64AckedAt = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
3269 if (pThis->fIntMaskUsed)
3270 pThis->fDelayInts = true;
3271 }
3272 else
3273 {
3274 /*
3275 * Interrupts are disabled -- in Windows guests the ICR read is done
3276 * just before re-enabling interrupts
3277 */
3278 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
3279 }
3280 }
3281 *pu32Value = value;
3282 }
3283 e1kCsLeave(pThis);
3284
3285 return rc;
3286}
3287
3288/**
3289 * Read handler for Interrupt Cause Set register.
3290 *
3291 * VxWorks driver uses this undocumented feature of real H/W to read ICR without acknowledging interrupts.
3292 *
3293 * @returns VBox status code.
3294 *
3295 * @param pThis The device state structure.
3296 * @param offset Register offset in memory-mapped frame.
3297 * @param index Register index in register array.
3298 * @param pu32Value Where to store the value of the register.
3299 * @thread EMT
3300 */
3301static int e1kRegReadICS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3302{
3303 RT_NOREF_PV(index);
3304 return e1kRegReadDefault(pDevIns, pThis, offset, ICR_IDX, pu32Value);
3305}
3306
3307/**
3308 * Write handler for Interrupt Cause Set register.
3309 *
3310 * Bits corresponding to 1s in 'value' will be set in ICR register.
3311 *
3312 * @param pThis The device state structure.
3313 * @param offset Register offset in memory-mapped frame.
3314 * @param index Register index in register array.
3315 * @param value The value to store.
3316 * @param mask Used to implement partial writes (8 and 16-bit).
3317 * @thread EMT
3318 */
3319static int e1kRegWriteICS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3320{
3321 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3322 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
3323 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
3324}
3325
3326/**
3327 * Write handler for Interrupt Mask Set register.
3328 *
3329 * Will trigger pending interrupts.
3330 *
3331 * @param pThis The device state structure.
3332 * @param offset Register offset in memory-mapped frame.
3333 * @param index Register index in register array.
3334 * @param value The value to store.
3335 * @param mask Used to implement partial writes (8 and 16-bit).
3336 * @thread EMT
3337 */
3338static int e1kRegWriteIMS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3339{
3340 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3341
3342 IMS |= value;
3343 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
3344 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
3345 /*
3346 * We cannot raise an interrupt here as it will occasionally cause an interrupt storm
3347 * in Windows guests (see @bugref{8624}, @bugref{5023}).
3348 */
3349 if ((ICR & IMS) && !pThis->fLocked)
3350 {
3351 E1K_INC_ISTAT_CNT(pThis->uStatIntIMS);
3352 e1kPostponeInterrupt(pDevIns, pThis, E1K_IMS_INT_DELAY_NS);
3353 }
3354
3355 return VINF_SUCCESS;
3356}
3357
3358/**
3359 * Write handler for Interrupt Mask Clear register.
3360 *
3361 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
3362 *
3363 * @param pThis The device state structure.
3364 * @param offset Register offset in memory-mapped frame.
3365 * @param index Register index in register array.
3366 * @param value The value to store.
3367 * @param mask Used to implement partial writes (8 and 16-bit).
3368 * @thread EMT
3369 */
3370static int e1kRegWriteIMC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3371{
3372 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3373
3374 e1kCsEnterReturn(pThis, VINF_IOM_R3_MMIO_WRITE);
3375 if (pThis->fIntRaised)
3376 {
3377 /*
3378 * Technically we should reset fIntRaised in the ICR read handler, but that would cause
3379 * Windows to freeze, since it may receive an interrupt while still at the very beginning
3380 * of its interrupt handler.
3381 */
3382 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3383 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3384 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3385 /* Lower(0) INTA(0) */
3386 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3387 pThis->fIntRaised = false;
3388 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3389 }
3390 IMS &= ~value;
3391 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3392 e1kCsLeave(pThis);
3393
3394 return VINF_SUCCESS;
3395}
3396
3397/**
3398 * Write handler for Receive Control register.
3399 *
3400 * @param pThis The device state structure.
3401 * @param offset Register offset in memory-mapped frame.
3402 * @param index Register index in register array.
3403 * @param value The value to store.
3404 * @param mask Used to implement partial writes (8 and 16-bit).
3405 * @thread EMT
3406 */
3407static int e1kRegWriteRCTL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3408{
3409 /* Update promiscuous mode */
3410 bool fBecomePromiscuous = !!(value & (RCTL_UPE | RCTL_MPE));
3411 if (fBecomePromiscuous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3412 {
3413 /* Promiscuity has changed, pass the knowledge on. */
3414#ifndef IN_RING3
3415 return VINF_IOM_R3_MMIO_WRITE;
3416#else
3417 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3418 if (pThisCC->pDrvR3)
3419 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, fBecomePromiscuous);
3420#endif
3421 }
3422
3423 /* Adjust receive buffer size */
3424 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3425 if (value & RCTL_BSEX)
3426 cbRxBuf *= 16;
3427 if (cbRxBuf > E1K_MAX_RX_PKT_SIZE)
3428 cbRxBuf = E1K_MAX_RX_PKT_SIZE;
3429 if (cbRxBuf != pThis->u16RxBSize)
3430 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3431 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3432 Assert(cbRxBuf < 65536);
3433 pThis->u16RxBSize = (uint16_t)cbRxBuf;
3434
3435 /* Update the register */
3436 return e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3437}
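/*
 * Receive buffer sizes resulting from the computation above (2048 >> BSIZE,
 * multiplied by 16 when BSEX is set, then capped at E1K_MAX_RX_PKT_SIZE):
 *
 *   BSEX=0: BSIZE=00 -> 2048, 01 -> 1024, 10 -> 512, 11 -> 256 bytes
 *   BSEX=1: BSIZE=01 -> 16384, 10 -> 8192, 11 -> 4096 bytes
 *
 * The BSEX=1/BSIZE=00 combination would yield 32768 and gets clamped to
 * E1K_MAX_RX_PKT_SIZE here.
 */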
3438
3439/**
3440 * Write handler for Packet Buffer Allocation register.
3441 *
3442 * TXA = 64 - RXA.
3443 *
3444 * @param pThis The device state structure.
3445 * @param offset Register offset in memory-mapped frame.
3446 * @param index Register index in register array.
3447 * @param value The value to store.
3448 * @param mask Used to implement partial writes (8 and 16-bit).
3449 * @thread EMT
3450 */
3451static int e1kRegWritePBA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3452{
3453 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3454 PBA_st->txa = 64 - PBA_st->rxa;
3455
3456 return VINF_SUCCESS;
3457}
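/*
 * Example of the invariant maintained above: a guest writing RXA=48 gets TXA
 * forced to 16, so RXA + TXA always stays at 64 (the fields count KB of the
 * packet buffer on the real chip).
 */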
3458
3459/**
3460 * Write handler for Receive Descriptor Tail register.
3461 *
3462 * @remarks Write into RDT forces switch to HC and signal to
3463 * e1kR3NetworkDown_WaitReceiveAvail().
3464 *
3465 * @returns VBox status code.
3466 *
3467 * @param pThis The device state structure.
3468 * @param offset Register offset in memory-mapped frame.
3469 * @param index Register index in register array.
3470 * @param value The value to store.
3471 * @param mask Used to implement partial writes (8 and 16-bit).
3472 * @thread EMT
3473 */
3474static int e1kRegWriteRDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3475{
3476#ifndef IN_RING3
3477 /* XXX */
3478// return VINF_IOM_R3_MMIO_WRITE;
3479#endif
3480 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3481 if (RT_LIKELY(rc == VINF_SUCCESS))
3482 {
3483 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3484#ifndef E1K_WITH_RXD_CACHE
3485 /*
3486 * Some drivers advance RDT too far, so that it equals RDH. This
3487 * somehow manages to work with real hardware but not with this
3488 * emulated device. We can work with these drivers if we just
3489 * write 1 less when we see a driver writing RDT equal to RDH,
3490 * see @bugref{7346}.
3491 */
3492 if (value == RDH)
3493 {
3494 if (RDH == 0)
3495 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3496 else
3497 value = RDH - 1;
3498 }
3499#endif /* !E1K_WITH_RXD_CACHE */
3500 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3501#ifdef E1K_WITH_RXD_CACHE
3502 E1KRXDC rxdc;
3503 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kRegWriteRDT")))
3504 {
3505 e1kCsRxLeave(pThis);
3506 E1kLog(("%s e1kRegWriteRDT: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
3507 return VINF_SUCCESS;
3508 }
3509 /*
3510 * We need to fetch descriptors now as RDT may go whole circle
3511 * before we attempt to store a received packet. For example,
3512 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3513 * size being only 8 descriptors! Note that we fetch descriptors
3514 * only when the cache is empty to reduce the number of memory reads
3515 * in case of frequent RDT writes. Don't fetch anything when the
3516 * receiver is disabled either as RDH, RDT, RDLEN can be in some
3517 * messed up state.
3518 * Note that even though the cache may seem empty, meaning that there are
3519 * no more available descriptors in it, it may still be in use by the RX
3520 * thread, which has not yet written the last descriptor back but has
3521 * temporarily released the RX lock in order to write the packet body to
3522 * the descriptor's buffer. At this point we are still going to prefetch,
3523 * but it won't actually fetch anything if there are no unused slots in
3524 * our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not
3525 * reset the cache here even if it appears empty. It will be reset at
3526 * a later point in e1kRxDGet().
3527 */
3528 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3529 e1kRxDPrefetch(pDevIns, pThis, &rxdc);
3530#endif /* E1K_WITH_RXD_CACHE */
3531 e1kCsRxLeave(pThis);
3532 if (RT_SUCCESS(rc))
3533 {
3534 /* Signal that we have more receive descriptors available. */
3535 e1kWakeupReceive(pDevIns, pThis);
3536 }
3537 }
3538 return rc;
3539}
3540
3541/**
3542 * Write handler for Receive Delay Timer register.
3543 *
3544 * @param pThis The device state structure.
3545 * @param offset Register offset in memory-mapped frame.
3546 * @param index Register index in register array.
3547 * @param value The value to store.
3548 * @param mask Used to implement partial writes (8 and 16-bit).
3549 * @thread EMT
3550 */
3551static int e1kRegWriteRDTR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3552{
3553 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3554 if (value & RDTR_FPD)
3555 {
3556 /* Flush requested, cancel both timers and raise interrupt */
3557#ifdef E1K_USE_RX_TIMERS
3558 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3559 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3560#endif
3561 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3562 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3563 }
3564
3565 return VINF_SUCCESS;
3566}
3567
3568DECLINLINE(uint32_t) e1kGetTxLen(PE1KTXDC pTxdc)
3569{
3570 /**
3571 * Make sure TDT won't change during computation. EMT may modify TDT at
3572 * any moment.
3573 */
3574 uint32_t tdt = pTxdc->tdt;
3575 return (pTxdc->tdh > tdt ? pTxdc->tdlen/sizeof(E1KTXDESC) : 0) + tdt - pTxdc->tdh;
3576}
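/*
 * Worked example of the ring arithmetic above, assuming TDLEN = 4096 bytes,
 * i.e. 256 descriptors of sizeof(E1KTXDESC) = 16 bytes each:
 *
 *   TDH = 10,  TDT = 250  ->  0 + 250 - 10   = 240 descriptors pending
 *   TDH = 250, TDT = 10   ->  256 + 10 - 250 = 16 descriptors pending (wrapped)
 *   TDH = TDT             ->  0              (ring empty)
 */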
3577
3578#ifdef IN_RING3
3579
3580# ifdef E1K_TX_DELAY
3581/**
3582 * @callback_method_impl{FNTMTIMERDEV, Transmit Delay Timer handler.}
3583 */
3584static DECLCALLBACK(void) e1kR3TxDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3585{
3586 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3587 Assert(PDMDevHlpCritSectIsOwner(pDevIns, &pThis->csTx));
3588 RT_NOREF(hTimer);
3589
3590 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3591# ifdef E1K_INT_STATS
3592 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3593 if (u64Elapsed > pThis->uStatMaxTxDelay)
3594 pThis->uStatMaxTxDelay = u64Elapsed;
3595# endif
3596 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
3597 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3598}
3599# endif /* E1K_TX_DELAY */
3600
3601//# ifdef E1K_USE_TX_TIMERS
3602
3603/**
3604 * @callback_method_impl{FNTMTIMERDEV, Transmit Interrupt Delay Timer handler.}
3605 */
3606static DECLCALLBACK(void) e1kR3TxIntDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3607{
3608 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3609 Assert(hTimer == pThis->hTIDTimer); RT_NOREF(hTimer);
3610
3611 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3612 /* Cancel absolute delay timer as we have already got attention */
3613# ifndef E1K_NO_TAD
3614 e1kCancelTimer(pDevIns, pThis, pThis->hTADTimer);
3615# endif
3616 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_TXDW);
3617}
3618
3619/**
3620 * @callback_method_impl{FNTMTIMERDEV, Transmit Absolute Delay Timer handler.}
3621 */
3622static DECLCALLBACK(void) e1kR3TxAbsDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3623{
3624 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3625 Assert(hTimer == pThis->hTADTimer); RT_NOREF(hTimer);
3626
3627 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3628 /* Cancel interrupt delay timer as we have already got attention */
3629 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
3630 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_TXDW);
3631}
3632
3633//# endif /* E1K_USE_TX_TIMERS */
3634# ifdef E1K_USE_RX_TIMERS
3635
3636/**
3637 * @callback_method_impl{FNTMTIMERDEV, Receive Interrupt Delay Timer handler.}
3638 */
3639static DECLCALLBACK(void) e1kR3RxIntDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3640{
3641 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3642 Assert(hTimer == pThis->hRIDTimer); RT_NOREF(hTimer);
3643
3644 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3645 /* Cancel absolute delay timer as we have already got attention */
3646 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3647 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_RXT0);
3648}
3649
3650/**
3651 * @callback_method_impl{FNTMTIMERDEV, Receive Absolute Delay Timer handler.}
3652 */
3653static DECLCALLBACK(void) e1kR3RxAbsDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3654{
3655 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3656 Assert(hTimer == pThis->hRADTimer); RT_NOREF(hTimer);
3657
3658 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3659 /* Cancel interrupt delay timer as we have already got attention */
3660 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3661 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_RXT0);
3662}
3663
3664# endif /* E1K_USE_RX_TIMERS */
3665
3666/**
3667 * @callback_method_impl{FNTMTIMERDEV, Late Interrupt Timer handler.}
3668 */
3669static DECLCALLBACK(void) e1kR3LateIntTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3670{
3671 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3672 Assert(hTimer == pThis->hIntTimer);
3673 RT_NOREF(hTimer);
3674
3675 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3676 STAM_COUNTER_INC(&pThis->StatLateInts);
3677 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3678# if 0
3679 if (pThis->iStatIntLost > -100)
3680 pThis->iStatIntLost--;
3681# endif
3682 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, 0);
3683 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3684}
3685
3686/**
3687 * @callback_method_impl{FNTMTIMERDEV, Link Up Timer handler.}
3688 */
3689static DECLCALLBACK(void) e1kR3LinkUpTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3690{
3691 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3692 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3693 Assert(hTimer == pThis->hLUTimer); RT_NOREF(hTimer);
3694
3695 /*
3696 * This can happen if we set the link status to down when the link-up timer was
3697 * already armed (shortly after e1kR3LoadDone()), or when the cable is
3698 * disconnected and reconnected very quickly. Moreover, 82543GC triggers LSC
3699 * on reset even if the cable is unplugged (see @bugref{8942}).
3700 */
3701 if (e1kIsConnected(pDevIns))
3702 {
3703 /* 82543GC does not have an internal PHY */
3704 if (pThis->eChip == E1K_CHIP_82543GC || (CTRL & CTRL_SLU))
3705 e1kR3LinkUp(pDevIns, pThis, pThisCC);
3706 }
3707# ifdef E1K_LSC_ON_RESET
3708 else if (pThis->eChip == E1K_CHIP_82543GC)
3709 e1kR3LinkDown(pDevIns, pThis, pThisCC);
3710# endif /* E1K_LSC_ON_RESET */
3711}
3712
3713#endif /* IN_RING3 */
3714
3715/**
3716 * Sets up the GSO context according to the TSE new context descriptor.
3717 *
3718 * @param pGso The GSO context to setup.
3719 * @param pCtx The context descriptor.
3720 */
3721DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3722{
3723 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3724
3725 /*
3726 * See if the context descriptor describes something that could be TCP or
3727 * UDP over IPv[46].
3728 */
3729 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3730 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3731 {
3732 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3733 return;
3734 }
3735 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3736 {
3737 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3738 return;
3739 }
3740 if (RT_UNLIKELY( pCtx->dw2.fTCP
3741 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3742 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3743 {
3744 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3745 return;
3746 }
3747
3748 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
3749 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3750 {
3751 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3752 return;
3753 }
3754
3755 /* IPv4 checksum offset. */
3756 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3757 {
3758 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3759 return;
3760 }
3761
3762 /* TCP/UDP checksum offsets. */
3763 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3764 != ( pCtx->dw2.fTCP
3765 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3766 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3767 {
3768 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3769 return;
3770 }
3771
3772 /*
3773 * Because of internal networking using a 16-bit size field for GSO context
3774 * plus frame, we have to make sure we don't exceed this.
3775 */
3776 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3777 {
3778 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3779 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3780 return;
3781 }
3782
3783 /*
3784 * We're good for now - we'll do more checks when seeing the data.
3785 * So, figure the type of offloading and setup the context.
3786 */
3787 if (pCtx->dw2.fIP)
3788 {
3789 if (pCtx->dw2.fTCP)
3790 {
3791 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3792 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3793 }
3794 else
3795 {
3796 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3797 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3798 }
3799 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3800 * this yet it seems)... */
3801 }
3802 else
3803 {
3804 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3805 if (pCtx->dw2.fTCP)
3806 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3807 else
3808 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3809 }
3810 pGso->offHdr1 = pCtx->ip.u8CSS;
3811 pGso->offHdr2 = pCtx->tu.u8CSS;
3812 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3813 pGso->cbMaxSeg = pCtx->dw3.u16MSS + (pGso->u8Type == PDMNETWORKGSOTYPE_IPV4_UDP ? pGso->offHdr2 : 0);
3814 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3815 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3816 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3817}
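/*
 * A typical TCP/IPv4 TSE context that passes the checks above, assuming a
 * 14-byte Ethernet header and option-less 20-byte IP and TCP headers:
 *
 *   IPCSS = 14, IPCSO = 24 (14 + offset of ip_sum),
 *   TUCSS = 34, TUCSO = 50 (34 + offset of th_sum), TUCSE = 0,
 *   HDRLEN = 54, MSS = 1460
 *
 * which yields u8Type = PDMNETWORKGSOTYPE_IPV4_TCP, offHdr1 = 14,
 * offHdr2 = 34, cbHdrsSeg = cbHdrsTotal = 54 and cbMaxSeg = 1460.
 */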
3818
3819/**
3820 * Checks if we can use GSO processing for the current TSE frame.
3821 *
3822 * @param pThis The device state structure.
3823 * @param pGso The GSO context.
3824 * @param pData The first data descriptor of the frame.
3825 * @param pCtx The TSO context descriptor.
3826 */
3827DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3828{
3829 if (!pData->cmd.fTSE)
3830 {
3831 E1kLog2(("e1kCanDoGso: !TSE\n"));
3832 return false;
3833 }
3834 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3835 {
3836 E1kLog(("e1kCanDoGso: VLE\n"));
3837 return false;
3838 }
3839 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3840 {
3841 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3842 return false;
3843 }
3844
3845 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3846 {
3847 case PDMNETWORKGSOTYPE_IPV4_TCP:
3848 case PDMNETWORKGSOTYPE_IPV4_UDP:
3849 if (!pData->dw3.fIXSM)
3850 {
3851 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3852 return false;
3853 }
3854 if (!pData->dw3.fTXSM)
3855 {
3856 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3857 return false;
3858 }
3859 /** @todo what more check should we perform here? Ethernet frame type? */
3860 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3861 return true;
3862
3863 case PDMNETWORKGSOTYPE_IPV6_TCP:
3864 case PDMNETWORKGSOTYPE_IPV6_UDP:
3865 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3866 {
3867 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3868 return false;
3869 }
3870 if (!pData->dw3.fTXSM)
3871 {
3872 E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3873 return false;
3874 }
3875 /** @todo what more check should we perform here? Ethernet frame type? */
3876 E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3877 return true;
3878
3879 default:
3880 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3881 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3882 return false;
3883 }
3884}
3885
3886/**
3887 * Frees the current xmit buffer.
3888 *
3889 * @param pThis The device state structure.
3890 */
3891static void e1kXmitFreeBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC)
3892{
3893 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
3894 if (pSg)
3895 {
3896 pThisCC->CTX_SUFF(pTxSg) = NULL;
3897
3898 if (pSg->pvAllocator != pThis)
3899 {
3900 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3901 if (pDrv)
3902 pDrv->pfnFreeBuf(pDrv, pSg);
3903 }
3904 else
3905 {
3906 /* loopback */
3907 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3908 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3909 pSg->fFlags = 0;
3910 pSg->pvAllocator = NULL;
3911 }
3912 }
3913}
3914
3915#ifndef E1K_WITH_TXD_CACHE
3916/**
3917 * Allocates an xmit buffer.
3918 *
3919 * @returns See PDMINETWORKUP::pfnAllocBuf.
3920 * @param pThis The device state structure.
3921 * @param cbMin The minimum frame size.
3922 * @param fExactSize Whether cbMin is exact or if we have to max it
3923 * out to the max MTU size.
3924 * @param fGso Whether this is a GSO frame or not.
3925 */
3926DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, size_t cbMin, bool fExactSize, bool fGso)
3927{
3928 /* Adjust cbMin if necessary. */
3929 if (!fExactSize)
3930 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3931
3932 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3933 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
3934 e1kXmitFreeBuf(pThis, pThisCC);
3935 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
3936
3937 /*
3938 * Allocate the buffer.
3939 */
3940 PPDMSCATTERGATHER pSg;
3941 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3942 {
3943 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3944 if (RT_UNLIKELY(!pDrv))
3945 return VERR_NET_DOWN;
3946 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3947 if (RT_FAILURE(rc))
3948 {
3949 /* Suspend TX as we are out of buffers atm */
3950 STATUS |= STATUS_TXOFF;
3951 return rc;
3952 }
3953 }
3954 else
3955 {
3956 /* Create a loopback using the fallback buffer and preallocated SG. */
3957 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3958 pSg = &pThis->uTxFallback.Sg;
3959 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3960 pSg->cbUsed = 0;
3961 pSg->cbAvailable = 0;
3962 pSg->pvAllocator = pThis;
3963 pSg->pvUser = NULL; /* No GSO here. */
3964 pSg->cSegs = 1;
3965 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3966 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3967 }
3968
3969 pThisCC->CTX_SUFF(pTxSg) = pSg;
3970 return VINF_SUCCESS;
3971}
3972#else /* E1K_WITH_TXD_CACHE */
3973/**
3974 * Allocates an xmit buffer.
3975 *
3976 * @returns See PDMINETWORKUP::pfnAllocBuf.
3977 * @param pThis The device state structure.
3978 * @param pThisCC The current context instance data.
3979 * @param fGso Whether this is a GSO frame or not.
3980 *
3981 * @note The frame size is taken from pThis->cbTxAlloc in this variant.
3982 */
3983DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fGso)
3984{
3985 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3986 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
3987 e1kXmitFreeBuf(pThis, pThisCC);
3988 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
3989
3990 /*
3991 * Allocate the buffer.
3992 */
3993 PPDMSCATTERGATHER pSg;
3994 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3995 {
3996 if (pThis->cbTxAlloc == 0)
3997 {
3998 /* Zero packet, no need for the buffer */
3999 return VINF_SUCCESS;
4000 }
4001 if (fGso && pThis->GsoCtx.u8Type == PDMNETWORKGSOTYPE_INVALID)
4002 {
4003 E1kLog3(("Invalid GSO context, won't allocate this packet, cb=%u %s%s\n",
4004 pThis->cbTxAlloc, pThis->fVTag ? "VLAN " : "", pThis->fGSO ? "GSO " : ""));
4005 /* No valid GSO context is available, ignore this packet. */
4006 pThis->cbTxAlloc = 0;
4007 return VINF_SUCCESS;
4008 }
4009
4010 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
4011 if (RT_UNLIKELY(!pDrv))
4012 return VERR_NET_DOWN;
4013 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
4014 if (RT_FAILURE(rc))
4015 {
4016 /* Suspend TX as we are out of buffers atm */
4017 STATUS |= STATUS_TXOFF;
4018 return rc;
4019 }
4020 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
4021 pThis->szPrf, pThis->cbTxAlloc,
4022 pThis->fVTag ? "VLAN " : "",
4023 pThis->fGSO ? "GSO " : ""));
4024 }
4025 else
4026 {
4027 /* Create a loopback using the fallback buffer and preallocated SG. */
4028 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
4029 pSg = &pThis->uTxFallback.Sg;
4030 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
4031 pSg->cbUsed = 0;
4032 pSg->cbAvailable = sizeof(pThis->aTxPacketFallback);
4033 pSg->pvAllocator = pThis;
4034 pSg->pvUser = NULL; /* No GSO here. */
4035 pSg->cSegs = 1;
4036 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
4037 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
4038 }
4039 pThis->cbTxAlloc = 0;
4040
4041 pThisCC->CTX_SUFF(pTxSg) = pSg;
4042 return VINF_SUCCESS;
4043}
4044#endif /* E1K_WITH_TXD_CACHE */
4045
4046/**
4047 * Checks if it's a GSO buffer or not.
4048 *
4049 * @returns true / false.
4050 * @param pTxSg The scatter / gather buffer.
4051 */
4052DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
4053{
4054#if 0
4055 if (!pTxSg)
4056 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
4057 if (pTxSg && !pTxSg->pvUser)
4058 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
4059#endif
4060 return pTxSg && pTxSg->pvUser /* GSO indicator */;
4061}
4062
4063#ifndef E1K_WITH_TXD_CACHE
4064/**
4065 * Load transmit descriptor from guest memory.
4066 *
4067 * @param pDevIns The device instance.
4068 * @param pDesc Pointer to descriptor union.
4069 * @param addr Physical address in guest context.
4070 * @thread E1000_TX
4071 */
4072DECLINLINE(void) e1kLoadDesc(PPDMDEVINS pDevIns, E1KTXDESC *pDesc, RTGCPHYS addr)
4073{
4074 PDMDevHlpPCIPhysRead(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
4075}
4076#else /* E1K_WITH_TXD_CACHE */
4077/**
4078 * Load transmit descriptors from guest memory.
4079 *
4080 * We need two physical reads in case the tail wrapped around the end of the
4081 * TX descriptor ring.
4082 *
4083 * @returns the actual number of descriptors fetched.
4084 * @param pDevIns The device instance.
4085 * @param pThis The device state structure.
4086 * @thread E1000_TX
4087 */
4088DECLINLINE(unsigned) e1kTxDLoadMore(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pTxdc)
4089{
4090 Assert(pThis->iTxDCurrent == 0);
4091 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
4092 unsigned nDescsAvailable = e1kGetTxLen(pTxdc) - pThis->nTxDFetched;
4093 /* The following two lines ensure that pThis->nTxDFetched never overflows. */
4094 AssertCompile(E1K_TXD_CACHE_SIZE < (256 * sizeof(pThis->nTxDFetched)));
4095 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
4096 unsigned nDescsTotal = pTxdc->tdlen / sizeof(E1KTXDESC);
4097 Assert(nDescsTotal != 0);
4098 if (nDescsTotal == 0)
4099 return 0;
4100 unsigned nFirstNotLoaded = (pTxdc->tdh + pThis->nTxDFetched) % nDescsTotal;
4101 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
4102 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
4103 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
4104 nFirstNotLoaded, nDescsInSingleRead));
4105 if (nDescsToFetch == 0)
4106 return 0;
4107 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
4108 PDMDevHlpPCIPhysRead(pDevIns,
4109 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
4110 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
4111 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
4112 pThis->szPrf, nDescsInSingleRead,
4113 TDBAH, TDBAL + pTxdc->tdh * sizeof(E1KTXDESC),
4114 nFirstNotLoaded, pTxdc->tdlen, pTxdc->tdh, pTxdc->tdt));
4115 if (nDescsToFetch > nDescsInSingleRead)
4116 {
4117 PDMDevHlpPCIPhysRead(pDevIns,
4118 ((uint64_t)TDBAH << 32) + TDBAL,
4119 pFirstEmptyDesc + nDescsInSingleRead,
4120 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
4121 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
4122 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
4123 TDBAH, TDBAL));
4124 }
4125 pThis->nTxDFetched += (uint8_t)nDescsToFetch;
4126 return nDescsToFetch;
4127}
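/*
 * Wrap-around example for the two-read fetch above: with a 64-descriptor ring
 * (TDLEN = 1024), TDH = 60, TDT = 8 and an empty cache, 12 descriptors are
 * available. The first PCI read fetches the 4 descriptors at indices 60..63
 * (nDescsInSingleRead = 64 - 60), the second fetches the remaining 8 starting
 * at index 0 -- assuming the cache has room for all 12.
 */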
4128
4129/**
4130 * Load transmit descriptors from guest memory only if there are no loaded
4131 * descriptors.
4132 *
4133 * @returns true if there are descriptors in cache.
4134 * @param pDevIns The device instance.
4135 * @param pThis The device state structure.
4136 * @thread E1000_TX
4137 */
4138DECLINLINE(bool) e1kTxDLazyLoad(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pTxdc)
4139{
4140 if (pThis->nTxDFetched == 0)
4141 return e1kTxDLoadMore(pDevIns, pThis, pTxdc) != 0;
4142 return true;
4143}
4144#endif /* E1K_WITH_TXD_CACHE */
4145
4146/**
4147 * Write back transmit descriptor to guest memory.
4148 *
4149 * @param pDevIns The device instance.
4150 * @param pThis The device state structure.
4151 * @param pDesc Pointer to descriptor union.
4152 * @param addr Physical address in guest context.
4153 * @thread E1000_TX
4154 */
4155DECLINLINE(void) e1kWriteBackDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4156{
4157 /* Only the last half of the descriptor has to be written back. */
4158 e1kPrintTDesc(pThis, pDesc, "^^^");
4159 PDMDevHlpPCIPhysWrite(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
4160}
4161
4162/**
4163 * Transmit complete frame.
4164 *
4165 * @remarks We skip the FCS since we're not responsible for sending anything to
4166 * a real ethernet wire.
4167 *
4168 * @param pDevIns The device instance.
4169 * @param pThis The device state structure.
4170 * @param pThisCC The current context instance data.
4171 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4172 * @thread E1000_TX
4173 */
4174static void e1kTransmitFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fOnWorkerThread)
4175{
4176 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
4177 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
4178 Assert(!pSg || pSg->cSegs == 1);
4179
4180 if (cbFrame > 70) /* unqualified guess */
4181 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
4182
4183#ifdef E1K_INT_STATS
4184 if (cbFrame <= 1514)
4185 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
4186 else if (cbFrame <= 2962)
4187 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
4188 else if (cbFrame <= 4410)
4189 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
4190 else if (cbFrame <= 5858)
4191 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
4192 else if (cbFrame <= 7306)
4193 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
4194 else if (cbFrame <= 8754)
4195 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
4196 else if (cbFrame <= 16384)
4197 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
4198 else if (cbFrame <= 32768)
4199 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
4200 else
4201 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
4202#endif /* E1K_INT_STATS */
4203
4204 /* Add VLAN tag */
4205 if (cbFrame > 12 && pThis->fVTag)
4206 {
4207 E1kLog3(("%s Inserting VLAN tag %08x\n",
4208 pThis->szPrf, RT_BE2H_U16((uint16_t)VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
4209 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
4210 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16((uint16_t)VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
4211 pSg->cbUsed += 4;
4212 cbFrame += 4;
4213 Assert(pSg->cbUsed == cbFrame);
4214 Assert(pSg->cbUsed <= pSg->cbAvailable);
4215 }
4216/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
4217 "%.*Rhxd\n"
4218 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
4219 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
4220
4221 /* Update the stats */
4222 E1K_INC_CNT32(TPT);
4223 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
4224 E1K_INC_CNT32(GPTC);
4225 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
4226 E1K_INC_CNT32(BPTC);
4227 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
4228 E1K_INC_CNT32(MPTC);
4229 /* Update octet transmit counter */
4230 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
4231 if (pThisCC->CTX_SUFF(pDrv))
4232 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
4233 if (cbFrame == 64)
4234 E1K_INC_CNT32(PTC64);
4235 else if (cbFrame < 128)
4236 E1K_INC_CNT32(PTC127);
4237 else if (cbFrame < 256)
4238 E1K_INC_CNT32(PTC255);
4239 else if (cbFrame < 512)
4240 E1K_INC_CNT32(PTC511);
4241 else if (cbFrame < 1024)
4242 E1K_INC_CNT32(PTC1023);
4243 else
4244 E1K_INC_CNT32(PTC1522);
4245
4246 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
4247
4248 /*
4249 * Dump and send the packet.
4250 */
4251 int rc = VERR_NET_DOWN;
4252 if (pSg && pSg->pvAllocator != pThis)
4253 {
4254 e1kPacketDump(pDevIns, pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
4255
4256 pThisCC->CTX_SUFF(pTxSg) = NULL;
4257 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
4258 if (pDrv)
4259 {
4260 /* Release critical section to avoid deadlock in CanReceive */
4261 //e1kCsLeave(pThis);
4262 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4263 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
4264 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4265 //e1kR3CsEnterAsserted(pThis);
4266 }
4267 }
4268 else if (pSg)
4269 {
4270 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
4271 e1kPacketDump(pDevIns, pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
4272
4273 /** @todo do we actually need to check that we're in loopback mode here? */
4274 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
4275 {
4276 E1KRXDST status;
4277 RT_ZERO(status);
4278 status.fPIF = true;
4279 e1kHandleRxPacket(pDevIns, pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
4280 rc = VINF_SUCCESS;
4281 }
4282 e1kXmitFreeBuf(pThis, pThisCC);
4283 }
4284 else
4285 rc = VERR_NET_DOWN;
4286 if (RT_FAILURE(rc))
4287 {
4288 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
4289 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
4290 }
4291
4292 pThis->led.Actual.s.fWriting = 0;
4293}
4294
4295/**
4296 * Compute and write internet checksum (e1kCSum16) at the specified offset.
4297 *
4298 * @param pThis The device state structure.
4299 * @param pPkt Pointer to the packet.
4300 * @param u16PktLen Total length of the packet.
4301 * @param cso Offset in packet to write checksum at.
4302 * @param css Offset in packet to start computing
4303 * checksum from.
4304 * @param cse Offset in packet to stop computing
4305 * checksum at.
4306 * @param fUdp Replace 0 checksum with all 1s.
4307 * @thread E1000_TX
4308 */
4309static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse, bool fUdp = false)
4310{
4311 RT_NOREF1(pThis);
4312
4313 if (css >= u16PktLen)
4314 {
4315 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
4316 pThis->szPrf, css, u16PktLen));
4317 return;
4318 }
4319
4320 if (cso >= u16PktLen - 1)
4321 {
4322 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
4323 pThis->szPrf, cso, u16PktLen));
4324 return;
4325 }
4326
4327 if (cse == 0 || cse >= u16PktLen)
4328 cse = u16PktLen - 1;
4329 else if (cse < css)
4330 {
4331 E1kLog2(("%s css(%X) is greater than cse(%X), checksum is not inserted\n",
4332 pThis->szPrf, css, cse));
4333 return;
4334 }
4335
4336 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
4337 if (fUdp && u16ChkSum == 0)
4338 u16ChkSum = ~u16ChkSum; /* 0 means no checksum computed in case of UDP (see @bugref{9883}) */
4339 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
4340 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
4341 *(uint16_t*)(pPkt + cso) = u16ChkSum;
4342}
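/*
 * Worked example for the helper above: for a 60-byte UDP/IPv4 frame with
 * css = 34 (start of the UDP header) and cse = 0, the checksum is computed
 * over bytes 34..59 (cse defaults to u16PktLen - 1) and stored at offset cso.
 * With fUdp set, a computed sum of 0x0000 is written as 0xFFFF, since zero
 * means "no checksum" for UDP.
 */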
4343
4344/**
4345 * Add a part of descriptor's buffer to transmit frame.
4346 *
4347 * @remarks data.u64BufAddr is used unconditionally for both data
4348 * and legacy descriptors since it is identical to
4349 * legacy.u64BufAddr.
4350 *
4351 * @param pDevIns The device instance.
4352 * @param pThis The device state structure.
4353 * @param pDesc Pointer to the descriptor to transmit.
4354 * @param u16Len Length of buffer to the end of segment.
4355 * @param fSend Force packet sending.
4356 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4357 * @thread E1000_TX
4358 */
4359#ifndef E1K_WITH_TXD_CACHE
4360static void e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4361{
4362 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4363 /* TCP header being transmitted */
4364 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4365 /* IP header being transmitted */
4366 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4367
4368 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4369 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4370 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4371
4372 PDMDevHlpPCIPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4373 E1kLog3(("%s Dump of the segment:\n"
4374 "%.*Rhxd\n"
4375 "%s --- End of dump ---\n",
4376 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4377 pThis->u16TxPktLen += u16Len;
4378 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4379 pThis->szPrf, pThis->u16TxPktLen));
4380 if (pThis->u16HdrRemain > 0)
4381 {
4382 /* The header was not complete, check if it is now */
4383 if (u16Len >= pThis->u16HdrRemain)
4384 {
4385 /* The rest is payload */
4386 u16Len -= pThis->u16HdrRemain;
4387 pThis->u16HdrRemain = 0;
4388 /* Save partial checksum and flags */
4389 pThis->u32SavedCsum = pTcpHdr->chksum;
4390 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4391 /* Clear FIN and PSH flags now and set them only in the last segment */
4392 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4393 }
4394 else
4395 {
4396 /* Still not */
4397 pThis->u16HdrRemain -= u16Len;
4398 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4399 pThis->szPrf, pThis->u16HdrRemain));
4400 return;
4401 }
4402 }
4403
4404 pThis->u32PayRemain -= u16Len;
4405
4406 if (fSend)
4407 {
4408 /* Leave ethernet header intact */
4409 /* IP Total Length = payload + headers - ethernet header */
4410 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4411 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4412 pThis->szPrf, ntohs(pIpHdr->total_len)));
4413 /* Update IP Checksum */
4414 pIpHdr->chksum = 0;
4415 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4416 pThis->contextTSE.ip.u8CSO,
4417 pThis->contextTSE.ip.u8CSS,
4418 pThis->contextTSE.ip.u16CSE);
4419
4420 /* Update TCP flags */
4421 /* Restore original FIN and PSH flags for the last segment */
4422 if (pThis->u32PayRemain == 0)
4423 {
4424 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4425 E1K_INC_CNT32(TSCTC);
4426 }
4427 /* Add TCP length to partial pseudo header sum */
4428 uint32_t csum = pThis->u32SavedCsum
4429 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4430 while (csum >> 16)
4431 csum = (csum >> 16) + (csum & 0xFFFF);
4432 pTcpHdr->chksum = csum;
4433 /* Compute final checksum */
4434 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4435 pThis->contextTSE.tu.u8CSO,
4436 pThis->contextTSE.tu.u8CSS,
4437 pThis->contextTSE.tu.u16CSE);
4438
4439 /*
4440 * Transmit it. If we've used the SG already, allocate a new one before
4441 * we copy the data.
4442 */
4443 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4444 if (!pTxSg)
4445 {
4446 e1kXmitAllocBuf(pThis, pThisCC, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4447 pTxSg = pThisCC->CTX_SUFF(pTxSg);
4448 }
4449 if (pTxSg)
4450 {
4451 Assert(pThis->u16TxPktLen <= pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4452 Assert(pTxSg->cSegs == 1);
4453 if (pTxSg->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4454 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4455 pTxSg->cbUsed = pThis->u16TxPktLen;
4456 pTxSg->aSegs[0].cbSeg = pThis->u16TxPktLen;
4457 }
4458 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4459
4460 /* Update Sequence Number */
4461 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4462 - pThis->contextTSE.dw3.u8HDRLEN);
4463 /* Increment IP identification */
4464 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4465 }
4466}
4467#else /* E1K_WITH_TXD_CACHE */
4468static int e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4469{
4470 int rc = VINF_SUCCESS;
4471 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4472 /* TCP header being transmitted */
4473 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4474 /* IP header being transmitted */
4475 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4476
4477 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4478 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4479 AssertReturn(pThis->u32PayRemain + pThis->u16HdrRemain > 0, VINF_SUCCESS);
4480
4481 if (pThis->u16TxPktLen + u16Len <= sizeof(pThis->aTxPacketFallback))
4482 PDMDevHlpPCIPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4483 else
4484 E1kLog(("%s e1kFallbackAddSegment: writing beyond aTxPacketFallback, u16TxPktLen=%d(0x%x) + u16Len=%d(0x%x) > %d\n",
4485 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, u16Len, u16Len, sizeof(pThis->aTxPacketFallback)));
4486 E1kLog3(("%s Dump of the segment:\n"
4487 "%.*Rhxd\n"
4488 "%s --- End of dump ---\n",
4489 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4490 pThis->u16TxPktLen += u16Len;
4491 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4492 pThis->szPrf, pThis->u16TxPktLen));
4493 if (pThis->u16HdrRemain > 0)
4494 {
4495 /* The header was not complete, check if it is now */
4496 if (u16Len >= pThis->u16HdrRemain)
4497 {
4498 /* The rest is payload */
4499 u16Len -= pThis->u16HdrRemain;
4500 pThis->u16HdrRemain = 0;
4501 /* Save partial checksum and flags */
4502 pThis->u32SavedCsum = pTcpHdr->chksum;
4503 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4504 /* Clear FIN and PSH flags now and set them only in the last segment */
4505 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4506 }
4507 else
4508 {
4509 /* Still not */
4510 pThis->u16HdrRemain -= u16Len;
4511 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4512 pThis->szPrf, pThis->u16HdrRemain));
4513 return rc;
4514 }
4515 }
4516
4517 if (u16Len > pThis->u32PayRemain)
4518 pThis->u32PayRemain = 0;
4519 else
4520 pThis->u32PayRemain -= u16Len;
4521
4522 if (fSend)
4523 {
4524 /* Leave ethernet header intact */
4525 /* IP Total Length = payload + headers - ethernet header */
4526 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4527 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4528 pThis->szPrf, ntohs(pIpHdr->total_len)));
4529 /* Update IP Checksum */
4530 pIpHdr->chksum = 0;
4531 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4532 pThis->contextTSE.ip.u8CSO,
4533 pThis->contextTSE.ip.u8CSS,
4534 pThis->contextTSE.ip.u16CSE);
4535
4536 /* Update TCP flags */
4537 /* Restore original FIN and PSH flags for the last segment */
4538 if (pThis->u32PayRemain == 0)
4539 {
4540 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4541 E1K_INC_CNT32(TSCTC);
4542 }
4543 /* Add TCP length to partial pseudo header sum */
4544 uint32_t csum = pThis->u32SavedCsum
4545 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4546 while (csum >> 16)
4547 csum = (csum >> 16) + (csum & 0xFFFF);
4548 Assert(csum < 65536);
4549 pTcpHdr->chksum = (uint16_t)csum;
4550 /* Compute final checksum */
4551 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4552 pThis->contextTSE.tu.u8CSO,
4553 pThis->contextTSE.tu.u8CSS,
4554 pThis->contextTSE.tu.u16CSE);
4555
4556 /*
4557 * Transmit it.
4558 */
4559 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4560 if (pTxSg)
4561 {
4562 /* Make sure the packet fits into the allocated buffer */
4563 size_t cbCopy = RT_MIN(pThis->u16TxPktLen, pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4564#ifdef DEBUG
4565 if (pThis->u16TxPktLen > pTxSg->cbAvailable)
4566 E1kLog(("%s e1kFallbackAddSegment: truncating packet, u16TxPktLen=%d(0x%x) > cbAvailable=%d(0x%x)\n",
4567 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, pTxSg->cbAvailable, pTxSg->cbAvailable));
4568#endif /* DEBUG */
4569 Assert(pTxSg->cSegs == 1);
4570 if (pTxSg->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4571 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, cbCopy);
4572 pTxSg->cbUsed = cbCopy;
4573 pTxSg->aSegs[0].cbSeg = cbCopy;
4574 }
4575 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4576
4577 /* Update Sequence Number */
4578 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4579 - pThis->contextTSE.dw3.u8HDRLEN);
4580 /* Increment IP identification */
4581 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4582
4583 /* Allocate new buffer for the next segment. */
4584 if (pThis->u32PayRemain)
4585 {
4586 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4587 pThis->contextTSE.dw3.u16MSS)
4588 + pThis->contextTSE.dw3.u8HDRLEN;
4589 /* Do not add VLAN tags to empty packets. */
4590 if (pThis->fVTag && pThis->cbTxAlloc > 0)
4591 pThis->cbTxAlloc += 4;
4592 rc = e1kXmitAllocBuf(pThis, pThisCC, false /* fGSO */);
4593 }
4594 }
4595
4596 return rc;
4597}
4598#endif /* E1K_WITH_TXD_CACHE */
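/*
 * Illustrative sketch (compiled out, not used by the device): the pseudo-header
 * update above adds the TCP length to the saved partial checksum and then folds
 * the 32-bit sum back into 16 bits. The hypothetical helper below shows that
 * folding step in isolation; the name is made up for illustration only.
 */
#if 0
static uint16_t e1kExampleFoldChecksum(uint32_t u32Sum)
{
    /* Repeatedly add the carry (upper 16 bits) back into the lower 16 bits. */
    while (u32Sum >> 16)
        u32Sum = (u32Sum >> 16) + (u32Sum & 0xFFFF);
    /* E.g. 0x1FFFE folds to 0x1 + 0xFFFE = 0xFFFF. */
    return (uint16_t)u32Sum;
}
#endif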
4599
4600#ifndef E1K_WITH_TXD_CACHE
4601/**
4602 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4603 * frame.
4604 *
4605 * We construct the frame in the fallback buffer first and then copy it to the SG
4606 * buffer before passing it down to the network driver code.
4607 *
4608 * @returns true if the frame should be transmitted, false if not.
4609 *
4610 * @param pThis The device state structure.
4611 * @param pDesc Pointer to the descriptor to transmit.
4612 * @param cbFragment Length of descriptor's buffer.
4613 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4614 * @thread E1000_TX
4615 */
4616static bool e1kFallbackAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4617{
4618    PPDMSCATTERGATHER pTxSg = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC)->CTX_SUFF(pTxSg);
4619 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4620 Assert(pDesc->data.cmd.fTSE);
4621 Assert(!e1kXmitIsGsoBuf(pTxSg));
4622
4623 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4624 Assert(u16MaxPktLen != 0);
4625 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4626
4627 /*
4628 * Carve out segments.
4629 */
4630 do
4631 {
4632 /* Calculate how many bytes we have left in this TCP segment */
4633 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4634 if (cb > cbFragment)
4635 {
4636 /* This descriptor fits completely into current segment */
4637 cb = cbFragment;
4638 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4639 }
4640 else
4641 {
4642 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4643 /*
4644 * Rewind the packet tail pointer to the beginning of payload,
4645 * so we continue writing right beyond the header.
4646 */
4647 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4648 }
4649
4650 pDesc->data.u64BufAddr += cb;
4651 cbFragment -= cb;
4652 } while (cbFragment > 0);
4653
4654 if (pDesc->data.cmd.fEOP)
4655 {
4656 /* End of packet, next segment will contain header. */
4657 if (pThis->u32PayRemain != 0)
4658 E1K_INC_CNT32(TSCTFC);
4659 pThis->u16TxPktLen = 0;
4660 e1kXmitFreeBuf(pThis, PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4661 }
4662
4663 return false;
4664}
4665#else /* E1K_WITH_TXD_CACHE */
4666/**
4667 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4668 * frame.
4669 *
4670 * We construct the frame in the fallback buffer first and then copy it to the SG
4671 * buffer before passing it down to the network driver code.
4672 *
4673 * @returns VBox status code.
4674 *
4675 * @param pDevIns The device instance.
4676 * @param pThis The device state structure.
4677 * @param pDesc Pointer to the descriptor to transmit.
4679 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4680 * @thread E1000_TX
4681 */
4682static int e1kFallbackAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4683{
4684#ifdef VBOX_STRICT
4685 PPDMSCATTERGATHER pTxSg = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC)->CTX_SUFF(pTxSg);
4686 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4687 Assert(pDesc->data.cmd.fTSE);
4688 Assert(!e1kXmitIsGsoBuf(pTxSg));
4689#endif
4690
4691 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4692 /* We cannot produce empty packets, ignore all TX descriptors (see @bugref{9571}) */
4693 if (u16MaxPktLen == 0)
4694 return VINF_SUCCESS;
4695
4696 /*
4697 * Carve out segments.
4698 */
4699 int rc = VINF_SUCCESS;
4700 do
4701 {
4702 /* Calculate how many bytes we have left in this TCP segment */
4703 uint16_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4704 if (cb > pDesc->data.cmd.u20DTALEN)
4705 {
4706 /* This descriptor fits completely into current segment */
4707 cb = (uint16_t)pDesc->data.cmd.u20DTALEN; /* u20DTALEN at this point is guaranteed to fit into 16 bits. */
4708 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4709 }
4710 else
4711 {
4712 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4713 /*
4714 * Rewind the packet tail pointer to the beginning of payload,
4715 * so we continue writing right beyond the header.
4716 */
4717 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4718 }
4719
4720 pDesc->data.u64BufAddr += cb;
4721 pDesc->data.cmd.u20DTALEN -= cb;
4722 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4723
4724 if (pDesc->data.cmd.fEOP)
4725 {
4726 /* End of packet, next segment will contain header. */
4727 if (pThis->u32PayRemain != 0)
4728 E1K_INC_CNT32(TSCTFC);
4729 pThis->u16TxPktLen = 0;
4730 e1kXmitFreeBuf(pThis, PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4731 }
4732
4733 return VINF_SUCCESS; /// @todo consider rc;
4734}
4735#endif /* E1K_WITH_TXD_CACHE */
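/*
 * Illustrative sketch (compiled out, not used by the device): the carving loop
 * above cuts a descriptor's buffer into segments of at most HDRLEN + MSS bytes,
 * keeping the header part in place each time a segment is sent. The hypothetical
 * helper below repeats the same arithmetic on plain integers; all names are made up.
 */
#if 0
static unsigned e1kExampleCountSegments(uint32_t cbData, uint16_t cbHdr, uint16_t cbMss)
{
    uint16_t const cbMaxPkt = cbHdr + cbMss;    /* maximum size of one segment */
    uint16_t       cbPkt    = cbHdr;            /* the header is prepended to every segment */
    unsigned       cSegs    = 0;
    while (cbData > 0)
    {
        uint32_t cb = (uint32_t)(cbMaxPkt - cbPkt); /* room left in the current segment */
        if (cb > cbData)
            cb = cbData;                            /* the remainder fits completely */
        cbPkt  += (uint16_t)cb;
        cbData -= cb;
        if (cbPkt == cbMaxPkt || cbData == 0)
        {
            ++cSegs;                                /* "send" the segment */
            cbPkt = cbHdr;                          /* rewind to just past the header */
        }
    }
    return cSegs;
}
#endif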
4736
4737
4738/**
4739 * Add descriptor's buffer to transmit frame.
4740 *
4741 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4742 * TSE frames we cannot handle as GSO.
4743 *
4744 * @returns true on success, false on failure.
4745 *
4746 * @param pDevIns The device instance.
4747 * @param pThis The device state structure.
4748 * @param pThisCC The current context instance data.
4749 * @param PhysAddr The physical address of the descriptor buffer.
4750 * @param cbFragment Length of descriptor's buffer.
4751 * @thread E1000_TX
4752 */
4753static bool e1kAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, RTGCPHYS PhysAddr, uint32_t cbFragment)
4754{
4755 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4756 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4757 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4758
4759 LogFlow(("%s e1kAddToFrame: ENTER cbFragment=%d u16TxPktLen=%d cbUsed=%d cbAvailable=%d fGSO=%s\n",
4760 pThis->szPrf, cbFragment, pThis->u16TxPktLen, pTxSg->cbUsed, pTxSg->cbAvailable,
4761 fGso ? "true" : "false"));
4762 PCPDMNETWORKGSO pGso = (PCPDMNETWORKGSO)pTxSg->pvUser;
4763 if (pGso)
4764 {
4765 if (RT_UNLIKELY(pGso->cbMaxSeg == 0))
4766 {
4767 E1kLog(("%s zero-sized fragments are not allowed\n", pThis->szPrf));
4768 return false;
4769 }
4770 if (RT_UNLIKELY(pGso->u8Type == PDMNETWORKGSOTYPE_IPV4_UDP))
4771 {
4772 E1kLog(("%s UDP fragmentation is no longer supported\n", pThis->szPrf));
4773 return false;
4774 }
4775 }
4776 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4777 {
4778 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4779 return false;
4780 }
4781 if (RT_UNLIKELY( cbNewPkt > pTxSg->cbAvailable ))
4782 {
4783 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4784 return false;
4785 }
4786
4787 if (RT_LIKELY(pTxSg))
4788 {
4789 Assert(pTxSg->cSegs == 1);
4790 if (pTxSg->cbUsed != pThis->u16TxPktLen)
4791 E1kLog(("%s e1kAddToFrame: pTxSg->cbUsed=%d(0x%x) != u16TxPktLen=%d(0x%x)\n",
4792 pThis->szPrf, pTxSg->cbUsed, pTxSg->cbUsed, pThis->u16TxPktLen, pThis->u16TxPktLen));
4793
4794 PDMDevHlpPCIPhysRead(pDevIns, PhysAddr, (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4795
4796 pTxSg->cbUsed = cbNewPkt;
4797 }
4798 pThis->u16TxPktLen = cbNewPkt;
4799
4800 return true;
4801}
4802
4803
4804/**
4805 * Write the descriptor back to guest memory and notify the guest.
4806 *
4807 * @param pThis The device state structure.
4808 * @param pDesc Pointer to the descriptor that has been transmitted.
4809 * @param addr Physical address of the descriptor in guest memory.
4810 * @thread E1000_TX
4811 */
4812static void e1kDescReport(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4813{
4814 /*
4815 * We fake descriptor write-back bursting. Descriptors are written back as they are
4816 * processed.
4817 */
4818 /* Let's pretend we process descriptors. Write back with DD set. */
4819 /*
4820 * Prior to r71586 we tried to accommodate the case when write-back bursts
4821 * are enabled without actually implementing bursting by writing back all
4822 * descriptors, even the ones that do not have RS set. This caused kernel
4823 * panics with Linux SMP kernels, as the e1000 driver tried to free the skb
4824 * associated with a written-back descriptor if it happened to be a context
4825 * descriptor, since context descriptors have no skb associated with them.
4826 * Starting from r71586 we write back only the descriptors with RS set,
4827 * which is a little bit different from what the real hardware does in
4828 * case there is a chain of data descriptors where some of them have RS set
4829 * and others do not. That is a very uncommon scenario, though.
4830 * We need to check RPS as well since some legacy drivers use it instead of
4831 * RS even with newer cards.
4832 */
4833 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4834 {
4835 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4836 e1kWriteBackDesc(pDevIns, pThis, pDesc, addr);
4837 if (pDesc->legacy.cmd.fEOP)
4838 {
4839//#ifdef E1K_USE_TX_TIMERS
4840 if (pThis->fTidEnabled && pDesc->legacy.cmd.fIDE)
4841 {
4842 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4843 //if (pThis->fIntRaised)
4844 //{
4845 // /* Interrupt is already pending, no need for timers */
4846 // ICR |= ICR_TXDW;
4847 //}
4848 //else {
4849 /* Arm the timer to fire in TIDV usec (discard .024) */
4850 e1kArmTimer(pDevIns, pThis, pThis->hTIDTimer, TIDV);
4851# ifndef E1K_NO_TAD
4852 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4853 E1kLog2(("%s Checking if TAD timer is running\n",
4854 pThis->szPrf));
4855 if (TADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hTADTimer))
4856 e1kArmTimer(pDevIns, pThis, pThis->hTADTimer, TADV);
4857# endif /* E1K_NO_TAD */
4858 }
4859 else
4860 {
4861 if (pThis->fTidEnabled)
4862 {
4863 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4864 pThis->szPrf));
4865 /* Cancel both timers if armed and fire immediately. */
4866# ifndef E1K_NO_TAD
4867 PDMDevHlpTimerStop(pDevIns, pThis->hTADTimer);
4868# endif
4869 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
4870 }
4871//#endif /* E1K_USE_TX_TIMERS */
4872 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4873 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXDW);
4874//#ifdef E1K_USE_TX_TIMERS
4875 }
4876//#endif /* E1K_USE_TX_TIMERS */
4877 }
4878 }
4879 else
4880 {
4881 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4882 }
4883}
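/*
 * Illustrative sketch (compiled out, not used by the device): as described in
 * the comment above, only descriptors with the RS (or legacy RPS) bit set are
 * written back with DD set. The hypothetical predicate below captures just
 * that condition.
 */
#if 0
static bool e1kExampleNeedsWriteBack(E1KTXDESC const *pDesc)
{
    /* Report Status or (legacy) Report Packet Sent requests a write-back. */
    return pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS;
}
#endif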
4884
4885#ifndef E1K_WITH_TXD_CACHE
4886
4887/**
4888 * Process Transmit Descriptor.
4889 *
4890 * E1000 supports three types of transmit descriptors:
4891 * - legacy: data descriptors of the older, context-less format.
4892 * - data: same as legacy, but providing new offloading capabilities.
4893 * - context: sets up the context for the following data descriptors.
4894 *
4895 * @param pDevIns The device instance.
4896 * @param pThis The device state structure.
4897 * @param pThisCC The current context instance data.
4898 * @param pDesc Pointer to descriptor union.
4899 * @param addr Physical address of descriptor in guest memory.
4900 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4901 * @thread E1000_TX
4902 */
4903static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
4904 RTGCPHYS addr, bool fOnWorkerThread)
4905{
4906 int rc = VINF_SUCCESS;
4907 uint32_t cbVTag = 0;
4908
4909 e1kPrintTDesc(pThis, pDesc, "vvv");
4910
4911//#ifdef E1K_USE_TX_TIMERS
4912 if (pThis->fTidEnabled)
4913 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
4914//#endif /* E1K_USE_TX_TIMERS */
4915
4916 switch (e1kGetDescType(pDesc))
4917 {
4918 case E1K_DTYP_CONTEXT:
4919 if (pDesc->context.dw2.fTSE)
4920 {
4921 pThis->contextTSE = pDesc->context;
4922 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4923 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4924 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4925 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4926 }
4927 else
4928 {
4929 pThis->contextNormal = pDesc->context;
4930 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4931 }
4932 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4933 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4934 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4935 pDesc->context.ip.u8CSS,
4936 pDesc->context.ip.u8CSO,
4937 pDesc->context.ip.u16CSE,
4938 pDesc->context.tu.u8CSS,
4939 pDesc->context.tu.u8CSO,
4940 pDesc->context.tu.u16CSE));
4941 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4942            e1kDescReport(pDevIns, pThis, pDesc, addr);
4943 break;
4944
4945 case E1K_DTYP_DATA:
4946 {
4947 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4948 {
4949                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4950 /** @todo Same as legacy when !TSE. See below. */
4951 break;
4952 }
4953 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4954 &pThis->StatTxDescTSEData:
4955 &pThis->StatTxDescData);
4956 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4957 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4958
4959 /*
4960             * The last descriptor of a non-TSE packet must contain the VLE flag.
4961             * TSE packets have the VLE flag in the first descriptor. The latter
4962 * case is taken care of a bit later when cbVTag gets assigned.
4963 *
4964 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4965 */
4966 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4967 {
4968 pThis->fVTag = pDesc->data.cmd.fVLE;
4969 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4970 }
4971 /*
4972 * First fragment: Allocate new buffer and save the IXSM and TXSM
4973 * packet options as these are only valid in the first fragment.
4974 */
4975 if (pThis->u16TxPktLen == 0)
4976 {
4977 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4978 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4979 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4980 pThis->fIPcsum ? " IP" : "",
4981 pThis->fTCPcsum ? " TCP/UDP" : ""));
4982 if (pDesc->data.cmd.fTSE)
4983 {
4984 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4985 pThis->fVTag = pDesc->data.cmd.fVLE;
4986 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4987 cbVTag = pThis->fVTag ? 4 : 0;
4988 }
4989 else if (pDesc->data.cmd.fEOP)
4990 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4991 else
4992 cbVTag = 4;
4993 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4994 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4995 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4996 true /*fExactSize*/, true /*fGso*/);
4997 else if (pDesc->data.cmd.fTSE)
4998                    rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4999 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
5000 else
5001 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->data.cmd.u20DTALEN + cbVTag,
5002 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
5003
5004 /**
5005                 * @todo Perhaps it is not that simple for GSO packets! We may
5006 * need to unwind some changes.
5007 */
5008 if (RT_FAILURE(rc))
5009 {
5010 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5011 break;
5012 }
5013                /** @todo Is there any way to indicate errors other than collisions? Like
5014 * VERR_NET_DOWN. */
5015 }
5016
5017 /*
5018 * Add the descriptor data to the frame. If the frame is complete,
5019 * transmit it and reset the u16TxPktLen field.
5020 */
5021 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
5022 {
5023 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
5024 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5025 if (pDesc->data.cmd.fEOP)
5026 {
5027 if ( fRc
5028 && pThisCC->CTX_SUFF(pTxSg)
5029 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
5030 {
5031 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5032 E1K_INC_CNT32(TSCTC);
5033 }
5034 else
5035 {
5036 if (fRc)
5037 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
5038 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
5039 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
5040                        e1kXmitFreeBuf(pThis, pThisCC);
5041 E1K_INC_CNT32(TSCTFC);
5042 }
5043 pThis->u16TxPktLen = 0;
5044 }
5045 }
5046 else if (!pDesc->data.cmd.fTSE)
5047 {
5048 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
5049 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5050 if (pDesc->data.cmd.fEOP)
5051 {
5052 if (fRc && pThisCC->CTX_SUFF(pTxSg))
5053 {
5054 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
5055 if (pThis->fIPcsum)
5056 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5057 pThis->contextNormal.ip.u8CSO,
5058 pThis->contextNormal.ip.u8CSS,
5059 pThis->contextNormal.ip.u16CSE);
5060 if (pThis->fTCPcsum)
5061 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5062 pThis->contextNormal.tu.u8CSO,
5063 pThis->contextNormal.tu.u8CSS,
5064 pThis->contextNormal.tu.u16CSE,
5065 !pThis->contextNormal.dw2.fTCP);
5066 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5067 }
5068 else
5069                        e1kXmitFreeBuf(pThis, pThisCC);
5070 pThis->u16TxPktLen = 0;
5071 }
5072 }
5073 else
5074 {
5075 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
5076 e1kFallbackAddToFrame(pDevIns, pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
5077 }
5078
5079            e1kDescReport(pDevIns, pThis, pDesc, addr);
5080 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5081 break;
5082 }
5083
5084 case E1K_DTYP_LEGACY:
5085 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
5086 {
5087 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
5088 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
5089 break;
5090 }
5091 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
5092 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5093
5094 /* First fragment: allocate new buffer. */
5095 if (pThis->u16TxPktLen == 0)
5096 {
5097 if (pDesc->legacy.cmd.fEOP)
5098 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
5099 else
5100 cbVTag = 4;
5101 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
5102 /** @todo reset status bits? */
5103 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
5104 if (RT_FAILURE(rc))
5105 {
5106 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5107 break;
5108 }
5109
5110                /** @todo Is there any way to indicate errors other than collisions? Like
5111 * VERR_NET_DOWN. */
5112 }
5113
5114 /* Add fragment to frame. */
5115 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
5116 {
5117 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
5118
5119 /* Last fragment: Transmit and reset the packet storage counter. */
5120 if (pDesc->legacy.cmd.fEOP)
5121 {
5122 pThis->fVTag = pDesc->legacy.cmd.fVLE;
5123 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
5124 /** @todo Offload processing goes here. */
5125 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5126 pThis->u16TxPktLen = 0;
5127 }
5128 }
5129 /* Last fragment + failure: free the buffer and reset the storage counter. */
5130 else if (pDesc->legacy.cmd.fEOP)
5131 {
5132                e1kXmitFreeBuf(pThis, pThisCC);
5133 pThis->u16TxPktLen = 0;
5134 }
5135
5136            e1kDescReport(pDevIns, pThis, pDesc, addr);
5137 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5138 break;
5139
5140 default:
5141 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
5142 pThis->szPrf, e1kGetDescType(pDesc)));
5143 break;
5144 }
5145
5146 return rc;
5147}
5148
5149#else /* E1K_WITH_TXD_CACHE */
5150
5151/**
5152 * Process Transmit Descriptor.
5153 *
5154 * E1000 supports three types of transmit descriptors:
5155 * - legacy: data descriptors of the older, context-less format.
5156 * - data: same as legacy, but providing new offloading capabilities.
5157 * - context: sets up the context for the following data descriptors.
5158 *
5159 * @param pDevIns The device instance.
5160 * @param pThis The device state structure.
5161 * @param pThisCC The current context instance data.
5162 * @param pDesc Pointer to descriptor union.
5163 * @param addr Physical address of descriptor in guest memory.
5164 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
5166 * @thread E1000_TX
5167 */
5168static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
5169 RTGCPHYS addr, bool fOnWorkerThread)
5170{
5171 int rc = VINF_SUCCESS;
5172
5173 e1kPrintTDesc(pThis, pDesc, "vvv");
5174
5175//#ifdef E1K_USE_TX_TIMERS
5176 if (pThis->fTidEnabled)
5177 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
5178//#endif /* E1K_USE_TX_TIMERS */
5179
5180 switch (e1kGetDescType(pDesc))
5181 {
5182 case E1K_DTYP_CONTEXT:
5183            /* The caller has already updated the context. */
5184 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
5185 e1kDescReport(pDevIns, pThis, pDesc, addr);
5186 break;
5187
5188 case E1K_DTYP_DATA:
5189 {
5190 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
5191 &pThis->StatTxDescTSEData:
5192 &pThis->StatTxDescData);
5193 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
5194 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5195 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
5196 {
5197 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
5198 if (pDesc->data.cmd.fEOP)
5199 {
5200 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5201 pThis->u16TxPktLen = 0;
5202 }
5203 }
5204 else
5205 {
5206 /*
5207 * Add the descriptor data to the frame. If the frame is complete,
5208 * transmit it and reset the u16TxPktLen field.
5209 */
5210 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
5211 {
5212 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
5213 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5214 if (pDesc->data.cmd.fEOP)
5215 {
5216 if ( fRc
5217 && pThisCC->CTX_SUFF(pTxSg)
5218 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
5219 {
5220 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5221 E1K_INC_CNT32(TSCTC);
5222 }
5223 else
5224 {
5225 if (fRc)
5226 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
5227 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
5228 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
5229 e1kXmitFreeBuf(pThis, pThisCC);
5230 E1K_INC_CNT32(TSCTFC);
5231 }
5232 pThis->u16TxPktLen = 0;
5233 }
5234 }
5235 else if (!pDesc->data.cmd.fTSE)
5236 {
5237 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
5238 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5239 if (pDesc->data.cmd.fEOP)
5240 {
5241 if (fRc && pThisCC->CTX_SUFF(pTxSg))
5242 {
5243 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
5244 if (pThis->fIPcsum)
5245 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5246 pThis->contextNormal.ip.u8CSO,
5247 pThis->contextNormal.ip.u8CSS,
5248 pThis->contextNormal.ip.u16CSE);
5249 if (pThis->fTCPcsum)
5250 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5251 pThis->contextNormal.tu.u8CSO,
5252 pThis->contextNormal.tu.u8CSS,
5253 pThis->contextNormal.tu.u16CSE,
5254 !pThis->contextNormal.dw2.fTCP);
5255 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5256 }
5257 else
5258 e1kXmitFreeBuf(pThis, pThisCC);
5259 pThis->u16TxPktLen = 0;
5260 }
5261 }
5262 else
5263 {
5264 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
5265 rc = e1kFallbackAddToFrame(pDevIns, pThis, pDesc, fOnWorkerThread);
5266 }
5267 }
5268 e1kDescReport(pDevIns, pThis, pDesc, addr);
5269 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5270 break;
5271 }
5272
5273 case E1K_DTYP_LEGACY:
5274 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
5275 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5276 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
5277 {
5278 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
5279 }
5280 else
5281 {
5282 /* Add fragment to frame. */
5283 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
5284 {
5285 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
5286
5287 /* Last fragment: Transmit and reset the packet storage counter. */
5288 if (pDesc->legacy.cmd.fEOP)
5289 {
5290 if (pDesc->legacy.cmd.fIC)
5291 {
5292 e1kInsertChecksum(pThis,
5293 (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
5294 pThis->u16TxPktLen,
5295 pDesc->legacy.cmd.u8CSO,
5296 pDesc->legacy.dw3.u8CSS,
5297 0);
5298 }
5299 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5300 pThis->u16TxPktLen = 0;
5301 }
5302 }
5303 /* Last fragment + failure: free the buffer and reset the storage counter. */
5304 else if (pDesc->legacy.cmd.fEOP)
5305 {
5306 e1kXmitFreeBuf(pThis, pThisCC);
5307 pThis->u16TxPktLen = 0;
5308 }
5309 }
5310 e1kDescReport(pDevIns, pThis, pDesc, addr);
5311 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5312 break;
5313
5314 default:
5315 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
5316 pThis->szPrf, e1kGetDescType(pDesc)));
5317 break;
5318 }
5319
5320 return rc;
5321}
5322
5323DECLINLINE(bool) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
5324{
5325 if (pDesc->context.dw2.fTSE)
5326 {
5327 pThis->contextTSE = pDesc->context;
5328 uint32_t cbMaxSegmentSize = pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + 4; /*VTAG*/
5329 if (RT_UNLIKELY(cbMaxSegmentSize > E1K_MAX_TX_PKT_SIZE))
5330 {
5331 pThis->contextTSE.dw3.u16MSS = E1K_MAX_TX_PKT_SIZE - pThis->contextTSE.dw3.u8HDRLEN - 4; /*VTAG*/
5332 LogRelMax(10, ("%s: Transmit packet is too large: %u > %u(max). Adjusted MSS to %u.\n",
5333 pThis->szPrf, cbMaxSegmentSize, E1K_MAX_TX_PKT_SIZE, pThis->contextTSE.dw3.u16MSS));
5334 }
5335 pThis->u32PayRemain = pThis->contextTSE.dw2.u20PAYLEN;
5336 pThis->u16HdrRemain = pThis->contextTSE.dw3.u8HDRLEN;
5337 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
5338 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
5339 }
5340 else
5341 {
5342 pThis->contextNormal = pDesc->context;
5343 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
5344 }
5345 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
5346 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
5347 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
5348 pDesc->context.ip.u8CSS,
5349 pDesc->context.ip.u8CSO,
5350 pDesc->context.ip.u16CSE,
5351 pDesc->context.tu.u8CSS,
5352 pDesc->context.tu.u8CSO,
5353 pDesc->context.tu.u16CSE));
5354 return true; /* Consider returning false for invalid descriptors */
5355}
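/*
 * Illustrative sketch (compiled out, not used by the device): the clamp above
 * keeps HDRLEN + MSS + 4 (VLAN tag) within E1K_MAX_TX_PKT_SIZE. The hypothetical
 * helper below shows the same adjustment on plain integers; for instance, with a
 * 54-byte header and a 16288-byte limit the MSS would be capped at
 * 16288 - 54 - 4 = 16230.
 */
#if 0
static uint16_t e1kExampleClampMss(uint16_t u16Mss, uint8_t u8HdrLen, uint32_t cbMaxPkt)
{
    uint32_t const cbMaxSeg = (uint32_t)u16Mss + u8HdrLen + 4; /* +4 for the VLAN tag */
    if (cbMaxSeg > cbMaxPkt)
        return (uint16_t)(cbMaxPkt - u8HdrLen - 4);
    return u16Mss;
}
#endif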
5356
5357static bool e1kLocateTxPacket(PE1KSTATE pThis)
5358{
5359 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
5360 pThis->szPrf, pThis->cbTxAlloc));
5361 /* Check if we have located the packet already. */
5362 if (pThis->cbTxAlloc)
5363 {
5364 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5365 pThis->szPrf, pThis->cbTxAlloc));
5366 return true;
5367 }
5368
5369 bool fTSE = false;
5370 uint32_t cbPacket = 0;
5371
5372    /* Since we process one packet at a time we will only mark the current packet's descriptors as valid. */
5373 memset(pThis->afTxDValid, 0, sizeof(pThis->afTxDValid));
5374 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
5375 {
5376 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
5377 /* Assume the descriptor valid until proven otherwise. */
5378 pThis->afTxDValid[i] = true;
5379 switch (e1kGetDescType(pDesc))
5380 {
5381 case E1K_DTYP_CONTEXT:
5382 if (cbPacket == 0)
5383 pThis->afTxDValid[i] = e1kUpdateTxContext(pThis, pDesc);
5384 else
5385 E1kLog(("%s e1kLocateTxPacket: ignoring a context descriptor in the middle of a packet, cbPacket=%d\n",
5386 pThis->szPrf, cbPacket));
5387 continue;
5388 case E1K_DTYP_LEGACY:
5389 /* Skip invalid descriptors. */
5390 if (cbPacket > 0 && (pThis->fGSO || fTSE))
5391 {
5392 E1kLog(("%s e1kLocateTxPacket: ignoring a legacy descriptor in the segmentation context, cbPacket=%d\n",
5393 pThis->szPrf, cbPacket));
5394 pThis->afTxDValid[i] = false; /* Make sure it is skipped by processing */
5395 continue;
5396 }
5397 /* Skip empty descriptors. */
5398 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
5399 break;
5400 cbPacket += pDesc->legacy.cmd.u16Length;
5401 pThis->fGSO = false;
5402 break;
5403 case E1K_DTYP_DATA:
5404 /* Skip invalid descriptors. */
5405 if (cbPacket > 0 && (bool)pDesc->data.cmd.fTSE != fTSE)
5406 {
5407 E1kLog(("%s e1kLocateTxPacket: ignoring %sTSE descriptor in the %ssegmentation context, cbPacket=%d\n",
5408 pThis->szPrf, pDesc->data.cmd.fTSE ? "" : "non-", fTSE ? "" : "non-", cbPacket));
5409 pThis->afTxDValid[i] = false; /* Make sure it is skipped by processing */
5410 continue;
5411 }
5412 /* Skip empty descriptors. */
5413 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
5414 break;
5415 if (cbPacket == 0)
5416 {
5417 /*
5418 * The first fragment: save IXSM and TXSM options
5419 * as these are only valid in the first fragment.
5420 */
5421 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
5422 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
5423 fTSE = pDesc->data.cmd.fTSE;
5424 /*
5425 * TSE descriptors have VLE bit properly set in
5426 * the first fragment.
5427 */
5428 if (fTSE)
5429 {
5430 pThis->fVTag = pDesc->data.cmd.fVLE;
5431 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5432 }
5433 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
5434 }
5435 cbPacket += pDesc->data.cmd.u20DTALEN;
5436 break;
5437 default:
5438 AssertMsgFailed(("Impossible descriptor type!"));
5439 continue;
5440 }
5441 if (pDesc->legacy.cmd.fEOP)
5442 {
5443 /*
5444 * Non-TSE descriptors have VLE bit properly set in
5445 * the last fragment.
5446 */
5447 if (!fTSE)
5448 {
5449 pThis->fVTag = pDesc->data.cmd.fVLE;
5450 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5451 }
5452 /*
5453 * Compute the required buffer size. If we cannot do GSO but still
5454 * have to do segmentation we allocate the first segment only.
5455 */
5456 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5457 cbPacket :
5458 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
5459 /* Do not add VLAN tags to empty packets. */
5460 if (pThis->fVTag && pThis->cbTxAlloc > 0)
5461 pThis->cbTxAlloc += 4;
5462 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d cbPacket=%d%s%s\n",
5463 pThis->szPrf, pThis->cbTxAlloc, cbPacket,
5464 pThis->fGSO ? " GSO" : "", fTSE ? " TSE" : ""));
5465 return true;
5466 }
5467 }
5468
5469 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5470 {
5471 /* All descriptors were empty, we need to process them as a dummy packet */
5472 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5473 pThis->szPrf, pThis->cbTxAlloc));
5474 return true;
5475 }
5476 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d cbPacket=%d\n",
5477 pThis->szPrf, pThis->cbTxAlloc, cbPacket));
5478 return false;
5479}
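/*
 * Illustrative sketch (compiled out, not used by the device): the buffer size
 * computed above is the whole packet for GSO and non-TSE packets, and only the
 * first segment (HDRLEN + MSS) when segmentation has to go through the fallback
 * path; a VLAN tag adds 4 bytes. The hypothetical helper below restates that rule.
 */
#if 0
static uint32_t e1kExampleTxAllocSize(uint32_t cbPacket, bool fTse, bool fGso, bool fVTag,
                                      uint16_t u16Mss, uint8_t u8HdrLen)
{
    uint32_t cb = (!fTse || fGso) ? cbPacket : RT_MIN(cbPacket, (uint32_t)u16Mss + u8HdrLen);
    if (fVTag && cb > 0)
        cb += 4; /* Do not add VLAN tags to empty packets. */
    return cb;
}
#endif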
5480
5481static int e1kXmitPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread, PE1KTXDC pTxdc)
5482{
5483 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5484 int rc = VINF_SUCCESS;
5485
5486 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5487 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5488
5489 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5490 {
5491 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5492 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5493 pThis->szPrf, TDBAH, TDBAL + pTxdc->tdh * sizeof(E1KTXDESC), pTxdc->tdlen, pTxdc->tdh, pTxdc->tdt));
5494 if (!pThis->afTxDValid[pThis->iTxDCurrent])
5495 {
5496 e1kPrintTDesc(pThis, pDesc, "vvv");
5497 E1kLog(("%s e1kXmitDesc: skipping bad descriptor ^^^\n", pThis->szPrf));
5498 e1kDescReport(pDevIns, pThis, pDesc, e1kDescAddr(TDBAH, TDBAL, pTxdc->tdh));
5499 rc = VINF_SUCCESS;
5500 }
5501 else
5502 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, pDesc, e1kDescAddr(TDBAH, TDBAL, pTxdc->tdh), fOnWorkerThread);
5503 if (RT_FAILURE(rc))
5504 break;
5505 if (++pTxdc->tdh * sizeof(E1KTXDESC) >= pTxdc->tdlen)
5506 pTxdc->tdh = 0;
5507 TDH = pTxdc->tdh; /* Sync the actual register and TXDC */
5508 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5509 if (uLowThreshold != 0 && e1kGetTxLen(pTxdc) <= uLowThreshold)
5510 {
5511 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5512 pThis->szPrf, e1kGetTxLen(pTxdc), GET_BITS(TXDCTL, LWTHRESH)*8));
5513 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5514 }
5515 ++pThis->iTxDCurrent;
5516 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5517 break;
5518 }
5519
5520 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5521 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5522 return rc;
5523}
5524
5525#endif /* E1K_WITH_TXD_CACHE */
5526#ifndef E1K_WITH_TXD_CACHE
5527
5528/**
5529 * Transmit pending descriptors.
5530 *
5531 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5532 *
5533 * @param pDevIns The device instance.
5534 * @param pThis The E1000 state.
5535 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5536 */
5537static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5538{
5539 int rc = VINF_SUCCESS;
5540 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5541
5542 /* Check if transmitter is enabled. */
5543 if (!(TCTL & TCTL_EN))
5544 return VINF_SUCCESS;
5545 /*
5546 * Grab the xmit lock of the driver as well as the E1K device state.
5547 */
5548 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5549 if (RT_LIKELY(rc == VINF_SUCCESS))
5550 {
5551 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5552 if (pDrv)
5553 {
5554 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5555 if (RT_FAILURE(rc))
5556 {
5557 e1kCsTxLeave(pThis);
5558 return rc;
5559 }
5560 }
5561 /*
5562 * Process all pending descriptors.
5563 * Note! Do not process descriptors in locked state
5564 */
5565 while (TDH != TDT && !pThis->fLocked)
5566 {
5567 E1KTXDESC desc;
5568 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5569 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5570
5571 e1kLoadDesc(pDevIns, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5572 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5573 /* If we failed to transmit descriptor we will try it again later */
5574 if (RT_FAILURE(rc))
5575 break;
5576 if (++TDH * sizeof(desc) >= TDLEN)
5577 TDH = 0;
5578
5579 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5580 {
5581 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5582 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5583 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5584 }
5585
5586 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5587 }
5588
5589 /// @todo uncomment: pThis->uStatIntTXQE++;
5590 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5591 /*
5592 * Release the lock.
5593 */
5594 if (pDrv)
5595 pDrv->pfnEndXmit(pDrv);
5596 e1kCsTxLeave(pThis);
5597 }
5598
5599 return rc;
5600}
5601
5602#else /* E1K_WITH_TXD_CACHE */
5603
5604static void e1kDumpTxDCache(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pTxdc)
5605{
5606 unsigned i, cDescs = pTxdc->tdlen / sizeof(E1KTXDESC);
5607 uint32_t tdh = pTxdc->tdh;
5608 LogRel(("E1000: -- Transmit Descriptors (%d total) --\n", cDescs));
5609 for (i = 0; i < cDescs; ++i)
5610 {
5611 E1KTXDESC desc;
5612 PDMDevHlpPCIPhysRead(pDevIns , e1kDescAddr(TDBAH, TDBAL, i), &desc, sizeof(desc));
5613 if (i == tdh)
5614 LogRel(("E1000: >>> "));
5615 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5616 }
5617 LogRel(("E1000: -- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5618 pThis->iTxDCurrent, pTxdc->tdh, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
5619 if (tdh > pThis->iTxDCurrent)
5620 tdh -= pThis->iTxDCurrent;
5621 else
5622 tdh = cDescs + tdh - pThis->iTxDCurrent;
5623 for (i = 0; i < pThis->nTxDFetched; ++i)
5624 {
5625 if (i == pThis->iTxDCurrent)
5626 LogRel(("E1000: >>> "));
5627 if (cDescs)
5628 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5629 else
5630 LogRel(("E1000: <lost>: %R[e1ktxd]\n", &pThis->aTxDescriptors[i]));
5631 }
5632}
5633
5634/**
5635 * Transmit pending descriptors.
5636 *
5637 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5638 *
5639 * @param pDevIns The device instance.
5640 * @param pThis The E1000 state.
5641 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5642 */
5643static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5644{
5645 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5646 int rc = VINF_SUCCESS;
5647
5648 /* Check if transmitter is enabled. */
5649 if (!(TCTL & TCTL_EN))
5650 return VINF_SUCCESS;
5651 /*
5652 * Grab the xmit lock of the driver as well as the E1K device state.
5653 */
5654 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
5655 if (pDrv)
5656 {
5657 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5658 if (RT_FAILURE(rc))
5659 return rc;
5660 }
5661
5662 /*
5663 * Process all pending descriptors.
5664 * Note! Do not process descriptors in locked state
5665 */
5666 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5667 if (RT_LIKELY(rc == VINF_SUCCESS && (TCTL & TCTL_EN)))
5668 {
5669 E1KTXDC txdc;
5670 bool fTxContextValid = e1kUpdateTxDContext(pDevIns, pThis, &txdc);
5671 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5672 /*
5673 * fIncomplete is set whenever we try to fetch additional descriptors
5674         * for an incomplete packet. If we fail to locate a complete packet on
5675         * the next iteration we need to reset the cache or we risk getting
5676         * stuck in this loop forever.
5677 */
5678 bool fIncomplete = false;
5679 while (fTxContextValid && !pThis->fLocked && e1kTxDLazyLoad(pDevIns, pThis, &txdc))
5680 {
5681 while (e1kLocateTxPacket(pThis))
5682 {
5683 fIncomplete = false;
5684 /* Found a complete packet, allocate it. */
5685 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->fGSO);
5686 /* If we're out of bandwidth we'll come back later. */
5687 if (RT_FAILURE(rc))
5688 goto out;
5689 /* Copy the packet to allocated buffer and send it. */
5690 rc = e1kXmitPacket(pDevIns, pThis, fOnWorkerThread, &txdc);
5691 /* If we're out of bandwidth we'll come back later. */
5692 if (RT_FAILURE(rc))
5693 goto out;
5694 }
5695 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5696 if (RT_UNLIKELY(fIncomplete))
5697 {
5698 static bool fTxDCacheDumped = false;
5699 /*
5700 * The descriptor cache is full, but we were unable to find
5701 * a complete packet in it. Drop the cache and hope that
5702                 * the guest driver can recover from the network card error.
5703 */
5704 LogRel(("%s: No complete packets in%s TxD cache! "
5705 "Fetched=%d, current=%d, TX len=%d.\n",
5706 pThis->szPrf,
5707 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5708 pThis->nTxDFetched, pThis->iTxDCurrent,
5709 e1kGetTxLen(&txdc)));
5710 if (!fTxDCacheDumped)
5711 {
5712 fTxDCacheDumped = true;
5713 e1kDumpTxDCache(pDevIns, pThis, &txdc);
5714 }
5715 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5716 /*
5717 * Returning an error at this point means Guru in R0
5718 * (see @bugref{6428}).
5719 */
5720# ifdef IN_RING3
5721 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5722# else /* !IN_RING3 */
5723 rc = VINF_IOM_R3_MMIO_WRITE;
5724# endif /* !IN_RING3 */
5725 goto out;
5726 }
5727 if (u8Remain > 0)
5728 {
5729 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5730 "%d more are available\n",
5731 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5732 e1kGetTxLen(&txdc) - u8Remain));
5733
5734 /*
5735                 * A packet was partially fetched. Move the incomplete packet to
5736                 * the beginning of the cache buffer, then load more descriptors.
5737 */
5738 memmove(pThis->aTxDescriptors,
5739 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5740 u8Remain * sizeof(E1KTXDESC));
5741 pThis->iTxDCurrent = 0;
5742 pThis->nTxDFetched = u8Remain;
5743 e1kTxDLoadMore(pDevIns, pThis, &txdc);
5744 fIncomplete = true;
5745 }
5746 else
5747 pThis->nTxDFetched = 0;
5748 pThis->iTxDCurrent = 0;
5749 }
5750 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5751 {
5752 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5753 pThis->szPrf));
5754 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5755 }
5756out:
5757 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5758
5759 /// @todo uncomment: pThis->uStatIntTXQE++;
5760 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5761
5762 e1kCsTxLeave(pThis);
5763 }
5764
5765
5766 /*
5767 * Release the lock.
5768 */
5769 if (pDrv)
5770 pDrv->pfnEndXmit(pDrv);
5771 return rc;
5772}
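/*
 * Illustrative sketch (compiled out, not used by the device): when only part of
 * a packet is present in the descriptor cache, the unprocessed tail is moved to
 * the front and more descriptors are fetched behind it, as restated by this
 * hypothetical helper (the real code then calls e1kTxDLoadMore()).
 */
#if 0
static unsigned e1kExampleCompactTxdCache(E1KTXDESC *paCache, unsigned iCurrent, unsigned cFetched)
{
    unsigned const cRemain = cFetched - iCurrent;
    memmove(paCache, &paCache[iCurrent], cRemain * sizeof(paCache[0]));
    return cRemain; /* New number of fetched descriptors; the current index becomes 0. */
}
#endif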
5773
5774#endif /* E1K_WITH_TXD_CACHE */
5775#ifdef IN_RING3
5776
5777/**
5778 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5779 */
5780static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5781{
5782 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
5783 PE1KSTATE pThis = pThisCC->pShared;
5784 /* Resume suspended transmission */
5785 STATUS &= ~STATUS_TXOFF;
5786 e1kXmitPending(pThisCC->pDevInsR3, pThis, true /*fOnWorkerThread*/);
5787}
5788
5789/**
5790 * @callback_method_impl{FNPDMTASKDEV,
5791 * Executes e1kXmitPending at the behest of ring-0/raw-mode.}
5792 * @note Not executed on EMT.
5793 */
5794static DECLCALLBACK(void) e1kR3TxTaskCallback(PPDMDEVINS pDevIns, void *pvUser)
5795{
5796 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
5797 E1kLog2(("%s e1kR3TxTaskCallback:\n", pThis->szPrf));
5798
5799 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5800 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN || rc == VERR_NET_DOWN, ("%Rrc\n", rc));
5801
5802 RT_NOREF(rc, pvUser);
5803}
5804
5805#endif /* IN_RING3 */
5806
5807/**
5808 * Write handler for Transmit Descriptor Tail register.
5809 *
5810 * @param pThis The device state structure.
5811 * @param offset Register offset in memory-mapped frame.
5812 * @param index Register index in register array.
5813 * @param value The value to store.
5815 * @thread EMT
5816 */
5817static int e1kRegWriteTDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5818{
5819 int rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
5820
5821 /* All descriptors starting with head and not including tail belong to us. */
5822 /* Process them. */
5823 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5824 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5825
5826 /* Compose a temporary TX context, breaking TX CS rule, for debugging purposes. */
5827 /* If we decide to transmit, the TX critical section will be entered later in e1kXmitPending(). */
5828 E1KTXDC txdc;
5829 txdc.tdlen = TDLEN;
5830 txdc.tdh = TDH;
5831 txdc.tdt = TDT;
5832 /* Ignore TDT writes when the link is down. */
5833 if (txdc.tdh != txdc.tdt && (STATUS & STATUS_LU))
5834 {
5835 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", txdc.tdh, txdc.tdt, e1kGetTxLen(&txdc)));
5836 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5837 pThis->szPrf, e1kGetTxLen(&txdc)));
5838
5839 /* Transmit pending packets if possible, defer it if we cannot do it
5840 in the current context. */
5841#ifdef E1K_TX_DELAY
5842 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5843 if (RT_LIKELY(rc == VINF_SUCCESS))
5844 {
5845        if (!PDMDevHlpTimerIsActive(pDevIns, pThis->hTXDTimer))
5846 {
5847# ifdef E1K_INT_STATS
5848 pThis->u64ArmedAt = RTTimeNanoTS();
5849# endif
5850 e1kArmTimer(pDevIns, pThis, pThis->hTXDTimer, E1K_TX_DELAY);
5851 }
5852 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5853 e1kCsTxLeave(pThis);
5854 return rc;
5855 }
5856 /* We failed to enter the TX critical section -- transmit as usual. */
5857#endif /* E1K_TX_DELAY */
5858#ifndef IN_RING3
5859 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5860 if (!pThisCC->CTX_SUFF(pDrv))
5861 {
5862 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
5863 rc = VINF_SUCCESS;
5864 }
5865 else
5866#endif
5867 {
5868 rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5869 if (rc == VERR_TRY_AGAIN)
5870 rc = VINF_SUCCESS;
5871#ifndef IN_RING3
5872 else if (rc == VERR_SEM_BUSY)
5873 rc = VINF_IOM_R3_MMIO_WRITE;
5874#endif
5875 AssertRC(rc);
5876 }
5877 }
5878
5879 return rc;
5880}
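/*
 * Illustrative sketch (compiled out, not used by the device): the number of
 * descriptors a TDT write hands over is the ring distance from TDH to TDT,
 * wrapping at the ring size. The hypothetical helper below shows the kind of
 * wrap arithmetic e1kGetTxLen() performs on the descriptor context.
 */
#if 0
static uint32_t e1kExampleTxRingDistance(uint32_t uTdh, uint32_t uTdt, uint32_t cDescInRing)
{
    /* E.g. with a 256-entry ring, TDH=250 and TDT=10 yields 16 descriptors. */
    return uTdt >= uTdh ? uTdt - uTdh : cDescInRing - uTdh + uTdt;
}
#endif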
5881
5882/**
5883 * Write handler for Multicast Table Array registers.
5884 *
5885 * @param pThis The device state structure.
5886 * @param offset Register offset in memory-mapped frame.
5887 * @param index Register index in register array.
5888 * @param value The value to store.
5889 * @thread EMT
5890 */
5891static int e1kRegWriteMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5892{
5893 RT_NOREF_PV(pDevIns);
5894 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5895 pThis->auMTA[(offset - g_aE1kRegMap[index].offset) / sizeof(pThis->auMTA[0])] = value;
5896
5897 return VINF_SUCCESS;
5898}
5899
5900/**
5901 * Read handler for Multicast Table Array registers.
5902 *
5903 * @returns VBox status code.
5904 *
5905 * @param pThis The device state structure.
5906 * @param offset Register offset in memory-mapped frame.
5907 * @param index Register index in register array.
5908 * @thread EMT
5909 */
5910static int e1kRegReadMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5911{
5912 RT_NOREF_PV(pDevIns);
5913 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5914 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5915
5916 return VINF_SUCCESS;
5917}
5918
5919/**
5920 * Write handler for Receive Address registers.
5921 *
5922 * @param pThis The device state structure.
5923 * @param offset Register offset in memory-mapped frame.
5924 * @param index Register index in register array.
5925 * @param value The value to store.
5926 * @thread EMT
5927 */
5928static int e1kRegWriteRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5929{
5930 RT_NOREF_PV(pDevIns);
5931 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5932 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5933
5934 return VINF_SUCCESS;
5935}
5936
5937/**
5938 * Read handler for Receive Address registers.
5939 *
5940 * @returns VBox status code.
5941 *
5942 * @param pThis The device state structure.
5943 * @param offset Register offset in memory-mapped frame.
5944 * @param index Register index in register array.
5945 * @thread EMT
5946 */
5947static int e1kRegReadRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5948{
5949 RT_NOREF_PV(pDevIns);
5950    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5951 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5952
5953 return VINF_SUCCESS;
5954}
5955
5956/**
5957 * Write handler for VLAN Filter Table Array registers.
5958 *
5959 * @param pThis The device state structure.
5960 * @param offset Register offset in memory-mapped frame.
5961 * @param index Register index in register array.
5962 * @param value The value to store.
5963 * @thread EMT
5964 */
5965static int e1kRegWriteVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5966{
5967 RT_NOREF_PV(pDevIns);
5968 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5969 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5970
5971 return VINF_SUCCESS;
5972}
5973
5974/**
5975 * Read handler for VLAN Filter Table Array registers.
5976 *
5977 * @returns VBox status code.
5978 *
5979 * @param pThis The device state structure.
5980 * @param offset Register offset in memory-mapped frame.
5981 * @param index Register index in register array.
5982 * @thread EMT
5983 */
5984static int e1kRegReadVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5985{
5986 RT_NOREF_PV(pDevIns);
5987    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5988 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5989
5990 return VINF_SUCCESS;
5991}
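/*
 * Illustrative sketch (compiled out, not used by the device): the MTA, RA and
 * VFTA registers above are backed by plain arrays of 32-bit values; the handlers
 * derive the array slot from the byte offset relative to the start of the
 * register block. The hypothetical helper below shows the index arithmetic,
 * e.g. a write at block offset 0x0C lands in element 3.
 */
#if 0
static unsigned e1kExampleArraySlot(uint32_t offReg, uint32_t offBlockStart)
{
    return (offReg - offBlockStart) / sizeof(uint32_t);
}
#endif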
5992
5993/**
5994 * Read handler for unimplemented registers.
5995 *
5996 * Merely reports reads from unimplemented registers.
5997 *
5998 * @returns VBox status code.
5999 *
6000 * @param pThis The device state structure.
6001 * @param offset Register offset in memory-mapped frame.
6002 * @param index Register index in register array.
6003 * @thread EMT
6004 */
6005static int e1kRegReadUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6006{
6007 RT_NOREF(pDevIns, pThis, offset, index);
6008 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
6009 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6010 *pu32Value = 0;
6011
6012 return VINF_SUCCESS;
6013}
6014
6015/**
6016 * Default register read handler with automatic clear operation.
6017 *
6018 * Retrieves the value of the register from the register array in the device state
6019 * structure, then clears it (read-to-clear semantics).
6020 *
6021 * @remarks Masking and shifting for partial (8 and 16-bit) accesses is
6022 * done in the caller.
6023 *
6024 * @returns VBox status code.
6025 *
6026 * @param pThis The device state structure.
6027 * @param offset Register offset in memory-mapped frame.
6028 * @param index Register index in register array.
6029 * @thread EMT
6030 */
6031static int e1kRegReadAutoClear(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6032{
6033 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
6034 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, pu32Value);
6035 pThis->auRegs[index] = 0;
6036
6037 return rc;
6038}
6039
6040/**
6041 * Default register read handler.
6042 *
6043 * Retrieves the value of the register from the register array in the device state
6044 * structure. Bits corresponding to 0s in the 'readable' mask always read as 0s.
6045 *
6046 * @remarks Masking and shifting for partial (8 and 16-bit) accesses is
6047 * done in the caller.
6048 *
6049 * @returns VBox status code.
6050 *
6051 * @param pThis The device state structure.
6052 * @param offset Register offset in memory-mapped frame.
6053 * @param index Register index in register array.
6054 * @thread EMT
6055 */
6056static int e1kRegReadDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6057{
6058 RT_NOREF_PV(pDevIns); RT_NOREF_PV(offset);
6059
6060 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
6061 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
6062
6063 return VINF_SUCCESS;
6064}
6065
6066/**
6067 * Write handler for unimplemented registers.
6068 *
6069 * Merely reports writes to unimplemented registers.
6070 *
6071 * @param pThis The device state structure.
6072 * @param offset Register offset in memory-mapped frame.
6073 * @param index Register index in register array.
6074 * @param value The value to store.
6075 * @thread EMT
6076 */
6078static int e1kRegWriteUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
6079{
6080 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
6081
6082 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
6083 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6084
6085 return VINF_SUCCESS;
6086}
6087
6088/**
6089 * Default register write handler.
6090 *
6091 * Stores the value to the register array in the device state structure. Only
6092 * bits set in the register's 'writable' mask are updated.
6093 *
6094 * @returns VBox status code.
6095 *
6096 * @param pThis The device state structure.
6097 * @param offset Register offset in memory-mapped frame.
6098 * @param index Register index in register array.
6099 * @param value The value to store.
6100 * @remarks There is no 'mask' parameter; per the spec, partial writes are ignored.
6101 * @thread EMT
6102 */
6103
6104static int e1kRegWriteDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
6105{
6106 RT_NOREF(pDevIns, offset);
6107
6108 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
6109 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
6110 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
6111
6112 return VINF_SUCCESS;
6113}
6114
6115/**
6116 * Search register table for matching register.
6117 *
6118 * @returns Index in the register table or -1 if not found.
6119 *
6120 * @param offReg Register offset in memory-mapped region.
6121 * @thread EMT
6122 */
6123static int e1kRegLookup(uint32_t offReg)
6124{
6125
6126#if 0
6127 int index;
6128
6129 for (index = 0; index < E1K_NUM_OF_REGS; index++)
6130 {
6131 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
6132 {
6133 return index;
6134 }
6135 }
6136#else
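 /*
  * The first E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap are sorted by
  * offset, so we bisect them first; any remaining entries are scanned linearly
  * further down if the bisection finds nothing.
  */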
6137 int iStart = 0;
6138 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
6139 for (;;)
6140 {
6141 int i = (iEnd - iStart) / 2 + iStart;
6142 uint32_t offCur = g_aE1kRegMap[i].offset;
6143 if (offReg < offCur)
6144 {
6145 if (i == iStart)
6146 break;
6147 iEnd = i;
6148 }
6149 else if (offReg >= offCur + g_aE1kRegMap[i].size)
6150 {
6151 i++;
6152 if (i == iEnd)
6153 break;
6154 iStart = i;
6155 }
6156 else
6157 return i;
6158 Assert(iEnd > iStart);
6159 }
6160
6161 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
6162 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
6163 return (int)i;
6164
6165# ifdef VBOX_STRICT
6166 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
6167 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
6168# endif
6169
6170#endif
6171
6172 return -1;
6173}
6174
6175/**
6176 * Handle unaligned register read operation.
6177 *
6178 * Looks up and calls appropriate handler.
6179 *
6180 * @returns VBox status code.
6181 *
6182 * @param pDevIns The device instance.
6183 * @param pThis The device state structure.
6184 * @param offReg Register offset in memory-mapped frame.
6185 * @param pv Where to store the result.
6186 * @param cb Number of bytes to read.
6187 * @thread EMT
6188 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
6189 * accesses we have to take care of that ourselves.
6190 */
6191static int e1kRegReadUnaligned(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
6192{
6193 uint32_t u32 = 0;
6194 uint32_t shift;
6195 int rc = VINF_SUCCESS;
6196 int index = e1kRegLookup(offReg);
6197#ifdef LOG_ENABLED
6198 char buf[9];
6199#endif
6200
6201 /*
6202 * From the spec:
6203 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
6204 * double word) are ignored. Partial reads return all 32 bits of data regardless of the byte enables.
6205 */
6206
6207 /*
6208 * To be able to read bytes and short word we convert them to properly
6209 * shifted 32-bit words and masks. The idea is to keep register-specific
6210 * handlers simple. Most accesses will be 32-bit anyway.
6211 */
6212 uint32_t mask;
6213 switch (cb)
6214 {
6215 case 4: mask = 0xFFFFFFFF; break;
6216 case 2: mask = 0x0000FFFF; break;
6217 case 1: mask = 0x000000FF; break;
6218 default:
6219 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
6220 }
6221 if (index >= 0)
6222 {
6223 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6224 if (g_aE1kRegMap[index].readable)
6225 {
6226 /* Make the mask correspond to the bits we are about to read. */
6227 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
6228 mask <<= shift;
6229 if (!mask)
6230 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
6231 /*
6232 * Read it. Pass the mask so the handler knows what has to be read.
6233 * Mask out irrelevant bits.
6234 */
6235 //e1kCsEnterReturn(pThis, VERR_SEM_BUSY);
6236 //pThis->fDelayInts = false;
6237 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6238 //pThis->iStatIntLostOne = 0;
6239 rc = g_aE1kRegMap[index].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, (uint32_t)index, &u32);
6240 u32 &= mask;
6241 //e1kCsLeave(pThis);
6242 E1kLog2(("%s At %08X read %s from %s (%s)\n",
6243 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6244 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
6245 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6246 /* Shift back the result. */
6247 u32 >>= shift;
6248 }
6249 else
6250 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
6251 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6252 if (IOM_SUCCESS(rc))
6253 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
6254 }
6255 else
6256 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
6257 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
6258
6259 memcpy(pv, &u32, cb);
6260 return rc;
6261}
6262
6263/**
6264 * Handle 4 byte aligned and sized read operation.
6265 *
6266 * Looks up and calls appropriate handler.
6267 *
6268 * @returns VBox status code.
6269 *
6270 * @param pDevIns The device instance.
6271 * @param pThis The device state structure.
6272 * @param offReg Register offset in memory-mapped frame.
6273 * @param pu32 Where to store the result.
6274 * @thread EMT
6275 */
6276static VBOXSTRICTRC e1kRegReadAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
6277{
6278 Assert(!(offReg & 3));
6279
6280 /*
6281 * Lookup the register and check that it's readable.
6282 */
6283 VBOXSTRICTRC rc = VINF_SUCCESS;
6284 int idxReg = e1kRegLookup(offReg);
6285 if (RT_LIKELY(idxReg >= 0))
6286 {
6287 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6288 if (RT_LIKELY(g_aE1kRegMap[idxReg].readable))
6289 {
6290 /*
6291 * Read it. Pass the mask so the handler knows what has to be read.
6292 * Mask out irrelevant bits.
6293 */
6294 //e1kCsEnterReturn(pThis, VERR_SEM_BUSY);
6295 //pThis->fDelayInts = false;
6296 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6297 //pThis->iStatIntLostOne = 0;
6298 rc = g_aE1kRegMap[idxReg].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, (uint32_t)idxReg, pu32);
6299 //e1kCsLeave(pThis);
6300 Log6(("%s At %08X read %08X from %s (%s)\n",
6301 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6302 if (IOM_SUCCESS(rc))
6303 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
6304 }
6305 else
6306 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
6307 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6308 }
6309 else
6310 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
6311 return rc;
6312}
6313
6314/**
6315 * Handle 4 byte sized and aligned register write operation.
6316 *
6317 * Looks up and calls appropriate handler.
6318 *
6319 * @returns VBox status code.
6320 *
6321 * @param pDevIns The device instance.
6322 * @param pThis The device state structure.
6323 * @param offReg Register offset in memory-mapped frame.
6324 * @param u32Value The value to write.
6325 * @thread EMT
6326 */
6327static VBOXSTRICTRC e1kRegWriteAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
6328{
6329 VBOXSTRICTRC rc = VINF_SUCCESS;
6330 int index = e1kRegLookup(offReg);
6331 if (RT_LIKELY(index >= 0))
6332 {
6333 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6334 if (RT_LIKELY(g_aE1kRegMap[index].writable))
6335 {
6336 /*
6337 * Write it. Pass the mask so the handler knows what has to be written.
6338 * Mask out irrelevant bits.
6339 */
6340 Log6(("%s At %08X write %08X to %s (%s)\n",
6341 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6342 //e1kCsEnterReturn(pThis, VERR_SEM_BUSY);
6343 //pThis->fDelayInts = false;
6344 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6345 //pThis->iStatIntLostOne = 0;
6346 rc = g_aE1kRegMap[index].pfnWrite(pDevIns, pThis, offReg, (uint32_t)index, u32Value);
6347 //e1kCsLeave(pThis);
6348 }
6349 else
6350 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
6351 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6352 if (IOM_SUCCESS(rc))
6353 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
6354 }
6355 else
6356 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
6357 pThis->szPrf, offReg, u32Value));
6358 return rc;
6359}
6360
6361
6362/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
6363
6364/**
6365 * @callback_method_impl{FNIOMMMIONEWREAD}
6366 */
6367static DECLCALLBACK(VBOXSTRICTRC) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, uint32_t cb)
6368{
6369 RT_NOREF2(pvUser, cb);
6370 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6371 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6372
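 /* IOM is set up (at region registration, not shown here) to hand us only
    dword-sized, dword-aligned accesses, hence the asserts below. */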
6373 Assert(off < E1K_MM_SIZE);
6374 Assert(cb == 4);
6375 Assert(!(off & 3));
6376
6377 VBOXSTRICTRC rcStrict = e1kRegReadAlignedU32(pDevIns, pThis, (uint32_t)off, (uint32_t *)pv);
6378
6379 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6380 return rcStrict;
6381}
6382
6383/**
6384 * @callback_method_impl{FNIOMMMIONEWWRITE}
6385 */
6386static DECLCALLBACK(VBOXSTRICTRC) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, uint32_t cb)
6387{
6388 RT_NOREF2(pvUser, cb);
6389 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6390 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6391
6392 Assert(off < E1K_MM_SIZE);
6393 Assert(cb == 4);
6394 Assert(!(off & 3));
6395
6396 VBOXSTRICTRC rcStrict = e1kRegWriteAlignedU32(pDevIns, pThis, (uint32_t)off, *(uint32_t const *)pv);
6397
6398 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6399 return rcStrict;
6400}
6401
6402/**
6403 * @callback_method_impl{FNIOMIOPORTNEWIN}
6404 */
6405static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
6406{
6407 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6408 VBOXSTRICTRC rc;
6409 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
6410 RT_NOREF_PV(pvUser);
6411
6412 if (RT_LIKELY(cb == 4))
6413 switch (offPort)
6414 {
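 /*
  * The I/O space provides indirect access to the register set: the guest first
  * writes a register offset to IOADDR (base + 0) and then reads or writes the
  * selected register's value through IODATA (base + 4).
  */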
6415 case 0x00: /* IOADDR */
6416 *pu32 = pThis->uSelectedReg;
6417 Log9(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6418 rc = VINF_SUCCESS;
6419 break;
6420
6421 case 0x04: /* IODATA */
6422 if (!(pThis->uSelectedReg & 3))
6423 rc = e1kRegReadAlignedU32(pDevIns, pThis, pThis->uSelectedReg, pu32);
6424 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
6425 rc = e1kRegReadUnaligned(pDevIns, pThis, pThis->uSelectedReg, pu32, cb);
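 /* The register handlers are shared with the MMIO path and may ask to be
    rerun in ring-3 with an MMIO status code; translate it for I/O ports. */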
6426 if (rc == VINF_IOM_R3_MMIO_READ)
6427 rc = VINF_IOM_R3_IOPORT_READ;
6428 Log9(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6429 break;
6430
6431 default:
6432 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, offPort));
6433 /** @todo r=bird: Check what real hardware returns here. */
6434 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
6435 rc = VINF_IOM_MMIO_UNUSED_00; /* used to return VINF_SUCCESS and not touch *pu32, which amounted to this. */
6436 break;
6437 }
6438 else
6439 {
6440 E1kLog(("%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x", pThis->szPrf, offPort, cb));
6441 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb);
6442 *pu32 = 0; /** @todo r=bird: Check what real hardware returns here. (Didn't used to set a value here, picked zero as that's what we'd end up in most cases.) */
6443 }
6444 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
6445 return rc;
6446}
6447
6448
6449/**
6450 * @callback_method_impl{FNIOMIOPORTNEWOUT}
6451 */
6452static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
6453{
6454 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6455 VBOXSTRICTRC rc;
6456 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6457 RT_NOREF_PV(pvUser);
6458
6459 Log9(("%s e1kIOPortOut: offPort=%RTiop value=%08x\n", pThis->szPrf, offPort, u32));
6460 if (RT_LIKELY(cb == 4))
6461 {
6462 switch (offPort)
6463 {
6464 case 0x00: /* IOADDR */
6465 pThis->uSelectedReg = u32;
6466 Log9(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6467 rc = VINF_SUCCESS;
6468 break;
6469
6470 case 0x04: /* IODATA */
6471 Log9(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6472 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6473 {
6474 rc = e1kRegWriteAlignedU32(pDevIns, pThis, pThis->uSelectedReg, u32);
6475 if (rc == VINF_IOM_R3_MMIO_WRITE)
6476 rc = VINF_IOM_R3_IOPORT_WRITE;
6477 }
6478 else
6479 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
6480 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6481 break;
6482
6483 default:
6484 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, offPort));
6485 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", offPort);
6486 }
6487 }
6488 else
6489 {
6490 E1kLog(("%s e1kIOPortOut: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb));
6491 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: offPort=%RTiop cb=%#x\n", pThis->szPrf, offPort, cb);
6492 }
6493
6494 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6495 return rc;
6496}
6497
6498#ifdef IN_RING3
6499
6500/**
6501 * Dump complete device state to log.
6502 *
6503 * @param pThis Pointer to device state.
6504 */
6505static void e1kDumpState(PE1KSTATE pThis)
6506{
6507 RT_NOREF(pThis);
6508 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6509 E1kLog2(("%s: %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6510# ifdef E1K_INT_STATS
6511 LogRel(("%s: Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6512 LogRel(("%s: Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6513 LogRel(("%s: Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6514 LogRel(("%s: ICR outside ISR : %d\n", pThis->szPrf, pThis->uStatNoIntICR));
6515 LogRel(("%s: IMS raised ints : %d\n", pThis->szPrf, pThis->uStatIntIMS));
6516 LogRel(("%s: Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6517 LogRel(("%s: Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6518 LogRel(("%s: Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6519 LogRel(("%s: Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6520 LogRel(("%s: Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6521 LogRel(("%s: Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6522 LogRel(("%s: Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6523 LogRel(("%s: Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6524 LogRel(("%s: Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6525 LogRel(("%s: Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6526 LogRel(("%s: Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6527 LogRel(("%s: TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6528 LogRel(("%s: TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6529 LogRel(("%s: TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6530 LogRel(("%s: TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6531 LogRel(("%s: TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6532 LogRel(("%s: TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6533 LogRel(("%s: RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6534 LogRel(("%s: RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6535 LogRel(("%s: TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6536 LogRel(("%s: TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6537 LogRel(("%s: TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6538 LogRel(("%s: Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6539 LogRel(("%s: Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6540 LogRel(("%s: TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6541 LogRel(("%s: TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6542 LogRel(("%s: TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6543 LogRel(("%s: TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6544 LogRel(("%s: TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6545 LogRel(("%s: TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6546 LogRel(("%s: TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6547 LogRel(("%s: TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6548 LogRel(("%s: Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6549 LogRel(("%s: Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6550# endif /* E1K_INT_STATS */
6551}
6552
6553
6554/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6555
6556/**
6557 * Check if the device can receive data now.
6558 * This must be called before the pfnReceive() method is called.
6559 *
6560 * @returns VBox status code.
6561 * @retval VERR_NET_NO_BUFFER_SPACE if we cannot receive.
6562 * @param pDevIns The device instance.
6563 * @param pThis The instance data.
6564 * @thread EMT
6565 */
6566static int e1kR3CanReceive(PPDMDEVINS pDevIns, PE1KSTATE pThis)
6567{
6568# ifndef E1K_WITH_RXD_CACHE
6569 size_t cb;
6570
6571 e1kCsRxEnterReturn(pThis);
6572
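 /*
  * With a single-descriptor ring RDH always equals RDT, so the head/tail
  * arithmetic below cannot tell "full" from "empty"; peek at the DD bit of
  * the lone descriptor instead.
  */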
6573 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6574 {
6575 E1KRXDESC desc;
6576 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
6577 if (desc.status.fDD)
6578 cb = 0;
6579 else
6580 cb = pThis->u16RxBSize;
6581 }
6582 else if (RDH < RDT)
6583 cb = (RDT - RDH) * pThis->u16RxBSize;
6584 else if (RDH > RDT)
6585 cb = (RDLEN / sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6586 else
6587 {
6588 cb = 0;
6589 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6590 }
6591 E1kLog2(("%s e1kR3CanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6592 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6593
6594 e1kCsRxLeave(pThis);
6595 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6596# else /* E1K_WITH_RXD_CACHE */
6597
6598 e1kCsRxEnterReturn(pThis);
6599
6600 E1KRXDC rxdc;
6601 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kR3CanReceive")))
6602 {
6603 e1kCsRxLeave(pThis);
6604 E1kLog(("%s e1kR3CanReceive: failed to update Rx context, returning VERR_NET_NO_BUFFER_SPACE\n", pThis->szPrf));
6605 return VERR_NET_NO_BUFFER_SPACE;
6606 }
6607
6608 int rc = VINF_SUCCESS;
6609 if (RT_UNLIKELY(rxdc.rdlen == sizeof(E1KRXDESC)))
6610 {
6611 E1KRXDESC desc;
6612 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, rxdc.rdh), &desc, sizeof(desc));
6613 if (desc.status.fDD)
6614 rc = VERR_NET_NO_BUFFER_SPACE;
6615 }
6616 else if (e1kRxDIsCacheEmpty(pThis) && rxdc.rdh == rxdc.rdt)
6617 {
6618 /* Cache is empty, so is the RX ring. */
6619 rc = VERR_NET_NO_BUFFER_SPACE;
6620 }
6621 E1kLog2(("%s e1kR3CanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6622 e1kRxDInCache(pThis), rxdc.rdh, rxdc.rdt, rxdc.rdlen, pThis->u16RxBSize, rc));
6623
6624 e1kCsRxLeave(pThis);
6625 return rc;
6626# endif /* E1K_WITH_RXD_CACHE */
6627}
6628
6629/**
6630 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6631 */
6632static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6633{
6634 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6635 PE1KSTATE pThis = pThisCC->pShared;
6636 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6637
6638 int rc = e1kR3CanReceive(pDevIns, pThis);
6639 if (RT_SUCCESS(rc))
6640 return VINF_SUCCESS;
6641
6642 if (RT_UNLIKELY(cMillies == 0))
6643 return VERR_NET_NO_BUFFER_SPACE;
6644
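 /*
  * Flag that we are about to block waiting for receive descriptors, so that
  * the device knows to signal hEventMoreRxDescAvail (e1kWakeupReceive is used
  * for this, see e.g. e1kR3PowerOff) once buffer space becomes available again.
  */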
6645 rc = VERR_INTERRUPTED;
6646 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6647 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6648 VMSTATE enmVMState;
6649 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pDevIns)) == VMSTATE_RUNNING
6650 || enmVMState == VMSTATE_RUNNING_LS))
6651 {
6652 int rc2 = e1kR3CanReceive(pDevIns, pThis);
6653 if (RT_SUCCESS(rc2))
6654 {
6655 rc = VINF_SUCCESS;
6656 break;
6657 }
6658 E1kLogRel(("E1000: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6659 E1kLog(("%s: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6660 PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pThis->hEventMoreRxDescAvail, cMillies);
6661 }
6662 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6663 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6664
6665 return rc;
6666}
6667
6668
6669/**
6670 * Matches the packet addresses against Receive Address table. Looks for
6671 * exact matches only.
6672 *
6673 * @returns true if address matches.
6674 * @param pThis Pointer to the state structure.
6675 * @param pvBuf The ethernet packet.
6677 * @thread EMT
6678 */
6679static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6680{
6681 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6682 {
6683 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6684
6685 /* Valid address? */
6686 if (ra->ctl & RA_CTL_AV)
6687 {
6688 Assert((ra->ctl & RA_CTL_AS) < 2);
6689 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6690 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6691 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6692 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6693 /*
6694 * Address Select:
6695 * 00b = Destination address
6696 * 01b = Source address
6697 * 10b = Reserved
6698 * 11b = Reserved
6699 * Since ethernet header is (DA, SA, len) we can use address
6700 * select as index.
6701 */
6702 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6703 ra->addr, sizeof(ra->addr)) == 0)
6704 return true;
6705 }
6706 }
6707
6708 return false;
6709}
6710
6711/**
6712 * Matches the packet addresses against Multicast Table Array.
6713 *
6714 * @remarks This is an imperfect match since it matches a subset of addresses
6715 * rather than an exact address.
6716 *
6717 * @returns true if address matches.
6718 * @param pThis Pointer to the state structure.
6719 * @param pvBuf The ethernet packet.
6721 * @thread EMT
6722 */
6723static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6724{
6725 /* Get bits 32..47 of destination address */
6726 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6727
6728 unsigned offset = GET_BITS(RCTL, MO);
6729 /*
6730 * offset means:
6731 * 00b = bits 36..47
6732 * 01b = bits 35..46
6733 * 10b = bits 34..45
6734 * 11b = bits 32..43
6735 */
6736 if (offset < 3)
6737 u16Bit = u16Bit >> (4 - offset);
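 /* The resulting 12-bit value is a bit index into the 4096-bit Multicast Table Array. */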
6738 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6739}
6740
6741/**
6742 * Determines if the packet is to be delivered to upper layer.
6743 *
6744 * The following filters are supported:
6745 * - Exact Unicast/Multicast
6746 * - Promiscuous Unicast/Multicast
6747 * - Multicast
6748 * - VLAN
6749 *
6750 * @returns true if packet is intended for this node.
6751 * @param pThis Pointer to the state structure.
6752 * @param pvBuf The ethernet packet.
6753 * @param cb Number of bytes available in the packet.
6754 * @param pStatus Bit field to store status bits.
6755 * @thread EMT
6756 */
6757static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6758{
6759 Assert(cb > 14);
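 /* 14 bytes = destination MAC + source MAC + type/length, i.e. the part of the
    Ethernet header the filters below need to look at. */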
6760 /* Assume that we fail to pass exact filter. */
6761 pStatus->fPIF = false;
6762 pStatus->fVP = false;
6763 /* Discard oversized packets */
6764 if (cb > E1K_MAX_RX_PKT_SIZE)
6765 {
6766 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6767 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6768 E1K_INC_CNT32(ROC);
6769 return false;
6770 }
6771 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6772 {
6773 /* When long packet reception is disabled packets over 1522 are discarded */
6774 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6775 pThis->szPrf, cb));
6776 E1K_INC_CNT32(ROC);
6777 return false;
6778 }
6779
6780 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6781 /* Compare TPID with VLAN Ether Type */
6782 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6783 {
6784 pStatus->fVP = true;
6785 /* Is VLAN filtering enabled? */
6786 if (RCTL & RCTL_VFE)
6787 {
6788 /* It is 802.1q packet indeed, let's filter by VID */
6789 if (RCTL & RCTL_CFIEN)
6790 {
6791 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6792 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6793 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6794 !!(RCTL & RCTL_CFI)));
6795 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6796 {
6797 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6798 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6799 return false;
6800 }
6801 }
6802 else
6803 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6804 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6805 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6806 {
6807 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6808 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6809 return false;
6810 }
6811 }
6812 }
6813 /* Broadcast filtering */
6814 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6815 return true;
6816 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6817 if (e1kIsMulticast(pvBuf))
6818 {
6819 /* Is multicast promiscuous enabled? */
6820 if (RCTL & RCTL_MPE)
6821 return true;
6822 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6823 /* Try perfect matches first */
6824 if (e1kPerfectMatch(pThis, pvBuf))
6825 {
6826 pStatus->fPIF = true;
6827 return true;
6828 }
6829 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6830 if (e1kImperfectMatch(pThis, pvBuf))
6831 return true;
6832 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6833 }
6834 else
 {
6835 /* Is unicast promiscuous enabled? */
6836 if (RCTL & RCTL_UPE)
6837 return true;
6838 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6839 if (e1kPerfectMatch(pThis, pvBuf))
6840 {
6841 pStatus->fPIF = true;
6842 return true;
6843 }
6844 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6845 }
6846 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6847 return false;
6848}
6849
6850/**
6851 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6852 */
6853static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6854{
6855 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6856 PE1KSTATE pThis = pThisCC->pShared;
6857 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6858 int rc = VINF_SUCCESS;
6859
6860 /*
6861 * Drop packets if the VM is not running yet/anymore.
6862 */
6863 VMSTATE enmVMState = PDMDevHlpVMState(pDevIns);
6864 if ( enmVMState != VMSTATE_RUNNING
6865 && enmVMState != VMSTATE_RUNNING_LS)
6866 {
6867 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6868 return VINF_SUCCESS;
6869 }
6870
6871 /* Discard incoming packets when receive is disabled, we are locked, or the link is down. */
6872 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6873 {
6874 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6875 return VINF_SUCCESS;
6876 }
6877
6878 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6879
6880 //e1kR3CsEnterAsserted(pThis);
6881
6882 e1kPacketDump(pDevIns, pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6883
6884 /* Update stats */
6885 e1kR3CsEnterAsserted(pThis);
6886 E1K_INC_CNT32(TPR);
6887 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6888 e1kCsLeave(pThis);
6889
6890 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6891 E1KRXDST status;
6892 RT_ZERO(status);
6893 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6894 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6895 if (fPassed)
6896 {
6897 rc = e1kHandleRxPacket(pDevIns, pThis, pvBuf, cb, status);
6898 }
6899 //e1kCsLeave(pThis);
6900 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6901
6902 return rc;
6903}
6904
6905
6906/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6907
6908/**
6909 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6910 */
6911static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6912{
6913 if (iLUN == 0)
6914 {
6915 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, ILeds);
6916 *ppLed = &pThisCC->pShared->led;
6917 return VINF_SUCCESS;
6918 }
6919 return VERR_PDM_LUN_NOT_FOUND;
6920}
6921
6922
6923/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6924
6925/**
6926 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6927 */
6928static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6929{
6930 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6931 pThisCC->eeprom.getMac(pMac);
6932 return VINF_SUCCESS;
6933}
6934
6935/**
6936 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6937 */
6938static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6939{
6940 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6941 PE1KSTATE pThis = pThisCC->pShared;
6942 if (STATUS & STATUS_LU)
6943 return PDMNETWORKLINKSTATE_UP;
6944 return PDMNETWORKLINKSTATE_DOWN;
6945}
6946
6947/**
6948 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6949 */
6950static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6951{
6952 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
6953 PE1KSTATE pThis = pThisCC->pShared;
6954 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6955
6956 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6957 switch (enmState)
6958 {
6959 case PDMNETWORKLINKSTATE_UP:
6960 pThis->fCableConnected = true;
6961 /* If link was down, bring it up after a while. */
6962 if (!(STATUS & STATUS_LU))
6963 e1kBringLinkUpDelayed(pDevIns, pThis);
6964 break;
6965 case PDMNETWORKLINKSTATE_DOWN:
6966 pThis->fCableConnected = false;
6967 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6968 * We might have to set the link state before the driver initializes us. */
6969 Phy::setLinkStatus(&pThis->phy, false);
6970 /* If link was up, bring it down. */
6971 if (STATUS & STATUS_LU)
6972 e1kR3LinkDown(pDevIns, pThis, pThisCC);
6973 break;
6974 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6975 /*
6976 * There is not much sense in bringing down the link if it has not come up yet.
6977 * If it is up though, we bring it down temporarily, then bring it up again.
6978 */
6979 if (STATUS & STATUS_LU)
6980 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
6981 break;
6982 default:
6983 ;
6984 }
6985 return VINF_SUCCESS;
6986}
6987
6988
6989/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6990
6991/**
6992 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6993 */
6994static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6995{
6996 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, IBase);
6997 Assert(&pThisCC->IBase == pInterface);
6998
6999 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThisCC->IBase);
7000 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThisCC->INetworkDown);
7001 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThisCC->INetworkConfig);
7002 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThisCC->ILeds);
7003 return NULL;
7004}
7005
7006
7007/* -=-=-=-=- Saved State -=-=-=-=- */
7008
7009/**
7010 * Saves the configuration.
7011 *
7012 * @param pThis The E1K state.
7013 * @param pSSM The handle to the saved state.
7014 */
7015static void e1kR3SaveConfig(PCPDMDEVHLPR3 pHlp, PE1KSTATE pThis, PSSMHANDLE pSSM)
7016{
7017 pHlp->pfnSSMPutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
7018 pHlp->pfnSSMPutU32(pSSM, pThis->eChip);
7019}
7020
7021/**
7022 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
7023 */
7024static DECLCALLBACK(int) e1kR3LiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
7025{
7026 RT_NOREF(uPass);
7027 e1kR3SaveConfig(pDevIns->pHlpR3, PDMDEVINS_2_DATA(pDevIns, PE1KSTATE), pSSM);
7028 return VINF_SSM_DONT_CALL_AGAIN;
7029}
7030
7031/**
7032 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
7033 */
7034static DECLCALLBACK(int) e1kR3SavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7035{
7036 RT_NOREF(pSSM);
7037 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7038
7039 e1kCsEnterReturn(pThis, VERR_SEM_BUSY);
7040 e1kCsLeave(pThis);
7041 return VINF_SUCCESS;
7042#if 0
7043 /* 1) Prevent all threads from modifying the state and memory */
7044 //pThis->fLocked = true;
7045 /* 2) Cancel all timers */
7046#ifdef E1K_TX_DELAY
7047 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
7048#endif /* E1K_TX_DELAY */
7049//#ifdef E1K_USE_TX_TIMERS
7050 if (pThis->fTidEnabled)
7051 {
7052 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
7053#ifndef E1K_NO_TAD
7054 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
7055#endif /* E1K_NO_TAD */
7056 }
7057//#endif /* E1K_USE_TX_TIMERS */
7058#ifdef E1K_USE_RX_TIMERS
7059 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
7060 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
7061#endif /* E1K_USE_RX_TIMERS */
7062 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
7063 /* 3) Did I forget anything? */
7064 E1kLog(("%s Locked\n", pThis->szPrf));
7065 return VINF_SUCCESS;
7066#endif
7067}
7068
7069/**
7070 * @callback_method_impl{FNSSMDEVSAVEEXEC}
7071 */
7072static DECLCALLBACK(int) e1kR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7073{
7074 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7075 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7076 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
7077
7078 e1kR3SaveConfig(pHlp, pThis, pSSM);
7079 pThisCC->eeprom.save(pHlp, pSSM);
7080 e1kDumpState(pThis);
7081 pHlp->pfnSSMPutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
7082 pHlp->pfnSSMPutBool(pSSM, pThis->fIntRaised);
7083 Phy::saveState(pHlp, pSSM, &pThis->phy);
7084 pHlp->pfnSSMPutU32(pSSM, pThis->uSelectedReg);
7085 pHlp->pfnSSMPutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
7086 pHlp->pfnSSMPutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
7087 pHlp->pfnSSMPutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
7088 pHlp->pfnSSMPutU64(pSSM, pThis->u64AckedAt);
7089 pHlp->pfnSSMPutU16(pSSM, pThis->u16RxBSize);
7090 //pHlp->pfnSSMPutBool(pSSM, pThis->fDelayInts);
7091 //pHlp->pfnSSMPutBool(pSSM, pThis->fIntMaskUsed);
7092 pHlp->pfnSSMPutU16(pSSM, pThis->u16TxPktLen);
7093/** @todo State wrt the TSE buffer is incomplete, so little point in
7094 * saving this actually. */
7095 pHlp->pfnSSMPutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
7096 pHlp->pfnSSMPutBool(pSSM, pThis->fIPcsum);
7097 pHlp->pfnSSMPutBool(pSSM, pThis->fTCPcsum);
7098 pHlp->pfnSSMPutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
7099 pHlp->pfnSSMPutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
7100 pHlp->pfnSSMPutBool(pSSM, pThis->fVTag);
7101 pHlp->pfnSSMPutU16(pSSM, pThis->u16VTagTCI);
7102#ifdef E1K_WITH_TXD_CACHE
7103# if 0
7104 pHlp->pfnSSMPutU8(pSSM, pThis->nTxDFetched);
7105 pHlp->pfnSSMPutMem(pSSM, pThis->aTxDescriptors,
7106 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
7107# else
7108 /*
7109 * There is no point in storing TX descriptor cache entries as we can simply
7110 * fetch them again. Moreover, normally the cache is always empty when we
7111 * save the state. Store zero entries for compatibility.
7112 */
7113 pHlp->pfnSSMPutU8(pSSM, 0);
7114# endif
7115#endif /* E1K_WITH_TXD_CACHE */
7116/** @todo GSO requires some more state here. */
7117 E1kLog(("%s State has been saved\n", pThis->szPrf));
7118 return VINF_SUCCESS;
7119}
7120
7121#if 0
7122/**
7123 * @callback_method_impl{FNSSMDEVSAVEDONE}
7124 */
7125static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7126{
7127 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7128
7129 /* If VM is being powered off unlocking will result in assertions in PGM */
7130 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
7131 pThis->fLocked = false;
7132 else
7133 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
7134 E1kLog(("%s Unlocked\n", pThis->szPrf));
7135 return VINF_SUCCESS;
7136}
7137#endif
7138
7139/**
7140 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
7141 */
7142static DECLCALLBACK(int) e1kR3LoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7143{
7144 RT_NOREF(pSSM);
7145 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7146
7147 e1kCsEnterReturn(pThis, VERR_SEM_BUSY);
7148 e1kCsLeave(pThis);
7149 return VINF_SUCCESS;
7150}
7151
7152/**
7153 * @callback_method_impl{FNSSMDEVLOADEXEC}
7154 */
7155static DECLCALLBACK(int) e1kR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
7156{
7157 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7158 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7159 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
7160 int rc;
7161
7162 if ( uVersion != E1K_SAVEDSTATE_VERSION
7163#ifdef E1K_WITH_TXD_CACHE
7164 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
7165#endif /* E1K_WITH_TXD_CACHE */
7166 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
7167 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
7168 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
7169
7170 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
7171 || uPass != SSM_PASS_FINAL)
7172 {
7173 /* config checks */
7174 RTMAC macConfigured;
7175 rc = pHlp->pfnSSMGetMem(pSSM, &macConfigured, sizeof(macConfigured));
7176 AssertRCReturn(rc, rc);
7177 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
7178 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
7179 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
7180
7181 E1KCHIP eChip;
7182 rc = pHlp->pfnSSMGetU32(pSSM, &eChip);
7183 AssertRCReturn(rc, rc);
7184 if (eChip != pThis->eChip)
7185 return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
7186 }
7187
7188 if (uPass == SSM_PASS_FINAL)
7189 {
7190 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
7191 {
7192 rc = pThisCC->eeprom.load(pHlp, pSSM);
7193 AssertRCReturn(rc, rc);
7194 }
7195 /* the state */
7196 pHlp->pfnSSMGetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
7197 pHlp->pfnSSMGetBool(pSSM, &pThis->fIntRaised);
7198 /** @todo PHY could be made a separate device with its own versioning */
7199 Phy::loadState(pHlp, pSSM, &pThis->phy);
7200 pHlp->pfnSSMGetU32(pSSM, &pThis->uSelectedReg);
7201 pHlp->pfnSSMGetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
7202 pHlp->pfnSSMGetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
7203 pHlp->pfnSSMGetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
7204 pHlp->pfnSSMGetU64(pSSM, &pThis->u64AckedAt);
7205 pHlp->pfnSSMGetU16(pSSM, &pThis->u16RxBSize);
7206 //pHlp->pfnSSMGetBool(pSSM, pThis->fDelayInts);
7207 //pHlp->pfnSSMGetBool(pSSM, pThis->fIntMaskUsed);
7208 rc = pHlp->pfnSSMGetU16(pSSM, &pThis->u16TxPktLen);
7209 AssertRCReturn(rc, rc);
7210 if (pThis->u16TxPktLen > sizeof(pThis->aTxPacketFallback))
7211 pThis->u16TxPktLen = sizeof(pThis->aTxPacketFallback);
7212 pHlp->pfnSSMGetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
7213 pHlp->pfnSSMGetBool(pSSM, &pThis->fIPcsum);
7214 pHlp->pfnSSMGetBool(pSSM, &pThis->fTCPcsum);
7215 pHlp->pfnSSMGetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
7216 rc = pHlp->pfnSSMGetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
7217 AssertRCReturn(rc, rc);
7218 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
7219 {
7220 pHlp->pfnSSMGetBool(pSSM, &pThis->fVTag);
7221 rc = pHlp->pfnSSMGetU16(pSSM, &pThis->u16VTagTCI);
7222 AssertRCReturn(rc, rc);
7223 }
7224 else
7225 {
7226 pThis->fVTag = false;
7227 pThis->u16VTagTCI = 0;
7228 }
7229#ifdef E1K_WITH_TXD_CACHE
7230 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
7231 {
7232 rc = pHlp->pfnSSMGetU8(pSSM, &pThis->nTxDFetched);
7233 AssertRCReturn(rc, rc);
7234 if (pThis->nTxDFetched)
7235 pHlp->pfnSSMGetMem(pSSM, pThis->aTxDescriptors,
7236 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
7237 }
7238 else
7239 pThis->nTxDFetched = 0;
7240 /**
7241 * @todo Perhaps we should not store TXD cache as the entries can be
7242 * simply fetched again from the guest's memory. Or can't they?
7243 */
7244#endif /* E1K_WITH_TXD_CACHE */
7245#ifdef E1K_WITH_RXD_CACHE
7246 /*
7247 * There is no point in storing the RX descriptor cache in the saved
7248 * state, we just need to make sure it is empty.
7249 */
7250 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
7251#endif /* E1K_WITH_RXD_CACHE */
7252 rc = pHlp->pfnSSMHandleGetStatus(pSSM);
7253 AssertRCReturn(rc, rc);
7254
7255 /* derived state */
7256 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
7257
7258 E1kLog(("%s State has been restored\n", pThis->szPrf));
7259 e1kDumpState(pThis);
7260 }
7261 return VINF_SUCCESS;
7262}
7263
7264/**
7265 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
7266 */
7267static DECLCALLBACK(int) e1kR3LoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7268{
7269 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7270 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7271 RT_NOREF(pSSM);
7272
7273 /* Update promiscuous mode */
7274 if (pThisCC->pDrvR3)
7275 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, !!(RCTL & (RCTL_UPE | RCTL_MPE)));
7276
7277 /*
7278 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
7279 * passed to us. We go through all this stuff if the link was up and we
7280 * weren't teleported.
7281 */
7282 if ( (STATUS & STATUS_LU)
7283 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
7284 && pThis->cMsLinkUpDelay)
7285 {
7286 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7287 }
7288 return VINF_SUCCESS;
7289}
7290
7291
7292
7293/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
7294
7295/**
7296 * @callback_method_impl{FNRTSTRFORMATTYPE}
7297 */
7298static DECLCALLBACK(size_t) e1kR3FmtRxDesc(PFNRTSTROUTPUT pfnOutput,
7299 void *pvArgOutput,
7300 const char *pszType,
7301 void const *pvValue,
7302 int cchWidth,
7303 int cchPrecision,
7304 unsigned fFlags,
7305 void *pvUser)
7306{
7307 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7308 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
7309 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
7310 if (!pDesc)
7311 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
7312
7313 size_t cbPrintf = 0;
7314 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
7315 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
7316 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
7317 pDesc->status.fPIF ? "PIF" : "pif",
7318 pDesc->status.fIPCS ? "IPCS" : "ipcs",
7319 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
7320 pDesc->status.fVP ? "VP" : "vp",
7321 pDesc->status.fIXSM ? "IXSM" : "ixsm",
7322 pDesc->status.fEOP ? "EOP" : "eop",
7323 pDesc->status.fDD ? "DD" : "dd",
7324 pDesc->status.fRXE ? "RXE" : "rxe",
7325 pDesc->status.fIPE ? "IPE" : "ipe",
7326 pDesc->status.fTCPE ? "TCPE" : "tcpe",
7327 pDesc->status.fCE ? "CE" : "ce",
7328 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
7329 E1K_SPEC_VLAN(pDesc->status.u16Special),
7330 E1K_SPEC_PRI(pDesc->status.u16Special));
7331 return cbPrintf;
7332}
7333
7334/**
7335 * @callback_method_impl{FNRTSTRFORMATTYPE}
7336 */
7337static DECLCALLBACK(size_t) e1kR3FmtTxDesc(PFNRTSTROUTPUT pfnOutput,
7338 void *pvArgOutput,
7339 const char *pszType,
7340 void const *pvValue,
7341 int cchWidth,
7342 int cchPrecision,
7343 unsigned fFlags,
7344 void *pvUser)
7345{
7346 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7347 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
7348 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
7349 if (!pDesc)
7350 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
7351
7352 size_t cbPrintf = 0;
7353 switch (e1kGetDescType(pDesc))
7354 {
7355 case E1K_DTYP_CONTEXT:
7356 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
7357 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
7358 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
7359 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
7360 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
7361 pDesc->context.dw2.fIDE ? " IDE":"",
7362 pDesc->context.dw2.fRS ? " RS" :"",
7363 pDesc->context.dw2.fTSE ? " TSE":"",
7364 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
7365 pDesc->context.dw2.fTCP ? "TCP":"UDP",
7366 pDesc->context.dw2.u20PAYLEN,
7367 pDesc->context.dw3.u8HDRLEN,
7368 pDesc->context.dw3.u16MSS,
7369 pDesc->context.dw3.fDD?"DD":"");
7370 break;
7371 case E1K_DTYP_DATA:
7372 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
7373 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
7374 pDesc->data.u64BufAddr,
7375 pDesc->data.cmd.u20DTALEN,
7376 pDesc->data.cmd.fIDE ? " IDE" :"",
7377 pDesc->data.cmd.fVLE ? " VLE" :"",
7378 pDesc->data.cmd.fRPS ? " RPS" :"",
7379 pDesc->data.cmd.fRS ? " RS" :"",
7380 pDesc->data.cmd.fTSE ? " TSE" :"",
7381 pDesc->data.cmd.fIFCS? " IFCS":"",
7382 pDesc->data.cmd.fEOP ? " EOP" :"",
7383 pDesc->data.dw3.fDD ? " DD" :"",
7384 pDesc->data.dw3.fEC ? " EC" :"",
7385 pDesc->data.dw3.fLC ? " LC" :"",
7386 pDesc->data.dw3.fTXSM? " TXSM":"",
7387 pDesc->data.dw3.fIXSM? " IXSM":"",
7388 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
7389 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
7390 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
7391 break;
7392 case E1K_DTYP_LEGACY:
7393 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
7394 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
7395 pDesc->data.u64BufAddr,
7396 pDesc->legacy.cmd.u16Length,
7397 pDesc->legacy.cmd.fIDE ? " IDE" :"",
7398 pDesc->legacy.cmd.fVLE ? " VLE" :"",
7399 pDesc->legacy.cmd.fRPS ? " RPS" :"",
7400 pDesc->legacy.cmd.fRS ? " RS" :"",
7401 pDesc->legacy.cmd.fIC ? " IC" :"",
7402 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7403 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7404 pDesc->legacy.dw3.fDD ? " DD" :"",
7405 pDesc->legacy.dw3.fEC ? " EC" :"",
7406 pDesc->legacy.dw3.fLC ? " LC" :"",
7407 pDesc->legacy.cmd.u8CSO,
7408 pDesc->legacy.dw3.u8CSS,
7409 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7410 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7411 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7412 break;
7413 default:
7414 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7415 break;
7416 }
7417
7418 return cbPrintf;
7419}
7420
7421/** Initializes debug helpers (logging format types). */
7422static int e1kR3InitDebugHelpers(void)
7423{
7424 int rc = VINF_SUCCESS;
7425 static bool s_fHelpersRegistered = false;
7426 if (!s_fHelpersRegistered)
7427 {
7428 s_fHelpersRegistered = true;
7429 rc = RTStrFormatTypeRegister("e1krxd", e1kR3FmtRxDesc, NULL);
7430 AssertRCReturn(rc, rc);
7431 rc = RTStrFormatTypeRegister("e1ktxd", e1kR3FmtTxDesc, NULL);
7432 AssertRCReturn(rc, rc);
7433 }
7434 return rc;
7435}
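/*
 * Once registered, descriptors can be dumped with the custom format specifiers,
 * e.g. Log(("%R[e1ktxd]\n", &desc)), or via pHlp->pfnPrintf as e1kR3Info does
 * below with "%R[e1krxd]" and "%R[e1ktxd]".
 */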
7436
7437/**
7438 * Status info callback.
7439 *
7440 * @param pDevIns The device instance.
7441 * @param pHlp The output helpers.
7442 * @param pszArgs The arguments.
7443 */
7444static DECLCALLBACK(void) e1kR3Info(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7445{
7446 RT_NOREF(pszArgs);
7447 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7448 unsigned i;
7449 // bool fRcvRing = false;
7450 // bool fXmtRing = false;
7451
7452 /*
7453 * Parse args.
7454 if (pszArgs)
7455 {
7456 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7457 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7458 }
7459 */
7460
7461 /*
7462 * Show info.
7463 */
7464 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%04x mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7465 pDevIns->iInstance,
7466 PDMDevHlpIoPortGetMappingAddress(pDevIns, pThis->hIoPorts),
7467 PDMDevHlpMmioGetMappingAddress(pDevIns, pThis->hMmioRegion),
7468 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7469 pDevIns->fRCEnabled ? " RC" : "", pDevIns->fR0Enabled ? " R0" : "");
7470
7471 e1kR3CsEnterAsserted(pThis); /* Not sure why but PCNet does it */
7472
7473 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7474 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7475
7476 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7477 {
7478 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7479 if (ra->ctl & RA_CTL_AV)
7480 {
7481 const char *pcszTmp;
7482 switch (ra->ctl & RA_CTL_AS)
7483 {
7484 case 0: pcszTmp = "DST"; break;
7485 case 1: pcszTmp = "SRC"; break;
7486 default: pcszTmp = "reserved";
7487 }
7488 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7489 }
7490 }
7491 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7492 uint32_t rdh = RDH;
7493 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7494 for (i = 0; i < cDescs; ++i)
7495 {
7496 E1KRXDESC desc;
7497 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7498 &desc, sizeof(desc));
7499 if (i == rdh)
7500 pHlp->pfnPrintf(pHlp, ">>> ");
7501 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7502 }
7503#ifdef E1K_WITH_RXD_CACHE
7504 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7505 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
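 /* Back RDH up by the current cache read position to estimate the guest-ring
    index of the first cached descriptor (modulo the ring size). */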
7506 if (rdh > pThis->iRxDCurrent)
7507 rdh -= pThis->iRxDCurrent;
7508 else
7509 rdh = cDescs + rdh - pThis->iRxDCurrent;
7510 for (i = 0; i < pThis->nRxDFetched; ++i)
7511 {
7512 if (i == pThis->iRxDCurrent)
7513 pHlp->pfnPrintf(pHlp, ">>> ");
7514 if (cDescs)
7515 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7516 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7517 &pThis->aRxDescriptors[i]);
7518 else
7519 pHlp->pfnPrintf(pHlp, "<lost>: %R[e1krxd]\n",
7520 &pThis->aRxDescriptors[i]);
7521 }
7522#endif /* E1K_WITH_RXD_CACHE */
7523
7524 cDescs = TDLEN / sizeof(E1KTXDESC);
7525 uint32_t tdh = TDH;
7526 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7527 for (i = 0; i < cDescs; ++i)
7528 {
7529 E1KTXDESC desc;
7530 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7531 &desc, sizeof(desc));
7532 if (i == tdh)
7533 pHlp->pfnPrintf(pHlp, ">>> ");
7534 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7535 }
7536#ifdef E1K_WITH_TXD_CACHE
7537 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7538 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7539 if (tdh > pThis->iTxDCurrent)
7540 tdh -= pThis->iTxDCurrent;
7541 else
7542 tdh = cDescs + tdh - pThis->iTxDCurrent;
7543 for (i = 0; i < pThis->nTxDFetched; ++i)
7544 {
7545 if (i == pThis->iTxDCurrent)
7546 pHlp->pfnPrintf(pHlp, ">>> ");
7547 if (cDescs)
7548 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7549 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7550 &pThis->aTxDescriptors[i]);
7551 else
7552 pHlp->pfnPrintf(pHlp, "<lost>: %R[e1ktxd]\n",
7553 &pThis->aTxDescriptors[i]);
7554 }
7555#endif /* E1K_WITH_TXD_CACHE */
7556
7557
7558#ifdef E1K_INT_STATS
7559 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7560 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7561 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7562 pHlp->pfnPrintf(pHlp, "ICR outside ISR : %d\n", pThis->uStatNoIntICR);
7563 pHlp->pfnPrintf(pHlp, "IMS raised ints : %d\n", pThis->uStatIntIMS);
7564 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7565 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7566 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7567 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7568 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7569 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7570 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7571 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7572 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7573 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7574 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7575 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7576 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7577 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pThis->uStatTxDelayExp);
7578 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7579 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7580 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7581 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7582 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7583 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7584 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7585 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7586 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7587 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7588 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7589 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7590 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7591 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7592 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7593 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7594 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7595 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7596 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7597#endif /* E1K_INT_STATS */
7598
7599 e1kCsLeave(pThis);
7600}
7601
7602
7603
7604/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7605
7606/**
7607 * Detach notification.
7608 *
7609 * One port on the network card has been disconnected from the network.
7610 *
7611 * @param pDevIns The device instance.
7612 * @param iLUN The logical unit which is being detached.
7613 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7614 */
7615static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7616{
7617 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7618 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7619 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7620 RT_NOREF(fFlags);
7621
7622 AssertLogRelReturnVoid(iLUN == 0);
7623
7624 e1kR3CsEnterAsserted(pThis);
7625
7626 /* Mark device as detached. */
7627 pThis->fIsAttached = false;
7628 /*
7629 * Zero some important members.
7630 */
7631 pThisCC->pDrvBase = NULL;
7632 pThisCC->pDrvR3 = NULL;
7633#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7634 pThisR0->pDrvR0 = NIL_RTR0PTR;
7635 pThisRC->pDrvRC = NIL_RTRCPTR;
7636#endif
7637
7638 PDMDevHlpCritSectLeave(pDevIns, &pThis->cs);
7639}
7640
7641/**
7642 * Attach the Network attachment.
7643 *
7644 * One port on the network card has been connected to a network.
7645 *
7646 * @returns VBox status code.
7647 * @param pDevIns The device instance.
7648 * @param iLUN The logical unit which is being attached.
7649 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7650 *
7651 * @remarks This code path is not used during construction.
7652 */
7653static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7654{
7655 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7656 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7657 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7658 RT_NOREF(fFlags);
7659
7660 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7661
7662 e1kR3CsEnterAsserted(pThis);
7663
7664 /*
7665 * Attach the driver.
7666 */
7667 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
7668 if (RT_SUCCESS(rc))
7669 {
7670 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
7671 AssertMsgStmt(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7672 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7673 if (RT_SUCCESS(rc))
7674 {
7675#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7676 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7677 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7678#endif
7679 /* Mark device as attached. */
7680 pThis->fIsAttached = true;
7681 }
7682 }
7683 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7684 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7685 {
7686 /* This should never happen because this function is not called
7687 * if there is no driver to attach! */
7688 Log(("%s No attached driver!\n", pThis->szPrf));
7689 }
7690
7691 /*
7692 * Temporarily set the link down if it was up, so that the guest notices
7693 * that the configuration of the network card has changed.
7694 */
7695 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7696 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7697
7698 PDMDevHlpCritSectLeave(pDevIns, &pThis->cs);
7699 return rc;
7700}
7701
7702/**
7703 * @copydoc FNPDMDEVPOWEROFF
7704 */
7705static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7706{
7707 /* Poke thread waiting for buffer space. */
7708 e1kWakeupReceive(pDevIns, PDMDEVINS_2_DATA(pDevIns, PE1KSTATE));
7709}
7710
7711/**
7712 * @copydoc FNPDMDEVRESET
7713 */
7714static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7715{
7716 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7717 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7718#ifdef E1K_TX_DELAY
7719 e1kCancelTimer(pDevIns, pThis, pThis->hTXDTimer);
7720#endif /* E1K_TX_DELAY */
7721 e1kCancelTimer(pDevIns, pThis, pThis->hIntTimer);
7722 e1kCancelTimer(pDevIns, pThis, pThis->hLUTimer);
7723 e1kXmitFreeBuf(pThis, pThisCC);
7724 pThis->u16TxPktLen = 0;
7725 pThis->fIPcsum = false;
7726 pThis->fTCPcsum = false;
7727 pThis->fIntMaskUsed = false;
7728 pThis->fDelayInts = false;
7729 pThis->fLocked = false;
7730 pThis->u64AckedAt = 0;
7731 e1kR3HardReset(pDevIns, pThis, pThisCC);
7732}
7733
7734/**
7735 * @copydoc FNPDMDEVSUSPEND
7736 */
7737static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7738{
7739 /* Poke thread waiting for buffer space. */
7740 e1kWakeupReceive(pDevIns, PDMDEVINS_2_DATA(pDevIns, PE1KSTATE));
7741}
7742
7743/**
7744 * Device relocation callback.
7745 *
7746 * When this callback is called, the device instance data (and, if the
7747 * device has a GC component, that component as well) is being relocated
7748 * and/or the selectors have been changed. The device must use this chance
7749 * to perform the necessary pointer relocations and data updates.
7750 *
7751 * Before the GC code is executed for the first time, this function will be
7752 * called with a 0 delta so GC pointer calculations can be done in one place.
7753 *
7754 * @param pDevIns Pointer to the device instance.
7755 * @param offDelta The relocation delta relative to the old location.
7756 *
7757 * @remark A relocation CANNOT fail.
7758 */
7759static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7760{
7761 PE1KSTATERC pThisRC = PDMINS_2_DATA_RC(pDevIns, PE1KSTATERC);
7762 if (pThisRC)
7763 pThisRC->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7764 RT_NOREF(offDelta);
7765}
7766
7767/**
7768 * Destruct a device instance.
7769 *
7770 * We need to free non-VM resources only.
7771 *
7772 * @returns VBox status code.
7773 * @param pDevIns The device instance data.
7774 * @thread EMT
7775 */
7776static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7777{
7778 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7779 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7780
7781 e1kDumpState(pThis);
7782 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7783 if (PDMDevHlpCritSectIsInitialized(pDevIns, &pThis->cs))
7784 {
7785 if (pThis->hEventMoreRxDescAvail != NIL_SUPSEMEVENT)
7786 {
7787 PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventMoreRxDescAvail);
7788 RTThreadYield();
7789 PDMDevHlpSUPSemEventClose(pDevIns, pThis->hEventMoreRxDescAvail);
7790 pThis->hEventMoreRxDescAvail = NIL_SUPSEMEVENT;
7791 }
7792#ifdef E1K_WITH_TX_CS
7793 PDMDevHlpCritSectDelete(pDevIns, &pThis->csTx);
7794#endif /* E1K_WITH_TX_CS */
7795 PDMDevHlpCritSectDelete(pDevIns, &pThis->csRx);
7796 PDMDevHlpCritSectDelete(pDevIns, &pThis->cs);
7797 }
7798 return VINF_SUCCESS;
7799}
7800
7801
7802/**
7803 * Set PCI configuration space registers.
7804 *
7805 * @param pPciDev The PCI device structure to configure for the chip given by eChip (an index into g_aChips).
7806 * @thread EMT
7807 */
7808static void e1kR3ConfigurePciDev(PPDMPCIDEV pPciDev, E1KCHIP eChip)
7809{
7810 Assert(eChip < RT_ELEMENTS(g_aChips));
7811 /* Configure PCI Device, assume 32-bit mode ******************************/
7812 PDMPciDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7813 PDMPciDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7814 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7815 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7816
7817 PDMPciDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7818 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7819 PDMPciDevSetWord( pPciDev, VBOX_PCI_STATUS,
7820 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7821 /* Stepping A2 */
7822 PDMPciDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7823 /* Ethernet adapter */
7824 PDMPciDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7825 PDMPciDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7826 /* normal single function Ethernet controller */
7827 PDMPciDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7828 /* Memory Register Base Address */
7829 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7830 /* Memory Flash Base Address */
7831 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7832 /* IO Register Base Address */
7833 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7834 /* Expansion ROM Base Address */
7835 PDMPciDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7836 /* Capabilities Pointer */
7837 PDMPciDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7838 /* Interrupt Pin: INTA# */
7839 PDMPciDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7840 /* Max_Lat/Min_Gnt: very high priority and time slice */
7841 PDMPciDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7842 PDMPciDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7843
7844 /* PCI Power Management Registers ****************************************/
7845 /* Capability ID: PCI Power Management Registers */
7846 PDMPciDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7847 /* Next Item Pointer: PCI-X */
7848 PDMPciDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7849 /* Power Management Capabilities: PM disabled, DSI */
7850 PDMPciDevSetWord( pPciDev, 0xDC + 2,
7851 0x0002 | VBOX_PCI_PM_CAP_DSI);
7852 /* Power Management Control / Status Register: PM disabled */
7853 PDMPciDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7854 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7855 PDMPciDevSetByte( pPciDev, 0xDC + 6, 0x00);
7856 /* Data Register: PM disabled, always 0 */
7857 PDMPciDevSetByte( pPciDev, 0xDC + 7, 0x00);
7858
7859 /* PCI-X Configuration Registers *****************************************/
7860 /* Capability ID: PCI-X Configuration Registers */
7861 PDMPciDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7862#ifdef E1K_WITH_MSI
7863 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7864#else
7865 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7866 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7867#endif
7868 /* PCI-X Command: Enable Relaxed Ordering */
7869 PDMPciDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7870 /* PCI-X Status: 32-bit, 66 MHz */
7871 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7872 PDMPciDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7873}
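/*
 * Summary of the capability chain set up above (derived from the writes in
 * this function, not additional device behaviour): the Capabilities Pointer
 * (config offset 0x34) is set to 0xDC, where the Power Management capability
 * lives with its Next pointer leading to 0xE4, the PCI-X capability. With
 * E1K_WITH_MSI the PCI-X Next pointer is 0x80 instead of 0x00; the MSI
 * capability at that offset is installed later by PDMDevHlpPCIRegisterMsi()
 * in e1kR3Construct().
 */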
7874
7875/**
7876 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7877 */
7878static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7879{
7880 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7881 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7882 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7883 int rc;
7884
7885 /*
7886 * Initialize the instance data (state).
7887 * Note! Caller has initialized it to ZERO already.
7888 */
7889 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7890 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7891 pThis->hEventMoreRxDescAvail = NIL_SUPSEMEVENT;
7892 pThis->u16TxPktLen = 0;
7893 pThis->fIPcsum = false;
7894 pThis->fTCPcsum = false;
7895 pThis->fIntMaskUsed = false;
7896 pThis->fDelayInts = false;
7897 pThis->fLocked = false;
7898 pThis->u64AckedAt = 0;
7899 pThis->led.u32Magic = PDMLED_MAGIC;
7900 pThis->u32PktNo = 1;
7901 pThis->fIsAttached = false;
7902
7903 pThisCC->pDevInsR3 = pDevIns;
7904 pThisCC->pShared = pThis;
7905
7906 /* Interfaces */
7907 pThisCC->IBase.pfnQueryInterface = e1kR3QueryInterface;
7908
7909 pThisCC->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7910 pThisCC->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7911 pThisCC->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7912
7913 pThisCC->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7914
7915 pThisCC->INetworkConfig.pfnGetMac = e1kR3GetMac;
7916 pThisCC->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7917 pThisCC->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7918
7919 /*
7920 * Internal validations.
7921 */
7922 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7923 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7924 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7925 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7926 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7927 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7928 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7929 VERR_INTERNAL_ERROR_4);
7930
7931 /*
7932 * Validate configuration.
7933 */
7934 PDMDEV_VALIDATE_CONFIG_RETURN(pDevIns,
7935 "MAC|"
7936 "CableConnected|"
7937 "AdapterType|"
7938 "LineSpeed|"
7939 "ItrEnabled|"
7940 "ItrRxEnabled|"
7941 "EthernetCRC|"
7942 "GSOEnabled|"
7943 "LinkUpDelay|"
7944 "StatNo",
7945 "");
7946
7947 /** @todo LineSpeed unused! */
7948
7949 /*
7950 * Get config params
7951 */
7952 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
7953 rc = pHlp->pfnCFGMQueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7954 if (RT_FAILURE(rc))
7955 return PDMDEV_SET_ERROR(pDevIns, rc,
7956 N_("Configuration error: Failed to get MAC address"));
7957 rc = pHlp->pfnCFGMQueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7958 if (RT_FAILURE(rc))
7959 return PDMDEV_SET_ERROR(pDevIns, rc,
7960 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7961 rc = pHlp->pfnCFGMQueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7962 if (RT_FAILURE(rc))
7963 return PDMDEV_SET_ERROR(pDevIns, rc,
7964 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7965 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7966
7967 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7968 if (RT_FAILURE(rc))
7969 return PDMDEV_SET_ERROR(pDevIns, rc,
7970 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7971
7972 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7973 if (RT_FAILURE(rc))
7974 return PDMDEV_SET_ERROR(pDevIns, rc,
7975 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7976
7977 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, false);
7978 if (RT_FAILURE(rc))
7979 return PDMDEV_SET_ERROR(pDevIns, rc,
7980 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7981
7982 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7983 if (RT_FAILURE(rc))
7984 return PDMDEV_SET_ERROR(pDevIns, rc,
7985 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7986
7987 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "TidEnabled", &pThis->fTidEnabled, false);
7988 if (RT_FAILURE(rc))
7989 return PDMDEV_SET_ERROR(pDevIns, rc,
7990 N_("Configuration error: Failed to get the value of 'TidEnabled'"));
7991
7992 /*
7993 * Increased the link up delay from 3 to 5 seconds to make sure a guest notices the link loss
7994 * and updates its network configuration when the link is restored. See @bugref{10114}.
7995 */
7996 rc = pHlp->pfnCFGMQueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 5000); /* ms */
7997 if (RT_FAILURE(rc))
7998 return PDMDEV_SET_ERROR(pDevIns, rc,
7999 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
8000 Assert(pThis->cMsLinkUpDelay <= 300000); /* at most 5 minutes */
8001 if (pThis->cMsLinkUpDelay > 5000)
8002 LogRel(("%s: WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
8003 else if (pThis->cMsLinkUpDelay == 0)
8004 LogRel(("%s: WARNING! Link up delay is disabled!\n", pThis->szPrf));
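 /*
 * For reference, a sketch of how the "LinkUpDelay" key queried above is
 * typically overridden per VM via the generic CFGM extra-data path (the
 * exact path is an assumption here, not defined in this file); value is in
 * milliseconds, 0 disables the delay:
 *
 *   VBoxManage setextradata "<vm>" \
 *       "VBoxInternal/Devices/e1000/0/Config/LinkUpDelay" 0
 */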
8005
8006 uint32_t uStatNo = (uint32_t)iInstance;
8007 rc = pHlp->pfnCFGMQueryU32Def(pCfg, "StatNo", &uStatNo, (uint32_t)iInstance);
8008 if (RT_FAILURE(rc))
8009 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed to get the \"StatNo\" value"));
8010
8011 LogRel(("%s: Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s TID=%s R0=%s RC=%s\n", pThis->szPrf,
8012 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
8013 pThis->fEthernetCRC ? "on" : "off",
8014 pThis->fGSOEnabled ? "enabled" : "disabled",
8015 pThis->fItrEnabled ? "enabled" : "disabled",
8016 pThis->fItrRxEnabled ? "enabled" : "disabled",
8017 pThis->fTidEnabled ? "enabled" : "disabled",
8018 pDevIns->fR0Enabled ? "enabled" : "disabled",
8019 pDevIns->fRCEnabled ? "enabled" : "disabled"));
8020
8021 /*
8022 * Initialize sub-components and register everything with the VMM.
8023 */
8024
8025 /* Initialize the EEPROM. */
8026 pThisCC->eeprom.init(pThis->macConfigured);
8027
8028 /* Initialize internal PHY. */
8029 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
8030
8031 /* Initialize critical sections. We do our own locking. */
8032 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
8033 AssertRCReturn(rc, rc);
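 /*
 * Note: handing PDM the NOP critical section above opts this device out of
 * the default per-device lock around I/O callbacks; serialization is done
 * instead with the cs/csRx/csTx sections created right below.
 */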
8034
8035 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
8036 AssertRCReturn(rc, rc);
8037 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
8038 AssertRCReturn(rc, rc);
8039#ifdef E1K_WITH_TX_CS
8040 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
8041 AssertRCReturn(rc, rc);
8042#endif
8043
8044 /* Saved state registration. */
8045 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
8046 NULL, e1kR3LiveExec, NULL,
8047 e1kR3SavePrep, e1kR3SaveExec, NULL,
8048 e1kR3LoadPrep, e1kR3LoadExec, e1kR3LoadDone);
8049 AssertRCReturn(rc, rc);
8050
8051 /* Set PCI config registers and register ourselves with the PCI bus. */
8052 PDMPCIDEV_ASSERT_VALID(pDevIns, pDevIns->apPciDevs[0]);
8053 e1kR3ConfigurePciDev(pDevIns->apPciDevs[0], pThis->eChip);
8054 rc = PDMDevHlpPCIRegister(pDevIns, pDevIns->apPciDevs[0]);
8055 AssertRCReturn(rc, rc);
8056
8057#ifdef E1K_WITH_MSI
8058 PDMMSIREG MsiReg;
8059 RT_ZERO(MsiReg);
8060 MsiReg.cMsiVectors = 1;
8061 MsiReg.iMsiCapOffset = 0x80;
8062 MsiReg.iMsiNextOffset = 0x0;
8063 MsiReg.fMsi64bit = false;
8064 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
8065 AssertRCReturn(rc, rc);
8066#endif
8067
8068 /*
8069 * Map our registers to memory space (region 0, see e1kR3ConfigurePciDev)
8070 * From the spec (regarding flags):
8071 * For registers that should be accessed as 32-bit double words,
8072 * partial writes (less than a 32-bit double word) are ignored.
8073 * Partial reads return all 32 bits of data regardless of the
8074 * byte enables.
8075 */
8076 rc = PDMDevHlpMmioCreateEx(pDevIns, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
8077 pDevIns->apPciDevs[0], 0 /*iPciRegion*/,
8078 e1kMMIOWrite, e1kMMIORead, NULL /*pfnFill*/, NULL /*pvUser*/, "E1000", &pThis->hMmioRegion);
8079 AssertRCReturn(rc, rc);
8080 rc = PDMDevHlpPCIIORegionRegisterMmio(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, pThis->hMmioRegion, NULL);
8081 AssertRCReturn(rc, rc);
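 /*
 * Guest-visible effect of the dword-only MMIO flags above, shown as an
 * illustrative guest-side sketch only (pbBar0 and offReg are hypothetical
 * placeholders for the mapped BAR0 base and a register offset):
 *
 *   volatile uint32_t *pReg = (volatile uint32_t *)(pbBar0 + offReg);
 *   uint32_t u32 = *pReg;                    // 32-bit read: serviced normally
 *   *pReg = u32;                             // 32-bit write: serviced normally
 *   *(volatile uint16_t *)pReg = 0x1234;     // sub-dword write: ignored
 *   // Sub-dword reads still return data taken from the full 32-bit register.
 */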
8082
8083 /* Map our registers to IO space (region 2, see e1kR3ConfigurePciDev) */
8084 static IOMIOPORTDESC const s_aExtDescs[] =
8085 {
8086 { "IOADDR", "IOADDR", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
8087 { "IODATA", "IODATA", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
8088 { NULL, NULL, NULL, NULL }
8089 };
8090 rc = PDMDevHlpIoPortCreate(pDevIns, E1K_IOPORT_SIZE, pDevIns->apPciDevs[0], 2 /*iPciRegion*/,
8091 e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/, "E1000", s_aExtDescs, &pThis->hIoPorts);
8092 AssertRCReturn(rc, rc);
8093 rc = PDMDevHlpPCIIORegionRegisterIo(pDevIns, 2, E1K_IOPORT_SIZE, pThis->hIoPorts);
8094 AssertRCReturn(rc, rc);
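 /*
 * The two dword ports described in s_aExtDescs above form the usual E1000
 * indirect I/O window. A hypothetical guest-side access sketch (uIoBase and
 * offReg are placeholders for the BAR2 base and a register offset):
 *
 *   outl(uIoBase + 0, offReg);           // IOADDR: select the register
 *   uint32_t u32 = inl(uIoBase + 4);     // IODATA: read the selected register
 *   outl(uIoBase + 4, u32);              // IODATA: write the selected register
 */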
8095
8096 /* Create transmit queue */
8097 rc = PDMDevHlpTaskCreate(pDevIns, PDMTASK_F_RZ, "E1000-Xmit", e1kR3TxTaskCallback, NULL, &pThis->hTxTask);
8098 AssertRCReturn(rc, rc);
8099
8100#ifdef E1K_TX_DELAY
8101 /* Create Transmit Delay Timer */
8102 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxDelayTimer, pThis,
8103 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Xmit Delay", &pThis->hTXDTimer);
8104 AssertRCReturn(rc, rc);
8105 rc = PDMDevHlpTimerSetCritSect(pDevIns, pThis->hTXDTimer, &pThis->csTx);
8106 AssertRCReturn(rc, rc);
8107#endif /* E1K_TX_DELAY */
8108
8109//#ifdef E1K_USE_TX_TIMERS
8110 if (pThis->fTidEnabled)
8111 {
8112 /* Create Transmit Interrupt Delay Timer */
8113 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxIntDelayTimer, pThis,
8114 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Xmit IRQ Delay", &pThis->hTIDTimer);
8115 AssertRCReturn(rc, rc);
8116
8117# ifndef E1K_NO_TAD
8118 /* Create Transmit Absolute Delay Timer */
8119 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxAbsDelayTimer, pThis,
8120 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Xmit Abs Delay", &pThis->hTADTimer);
8121 AssertRCReturn(rc, rc);
8122# endif /* E1K_NO_TAD */
8123 }
8124//#endif /* E1K_USE_TX_TIMERS */
8125
8126#ifdef E1K_USE_RX_TIMERS
8127 /* Create Receive Interrupt Delay Timer */
8128 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3RxIntDelayTimer, pThis,
8129 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Recv IRQ Delay", &pThis->hRIDTimer);
8130 AssertRCReturn(rc, rc);
8131
8132 /* Create Receive Absolute Delay Timer */
8133 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3RxAbsDelayTimer, pThis,
8134 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Recv Abs Delay", &pThis->hRADTimer);
8135 AssertRCReturn(rc, rc);
8136#endif /* E1K_USE_RX_TIMERS */
8137
8138 /* Create Late Interrupt Timer */
8139 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3LateIntTimer, pThis,
8140 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Late IRQ", &pThis->hIntTimer);
8141 AssertRCReturn(rc, rc);
8142
8143 /* Create Link Up Timer */
8144 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3LinkUpTimer, pThis,
8145 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Link Up", &pThis->hLUTimer);
8146 AssertRCReturn(rc, rc);
8147
8148 /* Register the info item */
8149 char szTmp[20];
8150 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
8151 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kR3Info);
8152
8153 /* Status driver */
8154 PPDMIBASE pBase;
8155 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThisCC->IBase, &pBase, "Status Port");
8156 if (RT_FAILURE(rc))
8157 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
8158 pThisCC->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
8159
8160 /* Network driver */
8161 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
8162 if (RT_SUCCESS(rc))
8163 {
8164 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
8165 AssertMsgReturn(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
8166
8167#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
8168 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
8169 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
8170#endif
8171 /* Mark device as attached. */
8172 pThis->fIsAttached = true;
8173 }
8174 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
8175 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
8176 {
8177 /* No error! */
8178 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
8179 }
8180 else
8181 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
8182
8183 rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->hEventMoreRxDescAvail);
8184 AssertRCReturn(rc, rc);
8185
8186 rc = e1kR3InitDebugHelpers();
8187 AssertRCReturn(rc, rc);
8188
8189 e1kR3HardReset(pDevIns, pThis, pThisCC);
8190
8191 /*
8192 * Register statistics.
8193 * The /Public/ bits are official and used by session info in the GUI.
8194 */
8195 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
8196 "Amount of data received", "/Public/NetAdapter/%u/BytesReceived", uStatNo);
8197 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
8198 "Amount of data transmitted", "/Public/NetAdapter/%u/BytesTransmitted", uStatNo);
8199 PDMDevHlpSTAMRegisterF(pDevIns, &pDevIns->iInstance, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
8200 "Device instance number", "/Public/NetAdapter/%u/%s", uStatNo, pDevIns->pReg->szName);
8201
8202 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, "ReceiveBytes", STAMUNIT_BYTES, "Amount of data received");
8203 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, "TransmitBytes", STAMUNIT_BYTES, "Amount of data transmitted");
8204
8205#if defined(VBOX_WITH_STATISTICS)
8206 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, "MMIO/ReadRZ", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ");
8207 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, "MMIO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3");
8208 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, "MMIO/WriteRZ", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ");
8209 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, "MMIO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3");
8210 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, "EEPROM/Read", STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads");
8211 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, "EEPROM/Write", STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes");
8212 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, "IO/ReadRZ", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ");
8213 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, "IO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3");
8214 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, "IO/WriteRZ", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ");
8215 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, "IO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3");
8216 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, "LateInt/Timer", STAMUNIT_TICKS_PER_CALL, "Profiling late int timer");
8217 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, "LateInt/Occured", STAMUNIT_OCCURENCES, "Number of late interrupts");
8218 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, "Interrupts/Raised", STAMUNIT_OCCURENCES, "Number of raised interrupts");
8219 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, "Interrupts/Prevented", STAMUNIT_OCCURENCES, "Number of prevented interrupts");
8220 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, "Receive/Total", STAMUNIT_TICKS_PER_CALL, "Profiling receive");
8221 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, "Receive/CRC", STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming");
8222 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, "Receive/Filter", STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering");
8223 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, "Receive/Store", STAMUNIT_TICKS_PER_CALL, "Profiling receive storing");
8224 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, "RxOverflow", STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows");
8225 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflowWakeupRZ, STAMTYPE_COUNTER, "RxOverflowWakeupRZ", STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups in RZ");
8226 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflowWakeupR3, STAMTYPE_COUNTER, "RxOverflowWakeupR3", STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups in R3");
8227 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, "Transmit/TotalRZ", STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ");
8228 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, "Transmit/TotalR3", STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3");
8229 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, "Transmit/SendRZ", STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ");
8230 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, "Transmit/SendR3", STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3");
8231
8232 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, "TxDesc/ContexNormal", STAMUNIT_OCCURENCES, "Number of normal context descriptors");
8233 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, "TxDesc/ContextTSE", STAMUNIT_OCCURENCES, "Number of TSE context descriptors");
8234 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, "TxDesc/Data", STAMUNIT_OCCURENCES, "Number of TX data descriptors");
8235 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, "TxDesc/Legacy", STAMUNIT_OCCURENCES, "Number of TX legacy descriptors");
8236 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, "TxDesc/TSEData", STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors");
8237 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, "TxPath/Fallback", STAMUNIT_OCCURENCES, "Fallback TSE descriptor path");
8238 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, "TxPath/GSO", STAMUNIT_OCCURENCES, "GSO TSE descriptor path");
8239 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, "TxPath/Normal", STAMUNIT_OCCURENCES, "Regular descriptor path");
8240 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, "PHYAccesses", STAMUNIT_OCCURENCES, "Number of PHY accesses");
8241 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
8242 {
8243 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8244 g_aE1kRegMap[iReg].name, "Regs/%s-Reads", g_aE1kRegMap[iReg].abbrev);
8245 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8246 g_aE1kRegMap[iReg].name, "Regs/%s-Writes", g_aE1kRegMap[iReg].abbrev);
8247 }
8248#endif /* VBOX_WITH_STATISTICS */
8249
8250#ifdef E1K_INT_STATS
8251 PDMDevHlpSTAMRegister(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, "u64ArmedAt", STAMUNIT_NS, NULL);
8252 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, "uStatMaxTxDelay", STAMUNIT_NS, NULL);
8253 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatInt, STAMTYPE_U32, "uStatInt", STAMUNIT_NS, NULL);
8254 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, "uStatIntTry", STAMUNIT_NS, NULL);
8255 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, "uStatIntLower", STAMUNIT_NS, NULL);
8256 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatNoIntICR, STAMTYPE_U32, "uStatNoIntICR", STAMUNIT_NS, NULL);
8257 PDMDevHlpSTAMRegister(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, "iStatIntLost", STAMUNIT_NS, NULL);
8258 PDMDevHlpSTAMRegister(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, "iStatIntLostOne", STAMUNIT_NS, NULL);
8259 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntIMS, STAMTYPE_U32, "uStatIntIMS", STAMUNIT_NS, NULL);
8260 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, "uStatIntSkip", STAMUNIT_NS, NULL);
8261 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, "uStatIntLate", STAMUNIT_NS, NULL);
8262 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, "uStatIntMasked", STAMUNIT_NS, NULL);
8263 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, "uStatIntEarly", STAMUNIT_NS, NULL);
8264 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, "uStatIntRx", STAMUNIT_NS, NULL);
8265 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, "uStatIntTx", STAMUNIT_NS, NULL);
8266 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, "uStatIntICS", STAMUNIT_NS, NULL);
8267 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, "uStatIntRDTR", STAMUNIT_NS, NULL);
8268 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, "uStatIntRXDMT0", STAMUNIT_NS, NULL);
8269 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, "uStatIntTXQE", STAMUNIT_NS, NULL);
8270 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, "uStatTxNoRS", STAMUNIT_NS, NULL);
8271 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, "uStatTxIDE", STAMUNIT_NS, NULL);
8272 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, "uStatTxDelayed", STAMUNIT_NS, NULL);
8273 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, "uStatTxDelayExp", STAMUNIT_NS, NULL);
8274 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, "uStatTAD", STAMUNIT_NS, NULL);
8275 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTID, STAMTYPE_U32, "uStatTID", STAMUNIT_NS, NULL);
8276 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, "uStatRAD", STAMUNIT_NS, NULL);
8277 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatRID, STAMTYPE_U32, "uStatRID", STAMUNIT_NS, NULL);
8278 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, "uStatRxFrm", STAMUNIT_NS, NULL);
8279 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, "uStatTxFrm", STAMUNIT_NS, NULL);
8280 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, "uStatDescCtx", STAMUNIT_NS, NULL);
8281 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, "uStatDescDat", STAMUNIT_NS, NULL);
8282 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, "uStatDescLeg", STAMUNIT_NS, NULL);
8283 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, "uStatTx1514", STAMUNIT_NS, NULL);
8284 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, "uStatTx2962", STAMUNIT_NS, NULL);
8285 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, "uStatTx4410", STAMUNIT_NS, NULL);
8286 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, "uStatTx5858", STAMUNIT_NS, NULL);
8287 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, "uStatTx7306", STAMUNIT_NS, NULL);
8288 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, "uStatTx8754", STAMUNIT_NS, NULL);
8289 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, "uStatTx16384", STAMUNIT_NS, NULL);
8290 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, "uStatTx32768", STAMUNIT_NS, NULL);
8291 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, "uStatTxLarge", STAMUNIT_NS, NULL);
8292#endif /* E1K_INT_STATS */
8293
8294 return VINF_SUCCESS;
8295}
8296
8297#else /* !IN_RING3 */
8298
8299/**
8300 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
8301 */
8302static DECLCALLBACK(int) e1kRZConstruct(PPDMDEVINS pDevIns)
8303{
8304 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
8305 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
8306 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
8307
8308 /* Initialize context specific state data: */
8309 pThisCC->CTX_SUFF(pDevIns) = pDevIns;
8310 /** @todo @bugref{9218} ring-0 driver stuff */
8311 pThisCC->CTX_SUFF(pDrv) = NULL;
8312 pThisCC->CTX_SUFF(pTxSg) = NULL;
8313
8314 /* Configure critical sections the same way: */
8315 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
8316 AssertRCReturn(rc, rc);
8317
8318 /* Set up MMIO and I/O port callbacks for this context: */
8319 rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmioRegion, e1kMMIOWrite, e1kMMIORead, NULL /*pvUser*/);
8320 AssertRCReturn(rc, rc);
8321
8322 rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPorts, e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/);
8323 AssertRCReturn(rc, rc);
8324
8325 return VINF_SUCCESS;
8326}
8327
8328#endif /* !IN_RING3 */
8329
8330/**
8331 * The device registration structure.
8332 */
8333const PDMDEVREG g_DeviceE1000 =
8334{
8335 /* .u32Version = */ PDM_DEVREG_VERSION,
8336 /* .uReserved0 = */ 0,
8337 /* .szName = */ "e1000",
8338 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
8339 /* .fClass = */ PDM_DEVREG_CLASS_NETWORK,
8340 /* .cMaxInstances = */ ~0U,
8341 /* .uSharedVersion = */ 42,
8342 /* .cbInstanceShared = */ sizeof(E1KSTATE),
8343 /* .cbInstanceCC = */ sizeof(E1KSTATECC),
8344 /* .cbInstanceRC = */ sizeof(E1KSTATERC),
8345 /* .cMaxPciDevices = */ 1,
8346 /* .cMaxMsixVectors = */ 0,
8347 /* .pszDescription = */ "Intel PRO/1000 MT Desktop Ethernet.",
8348#if defined(IN_RING3)
8349 /* .pszRCMod = */ "VBoxDDRC.rc",
8350 /* .pszR0Mod = */ "VBoxDDR0.r0",
8351 /* .pfnConstruct = */ e1kR3Construct,
8352 /* .pfnDestruct = */ e1kR3Destruct,
8353 /* .pfnRelocate = */ e1kR3Relocate,
8354 /* .pfnMemSetup = */ NULL,
8355 /* .pfnPowerOn = */ NULL,
8356 /* .pfnReset = */ e1kR3Reset,
8357 /* .pfnSuspend = */ e1kR3Suspend,
8358 /* .pfnResume = */ NULL,
8359 /* .pfnAttach = */ e1kR3Attach,
8360 /* .pfnDetach = */ e1kR3Detach,
8361 /* .pfnQueryInterface = */ NULL,
8362 /* .pfnInitComplete = */ NULL,
8363 /* .pfnPowerOff = */ e1kR3PowerOff,
8364 /* .pfnSoftReset = */ NULL,
8365 /* .pfnReserved0 = */ NULL,
8366 /* .pfnReserved1 = */ NULL,
8367 /* .pfnReserved2 = */ NULL,
8368 /* .pfnReserved3 = */ NULL,
8369 /* .pfnReserved4 = */ NULL,
8370 /* .pfnReserved5 = */ NULL,
8371 /* .pfnReserved6 = */ NULL,
8372 /* .pfnReserved7 = */ NULL,
8373#elif defined(IN_RING0)
8374 /* .pfnEarlyConstruct = */ NULL,
8375 /* .pfnConstruct = */ e1kRZConstruct,
8376 /* .pfnDestruct = */ NULL,
8377 /* .pfnFinalDestruct = */ NULL,
8378 /* .pfnRequest = */ NULL,
8379 /* .pfnReserved0 = */ NULL,
8380 /* .pfnReserved1 = */ NULL,
8381 /* .pfnReserved2 = */ NULL,
8382 /* .pfnReserved3 = */ NULL,
8383 /* .pfnReserved4 = */ NULL,
8384 /* .pfnReserved5 = */ NULL,
8385 /* .pfnReserved6 = */ NULL,
8386 /* .pfnReserved7 = */ NULL,
8387#elif defined(IN_RC)
8388 /* .pfnConstruct = */ e1kRZConstruct,
8389 /* .pfnReserved0 = */ NULL,
8390 /* .pfnReserved1 = */ NULL,
8391 /* .pfnReserved2 = */ NULL,
8392 /* .pfnReserved3 = */ NULL,
8393 /* .pfnReserved4 = */ NULL,
8394 /* .pfnReserved5 = */ NULL,
8395 /* .pfnReserved6 = */ NULL,
8396 /* .pfnReserved7 = */ NULL,
8397#else
8398# error "Not in IN_RING3, IN_RING0 or IN_RC!"
8399#endif
8400 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
8401};
8402
8403#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */