VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@62956

Last change on this file since 62956 was 62632, checked in by vboxsync (2016-07-28):

Devices: unused parameter warnings.

1/* $Id: DevE1000.cpp 62632 2016-07-28 15:58:14Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2016 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/* Options *******************************************************************/
51/** @def E1K_INIT_RA0
52 * E1K_INIT_RA0 forces E1000 to set the first entry in the Receive Address filter
53 * table to the MAC address obtained from CFGM. Most guests read the MAC address from
54 * EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on it
55 * being already set (see @bugref{4657}).
56 */
57#define E1K_INIT_RA0
58/** @def E1K_LSC_ON_SLU
59 * E1K_LSC_ON_SLU causes E1000 to generate a Link Status Change interrupt when
60 * the guest driver brings up the link via the STATUS.LU bit. Again, the only guest
61 * that requires it is Mac OS X (see @bugref{4657}).
62 */
63#define E1K_LSC_ON_SLU
64/** @def E1K_TX_DELAY
65 * E1K_TX_DELAY aims to improve guest-host transfer rate for TCP streams by
66 * preventing packets from being sent immediately. It allows several packets
67 * to be sent in a batch, reducing the number of acknowledgments. Note that it
68 * effectively disables the R0 TX path, forcing sending in R3.
69 */
70//#define E1K_TX_DELAY 150
71/** @def E1K_USE_TX_TIMERS
72 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
73 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
74 * register. Enabling it showed no positive effect on existing guests, so it
75 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
76 * Ethernet Controllers Software Developer’s Manual" for more detailed
77 * explanation.
78 */
79//#define E1K_USE_TX_TIMERS
80/** @def E1K_NO_TAD
81 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
82 * Transmit Absolute Delay time. This timer sets the maximum time interval
83 * during which TX interrupts can be postponed (delayed). It has no effect
84 * if E1K_USE_TX_TIMERS is not defined.
85 */
86//#define E1K_NO_TAD
87/** @def E1K_REL_DEBUG
88 * E1K_REL_DEBUG enables debug logging (levels 1-3) in release builds.
89 */
90//#define E1K_REL_DEBUG
91/** @def E1K_INT_STATS
92 * E1K_INT_STATS enables collection of internal statistics used for
93 * debugging of delayed interrupts, etc.
94 */
95//#define E1K_INT_STATS
96/** @def E1K_WITH_MSI
97 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
98 */
99//#define E1K_WITH_MSI
100/** @def E1K_WITH_TX_CS
101 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
102 */
103#define E1K_WITH_TX_CS
104/** @def E1K_WITH_TXD_CACHE
105 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
106 * single physical memory read (or two if it wraps around the end of the TX
107 * descriptor ring). It is required for proper functioning of bandwidth
108 * resource control as it allows computing exact packet sizes prior
109 * to allocating their buffers (see @bugref{5582}).
110 */
111#define E1K_WITH_TXD_CACHE
112/** @def E1K_WITH_RXD_CACHE
113 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
114 * single physical memory read (or two if it wraps around the end of the RX
115 * descriptor ring). Intel's packet driver for DOS needs this option in
116 * order to work properly (see @bugref{6217}).
117 */
118#define E1K_WITH_RXD_CACHE
119/* End of Options ************************************************************/
120
121#ifdef E1K_WITH_TXD_CACHE
122/**
123 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
124 * in the state structure. It limits the number of descriptors loaded in one
125 * batch read. For example, a Linux guest may use up to 20 descriptors per
126 * TSE packet. The largest TSE packet seen (from a Windows guest) was 45 descriptors.
127 */
128# define E1K_TXD_CACHE_SIZE 64u
129#endif /* E1K_WITH_TXD_CACHE */
130
131#ifdef E1K_WITH_RXD_CACHE
132/**
133 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
134 * in the state structure. It limits the number of descriptors loaded in one
135 * batch read. For example, a Windows XP guest adds 15 RX descriptors at a time.
136 */
137# define E1K_RXD_CACHE_SIZE 16u
138#endif /* E1K_WITH_RXD_CACHE */
139
140
141/* Little helpers ************************************************************/
142#undef htons
143#undef ntohs
144#undef htonl
145#undef ntohl
146#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
147#define ntohs(x) htons(x)
148#define htonl(x) ASMByteSwapU32(x)
149#define ntohl(x) htonl(x)
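
/* Example (illustrative values, not from the original source): these helpers
 * are plain byte swaps, so
 *     htons(0x0800)     == 0x0008
 *     htonl(0xC0A80001) == 0x0100A8C0
 */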
150
151#ifndef DEBUG
152# ifdef E1K_REL_DEBUG
153# define DEBUG
154# define E1kLog(a) LogRel(a)
155# define E1kLog2(a) LogRel(a)
156# define E1kLog3(a) LogRel(a)
157# define E1kLogX(x, a) LogRel(a)
158//# define E1kLog3(a) do {} while (0)
159# else
160# define E1kLog(a) do {} while (0)
161# define E1kLog2(a) do {} while (0)
162# define E1kLog3(a) do {} while (0)
163# define E1kLogX(x, a) do {} while (0)
164# endif
165#else
166# define E1kLog(a) Log(a)
167# define E1kLog2(a) Log2(a)
168# define E1kLog3(a) Log3(a)
169# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
170//# define E1kLog(a) do {} while (0)
171//# define E1kLog2(a) do {} while (0)
172//# define E1kLog3(a) do {} while (0)
173#endif
174
175#if 0
176# define LOG_ENABLED
177# define E1kLogRel(a) LogRel(a)
178# undef Log6
179# define Log6(a) LogRel(a)
180#else
181# define E1kLogRel(a) do { } while (0)
182#endif
183
184//#undef DEBUG
185
186#define STATE_TO_DEVINS(pThis) (((PE1KSTATE )pThis)->CTX_SUFF(pDevIns))
187#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
188
189#define E1K_INC_CNT32(cnt) \
190do { \
191 if (cnt < UINT32_MAX) \
192 cnt++; \
193} while (0)
194
195#define E1K_ADD_CNT64(cntLo, cntHi, val) \
196do { \
197 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
198 uint64_t tmp = u64Cnt; \
199 u64Cnt += val; \
200 if (tmp > u64Cnt ) \
201 u64Cnt = UINT64_MAX; \
202 cntLo = (uint32_t)u64Cnt; \
203 cntHi = (uint32_t)(u64Cnt >> 32); \
204} while (0)
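
/* Usage sketch (illustrative; cbFrame is a hypothetical local holding the
 * frame size): the statistics registers are kept as 32-bit low/high pairs,
 * so a 64-bit octet counter is advanced with
 *     E1K_ADD_CNT64(GORCL, GORCH, cbFrame); // good octets received,
 *                                           // saturates at UINT64_MAX
 */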
205
206#ifdef E1K_INT_STATS
207# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
208#else /* E1K_INT_STATS */
209# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
210#endif /* E1K_INT_STATS */
211
212
213/*****************************************************************************/
214
215typedef uint32_t E1KCHIP;
216#define E1K_CHIP_82540EM 0
217#define E1K_CHIP_82543GC 1
218#define E1K_CHIP_82545EM 2
219
220/** Different E1000 chips. */
221static const struct E1kChips
222{
223 uint16_t uPCIVendorId;
224 uint16_t uPCIDeviceId;
225 uint16_t uPCISubsystemVendorId;
226 uint16_t uPCISubsystemId;
227 const char *pcszName;
228} g_Chips[] =
229{
230 /* Vendor Device SSVendor SubSys Name */
231 { 0x8086,
232 /* Temporary code, as MSI-aware drivers dislike 0x100E. How should this be done properly? */
233#ifdef E1K_WITH_MSI
234 0x105E,
235#else
236 0x100E,
237#endif
238 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
239 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
240 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
241};
242
243
244/* The size of register area mapped to I/O space */
245#define E1K_IOPORT_SIZE 0x8
246/* The size of memory-mapped register area */
247#define E1K_MM_SIZE 0x20000
248
249#define E1K_MAX_TX_PKT_SIZE 16288
250#define E1K_MAX_RX_PKT_SIZE 16384
251
252/*****************************************************************************/
253
254/** Gets the specified bits from the register. */
255#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
256#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
257#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
258#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
259#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
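
/* Example (illustrative): with the EERD field definitions below,
 *     GET_BITS(EERD, ADDR)
 * expands to
 *     ((pThis->auRegs[EERD_IDX] & EERD_ADDR_MASK) >> EERD_ADDR_SHIFT)
 * i.e. it extracts the EEPROM word address from bits 15:8, while
 *     SET_BITS(EERD, DATA, u16Value)
 * stores a (hypothetical) u16Value into the data field in bits 31:16.
 */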
260
261#define CTRL_SLU UINT32_C(0x00000040)
262#define CTRL_MDIO UINT32_C(0x00100000)
263#define CTRL_MDC UINT32_C(0x00200000)
264#define CTRL_MDIO_DIR UINT32_C(0x01000000)
265#define CTRL_MDC_DIR UINT32_C(0x02000000)
266#define CTRL_RESET UINT32_C(0x04000000)
267#define CTRL_VME UINT32_C(0x40000000)
268
269#define STATUS_LU UINT32_C(0x00000002)
270#define STATUS_TXOFF UINT32_C(0x00000010)
271
272#define EECD_EE_WIRES UINT32_C(0x0F)
273#define EECD_EE_REQ UINT32_C(0x40)
274#define EECD_EE_GNT UINT32_C(0x80)
275
276#define EERD_START UINT32_C(0x00000001)
277#define EERD_DONE UINT32_C(0x00000010)
278#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
279#define EERD_DATA_SHIFT 16
280#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
281#define EERD_ADDR_SHIFT 8
282
283#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
284#define MDIC_DATA_SHIFT 0
285#define MDIC_REG_MASK UINT32_C(0x001F0000)
286#define MDIC_REG_SHIFT 16
287#define MDIC_PHY_MASK UINT32_C(0x03E00000)
288#define MDIC_PHY_SHIFT 21
289#define MDIC_OP_WRITE UINT32_C(0x04000000)
290#define MDIC_OP_READ UINT32_C(0x08000000)
291#define MDIC_READY UINT32_C(0x10000000)
292#define MDIC_INT_EN UINT32_C(0x20000000)
293#define MDIC_ERROR UINT32_C(0x40000000)
294
295#define TCTL_EN UINT32_C(0x00000002)
296#define TCTL_PSP UINT32_C(0x00000008)
297
298#define RCTL_EN UINT32_C(0x00000002)
299#define RCTL_UPE UINT32_C(0x00000008)
300#define RCTL_MPE UINT32_C(0x00000010)
301#define RCTL_LPE UINT32_C(0x00000020)
302#define RCTL_LBM_MASK UINT32_C(0x000000C0)
303#define RCTL_LBM_SHIFT 6
304#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
305#define RCTL_RDMTS_SHIFT 8
306#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
307#define RCTL_MO_MASK UINT32_C(0x00003000)
308#define RCTL_MO_SHIFT 12
309#define RCTL_BAM UINT32_C(0x00008000)
310#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
311#define RCTL_BSIZE_SHIFT 16
312#define RCTL_VFE UINT32_C(0x00040000)
313#define RCTL_CFIEN UINT32_C(0x00080000)
314#define RCTL_CFI UINT32_C(0x00100000)
315#define RCTL_BSEX UINT32_C(0x02000000)
316#define RCTL_SECRC UINT32_C(0x04000000)
317
318#define ICR_TXDW UINT32_C(0x00000001)
319#define ICR_TXQE UINT32_C(0x00000002)
320#define ICR_LSC UINT32_C(0x00000004)
321#define ICR_RXDMT0 UINT32_C(0x00000010)
322#define ICR_RXT0 UINT32_C(0x00000080)
323#define ICR_TXD_LOW UINT32_C(0x00008000)
324#define RDTR_FPD UINT32_C(0x80000000)
325
326#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
327typedef struct
328{
329 unsigned rxa : 7;
330 unsigned rxa_r : 9;
331 unsigned txa : 16;
332} PBAST;
333AssertCompileSize(PBAST, 4);
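
/* Illustrative sketch of how the PBA_st overlay is meant to be used (assumes
 * the 82540EM's 64 KB on-chip packet buffer; not a statement about the actual
 * register handler):
 *     PBA_st->txa = 64 - PBA_st->rxa;  // keep RX + TX allocations consistent
 */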
334
335#define TXDCTL_WTHRESH_MASK 0x003F0000
336#define TXDCTL_WTHRESH_SHIFT 16
337#define TXDCTL_LWTHRESH_MASK 0xFE000000
338#define TXDCTL_LWTHRESH_SHIFT 25
339
340#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
341#define RXCSUM_PCSS_SHIFT 0
342
343/** @name Register access macros
344 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
345 * @{ */
346#define CTRL pThis->auRegs[CTRL_IDX]
347#define STATUS pThis->auRegs[STATUS_IDX]
348#define EECD pThis->auRegs[EECD_IDX]
349#define EERD pThis->auRegs[EERD_IDX]
350#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
351#define FLA pThis->auRegs[FLA_IDX]
352#define MDIC pThis->auRegs[MDIC_IDX]
353#define FCAL pThis->auRegs[FCAL_IDX]
354#define FCAH pThis->auRegs[FCAH_IDX]
355#define FCT pThis->auRegs[FCT_IDX]
356#define VET pThis->auRegs[VET_IDX]
357#define ICR pThis->auRegs[ICR_IDX]
358#define ITR pThis->auRegs[ITR_IDX]
359#define ICS pThis->auRegs[ICS_IDX]
360#define IMS pThis->auRegs[IMS_IDX]
361#define IMC pThis->auRegs[IMC_IDX]
362#define RCTL pThis->auRegs[RCTL_IDX]
363#define FCTTV pThis->auRegs[FCTTV_IDX]
364#define TXCW pThis->auRegs[TXCW_IDX]
365#define RXCW pThis->auRegs[RXCW_IDX]
366#define TCTL pThis->auRegs[TCTL_IDX]
367#define TIPG pThis->auRegs[TIPG_IDX]
368#define AIFS pThis->auRegs[AIFS_IDX]
369#define LEDCTL pThis->auRegs[LEDCTL_IDX]
370#define PBA pThis->auRegs[PBA_IDX]
371#define FCRTL pThis->auRegs[FCRTL_IDX]
372#define FCRTH pThis->auRegs[FCRTH_IDX]
373#define RDFH pThis->auRegs[RDFH_IDX]
374#define RDFT pThis->auRegs[RDFT_IDX]
375#define RDFHS pThis->auRegs[RDFHS_IDX]
376#define RDFTS pThis->auRegs[RDFTS_IDX]
377#define RDFPC pThis->auRegs[RDFPC_IDX]
378#define RDBAL pThis->auRegs[RDBAL_IDX]
379#define RDBAH pThis->auRegs[RDBAH_IDX]
380#define RDLEN pThis->auRegs[RDLEN_IDX]
381#define RDH pThis->auRegs[RDH_IDX]
382#define RDT pThis->auRegs[RDT_IDX]
383#define RDTR pThis->auRegs[RDTR_IDX]
384#define RXDCTL pThis->auRegs[RXDCTL_IDX]
385#define RADV pThis->auRegs[RADV_IDX]
386#define RSRPD pThis->auRegs[RSRPD_IDX]
387#define TXDMAC pThis->auRegs[TXDMAC_IDX]
388#define TDFH pThis->auRegs[TDFH_IDX]
389#define TDFT pThis->auRegs[TDFT_IDX]
390#define TDFHS pThis->auRegs[TDFHS_IDX]
391#define TDFTS pThis->auRegs[TDFTS_IDX]
392#define TDFPC pThis->auRegs[TDFPC_IDX]
393#define TDBAL pThis->auRegs[TDBAL_IDX]
394#define TDBAH pThis->auRegs[TDBAH_IDX]
395#define TDLEN pThis->auRegs[TDLEN_IDX]
396#define TDH pThis->auRegs[TDH_IDX]
397#define TDT pThis->auRegs[TDT_IDX]
398#define TIDV pThis->auRegs[TIDV_IDX]
399#define TXDCTL pThis->auRegs[TXDCTL_IDX]
400#define TADV pThis->auRegs[TADV_IDX]
401#define TSPMT pThis->auRegs[TSPMT_IDX]
402#define CRCERRS pThis->auRegs[CRCERRS_IDX]
403#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
404#define SYMERRS pThis->auRegs[SYMERRS_IDX]
405#define RXERRC pThis->auRegs[RXERRC_IDX]
406#define MPC pThis->auRegs[MPC_IDX]
407#define SCC pThis->auRegs[SCC_IDX]
408#define ECOL pThis->auRegs[ECOL_IDX]
409#define MCC pThis->auRegs[MCC_IDX]
410#define LATECOL pThis->auRegs[LATECOL_IDX]
411#define COLC pThis->auRegs[COLC_IDX]
412#define DC pThis->auRegs[DC_IDX]
413#define TNCRS pThis->auRegs[TNCRS_IDX]
414/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
415#define CEXTERR pThis->auRegs[CEXTERR_IDX]
416#define RLEC pThis->auRegs[RLEC_IDX]
417#define XONRXC pThis->auRegs[XONRXC_IDX]
418#define XONTXC pThis->auRegs[XONTXC_IDX]
419#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
420#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
421#define FCRUC pThis->auRegs[FCRUC_IDX]
422#define PRC64 pThis->auRegs[PRC64_IDX]
423#define PRC127 pThis->auRegs[PRC127_IDX]
424#define PRC255 pThis->auRegs[PRC255_IDX]
425#define PRC511 pThis->auRegs[PRC511_IDX]
426#define PRC1023 pThis->auRegs[PRC1023_IDX]
427#define PRC1522 pThis->auRegs[PRC1522_IDX]
428#define GPRC pThis->auRegs[GPRC_IDX]
429#define BPRC pThis->auRegs[BPRC_IDX]
430#define MPRC pThis->auRegs[MPRC_IDX]
431#define GPTC pThis->auRegs[GPTC_IDX]
432#define GORCL pThis->auRegs[GORCL_IDX]
433#define GORCH pThis->auRegs[GORCH_IDX]
434#define GOTCL pThis->auRegs[GOTCL_IDX]
435#define GOTCH pThis->auRegs[GOTCH_IDX]
436#define RNBC pThis->auRegs[RNBC_IDX]
437#define RUC pThis->auRegs[RUC_IDX]
438#define RFC pThis->auRegs[RFC_IDX]
439#define ROC pThis->auRegs[ROC_IDX]
440#define RJC pThis->auRegs[RJC_IDX]
441#define MGTPRC pThis->auRegs[MGTPRC_IDX]
442#define MGTPDC pThis->auRegs[MGTPDC_IDX]
443#define MGTPTC pThis->auRegs[MGTPTC_IDX]
444#define TORL pThis->auRegs[TORL_IDX]
445#define TORH pThis->auRegs[TORH_IDX]
446#define TOTL pThis->auRegs[TOTL_IDX]
447#define TOTH pThis->auRegs[TOTH_IDX]
448#define TPR pThis->auRegs[TPR_IDX]
449#define TPT pThis->auRegs[TPT_IDX]
450#define PTC64 pThis->auRegs[PTC64_IDX]
451#define PTC127 pThis->auRegs[PTC127_IDX]
452#define PTC255 pThis->auRegs[PTC255_IDX]
453#define PTC511 pThis->auRegs[PTC511_IDX]
454#define PTC1023 pThis->auRegs[PTC1023_IDX]
455#define PTC1522 pThis->auRegs[PTC1522_IDX]
456#define MPTC pThis->auRegs[MPTC_IDX]
457#define BPTC pThis->auRegs[BPTC_IDX]
458#define TSCTC pThis->auRegs[TSCTC_IDX]
459#define TSCTFC pThis->auRegs[TSCTFC_IDX]
460#define RXCSUM pThis->auRegs[RXCSUM_IDX]
461#define WUC pThis->auRegs[WUC_IDX]
462#define WUFC pThis->auRegs[WUFC_IDX]
463#define WUS pThis->auRegs[WUS_IDX]
464#define MANC pThis->auRegs[MANC_IDX]
465#define IPAV pThis->auRegs[IPAV_IDX]
466#define WUPL pThis->auRegs[WUPL_IDX]
467/** @} */
468
469/**
470 * Indices of memory-mapped registers in register table.
471 */
472typedef enum
473{
474 CTRL_IDX,
475 STATUS_IDX,
476 EECD_IDX,
477 EERD_IDX,
478 CTRL_EXT_IDX,
479 FLA_IDX,
480 MDIC_IDX,
481 FCAL_IDX,
482 FCAH_IDX,
483 FCT_IDX,
484 VET_IDX,
485 ICR_IDX,
486 ITR_IDX,
487 ICS_IDX,
488 IMS_IDX,
489 IMC_IDX,
490 RCTL_IDX,
491 FCTTV_IDX,
492 TXCW_IDX,
493 RXCW_IDX,
494 TCTL_IDX,
495 TIPG_IDX,
496 AIFS_IDX,
497 LEDCTL_IDX,
498 PBA_IDX,
499 FCRTL_IDX,
500 FCRTH_IDX,
501 RDFH_IDX,
502 RDFT_IDX,
503 RDFHS_IDX,
504 RDFTS_IDX,
505 RDFPC_IDX,
506 RDBAL_IDX,
507 RDBAH_IDX,
508 RDLEN_IDX,
509 RDH_IDX,
510 RDT_IDX,
511 RDTR_IDX,
512 RXDCTL_IDX,
513 RADV_IDX,
514 RSRPD_IDX,
515 TXDMAC_IDX,
516 TDFH_IDX,
517 TDFT_IDX,
518 TDFHS_IDX,
519 TDFTS_IDX,
520 TDFPC_IDX,
521 TDBAL_IDX,
522 TDBAH_IDX,
523 TDLEN_IDX,
524 TDH_IDX,
525 TDT_IDX,
526 TIDV_IDX,
527 TXDCTL_IDX,
528 TADV_IDX,
529 TSPMT_IDX,
530 CRCERRS_IDX,
531 ALGNERRC_IDX,
532 SYMERRS_IDX,
533 RXERRC_IDX,
534 MPC_IDX,
535 SCC_IDX,
536 ECOL_IDX,
537 MCC_IDX,
538 LATECOL_IDX,
539 COLC_IDX,
540 DC_IDX,
541 TNCRS_IDX,
542 SEC_IDX,
543 CEXTERR_IDX,
544 RLEC_IDX,
545 XONRXC_IDX,
546 XONTXC_IDX,
547 XOFFRXC_IDX,
548 XOFFTXC_IDX,
549 FCRUC_IDX,
550 PRC64_IDX,
551 PRC127_IDX,
552 PRC255_IDX,
553 PRC511_IDX,
554 PRC1023_IDX,
555 PRC1522_IDX,
556 GPRC_IDX,
557 BPRC_IDX,
558 MPRC_IDX,
559 GPTC_IDX,
560 GORCL_IDX,
561 GORCH_IDX,
562 GOTCL_IDX,
563 GOTCH_IDX,
564 RNBC_IDX,
565 RUC_IDX,
566 RFC_IDX,
567 ROC_IDX,
568 RJC_IDX,
569 MGTPRC_IDX,
570 MGTPDC_IDX,
571 MGTPTC_IDX,
572 TORL_IDX,
573 TORH_IDX,
574 TOTL_IDX,
575 TOTH_IDX,
576 TPR_IDX,
577 TPT_IDX,
578 PTC64_IDX,
579 PTC127_IDX,
580 PTC255_IDX,
581 PTC511_IDX,
582 PTC1023_IDX,
583 PTC1522_IDX,
584 MPTC_IDX,
585 BPTC_IDX,
586 TSCTC_IDX,
587 TSCTFC_IDX,
588 RXCSUM_IDX,
589 WUC_IDX,
590 WUFC_IDX,
591 WUS_IDX,
592 MANC_IDX,
593 IPAV_IDX,
594 WUPL_IDX,
595 MTA_IDX,
596 RA_IDX,
597 VFTA_IDX,
598 IP4AT_IDX,
599 IP6AT_IDX,
600 WUPM_IDX,
601 FFLT_IDX,
602 FFMT_IDX,
603 FFVT_IDX,
604 PBM_IDX,
605 RA_82542_IDX,
606 MTA_82542_IDX,
607 VFTA_82542_IDX,
608 E1K_NUM_OF_REGS
609} E1kRegIndex;
610
611#define E1K_NUM_OF_32BIT_REGS MTA_IDX
612/** The number of registers with strictly increasing offset. */
613#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
614
615
616/**
617 * Define E1000-specific EEPROM layout.
618 */
619struct E1kEEPROM
620{
621 public:
622 EEPROM93C46 eeprom;
623
624#ifdef IN_RING3
625 /**
626 * Initialize EEPROM content.
627 *
628 * @param macAddr MAC address of E1000.
629 */
630 void init(RTMAC &macAddr)
631 {
632 eeprom.init();
633 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
634 eeprom.m_au16Data[0x04] = 0xFFFF;
635 /*
636 * bit 3 - full support for power management
637 * bit 10 - full duplex
638 */
639 eeprom.m_au16Data[0x0A] = 0x4408;
640 eeprom.m_au16Data[0x0B] = 0x001E;
641 eeprom.m_au16Data[0x0C] = 0x8086;
642 eeprom.m_au16Data[0x0D] = 0x100E;
643 eeprom.m_au16Data[0x0E] = 0x8086;
644 eeprom.m_au16Data[0x0F] = 0x3040;
645 eeprom.m_au16Data[0x21] = 0x7061;
646 eeprom.m_au16Data[0x22] = 0x280C;
647 eeprom.m_au16Data[0x23] = 0x00C8;
648 eeprom.m_au16Data[0x24] = 0x00C8;
649 eeprom.m_au16Data[0x2F] = 0x0602;
650 updateChecksum();
651 };
652
653 /**
654 * Compute the checksum as required by E1000 and store it
655 * in the last word.
656 */
657 void updateChecksum()
658 {
659 uint16_t u16Checksum = 0;
660
661 for (int i = 0; i < eeprom.SIZE-1; i++)
662 u16Checksum += eeprom.m_au16Data[i];
663 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
664 };
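
    /* Worked invariant (illustrative): updateChecksum() stores
     *     0xBABA - sum(words[0..SIZE-2])
     * in the last word, so the 16-bit sum over all eeprom.SIZE words comes
     * out to 0xBABA, which is the value guest drivers check when validating
     * the EEPROM contents.
     */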
665
666 /**
667 * The first 6 bytes of the EEPROM contain the MAC address.
668 *
669 * @param pMac Where to store the MAC address of E1000.
670 */
671 void getMac(PRTMAC pMac)
672 {
673 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
674 };
675
676 uint32_t read()
677 {
678 return eeprom.read();
679 }
680
681 void write(uint32_t u32Wires)
682 {
683 eeprom.write(u32Wires);
684 }
685
686 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
687 {
688 return eeprom.readWord(u32Addr, pu16Value);
689 }
690
691 int load(PSSMHANDLE pSSM)
692 {
693 return eeprom.load(pSSM);
694 }
695
696 void save(PSSMHANDLE pSSM)
697 {
698 eeprom.save(pSSM);
699 }
700#endif /* IN_RING3 */
701};
702
703
704#define E1K_SPEC_VLAN(s) (s & 0xFFF)
705#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
706#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
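
/* Example (illustrative TCI value): for u16Special == 0x6123
 *     E1K_SPEC_VLAN(0x6123) == 0x123  (VLAN ID, bits 11:0)
 *     E1K_SPEC_CFI(0x6123)  == 0      (canonical format indicator, bit 12)
 *     E1K_SPEC_PRI(0x6123)  == 3      (priority, bits 15:13)
 */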
707
708struct E1kRxDStatus
709{
710 /** @name Descriptor Status field (3.2.3.1)
711 * @{ */
712 unsigned fDD : 1; /**< Descriptor Done. */
713 unsigned fEOP : 1; /**< End of packet. */
714 unsigned fIXSM : 1; /**< Ignore checksum indication. */
715 unsigned fVP : 1; /**< VLAN, matches VET. */
716 unsigned : 1;
717 unsigned fTCPCS : 1; /**< TCP checksum calculated on the packet. */
718 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
719 unsigned fPIF : 1; /**< Passed in-exact filter */
720 /** @} */
721 /** @name Descriptor Errors field (3.2.3.2)
722 * (Only valid when fEOP and fDD are set.)
723 * @{ */
724 unsigned fCE : 1; /**< CRC or alignment error. */
725 unsigned : 4; /**< Reserved, varies with different models... */
726 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
727 unsigned fIPE : 1; /**< IP Checksum error. */
728 unsigned fRXE : 1; /**< RX Data error. */
729 /** @} */
730 /** @name Descriptor Special field (3.2.3.3)
731 * @{ */
732 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
733 /** @} */
734};
735typedef struct E1kRxDStatus E1KRXDST;
736
737struct E1kRxDesc_st
738{
739 uint64_t u64BufAddr; /**< Address of data buffer */
740 uint16_t u16Length; /**< Length of data in buffer */
741 uint16_t u16Checksum; /**< Packet checksum */
742 E1KRXDST status;
743};
744typedef struct E1kRxDesc_st E1KRXDESC;
745AssertCompileSize(E1KRXDESC, 16);
746
747#define E1K_DTYP_LEGACY -1
748#define E1K_DTYP_CONTEXT 0
749#define E1K_DTYP_DATA 1
750
751struct E1kTDLegacy
752{
753 uint64_t u64BufAddr; /**< Address of data buffer */
754 struct TDLCmd_st
755 {
756 unsigned u16Length : 16;
757 unsigned u8CSO : 8;
758 /* CMD field : 8 */
759 unsigned fEOP : 1;
760 unsigned fIFCS : 1;
761 unsigned fIC : 1;
762 unsigned fRS : 1;
763 unsigned fRPS : 1;
764 unsigned fDEXT : 1;
765 unsigned fVLE : 1;
766 unsigned fIDE : 1;
767 } cmd;
768 struct TDLDw3_st
769 {
770 /* STA field */
771 unsigned fDD : 1;
772 unsigned fEC : 1;
773 unsigned fLC : 1;
774 unsigned fTURSV : 1;
775 /* RSV field */
776 unsigned u4RSV : 4;
777 /* CSS field */
778 unsigned u8CSS : 8;
779 /* Special field*/
780 unsigned u16Special: 16;
781 } dw3;
782};
783
784/**
785 * TCP/IP Context Transmit Descriptor, section 3.3.6.
786 */
787struct E1kTDContext
788{
789 struct CheckSum_st
790 {
791 /** TSE: Header start. !TSE: Checksum start. */
792 unsigned u8CSS : 8;
793 /** Checksum offset - where to store it. */
794 unsigned u8CSO : 8;
795 /** Checksum ending (inclusive) offset, 0 = end of packet. */
796 unsigned u16CSE : 16;
797 } ip;
798 struct CheckSum_st tu;
799 struct TDCDw2_st
800 {
801 /** TSE: The total number of payload bytes for this context. Sans header. */
802 unsigned u20PAYLEN : 20;
803 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
804 unsigned u4DTYP : 4;
805 /** TUCMD field, 8 bits
806 * @{ */
807 /** TSE: TCP (set) or UDP (clear). */
808 unsigned fTCP : 1;
809 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
810 * the IP header. Does not affect the checksumming.
811 * @remarks 82544GC/EI interprets a cleared field differently. */
812 unsigned fIP : 1;
813 /** TSE: TCP segmentation enable. When clear, the context describes plain checksum offloading. */
814 unsigned fTSE : 1;
815 /** Report status (only applies to dw3.fDD for here). */
816 unsigned fRS : 1;
817 /** Reserved, MBZ. */
818 unsigned fRSV1 : 1;
819 /** Descriptor extension, must be set for this descriptor type. */
820 unsigned fDEXT : 1;
821 /** Reserved, MBZ. */
822 unsigned fRSV2 : 1;
823 /** Interrupt delay enable. */
824 unsigned fIDE : 1;
825 /** @} */
826 } dw2;
827 struct TDCDw3_st
828 {
829 /** Descriptor Done. */
830 unsigned fDD : 1;
831 /** Reserved, MBZ. */
832 unsigned u7RSV : 7;
833 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
834 unsigned u8HDRLEN : 8;
835 /** TSO: Maximum segment size. */
836 unsigned u16MSS : 16;
837 } dw3;
838};
839typedef struct E1kTDContext E1KTXCTX;
840
841/**
842 * TCP/IP Data Transmit Descriptor, section 3.3.7.
843 */
844struct E1kTDData
845{
846 uint64_t u64BufAddr; /**< Address of data buffer */
847 struct TDDCmd_st
848 {
849 /** The total length of data pointed to by this descriptor. */
850 unsigned u20DTALEN : 20;
851 /** The descriptor type - E1K_DTYP_DATA (1). */
852 unsigned u4DTYP : 4;
853 /** @name DCMD field, 8 bits (3.3.7.1).
854 * @{ */
855 /** End of packet. Note TSCTFC update. */
856 unsigned fEOP : 1;
857 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
858 unsigned fIFCS : 1;
859 /** Use the TSE context when set and the normal when clear. */
860 unsigned fTSE : 1;
861 /** Report status (dw3.STA). */
862 unsigned fRS : 1;
863 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
864 unsigned fRPS : 1;
865 /** Descriptor extension, must be set for this descriptor type. */
866 unsigned fDEXT : 1;
867 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
868 * Insert dw3.SPECIAL after ethernet header. */
869 unsigned fVLE : 1;
870 /** Interrupt delay enable. */
871 unsigned fIDE : 1;
872 /** @} */
873 } cmd;
874 struct TDDDw3_st
875 {
876 /** @name STA field (3.3.7.2)
877 * @{ */
878 unsigned fDD : 1; /**< Descriptor done. */
879 unsigned fEC : 1; /**< Excess collision. */
880 unsigned fLC : 1; /**< Late collision. */
881 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
882 unsigned fTURSV : 1;
883 /** @} */
884 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
885 /** @name POPTS (Packet Option) field (3.3.7.3)
886 * @{ */
887 unsigned fIXSM : 1; /**< Insert IP checksum. */
888 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
889 unsigned u6RSV : 6; /**< Reserved, MBZ. */
890 /** @} */
891 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
892 * Requires fEOP, fVLE and CTRL.VME to be set.
893 * @{ */
894 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
895 /** @} */
896 } dw3;
897};
898typedef struct E1kTDData E1KTXDAT;
899
900union E1kTxDesc
901{
902 struct E1kTDLegacy legacy;
903 struct E1kTDContext context;
904 struct E1kTDData data;
905};
906typedef union E1kTxDesc E1KTXDESC;
907AssertCompileSize(E1KTXDESC, 16);
908
909#define RA_CTL_AS 0x0003
910#define RA_CTL_AV 0x8000
911
912union E1kRecAddr
913{
914 uint32_t au32[32];
915 struct RAArray
916 {
917 uint8_t addr[6];
918 uint16_t ctl;
919 } array[16];
920};
921typedef struct E1kRecAddr::RAArray E1KRAELEM;
922typedef union E1kRecAddr E1KRA;
923AssertCompileSize(E1KRA, 8*16);
924
925#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
926#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
927#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
928#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
929
930/** @todo use+extend RTNETIPV4 */
931struct E1kIpHeader
932{
933 /* type of service / version / header length */
934 uint16_t tos_ver_hl;
935 /* total length */
936 uint16_t total_len;
937 /* identification */
938 uint16_t ident;
939 /* fragment offset field */
940 uint16_t offset;
941 /* time to live / protocol*/
942 uint16_t ttl_proto;
943 /* checksum */
944 uint16_t chksum;
945 /* source IP address */
946 uint32_t src;
947 /* destination IP address */
948 uint32_t dest;
949};
950AssertCompileSize(struct E1kIpHeader, 20);
951
952#define E1K_TCP_FIN UINT16_C(0x01)
953#define E1K_TCP_SYN UINT16_C(0x02)
954#define E1K_TCP_RST UINT16_C(0x04)
955#define E1K_TCP_PSH UINT16_C(0x08)
956#define E1K_TCP_ACK UINT16_C(0x10)
957#define E1K_TCP_URG UINT16_C(0x20)
958#define E1K_TCP_ECE UINT16_C(0x40)
959#define E1K_TCP_CWR UINT16_C(0x80)
960#define E1K_TCP_FLAGS UINT16_C(0x3f)
961
962/** @todo use+extend RTNETTCP */
963struct E1kTcpHeader
964{
965 uint16_t src;
966 uint16_t dest;
967 uint32_t seqno;
968 uint32_t ackno;
969 uint16_t hdrlen_flags;
970 uint16_t wnd;
971 uint16_t chksum;
972 uint16_t urgp;
973};
974AssertCompileSize(struct E1kTcpHeader, 20);
975
976
977#ifdef E1K_WITH_TXD_CACHE
978/** The current Saved state version. */
979# define E1K_SAVEDSTATE_VERSION 4
980/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
981# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
982#else /* !E1K_WITH_TXD_CACHE */
983/** The current Saved state version. */
984# define E1K_SAVEDSTATE_VERSION 3
985#endif /* !E1K_WITH_TXD_CACHE */
986/** Saved state version for VirtualBox 4.1 and earlier.
987 * These did not include VLAN tag fields. */
988#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
989/** Saved state version for VirtualBox 3.0 and earlier.
990 * This did not include the configuration part nor the E1kEEPROM. */
991#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
992
993/**
994 * Device state structure.
995 *
996 * Holds the current state of the device.
997 *
998 * @implements PDMINETWORKDOWN
999 * @implements PDMINETWORKCONFIG
1000 * @implements PDMILEDPORTS
1001 */
1002struct E1kState_st
1003{
1004 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1005 PDMIBASE IBase;
1006 PDMINETWORKDOWN INetworkDown;
1007 PDMINETWORKCONFIG INetworkConfig;
1008 PDMILEDPORTS ILeds; /**< LED interface */
1009 R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
1010 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1011
1012 PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
1013 R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
1014 R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
1015 PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
1016 PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
1017 PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
1018 PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
1019 PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
1020 PTMTIMERR3 pTXDTimerR3; /**< Transmit Delay Timer - R3. */
1021 PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
1022 PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
1023 /** The scatter / gather buffer used for the current outgoing packet - R3. */
1024 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1025
1026 PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
1027 R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
1028 R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
1029 PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
1030 PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
1031 PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
1032 PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
1033 PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
1034 PTMTIMERR0 pTXDTimerR0; /**< Transmit Delay Timer - R0. */
1035 PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
1036 PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
1037 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1038 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1039
1040 PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
1041 RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
1042 RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
1043 PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
1044 PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
1045 PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
1046 PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
1047 PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
1048 PTMTIMERRC pTXDTimerRC; /**< Transmit Delay Timer - RC. */
1049 PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
1050 PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
1051 /** The scatter / gather buffer used for the current outgoing packet - RC. */
1052 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1053 RTRCPTR RCPtrAlignment;
1054
1055#if HC_ARCH_BITS != 32
1056 uint32_t Alignment1;
1057#endif
1058 PDMCRITSECT cs; /**< Critical section - what is it protecting? */
1059 PDMCRITSECT csRx; /**< RX Critical section. */
1060#ifdef E1K_WITH_TX_CS
1061 PDMCRITSECT csTx; /**< TX Critical section. */
1062#endif /* E1K_WITH_TX_CS */
1063 /** Base address of memory-mapped registers. */
1064 RTGCPHYS addrMMReg;
1065 /** MAC address obtained from the configuration. */
1066 RTMAC macConfigured;
1067 /** Base port of I/O space region. */
1068 RTIOPORT IOPortBase;
1069 /** EMT: */
1070 PCIDEVICE pciDevice;
1071 /** EMT: Last time the interrupt was acknowledged. */
1072 uint64_t u64AckedAt;
1073 /** All: Used for eliminating spurious interrupts. */
1074 bool fIntRaised;
1075 /** EMT: false if the cable is disconnected by the GUI. */
1076 bool fCableConnected;
1077 /** EMT: */
1078 bool fR0Enabled;
1079 /** EMT: */
1080 bool fRCEnabled;
1081 /** EMT: Compute Ethernet CRC for RX packets. */
1082 bool fEthernetCRC;
1083 /** All: throttle interrupts. */
1084 bool fItrEnabled;
1085 /** All: throttle RX interrupts. */
1086 bool fItrRxEnabled;
1087
1088 bool Alignment2;
1089 /** Link up delay (in milliseconds). */
1090 uint32_t cMsLinkUpDelay;
1091
1092 /** All: Device register storage. */
1093 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1094 /** TX/RX: Status LED. */
1095 PDMLED led;
1096 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1097 uint32_t u32PktNo;
1098
1099 /** EMT: Offset of the register to be read via IO. */
1100 uint32_t uSelectedReg;
1101 /** EMT: Multicast Table Array. */
1102 uint32_t auMTA[128];
1103 /** EMT: Receive Address registers. */
1104 E1KRA aRecAddr;
1105 /** EMT: VLAN filter table array. */
1106 uint32_t auVFTA[128];
1107 /** EMT: Receive buffer size. */
1108 uint16_t u16RxBSize;
1109 /** EMT: Locked state -- no state alteration possible. */
1110 bool fLocked;
1111 /** EMT: */
1112 bool fDelayInts;
1113 /** All: */
1114 bool fIntMaskUsed;
1115
1116 /** N/A: */
1117 bool volatile fMaybeOutOfSpace;
1118 /** EMT: Gets signalled when more RX descriptors become available. */
1119 RTSEMEVENT hEventMoreRxDescAvail;
1120#ifdef E1K_WITH_RXD_CACHE
1121 /** RX: Fetched RX descriptors. */
1122 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1123 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1124 /** RX: Actual number of fetched RX descriptors. */
1125 uint32_t nRxDFetched;
1126 /** RX: Index in cache of RX descriptor being processed. */
1127 uint32_t iRxDCurrent;
1128#endif /* E1K_WITH_RXD_CACHE */
1129
1130 /** TX: Context used for TCP segmentation packets. */
1131 E1KTXCTX contextTSE;
1132 /** TX: Context used for ordinary packets. */
1133 E1KTXCTX contextNormal;
1134#ifdef E1K_WITH_TXD_CACHE
1135 /** TX: Fetched TX descriptors. */
1136 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1137 /** TX: Actual number of fetched TX descriptors. */
1138 uint8_t nTxDFetched;
1139 /** TX: Index in cache of TX descriptor being processed. */
1140 uint8_t iTxDCurrent;
1141 /** TX: Will this frame be sent as GSO. */
1142 bool fGSO;
1143 /** Alignment padding. */
1144 bool fReserved;
1145 /** TX: Number of bytes in next packet. */
1146 uint32_t cbTxAlloc;
1147
1148#endif /* E1K_WITH_TXD_CACHE */
1149 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1150 * applicable to the current TSE mode. */
1151 PDMNETWORKGSO GsoCtx;
1152 /** Scratch space for holding the loopback / fallback scatter / gather
1153 * descriptor. */
1154 union
1155 {
1156 PDMSCATTERGATHER Sg;
1157 uint8_t padding[8 * sizeof(RTUINTPTR)];
1158 } uTxFallback;
1159 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1160 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1161 /** TX: Number of bytes assembled in TX packet buffer. */
1162 uint16_t u16TxPktLen;
1163 /** TX: When false, forces segmentation in E1000 instead of sending frames as GSO. */
1164 bool fGSOEnabled;
1165 /** TX: IP checksum has to be inserted if true. */
1166 bool fIPcsum;
1167 /** TX: TCP/UDP checksum has to be inserted if true. */
1168 bool fTCPcsum;
1169 /** TX: VLAN tag has to be inserted if true. */
1170 bool fVTag;
1171 /** TX: TCI part of VLAN tag to be inserted. */
1172 uint16_t u16VTagTCI;
1173 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1174 uint32_t u32PayRemain;
1175 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1176 uint16_t u16HdrRemain;
1177 /** TX TSE fallback: Flags from template header. */
1178 uint16_t u16SavedFlags;
1179 /** TX TSE fallback: Partial checksum from template header. */
1180 uint32_t u32SavedCsum;
1181 /** ?: Emulated controller type. */
1182 E1KCHIP eChip;
1183
1184 /** EMT: EEPROM emulation */
1185 E1kEEPROM eeprom;
1186 /** EMT: Physical interface emulation. */
1187 PHY phy;
1188
1189#if 0
1190 /** Alignment padding. */
1191 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1192#endif
1193
1194 STAMCOUNTER StatReceiveBytes;
1195 STAMCOUNTER StatTransmitBytes;
1196#if defined(VBOX_WITH_STATISTICS)
1197 STAMPROFILEADV StatMMIOReadRZ;
1198 STAMPROFILEADV StatMMIOReadR3;
1199 STAMPROFILEADV StatMMIOWriteRZ;
1200 STAMPROFILEADV StatMMIOWriteR3;
1201 STAMPROFILEADV StatEEPROMRead;
1202 STAMPROFILEADV StatEEPROMWrite;
1203 STAMPROFILEADV StatIOReadRZ;
1204 STAMPROFILEADV StatIOReadR3;
1205 STAMPROFILEADV StatIOWriteRZ;
1206 STAMPROFILEADV StatIOWriteR3;
1207 STAMPROFILEADV StatLateIntTimer;
1208 STAMCOUNTER StatLateInts;
1209 STAMCOUNTER StatIntsRaised;
1210 STAMCOUNTER StatIntsPrevented;
1211 STAMPROFILEADV StatReceive;
1212 STAMPROFILEADV StatReceiveCRC;
1213 STAMPROFILEADV StatReceiveFilter;
1214 STAMPROFILEADV StatReceiveStore;
1215 STAMPROFILEADV StatTransmitRZ;
1216 STAMPROFILEADV StatTransmitR3;
1217 STAMPROFILE StatTransmitSendRZ;
1218 STAMPROFILE StatTransmitSendR3;
1219 STAMPROFILE StatRxOverflow;
1220 STAMCOUNTER StatRxOverflowWakeup;
1221 STAMCOUNTER StatTxDescCtxNormal;
1222 STAMCOUNTER StatTxDescCtxTSE;
1223 STAMCOUNTER StatTxDescLegacy;
1224 STAMCOUNTER StatTxDescData;
1225 STAMCOUNTER StatTxDescTSEData;
1226 STAMCOUNTER StatTxPathFallback;
1227 STAMCOUNTER StatTxPathGSO;
1228 STAMCOUNTER StatTxPathRegular;
1229 STAMCOUNTER StatPHYAccesses;
1230 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1231 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1232#endif /* VBOX_WITH_STATISTICS */
1233
1234#ifdef E1K_INT_STATS
1235 /* Internal stats */
1236 uint64_t u64ArmedAt;
1237 uint64_t uStatMaxTxDelay;
1238 uint32_t uStatInt;
1239 uint32_t uStatIntTry;
1240 uint32_t uStatIntLower;
1241 uint32_t uStatIntDly;
1242 int32_t iStatIntLost;
1243 int32_t iStatIntLostOne;
1244 uint32_t uStatDisDly;
1245 uint32_t uStatIntSkip;
1246 uint32_t uStatIntLate;
1247 uint32_t uStatIntMasked;
1248 uint32_t uStatIntEarly;
1249 uint32_t uStatIntRx;
1250 uint32_t uStatIntTx;
1251 uint32_t uStatIntICS;
1252 uint32_t uStatIntRDTR;
1253 uint32_t uStatIntRXDMT0;
1254 uint32_t uStatIntTXQE;
1255 uint32_t uStatTxNoRS;
1256 uint32_t uStatTxIDE;
1257 uint32_t uStatTxDelayed;
1258 uint32_t uStatTxDelayExp;
1259 uint32_t uStatTAD;
1260 uint32_t uStatTID;
1261 uint32_t uStatRAD;
1262 uint32_t uStatRID;
1263 uint32_t uStatRxFrm;
1264 uint32_t uStatTxFrm;
1265 uint32_t uStatDescCtx;
1266 uint32_t uStatDescDat;
1267 uint32_t uStatDescLeg;
1268 uint32_t uStatTx1514;
1269 uint32_t uStatTx2962;
1270 uint32_t uStatTx4410;
1271 uint32_t uStatTx5858;
1272 uint32_t uStatTx7306;
1273 uint32_t uStatTx8754;
1274 uint32_t uStatTx16384;
1275 uint32_t uStatTx32768;
1276 uint32_t uStatTxLarge;
1277 uint32_t uStatAlign;
1278#endif /* E1K_INT_STATS */
1279};
1280typedef struct E1kState_st E1KSTATE;
1281/** Pointer to the E1000 device state. */
1282typedef E1KSTATE *PE1KSTATE;
1283
1284#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1285
1286/* Forward declarations ******************************************************/
1287static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread);
1288
1289static int e1kRegReadUnimplemented (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1290static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1291static int e1kRegReadAutoClear (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1292static int e1kRegReadDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1293static int e1kRegWriteDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1294#if 0 /* unused */
1295static int e1kRegReadCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1296#endif
1297static int e1kRegWriteCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1298static int e1kRegReadEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1299static int e1kRegWriteEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1300static int e1kRegWriteEERD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1301static int e1kRegWriteMDIC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1302static int e1kRegReadICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1303static int e1kRegWriteICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1304static int e1kRegWriteICS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1305static int e1kRegWriteIMS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1306static int e1kRegWriteIMC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1307static int e1kRegWriteRCTL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1308static int e1kRegWritePBA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1309static int e1kRegWriteRDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1310static int e1kRegWriteRDTR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1311static int e1kRegWriteTDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1312static int e1kRegReadMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1313static int e1kRegWriteMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1314static int e1kRegReadRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1315static int e1kRegWriteRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1316static int e1kRegReadVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1317static int e1kRegWriteVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1318
1319/**
1320 * Register map table.
1321 *
1322 * Override pfnRead and pfnWrite to get register-specific behavior.
1323 */
1324static const struct E1kRegMap_st
1325{
1326 /** Register offset in the register space. */
1327 uint32_t offset;
1328 /** Size in bytes. Registers of size > 4 are in fact tables. */
1329 uint32_t size;
1330 /** Readable bits. */
1331 uint32_t readable;
1332 /** Writable bits. */
1333 uint32_t writable;
1334 /** Read callback. */
1335 int (*pfnRead)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1336 /** Write callback. */
1337 int (*pfnWrite)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1338 /** Abbreviated name. */
1339 const char *abbrev;
1340 /** Full name. */
1341 const char *name;
1342} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1343{
1344 /* offset size read mask write mask read callback write callback abbrev full name */
1345 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1346 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1347 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1348 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1349 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1350 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1351 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1352 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1353 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1354 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1355 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1356 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1357 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1358 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1359 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1360 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1361 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1362 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1363 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1364 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1365 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1366 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1367 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1368 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1369 { 0x00e00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LEDCTL" , "LED Control" },
1370 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1371 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1372 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1373 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1374 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1375 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1376 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1377 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1378 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1379 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1380 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1381 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1382 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1383 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1384 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1385 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1386 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1387 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1388 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1389 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1390 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1391 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1392 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1393 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1394 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1395 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1396 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1397 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1398 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1399 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1400 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1401 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1402 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1403 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1404 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1405 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1406 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1407 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1408 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1409 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1410 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1411 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1412 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1413 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1414 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1415 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1416 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1417 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1418 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1419 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1420 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1421 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1422 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1423 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1424 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1425 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1426 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1427 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1428 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1429 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1430 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1431 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1432 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1433 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1434 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1435 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1436 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1437 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1438 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1439 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1440 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1441 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1442 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1443 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1444 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1445 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1446 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1447 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1448 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1449 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1450 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1451 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1452 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1453 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1454 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1455 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1456 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1457 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1458 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1459 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1460 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1461 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1462 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1463 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1464 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1465 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1466 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1467 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1468 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1469 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1470 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1471 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1472 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1473 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1474 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1475 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1476 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1477 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1478 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1479 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1480};
1481
1482#ifdef LOG_ENABLED
1483
1484/**
1485 * Convert U32 value to hex string. Masked-out nibbles are replaced with dots.
1486 *
1487 * @remarks The mask has nibble (hex digit), not bit, granularity (e.g. 000000FF).
1488 *
1489 * @returns The buffer.
1490 *
1491 * @param u32 The word to convert into string.
1492 * @param mask Selects which bytes to convert.
1493 * @param buf Where to put the result.
1494 */
1495static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1496{
1497 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1498 {
1499 if (mask & 0xF)
1500 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1501 else
1502 *ptr = '.';
1503 }
1504 buf[8] = 0;
1505 return buf;
1506}
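
/*
 * Illustrative sketch (not part of the device code): how e1kU32toHex renders
 * a partially masked value. With a nibble-granular mask of 0x0000FFFF only
 * the four least significant hex digits are printed:
 *
 *   char szBuf[9];
 *   e1kU32toHex(0x12345678, 0x0000FFFF, szBuf);  // szBuf == "....5678"
 *   e1kU32toHex(0x0000ABCD, 0xFFFFFFFF, szBuf);  // szBuf == "0000ABCD"
 */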
1507
1508/**
1509 * Returns timer name for debug purposes.
1510 *
1511 * @returns The timer name.
1512 *
1513 * @param pThis The device state structure.
1514 * @param pTimer The timer to get the name for.
1515 */
1516DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, PTMTIMER pTimer)
1517{
1518 if (pTimer == pThis->CTX_SUFF(pTIDTimer))
1519 return "TID";
1520 if (pTimer == pThis->CTX_SUFF(pTADTimer))
1521 return "TAD";
1522 if (pTimer == pThis->CTX_SUFF(pRIDTimer))
1523 return "RID";
1524 if (pTimer == pThis->CTX_SUFF(pRADTimer))
1525 return "RAD";
1526 if (pTimer == pThis->CTX_SUFF(pIntTimer))
1527 return "Int";
1528 if (pTimer == pThis->CTX_SUFF(pTXDTimer))
1529 return "TXD";
1530 if (pTimer == pThis->CTX_SUFF(pLUTimer))
1531 return "LinkUp";
1532 return "unknown";
1533}
1534
1535#endif /* LOG_ENABLED */
1536
1537/**
1538 * Arm a timer.
1539 *
1540 * @param pThis Pointer to the device state structure.
1541 * @param pTimer Pointer to the timer.
1542 * @param uExpireIn Expiration interval in microseconds.
1543 */
1544DECLINLINE(void) e1kArmTimer(PE1KSTATE pThis, PTMTIMER pTimer, uint32_t uExpireIn)
1545{
1546 if (pThis->fLocked)
1547 return;
1548
1549 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1550 pThis->szPrf, e1kGetTimerName(pThis, pTimer), uExpireIn));
1551 TMTimerSetMicro(pTimer, uExpireIn);
1552}
1553
1554/**
1555 * Cancel a timer.
1556 *
1557 * @param pThis Pointer to the device state structure.
1558 * @param pTimer Pointer to the timer.
1559 */
1560DECLINLINE(void) e1kCancelTimer(PE1KSTATE pThis, PTMTIMER pTimer)
1561{
1562 E1kLog2(("%s Stopping %s timer...\n",
1563 pThis->szPrf, e1kGetTimerName(pThis, pTimer)));
1564 int rc = TMTimerStop(pTimer);
1565 if (RT_FAILURE(rc))
1566 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1567 pThis->szPrf, rc));
1568 RT_NOREF1(pThis);
1569}
1570
1571#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1572#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1573
1574#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1575#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1576#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
1577
1578#ifndef E1K_WITH_TX_CS
1579# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1580# define e1kCsTxLeave(ps) do { } while (0)
1581#else /* E1K_WITH_TX_CS */
1582# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1583# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1584#endif /* E1K_WITH_TX_CS */
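
/*
 * Illustrative sketch (not part of the device code): the usual pattern for the
 * critical-section wrappers above. In the R0/RC contexts the rcBusy status is
 * returned when the section cannot be entered without blocking, which is why
 * MMIO handlers typically pass VINF_IOM_R3_MMIO_READ/WRITE to get rescheduled
 * to ring-3:
 *
 *   int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
 *   if (RT_LIKELY(rc == VINF_SUCCESS))
 *   {
 *       // ... touch TX state protected by csTx ...
 *       e1kCsTxLeave(pThis);
 *   }
 */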
1585
1586#ifdef IN_RING3
1587
1588/**
1589 * Wakeup the RX thread.
1590 */
1591static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1592{
1593 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
1594 if ( pThis->fMaybeOutOfSpace
1595 && pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1596 {
1597 STAM_COUNTER_INC(&pThis->StatRxOverflowWakeup);
1598 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1599 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
1600 }
1601}
1602
1603/**
1604 * Hardware reset. Revert all registers to initial values.
1605 *
1606 * @param pThis The device state structure.
1607 */
1608static void e1kHardReset(PE1KSTATE pThis)
1609{
1610 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1611 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1612 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1613#ifdef E1K_INIT_RA0
1614 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1615 sizeof(pThis->macConfigured.au8));
1616 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1617#endif /* E1K_INIT_RA0 */
1618 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1619 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1620 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1621 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1622 Assert(GET_BITS(RCTL, BSIZE) == 0);
1623 pThis->u16RxBSize = 2048;
1624
1625 /* Reset promiscuous mode */
1626 if (pThis->pDrvR3)
1627 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, false);
1628
1629#ifdef E1K_WITH_TXD_CACHE
1630 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1631 if (RT_LIKELY(rc == VINF_SUCCESS))
1632 {
1633 pThis->nTxDFetched = 0;
1634 pThis->iTxDCurrent = 0;
1635 pThis->fGSO = false;
1636 pThis->cbTxAlloc = 0;
1637 e1kCsTxLeave(pThis);
1638 }
1639#endif /* E1K_WITH_TXD_CACHE */
1640#ifdef E1K_WITH_RXD_CACHE
1641 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1642 {
1643 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1644 e1kCsRxLeave(pThis);
1645 }
1646#endif /* E1K_WITH_RXD_CACHE */
1647}
1648
1649#endif /* IN_RING3 */
1650
1651/**
1652 * Compute Internet checksum.
1653 *
1654 * @remarks Refer to http://www.netfor2.com/checksum.html for a short intro.
1655 *
1656 * @param pvBuf The buffer to checksum.
1657 * @param cb The size of the buffer in bytes.
1660 *
1661 * @return The 1's complement of the 1's complement sum.
1662 *
1663 * @thread E1000_TX
1664 */
1665static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1666{
1667 uint32_t csum = 0;
1668 uint16_t *pu16 = (uint16_t *)pvBuf;
1669
1670 while (cb > 1)
1671 {
1672 csum += *pu16++;
1673 cb -= 2;
1674 }
1675 if (cb)
1676 csum += *(uint8_t*)pu16;
1677 while (csum >> 16)
1678 csum = (csum >> 16) + (csum & 0xFFFF);
1679 return ~csum;
1680}
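
/*
 * Illustrative sketch (not part of the device code): a tiny worked example of
 * the folding done by e1kCSum16. For the two 16-bit words 0x4500 and 0x003c
 * the running sum is 0x453c, nothing carries out of bit 15, and the result is
 * the bitwise complement of the sum:
 *
 *   uint16_t const au16[2] = { 0x4500, 0x003c };            // host byte order
 *   uint16_t const uCSum   = e1kCSum16(au16, sizeof(au16)); // == 0xBAC3
 */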
1681
1682/**
1683 * Dump a packet to debug log.
1684 *
1685 * @param pThis The device state structure.
1686 * @param cpPacket The packet.
1687 * @param cb The size of the packet.
1688 * @param pszText A string denoting direction of packet transfer.
1689 * @thread E1000_TX
1690 */
1691DECLINLINE(void) e1kPacketDump(PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1692{
1693#ifdef DEBUG
1694 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1695 {
1696 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1697 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1698 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1699 {
1700 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1701 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1702 if (*(cpPacket+14+6) == 0x6)
1703 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1704 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1705 }
1706 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1707 {
1708 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1709 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1710 if (*(cpPacket+14+6) == 0x6)
1711 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1712 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1713 }
1714 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1715 e1kCsLeave(pThis);
1716 }
1717#else
1718 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1719 {
1720 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1721 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1722 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1723 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1724 else
1725 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1726 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1727 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1728 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1729 e1kCsLeave(pThis);
1730 }
1731 RT_NOREF2(cb, pszText);
1732#endif
1733}
1734
1735/**
1736 * Determine the type of transmit descriptor.
1737 *
1738 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1739 *
1740 * @param pDesc Pointer to descriptor union.
1741 * @thread E1000_TX
1742 */
1743DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1744{
1745 if (pDesc->legacy.cmd.fDEXT)
1746 return pDesc->context.dw2.u4DTYP;
1747 return E1K_DTYP_LEGACY;
1748}
1749
1750/**
1751 * Dump receive descriptor to debug log.
1752 *
1753 * @param pThis The device state structure.
1754 * @param pDesc Pointer to the descriptor.
1755 * @thread E1000_RX
1756 */
1757static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
1758{
1759 RT_NOREF2(pThis, pDesc);
1760 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
1761 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1762 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1763 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1764 pDesc->status.fPIF ? "PIF" : "pif",
1765 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1766 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1767 pDesc->status.fVP ? "VP" : "vp",
1768 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1769 pDesc->status.fEOP ? "EOP" : "eop",
1770 pDesc->status.fDD ? "DD" : "dd",
1771 pDesc->status.fRXE ? "RXE" : "rxe",
1772 pDesc->status.fIPE ? "IPE" : "ipe",
1773 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1774 pDesc->status.fCE ? "CE" : "ce",
1775 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1776 E1K_SPEC_VLAN(pDesc->status.u16Special),
1777 E1K_SPEC_PRI(pDesc->status.u16Special)));
1778}
1779
1780/**
1781 * Dump transmit descriptor to debug log.
1782 *
1783 * @param pThis The device state structure.
1784 * @param pDesc Pointer to descriptor union.
1785 * @param pszDir A string denoting direction of descriptor transfer
1786 * @thread E1000_TX
1787 */
1788static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
1789 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1790{
1791 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
1792
1793 /*
1794 * Unfortunately we cannot use our format handler here, as we want R0
1795 * logging as well.
1796 */
1797 switch (e1kGetDescType(pDesc))
1798 {
1799 case E1K_DTYP_CONTEXT:
1800 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
1801 pThis->szPrf, pszDir, pszDir));
1802 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1803 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1804 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1805 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1806 pDesc->context.dw2.fIDE ? " IDE":"",
1807 pDesc->context.dw2.fRS ? " RS" :"",
1808 pDesc->context.dw2.fTSE ? " TSE":"",
1809 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1810 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1811 pDesc->context.dw2.u20PAYLEN,
1812 pDesc->context.dw3.u8HDRLEN,
1813 pDesc->context.dw3.u16MSS,
1814 pDesc->context.dw3.fDD?"DD":""));
1815 break;
1816 case E1K_DTYP_DATA:
1817 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1818 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
1819 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1820 pDesc->data.u64BufAddr,
1821 pDesc->data.cmd.u20DTALEN));
1822 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1823 pDesc->data.cmd.fIDE ? " IDE" :"",
1824 pDesc->data.cmd.fVLE ? " VLE" :"",
1825 pDesc->data.cmd.fRPS ? " RPS" :"",
1826 pDesc->data.cmd.fRS ? " RS" :"",
1827 pDesc->data.cmd.fTSE ? " TSE" :"",
1828 pDesc->data.cmd.fIFCS? " IFCS":"",
1829 pDesc->data.cmd.fEOP ? " EOP" :"",
1830 pDesc->data.dw3.fDD ? " DD" :"",
1831 pDesc->data.dw3.fEC ? " EC" :"",
1832 pDesc->data.dw3.fLC ? " LC" :"",
1833 pDesc->data.dw3.fTXSM? " TXSM":"",
1834 pDesc->data.dw3.fIXSM? " IXSM":"",
1835 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1836 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1837 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1838 break;
1839 case E1K_DTYP_LEGACY:
1840 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1841 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
1842 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1843 pDesc->data.u64BufAddr,
1844 pDesc->legacy.cmd.u16Length));
1845 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1846 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1847 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1848 pDesc->legacy.cmd.fRPS ? " RPS" :"",
1849 pDesc->legacy.cmd.fRS ? " RS" :"",
1850 pDesc->legacy.cmd.fIC ? " IC" :"",
1851 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1852 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1853 pDesc->legacy.dw3.fDD ? " DD" :"",
1854 pDesc->legacy.dw3.fEC ? " EC" :"",
1855 pDesc->legacy.dw3.fLC ? " LC" :"",
1856 pDesc->legacy.cmd.u8CSO,
1857 pDesc->legacy.dw3.u8CSS,
1858 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1859 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1860 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1861 break;
1862 default:
1863 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
1864 pThis->szPrf, pszDir, pszDir));
1865 break;
1866 }
1867}
1868
1869/**
1870 * Raise an interrupt later.
1871 *
1872 * @param pThis The device state structure.
1873 */
1874inline void e1kPostponeInterrupt(PE1KSTATE pThis, uint64_t uNanoseconds)
1875{
1876 if (!TMTimerIsActive(pThis->CTX_SUFF(pIntTimer)))
1877 TMTimerSetNano(pThis->CTX_SUFF(pIntTimer), uNanoseconds);
1878}
1879
1880/**
1881 * Raise interrupt if not masked.
1882 *
1883 * @param pThis The device state structure.
1884 */
1885static int e1kRaiseInterrupt(PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause = 0)
1886{
1887 int rc = e1kCsEnter(pThis, rcBusy);
1888 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1889 return rc;
1890
1891 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
1892 ICR |= u32IntCause;
1893 if (ICR & IMS)
1894 {
1895 if (pThis->fIntRaised)
1896 {
1897 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
1898 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
1899 pThis->szPrf, ICR & IMS));
1900 }
1901 else
1902 {
1903 uint64_t tsNow = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
1904 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
1905 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
1906 {
1907 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
1908 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
1909 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
1910 e1kPostponeInterrupt(pThis, ITR * 256);
1911 }
1912 else
1913 {
1914
1915 /* Since we are delivering the interrupt now
1916 * there is no need to do it later -- stop the timer.
1917 */
1918 TMTimerStop(pThis->CTX_SUFF(pIntTimer));
1919 E1K_INC_ISTAT_CNT(pThis->uStatInt);
1920 STAM_COUNTER_INC(&pThis->StatIntsRaised);
1921 /* Got at least one unmasked interrupt cause */
1922 pThis->fIntRaised = true;
1923 /* Raise(1) INTA(0) */
1924 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
1925 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 1);
1926 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
1927 pThis->szPrf, ICR & IMS));
1928 }
1929 }
1930 }
1931 else
1932 {
1933 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
1934 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
1935 pThis->szPrf, ICR, IMS));
1936 }
1937 e1kCsLeave(pThis);
1938 return VINF_SUCCESS;
1939}
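
/*
 * Illustrative sketch (not part of the device code): the ITR throttling math
 * used above. ITR is in units of 256 ns, so a guest programming ITR = 651
 * requests at least 651 * 256 = 166656 ns (about 166.7 us) between interrupts,
 * i.e. roughly 6000 interrupts per second at most. If less time than that has
 * passed since the last acknowledged interrupt, delivery is postponed via
 * e1kPostponeInterrupt() for another ITR * 256 ns.
 */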
1940
1941/**
1942 * Compute the physical address of the descriptor.
1943 *
1944 * @returns the physical address of the descriptor.
1945 *
1946 * @param baseHigh High-order 32 bits of descriptor table address.
1947 * @param baseLow Low-order 32 bits of descriptor table address.
1948 * @param idxDesc The descriptor index in the table.
1949 */
1950DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
1951{
1952 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
1953 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
1954}
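
/*
 * Illustrative sketch (not part of the device code): with RDBAH=0x00000001,
 * RDBAL=0xF0000000 and 16-byte descriptors, descriptor 3 lives at
 * base + 3 * sizeof(E1KRXDESC):
 *
 *   RTGCPHYS GCPhys = e1kDescAddr(0x00000001, 0xF0000000, 3);
 *   // GCPhys == UINT64_C(0x00000001F0000030)
 */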
1955
1956/**
1957 * Advance the head pointer of the receive descriptor queue.
1958 *
1959 * @remarks RDH always points to the next available RX descriptor.
1960 *
1961 * @param pThis The device state structure.
1962 */
1963DECLINLINE(void) e1kAdvanceRDH(PE1KSTATE pThis)
1964{
1965 Assert(e1kCsRxIsOwner(pThis));
1966 //e1kCsEnter(pThis, RT_SRC_POS);
1967 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
1968 RDH = 0;
1969 /*
1970 * Compute current receive queue length and fire RXDMT0 interrupt
1971 * if we are low on receive buffers
1972 */
1973 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
1974 /*
1975 * The minimum threshold is controlled by RDMTS bits of RCTL:
1976 * 00 = 1/2 of RDLEN
1977 * 01 = 1/4 of RDLEN
1978 * 10 = 1/8 of RDLEN
1979 * 11 = reserved
1980 */
1981 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
1982 if (uRQueueLen <= uMinRQThreshold)
1983 {
1984 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
1985 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
1986 pThis->szPrf, RDH, RDT, uRQueueLen, uMinRQThreshold));
1987 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
1988 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXDMT0);
1989 }
1990 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
1991 pThis->szPrf, RDH, RDT, uRQueueLen));
1992 //e1kCsLeave(pThis);
1993}
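
/*
 * Illustrative sketch (not part of the device code): how the RDMTS threshold
 * above works out in practice. With RDLEN = 4096 (256 descriptors of 16 bytes
 * each) and RCTL.RDMTS = 01b, RXDMT0 is signalled once 1/4 of the ring or less
 * is still owned by hardware:
 *
 *   cDescs     = RDLEN / sizeof(E1KRXDESC);   // 4096 / 16 = 256
 *   uThreshold = cDescs / (2 << 1);           // 256 / 4   = 64
 *   // e1kAdvanceRDH raises ICR_RXDMT0 when uRQueueLen <= 64
 */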
1994
1995#ifdef E1K_WITH_RXD_CACHE
1996/**
1997 * Return the number of RX descriptors that belong to the hardware.
1998 *
1999 * @returns the number of available descriptors in RX ring.
2000 * @param pThis The device state structure.
2001 * @thread ???
2002 */
2003DECLINLINE(uint32_t) e1kGetRxLen(PE1KSTATE pThis)
2004{
2005 /*
2006 * Make sure RDT won't change during computation. EMT may modify RDT at
2007 * any moment.
2008 */
2009 uint32_t rdt = RDT;
2010 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
2011}
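
/*
 * Illustrative sketch (not part of the device code): the wrap-around case
 * handled by e1kGetRxLen. With a 256-descriptor ring (RDLEN = 4096), RDH = 250
 * and RDT = 10, the hardware still owns the 6 descriptors up to the end of the
 * ring plus the 10 at its start:
 *
 *   len = (RDH > RDT ? RDLEN / sizeof(E1KRXDESC) : 0) + RDT - RDH;
 *   //  = 256 + 10 - 250 = 16
 */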
2012
2013DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
2014{
2015 return pThis->nRxDFetched > pThis->iRxDCurrent ?
2016 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
2017}
2018
2019DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
2020{
2021 return pThis->iRxDCurrent >= pThis->nRxDFetched;
2022}
2023
2024/**
2025 * Load receive descriptors from guest memory. The caller must hold the Rx
2026 * critical section.
2027 *
2028 * We need two physical reads in case the tail wrapped around the end of the
2029 * RX descriptor ring.
2030 *
2031 * @returns the actual number of descriptors fetched.
2032 * @param pThis The device state structure.
2035 * @thread EMT, RX
2036 */
2037DECLINLINE(unsigned) e1kRxDPrefetch(PE1KSTATE pThis)
2038{
2039 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
2040 unsigned nDescsAvailable = e1kGetRxLen(pThis) - e1kRxDInCache(pThis);
2041 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
2042 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
2043 Assert(nDescsTotal != 0);
2044 if (nDescsTotal == 0)
2045 return 0;
2046 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pThis)) % nDescsTotal;
2047 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
2048 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
2049 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
2050 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
2051 nFirstNotLoaded, nDescsInSingleRead));
2052 if (nDescsToFetch == 0)
2053 return 0;
2054 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
2055 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2056 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
2057 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
2058 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
2059 // unsigned i, j;
2060 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
2061 // {
2062 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
2063 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2064 // }
2065 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
2066 pThis->szPrf, nDescsInSingleRead,
2067 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
2068 nFirstNotLoaded, RDLEN, RDH, RDT));
2069 if (nDescsToFetch > nDescsInSingleRead)
2070 {
2071 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2072 ((uint64_t)RDBAH << 32) + RDBAL,
2073 pFirstEmptyDesc + nDescsInSingleRead,
2074 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
2075 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
2076 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
2077 // {
2078 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
2079 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2080 // }
2081 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
2082 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
2083 RDBAH, RDBAL));
2084 }
2085 pThis->nRxDFetched += nDescsToFetch;
2086 return nDescsToFetch;
2087}
2088
2089/**
2090 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2091 * RX ring if the cache is empty.
2092 *
2093 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2094 * go out of sync with RDH which will cause trouble when EMT checks if the
2095 * cache is empty to do pre-fetch, see @bugref{6217}.
2096 *
2097 * @param pThis The device state structure.
2098 * @thread RX
2099 */
2100DECLINLINE(E1KRXDESC*) e1kRxDGet(PE1KSTATE pThis)
2101{
2102 Assert(e1kCsRxIsOwner(pThis));
2103 /* Check the cache first. */
2104 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2105 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2106 /* Cache is empty, reset it and check if we can fetch more. */
2107 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2108 if (e1kRxDPrefetch(pThis))
2109 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2110 /* Out of Rx descriptors. */
2111 return NULL;
2112}
2113
2114/**
2115 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2116 * pointer. The descriptor gets written back to the RXD ring.
2117 *
2118 * @param pThis The device state structure.
2119 * @param pDesc The descriptor being "returned" to the RX ring.
2120 * @thread RX
2121 */
2122DECLINLINE(void) e1kRxDPut(PE1KSTATE pThis, E1KRXDESC* pDesc)
2123{
2124 Assert(e1kCsRxIsOwner(pThis));
2125 pThis->iRxDCurrent++;
2126 // Assert(pDesc >= pThis->aRxDescriptors);
2127 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2128 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2129 // uint32_t rdh = RDH;
2130 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2131 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2132 e1kDescAddr(RDBAH, RDBAL, RDH),
2133 pDesc, sizeof(E1KRXDESC));
2134 e1kAdvanceRDH(pThis);
2135 e1kPrintRDesc(pThis, pDesc);
2136}
2137
2138/**
2139 * Store a fragment of a received packet at the specified address.
2140 *
2141 * @param pThis The device state structure.
2142 * @param pDesc The next available RX descriptor.
2143 * @param pvBuf The fragment.
2144 * @param cb The size of the fragment.
2145 */
2146static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2147{
2148 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2149 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2150 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2151 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2152 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2153 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2154}
2155
2156#else /* !E1K_WITH_RXD_CACHE */
2157
2158/**
2159 * Store a fragment of a received packet that fits into the next available RX
2160 * buffer.
2161 *
2162 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2163 *
2164 * @param pThis The device state structure.
2165 * @param pDesc The next available RX descriptor.
2166 * @param pvBuf The fragment.
2167 * @param cb The size of the fragment.
2168 */
2169static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2170{
2171 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2172 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2173 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2174 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2175 /* Write back the descriptor */
2176 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2177 e1kPrintRDesc(pThis, pDesc);
2178 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2179 /* Advance head */
2180 e1kAdvanceRDH(pThis);
2181 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2182 if (pDesc->status.fEOP)
2183 {
2184 /* Complete packet has been stored -- it is time to let the guest know. */
2185#ifdef E1K_USE_RX_TIMERS
2186 if (RDTR)
2187 {
2188 /* Arm the timer to fire in RDTR usec (discard .024) */
2189 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2190 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2191 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2192 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2193 }
2194 else
2195 {
2196#endif
2197 /* 0 delay means immediate interrupt */
2198 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2199 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2200#ifdef E1K_USE_RX_TIMERS
2201 }
2202#endif
2203 }
2204 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2205}
2206#endif /* !E1K_WITH_RXD_CACHE */
2207
2208/**
2209 * Returns true if it is a broadcast packet.
2210 *
2211 * @returns true if destination address indicates broadcast.
2212 * @param pvBuf The ethernet packet.
2213 */
2214DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2215{
2216 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2217 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2218}
2219
2220/**
2221 * Returns true if it is a multicast packet.
2222 *
2223 * @remarks returns true for broadcast packets as well.
2224 * @returns true if destination address indicates multicast.
2225 * @param pvBuf The ethernet packet.
2226 */
2227DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2228{
2229 return (*(char*)pvBuf) & 1;
2230}
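
/*
 * Illustrative sketch (not part of the device code): the multicast test above
 * only looks at the I/G bit, i.e. the least significant bit of the first
 * destination octet, so broadcast frames count as multicast too:
 *
 *   uint8_t const abMDns[6]  = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB };
 *   uint8_t const abBcast[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
 *   // e1kIsMulticast(abMDns)  -> true,  e1kIsBroadcast(abMDns)  -> false
 *   // e1kIsMulticast(abBcast) -> true,  e1kIsBroadcast(abBcast) -> true
 */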
2231
2232/**
2233 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2234 *
2235 * @remarks We emulate checksum offloading for major packet types only.
2236 *
2237 * @returns VBox status code.
2238 * @param pThis The device state structure.
2239 * @param pFrame The available data.
2240 * @param cb Number of bytes available in the buffer.
2241 * @param pStatus Bit fields containing status info.
2242 */
2243static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2244{
2245 /** @todo
2246 * It is not safe to bypass checksum verification for packets coming
2247 * from the real wire. We are currently unable to tell where packets
2248 * originate, so we tell the driver to ignore our checksum flags
2249 * and do the verification in software.
2250 */
2251#if 0
2252 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2253
2254 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2255
2256 switch (uEtherType)
2257 {
2258 case 0x800: /* IPv4 */
2259 {
2260 pStatus->fIXSM = false;
2261 pStatus->fIPCS = true;
2262 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2263 /* TCP/UDP checksum offloading works with TCP and UDP only */
2264 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2265 break;
2266 }
2267 case 0x86DD: /* IPv6 */
2268 pStatus->fIXSM = false;
2269 pStatus->fIPCS = false;
2270 pStatus->fTCPCS = true;
2271 break;
2272 default: /* ARP, VLAN, etc. */
2273 pStatus->fIXSM = true;
2274 break;
2275 }
2276#else
2277 pStatus->fIXSM = true;
2278 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2279#endif
2280 return VINF_SUCCESS;
2281}
2282
2283/**
2284 * Pad and store received packet.
2285 *
2286 * @remarks Make sure that the packet appears to the upper layer as one coming
2287 * from real Ethernet: pad it and insert the FCS.
2288 *
2289 * @returns VBox status code.
2290 * @param pThis The device state structure.
2291 * @param pvBuf The available data.
2292 * @param cb Number of bytes available in the buffer.
2293 * @param status Bit fields containing status info.
2294 */
2295static int e1kHandleRxPacket(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2296{
2297#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2298 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2299 uint8_t *ptr = rxPacket;
2300
2301 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2302 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2303 return rc;
2304
2305 if (cb > 70) /* unqualified guess */
2306 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2307
2308 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2309 Assert(cb > 16);
2310 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2311 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2312 if (status.fVP)
2313 {
2314 /* VLAN packet -- strip VLAN tag in VLAN mode */
2315 if ((CTRL & CTRL_VME) && cb > 16)
2316 {
2317 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2318 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2319 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2320 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2321 cb -= 4;
2322 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2323 pThis->szPrf, status.u16Special, cb));
2324 }
2325 else
2326 status.fVP = false; /* Set VP only if we stripped the tag */
2327 }
2328 else
2329 memcpy(rxPacket, pvBuf, cb);
2330 /* Pad short packets */
2331 if (cb < 60)
2332 {
2333 memset(rxPacket + cb, 0, 60 - cb);
2334 cb = 60;
2335 }
2336 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2337 {
2338 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2339 /*
2340 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2341 * is ignored by most of drivers we may as well save us the trouble
2342 * of calculating it (see EthernetCRC CFGM parameter).
2343 */
2344 if (pThis->fEthernetCRC)
2345 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2346 cb += sizeof(uint32_t);
2347 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2348 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2349 }
2350 /* Compute checksum of complete packet */
2351 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2352 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2353
2354 /* Update stats */
2355 E1K_INC_CNT32(GPRC);
2356 if (e1kIsBroadcast(pvBuf))
2357 E1K_INC_CNT32(BPRC);
2358 else if (e1kIsMulticast(pvBuf))
2359 E1K_INC_CNT32(MPRC);
2360 /* Update octet receive counter */
2361 E1K_ADD_CNT64(GORCL, GORCH, cb);
2362 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2363 if (cb == 64)
2364 E1K_INC_CNT32(PRC64);
2365 else if (cb < 128)
2366 E1K_INC_CNT32(PRC127);
2367 else if (cb < 256)
2368 E1K_INC_CNT32(PRC255);
2369 else if (cb < 512)
2370 E1K_INC_CNT32(PRC511);
2371 else if (cb < 1024)
2372 E1K_INC_CNT32(PRC1023);
2373 else
2374 E1K_INC_CNT32(PRC1522);
2375
2376 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2377
2378# ifdef E1K_WITH_RXD_CACHE
2379 while (cb > 0)
2380 {
2381 E1KRXDESC *pDesc = e1kRxDGet(pThis);
2382
2383 if (pDesc == NULL)
2384 {
2385 E1kLog(("%s Out of receive buffers, dropping the packet "
2386 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2387 pThis->szPrf, cb, e1kRxDInCache(pThis), RDH, RDT));
2388 break;
2389 }
2390# else /* !E1K_WITH_RXD_CACHE */
2391 if (RDH == RDT)
2392 {
2393 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2394 pThis->szPrf));
2395 }
2396 /* Store the packet to receive buffers */
2397 while (RDH != RDT)
2398 {
2399 /* Load the descriptor pointed by head */
2400 E1KRXDESC desc, *pDesc = &desc;
2401 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2402 &desc, sizeof(desc));
2403# endif /* !E1K_WITH_RXD_CACHE */
2404 if (pDesc->u64BufAddr)
2405 {
2406 /* Update descriptor */
2407 pDesc->status = status;
2408 pDesc->u16Checksum = checksum;
2409 pDesc->status.fDD = true;
2410
2411 /*
2412 * We need to leave Rx critical section here or we risk deadlocking
2413 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2414 * page or has an access handler associated with it.
2415 * Note that it is safe to leave the critical section here since
2416 * e1kRegWriteRDT() never modifies RDH. It never touches already
2417 * fetched RxD cache entries either.
2418 */
2419 if (cb > pThis->u16RxBSize)
2420 {
2421 pDesc->status.fEOP = false;
2422 e1kCsRxLeave(pThis);
2423 e1kStoreRxFragment(pThis, pDesc, ptr, pThis->u16RxBSize);
2424 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2425 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2426 return rc;
2427 ptr += pThis->u16RxBSize;
2428 cb -= pThis->u16RxBSize;
2429 }
2430 else
2431 {
2432 pDesc->status.fEOP = true;
2433 e1kCsRxLeave(pThis);
2434 e1kStoreRxFragment(pThis, pDesc, ptr, cb);
2435# ifdef E1K_WITH_RXD_CACHE
2436 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2437 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2438 return rc;
2439 cb = 0;
2440# else /* !E1K_WITH_RXD_CACHE */
2441 pThis->led.Actual.s.fReading = 0;
2442 return VINF_SUCCESS;
2443# endif /* !E1K_WITH_RXD_CACHE */
2444 }
2445 /*
2446 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2447 * is not defined.
2448 */
2449 }
2450# ifdef E1K_WITH_RXD_CACHE
2451 /* Write back the descriptor. */
2452 pDesc->status.fDD = true;
2453 e1kRxDPut(pThis, pDesc);
2454# else /* !E1K_WITH_RXD_CACHE */
2455 else
2456 {
2457 /* Write back the descriptor. */
2458 pDesc->status.fDD = true;
2459 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2460 e1kDescAddr(RDBAH, RDBAL, RDH),
2461 pDesc, sizeof(E1KRXDESC));
2462 e1kAdvanceRDH(pThis);
2463 }
2464# endif /* !E1K_WITH_RXD_CACHE */
2465 }
2466
2467 if (cb > 0)
2468 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2469
2470 pThis->led.Actual.s.fReading = 0;
2471
2472 e1kCsRxLeave(pThis);
2473# ifdef E1K_WITH_RXD_CACHE
2474 /* Complete packet has been stored -- it is time to let the guest know. */
2475# ifdef E1K_USE_RX_TIMERS
2476 if (RDTR)
2477 {
2478 /* Arm the timer to fire in RDTR usec (discard .024) */
2479 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2480 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2481 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2482 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2483 }
2484 else
2485 {
2486# endif /* E1K_USE_RX_TIMERS */
2487 /* 0 delay means immediate interrupt */
2488 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2489 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2490# ifdef E1K_USE_RX_TIMERS
2491 }
2492# endif /* E1K_USE_RX_TIMERS */
2493# endif /* E1K_WITH_RXD_CACHE */
2494
2495 return VINF_SUCCESS;
2496#else /* !IN_RING3 */
2497 RT_NOREF_PV(pThis); RT_NOREF_PV(pvBuf); RT_NOREF_PV(cb); RT_NOREF_PV(status);
2498 return VERR_INTERNAL_ERROR_2;
2499#endif /* !IN_RING3 */
2500}
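
/*
 * Illustrative sketch (not part of the device code): the VLAN tag stripping
 * performed in e1kHandleRxPacket above (when CTRL.VME is set and the frame is
 * tagged) assumes the usual 802.1Q layout, with the 4-byte tag sitting between
 * the source address and the original EtherType:
 *
 *   bytes  0..11 : destination + source MAC  -> copied unchanged
 *   bytes 12..13 : TPID 0x8100               -> dropped
 *   bytes 14..15 : TCI (PRI/CFI/VLAN id)     -> stored in status.u16Special
 *   bytes 16..   : EtherType + payload       -> shifted down by 4 bytes
 */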
2501
2502
2503/**
2504 * Bring the link up after the configured delay, 5 seconds by default.
2505 *
2506 * @param pThis The device state structure.
2507 * @thread any
2508 */
2509DECLINLINE(void) e1kBringLinkUpDelayed(PE1KSTATE pThis)
2510{
2511 E1kLog(("%s Will bring up the link in %d seconds...\n",
2512 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2513 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), pThis->cMsLinkUpDelay * 1000);
2514}
2515
2516#ifdef IN_RING3
2517/**
2518 * Bring up the link immediately.
2519 *
2520 * @param pThis The device state structure.
2521 */
2522DECLINLINE(void) e1kR3LinkUp(PE1KSTATE pThis)
2523{
2524 E1kLog(("%s Link is up\n", pThis->szPrf));
2525 STATUS |= STATUS_LU;
2526 Phy::setLinkStatus(&pThis->phy, true);
2527 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2528 if (pThis->pDrvR3)
2529 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_UP);
2530}
2531
2532/**
2533 * Bring down the link immediately.
2534 *
2535 * @param pThis The device state structure.
2536 */
2537DECLINLINE(void) e1kR3LinkDown(PE1KSTATE pThis)
2538{
2539 E1kLog(("%s Link is down\n", pThis->szPrf));
2540 STATUS &= ~STATUS_LU;
2541 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2542 if (pThis->pDrvR3)
2543 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2544}
2545
2546/**
2547 * Bring down the link temporarily.
2548 *
2549 * @param pThis The device state structure.
2550 */
2551DECLINLINE(void) e1kR3LinkDownTemp(PE1KSTATE pThis)
2552{
2553 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2554 STATUS &= ~STATUS_LU;
2555 Phy::setLinkStatus(&pThis->phy, false);
2556 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2557 /*
2558 * Notifying the associated driver that the link went down (even temporarily)
2559 * seems to be the right thing, but it was not done before. This may cause
2560 * a regression if the driver does not expect the link to go down as a result
2561 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2562 * of code notified the driver that the link was up! See @bugref{7057}.
2563 */
2564 if (pThis->pDrvR3)
2565 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2566 e1kBringLinkUpDelayed(pThis);
2567}
2568#endif /* IN_RING3 */
2569
2570#if 0 /* unused */
2571/**
2572 * Read handler for Device Status register.
2573 *
2574 * Get the link status from PHY.
2575 *
2576 * @returns VBox status code.
2577 *
2578 * @param pThis The device state structure.
2579 * @param offset Register offset in memory-mapped frame.
2580 * @param index Register index in register array.
2581 * @param mask Used to implement partial reads (8 and 16-bit).
2582 */
2583static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2584{
2585 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2586 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2587 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2588 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2589 {
2590 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2591 if (Phy::readMDIO(&pThis->phy))
2592 *pu32Value = CTRL | CTRL_MDIO;
2593 else
2594 *pu32Value = CTRL & ~CTRL_MDIO;
2595 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2596 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2597 }
2598 else
2599 {
2600 /* MDIO pin is used for output, ignore it */
2601 *pu32Value = CTRL;
2602 }
2603 return VINF_SUCCESS;
2604}
2605#endif /* unused */
2606
2607/**
2608 * Write handler for Device Control register.
2609 *
2610 * Handles reset.
2611 *
2612 * @param pThis The device state structure.
2613 * @param offset Register offset in memory-mapped frame.
2614 * @param index Register index in register array.
2615 * @param value The value to store.
2616 * @param mask Used to implement partial writes (8 and 16-bit).
2617 * @thread EMT
2618 */
2619static int e1kRegWriteCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2620{
2621 int rc = VINF_SUCCESS;
2622
2623 if (value & CTRL_RESET)
2624 { /* RST */
2625#ifndef IN_RING3
2626 return VINF_IOM_R3_MMIO_WRITE;
2627#else
2628 e1kHardReset(pThis);
2629#endif
2630 }
2631 else
2632 {
2633 if ( (value & CTRL_SLU)
2634 && pThis->fCableConnected
2635 && !(STATUS & STATUS_LU))
2636 {
2637 /* The driver indicates that we should bring up the link */
2638 /* Do so in 5 seconds (by default). */
2639 e1kBringLinkUpDelayed(pThis);
2640 /*
2641 * Change the status (but not PHY status) anyway as Windows expects
2642 * it for 82543GC.
2643 */
2644 STATUS |= STATUS_LU;
2645 }
2646 if (value & CTRL_VME)
2647 {
2648 E1kLog(("%s VLAN Mode Enabled\n", pThis->szPrf));
2649 }
2650 E1kLog(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2651 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2652 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2653 if (value & CTRL_MDC)
2654 {
2655 if (value & CTRL_MDIO_DIR)
2656 {
2657 E1kLog(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2658 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2659 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO));
2660 }
2661 else
2662 {
2663 if (Phy::readMDIO(&pThis->phy))
2664 value |= CTRL_MDIO;
2665 else
2666 value &= ~CTRL_MDIO;
2667 E1kLog(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n",
2668 pThis->szPrf, !!(value & CTRL_MDIO)));
2669 }
2670 }
2671 rc = e1kRegWriteDefault(pThis, offset, index, value);
2672 }
2673
2674 return rc;
2675}
2676
2677/**
2678 * Write handler for EEPROM/Flash Control/Data register.
2679 *
2680 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2681 *
2682 * @param pThis The device state structure.
2683 * @param offset Register offset in memory-mapped frame.
2684 * @param index Register index in register array.
2685 * @param value The value to store.
2686 * @param mask Used to implement partial writes (8 and 16-bit).
2687 * @thread EMT
2688 */
2689static int e1kRegWriteEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2690{
2691#ifdef IN_RING3
2692 /* So far we are concerned with lower byte only */
2693 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2694 {
2695 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2696 /* Note: 82543GC does not need to request EEPROM access */
2697 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
2698 pThis->eeprom.write(value & EECD_EE_WIRES);
2699 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
2700 }
2701 if (value & EECD_EE_REQ)
2702 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2703 else
2704 EECD &= ~EECD_EE_GNT;
2705 //e1kRegWriteDefault(pThis, offset, index, value );
2706
2707 return VINF_SUCCESS;
2708#else /* !IN_RING3 */
2709 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
2710 return VINF_IOM_R3_MMIO_WRITE;
2711#endif /* !IN_RING3 */
2712}
2713
2714/**
2715 * Read handler for EEPROM/Flash Control/Data register.
2716 *
2717 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2718 *
2719 * @returns VBox status code.
2720 *
2721 * @param pThis The device state structure.
2722 * @param offset Register offset in memory-mapped frame.
2723 * @param index Register index in register array.
2724 * @param pu32Value Where to store the read value.
2725 * @thread EMT
2726 */
2727static int e1kRegReadEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2728{
2729#ifdef IN_RING3
2730 uint32_t value;
2731 int rc = e1kRegReadDefault(pThis, offset, index, &value);
2732 if (RT_SUCCESS(rc))
2733 {
2734 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2735 {
2736 /* Note: 82543GC does not need to request EEPROM access */
2737 /* Access to EEPROM granted -- get 4-wire bits from EEPROM device */
2738 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2739 value |= pThis->eeprom.read();
2740 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2741 }
2742 *pu32Value = value;
2743 }
2744
2745 return rc;
2746#else /* !IN_RING3 */
2747 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
2748 return VINF_IOM_R3_MMIO_READ;
2749#endif /* !IN_RING3 */
2750}
2751
2752/**
2753 * Write handler for EEPROM Read register.
2754 *
2755 * Handles EEPROM word access requests, reads EEPROM and stores the result
2756 * into DATA field.
2757 *
2758 * @param pThis The device state structure.
2759 * @param offset Register offset in memory-mapped frame.
2760 * @param index Register index in register array.
2761 * @param value The value to store.
2762 * @param mask Used to implement partial writes (8 and 16-bit).
2763 * @thread EMT
2764 */
2765static int e1kRegWriteEERD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2766{
2767#ifdef IN_RING3
2768 /* Make use of 'writable' and 'readable' masks. */
2769 e1kRegWriteDefault(pThis, offset, index, value);
2770 /* DONE and DATA are set only if read was triggered by START. */
2771 if (value & EERD_START)
2772 {
2773 uint16_t tmp;
2774 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2775 if (pThis->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2776 SET_BITS(EERD, DATA, tmp);
2777 EERD |= EERD_DONE;
2778 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2779 }
2780
2781 return VINF_SUCCESS;
2782#else /* !IN_RING3 */
2783 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
2784 return VINF_IOM_R3_MMIO_WRITE;
2785#endif /* !IN_RING3 */
2786}
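
/*
 * Illustrative sketch (not part of the device code): the register-level EEPROM
 * read transaction that e1kRegWriteEERD implements. A guest driver would do
 * roughly the following (the shift names are illustrative, not necessarily the
 * ones used elsewhere in this file):
 *
 *   write EERD = EERD_START | (wordIndex << EERD_ADDR_SHIFT);
 *   do
 *       value = read EERD;
 *   while (!(value & EERD_DONE));
 *   data = (value >> EERD_DATA_SHIFT) & 0xFFFF;   // 16-bit EEPROM word
 *
 * In this emulation the read completes synchronously, so DONE and DATA are
 * already valid on the first poll.
 */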
2787
2788
2789/**
2790 * Write handler for MDI Control register.
2791 *
2792 * Handles PHY read/write requests; forwards requests to internal PHY device.
2793 *
2794 * @param pThis The device state structure.
2795 * @param offset Register offset in memory-mapped frame.
2796 * @param index Register index in register array.
2797 * @param value The value to store.
2798 * @param mask Used to implement partial writes (8 and 16-bit).
2799 * @thread EMT
2800 */
2801static int e1kRegWriteMDIC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2802{
2803 if (value & MDIC_INT_EN)
2804 {
2805 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2806 pThis->szPrf));
2807 }
2808 else if (value & MDIC_READY)
2809 {
2810 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
2811 pThis->szPrf));
2812 }
2813 else if (GET_BITS_V(value, MDIC, PHY) != 1)
2814 {
2815 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
2816 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
2817 /*
2818 * Some drivers scan the MDIO bus for a PHY. We can work with these
2819 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
2820 * at the requested address, see @bugref{7346}.
2821 */
2822 MDIC = MDIC_READY | MDIC_ERROR;
2823 }
2824 else
2825 {
2826 /* Store the value */
2827 e1kRegWriteDefault(pThis, offset, index, value);
2828 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
2829 /* Forward op to PHY */
2830 if (value & MDIC_OP_READ)
2831 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG)));
2832 else
2833 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
2834 /* Let software know that we are done */
2835 MDIC |= MDIC_READY;
2836 }
2837
2838 return VINF_SUCCESS;
2839}
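
/*
 * Illustrative sketch (not part of the device code): the MDIC handshake the
 * handler above emulates. A driver reading PHY register 1 (status) of the PHY
 * at address 1 would do roughly the following (shift names are illustrative):
 *
 *   write MDIC = MDIC_OP_READ | (1 << MDIC_PHY_SHIFT) | (1 << MDIC_REG_SHIFT);
 *   do
 *       value = read MDIC;
 *   while (!(value & MDIC_READY));
 *   phyStatus = value & MDIC_DATA_MASK;
 *
 * Accesses to any PHY address other than 1 complete with MDIC_READY and
 * MDIC_ERROR set, which lets drivers that scan the MDIO bus move on.
 */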
2840
2841/**
2842 * Write handler for Interrupt Cause Read register.
2843 *
2844 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2845 *
2846 * @param pThis The device state structure.
2847 * @param offset Register offset in memory-mapped frame.
2848 * @param index Register index in register array.
2849 * @param value The value to store.
2850 * @param mask Used to implement partial writes (8 and 16-bit).
2851 * @thread EMT
2852 */
2853static int e1kRegWriteICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2854{
2855 ICR &= ~value;
2856
2857 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
2858 return VINF_SUCCESS;
2859}
2860
2861/**
2862 * Read handler for Interrupt Cause Read register.
2863 *
2864 * Reading this register acknowledges all interrupts.
2865 *
2866 * @returns VBox status code.
2867 *
2868 * @param pThis The device state structure.
2869 * @param offset Register offset in memory-mapped frame.
2870 * @param index Register index in register array.
2871 * @param pu32Value Where to store the read value.
2872 * @thread EMT
2873 */
2874static int e1kRegReadICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2875{
2876 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
2877 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2878 return rc;
2879
2880 uint32_t value = 0;
2881 rc = e1kRegReadDefault(pThis, offset, index, &value);
2882 if (RT_SUCCESS(rc))
2883 {
2884 /* Do not return masked bits. */
2885 value &= IMS;
2886 if (value)
2887 {
2888 /*
2889 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
2890 * with disabled interrupts.
2891 */
2892 //if (IMS)
2893 if (1)
2894 {
2895 /*
2896 * Interrupts were enabled -- we are supposedly at the very
2897 * beginning of interrupt handler
2898 */
2899 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
2900 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
2901 /* Clear all pending interrupts */
2902 ICR = 0;
2903 pThis->fIntRaised = false;
2904 /* Lower(0) INTA(0) */
2905 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
2906
2907 pThis->u64AckedAt = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
2908 if (pThis->fIntMaskUsed)
2909 pThis->fDelayInts = true;
2910 }
2911 else
2912 {
2913 /*
 2914 * Interrupts are disabled -- in Windows guests the ICR read is done
 2915 * just before re-enabling interrupts.
2916 */
2917 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
2918 }
2919 }
2920 *pu32Value = value;
2921 }
2922 e1kCsLeave(pThis);
2923
2924 return rc;
2925}
2926
2927/**
2928 * Write handler for Interrupt Cause Set register.
2929 *
2930 * Bits corresponding to 1s in 'value' will be set in ICR register.
2931 *
2932 * @param pThis The device state structure.
2933 * @param offset Register offset in memory-mapped frame.
2934 * @param index Register index in register array.
2935 * @param value The value to store.
2936 * @param mask Used to implement partial writes (8 and 16-bit).
2937 * @thread EMT
2938 */
2939static int e1kRegWriteICS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2940{
2941 RT_NOREF_PV(offset); RT_NOREF_PV(index);
2942 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
2943 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
2944}
2945
2946/**
2947 * Write handler for Interrupt Mask Set register.
2948 *
2949 * Will trigger pending interrupts.
2950 *
2951 * @param pThis The device state structure.
2952 * @param offset Register offset in memory-mapped frame.
2953 * @param index Register index in register array.
2954 * @param value The value to store.
2955 * @param mask Used to implement partial writes (8 and 16-bit).
2956 * @thread EMT
2957 */
2958static int e1kRegWriteIMS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2959{
2960 RT_NOREF_PV(offset); RT_NOREF_PV(index);
2961
2962 IMS |= value;
2963 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
2964 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
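    /*
     * Raising with a zero cause adds no new bits to ICR; it merely re-evaluates
     * the causes already pending in ICR against the freshly unmasked bits and
     * asserts the interrupt line if any of them are now enabled.
     */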
2965 e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, 0);
2966
2967 return VINF_SUCCESS;
2968}
2969
2970/**
2971 * Write handler for Interrupt Mask Clear register.
2972 *
2973 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
2974 *
2975 * @param pThis The device state structure.
2976 * @param offset Register offset in memory-mapped frame.
2977 * @param index Register index in register array.
2978 * @param value The value to store.
2979 * @param mask Used to implement partial writes (8 and 16-bit).
2980 * @thread EMT
2981 */
2982static int e1kRegWriteIMC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2983{
2984 RT_NOREF_PV(offset); RT_NOREF_PV(index);
2985
2986 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
2987 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2988 return rc;
2989 if (pThis->fIntRaised)
2990 {
2991 /*
2992 * Technically we should reset fIntRaised in ICR read handler, but it will cause
2993 * Windows to freeze since it may receive an interrupt while still in the very beginning
2994 * of interrupt handler.
2995 */
2996 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
2997 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
2998 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
2999 /* Lower(0) INTA(0) */
3000 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
3001 pThis->fIntRaised = false;
3002 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3003 }
3004 IMS &= ~value;
3005 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3006 e1kCsLeave(pThis);
3007
3008 return VINF_SUCCESS;
3009}
3010
3011/**
3012 * Write handler for Receive Control register.
3013 *
3014 * @param pThis The device state structure.
3015 * @param offset Register offset in memory-mapped frame.
3016 * @param index Register index in register array.
3017 * @param value The value to store.
3018 * @param mask Used to implement partial writes (8 and 16-bit).
3019 * @thread EMT
3020 */
3021static int e1kRegWriteRCTL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3022{
3023 /* Update promiscuous mode */
 3024 bool fBecomePromiscuous = !!(value & (RCTL_UPE | RCTL_MPE));
 3025 if (fBecomePromiscuous != !!(RCTL & (RCTL_UPE | RCTL_MPE)))
3026 {
3027 /* Promiscuity has changed, pass the knowledge on. */
3028#ifndef IN_RING3
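        /* The network driver interface is only available in ring-3, so defer this write. */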
3029 return VINF_IOM_R3_MMIO_WRITE;
3030#else
3031 if (pThis->pDrvR3)
 3032 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, fBecomePromiscuous);
3033#endif
3034 }
3035
3036 /* Adjust receive buffer size */
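    /*
     * RCTL.BSIZE selects a base buffer size of 2048, 1024, 512 or 256 bytes;
     * when RCTL.BSEX is set the size is multiplied by 16, yielding 16384, 8192
     * or 4096 bytes for BSIZE 1..3 (BSIZE 0 with BSEX set is reserved by the
     * spec, the formula below would give 32768).
     */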
3037 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3038 if (value & RCTL_BSEX)
3039 cbRxBuf *= 16;
3040 if (cbRxBuf != pThis->u16RxBSize)
3041 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3042 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3043 pThis->u16RxBSize = cbRxBuf;
3044
3045 /* Update the register */
3046 e1kRegWriteDefault(pThis, offset, index, value);
3047
3048 return VINF_SUCCESS;
3049}
3050
3051/**
3052 * Write handler for Packet Buffer Allocation register.
3053 *
3054 * TXA = 64 - RXA.
3055 *
3056 * @param pThis The device state structure.
3057 * @param offset Register offset in memory-mapped frame.
3058 * @param index Register index in register array.
3059 * @param value The value to store.
3060 * @param mask Used to implement partial writes (8 and 16-bit).
3061 * @thread EMT
3062 */
3063static int e1kRegWritePBA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3064{
3065 e1kRegWriteDefault(pThis, offset, index, value);
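    /* The on-chip packet buffer is 64 KB in total; whatever is not allocated to
       the receive area (rxa) is left for the transmit area (txa). */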
3066 PBA_st->txa = 64 - PBA_st->rxa;
3067
3068 return VINF_SUCCESS;
3069}
3070
3071/**
3072 * Write handler for Receive Descriptor Tail register.
3073 *
3074 * @remarks Write into RDT forces switch to HC and signal to
3075 * e1kR3NetworkDown_WaitReceiveAvail().
3076 *
3077 * @returns VBox status code.
3078 *
3079 * @param pThis The device state structure.
3080 * @param offset Register offset in memory-mapped frame.
3081 * @param index Register index in register array.
3082 * @param value The value to store.
3083 * @param mask Used to implement partial writes (8 and 16-bit).
3084 * @thread EMT
3085 */
3086static int e1kRegWriteRDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3087{
3088#ifndef IN_RING3
3089 /* XXX */
3090// return VINF_IOM_R3_MMIO_WRITE;
3091#endif
3092 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3093 if (RT_LIKELY(rc == VINF_SUCCESS))
3094 {
3095 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3096 /*
3097 * Some drivers advance RDT too far, so that it equals RDH. This
3098 * somehow manages to work with real hardware but not with this
3099 * emulated device. We can work with these drivers if we just
3100 * write 1 less when we see a driver writing RDT equal to RDH,
3101 * see @bugref{7346}.
3102 */
3103 if (value == RDH)
3104 {
3105 if (RDH == 0)
3106 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3107 else
3108 value = RDH - 1;
3109 }
3110 rc = e1kRegWriteDefault(pThis, offset, index, value);
3111#ifdef E1K_WITH_RXD_CACHE
3112 /*
3113 * We need to fetch descriptors now as RDT may go whole circle
3114 * before we attempt to store a received packet. For example,
3115 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3116 * size being only 8 descriptors! Note that we fetch descriptors
3117 * only when the cache is empty to reduce the number of memory reads
3118 * in case of frequent RDT writes. Don't fetch anything when the
 3119 * receiver is disabled either, as RDH, RDT and RDLEN can be in some
 3120 * messed-up state.
 3121 * Note that even though the cache may seem empty, meaning that there are
 3122 * no more available descriptors in it, it may still be in use by the RX
 3123 * thread, which has not yet written the last descriptor back but has
 3124 * temporarily released the RX lock in order to write the packet body
 3125 * to the descriptor's buffer. At this point we are still going to prefetch,
 3126 * but it won't actually fetch anything if there are no unused slots in
3127 * our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not
3128 * reset the cache here even if it appears empty. It will be reset at
3129 * a later point in e1kRxDGet().
3130 */
3131 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3132 e1kRxDPrefetch(pThis);
3133#endif /* E1K_WITH_RXD_CACHE */
3134 e1kCsRxLeave(pThis);
3135 if (RT_SUCCESS(rc))
3136 {
3137/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
3138 * without requiring any context switches. We should also check the
3139 * wait condition before bothering to queue the item as we're currently
3140 * queuing thousands of items per second here in a normal transmit
3141 * scenario. Expect performance changes when fixing this! */
3142#ifdef IN_RING3
3143 /* Signal that we have more receive descriptors available. */
3144 e1kWakeupReceive(pThis->CTX_SUFF(pDevIns));
3145#else
3146 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pCanRxQueue));
3147 if (pItem)
3148 PDMQueueInsert(pThis->CTX_SUFF(pCanRxQueue), pItem);
3149#endif
3150 }
3151 }
3152 return rc;
3153}
3154
3155/**
3156 * Write handler for Receive Delay Timer register.
3157 *
3158 * @param pThis The device state structure.
3159 * @param offset Register offset in memory-mapped frame.
3160 * @param index Register index in register array.
3161 * @param value The value to store.
3162 * @param mask Used to implement partial writes (8 and 16-bit).
3163 * @thread EMT
3164 */
3165static int e1kRegWriteRDTR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3166{
3167 e1kRegWriteDefault(pThis, offset, index, value);
3168 if (value & RDTR_FPD)
3169 {
3170 /* Flush requested, cancel both timers and raise interrupt */
3171#ifdef E1K_USE_RX_TIMERS
3172 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3173 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3174#endif
3175 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3176 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3177 }
3178
3179 return VINF_SUCCESS;
3180}
3181
3182DECLINLINE(uint32_t) e1kGetTxLen(PE1KSTATE pThis)
3183{
3184 /**
3185 * Make sure TDT won't change during computation. EMT may modify TDT at
3186 * any moment.
3187 */
3188 uint32_t tdt = TDT;
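    /*
     * The result is the number of descriptors between TDH and TDT, taking ring
     * wrap-around into account: e.g. with an 8-descriptor ring, TDH=6 and TDT=2
     * yield 8 + 2 - 6 = 4 descriptors pending.
     */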
3189 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3190}
3191
3192#ifdef IN_RING3
3193#ifdef E1K_TX_DELAY
3194
3195/**
3196 * Transmit Delay Timer handler.
3197 *
3198 * @remarks We only get here when the timer expires.
3199 *
3200 * @param pDevIns Pointer to device instance structure.
3201 * @param pTimer Pointer to the timer.
 3202 * @param pvUser Pointer to the device state structure.
3203 * @thread EMT
3204 */
3205static DECLCALLBACK(void) e1kTxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3206{
3207 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3208 Assert(PDMCritSectIsOwner(&pThis->csTx));
3209
3210 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3211#ifdef E1K_INT_STATS
3212 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3213 if (u64Elapsed > pThis->uStatMaxTxDelay)
3214 pThis->uStatMaxTxDelay = u64Elapsed;
3215#endif
3216 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
3217 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3218}
3219#endif /* E1K_TX_DELAY */
3220
3221#ifdef E1K_USE_TX_TIMERS
3222
3223/**
3224 * Transmit Interrupt Delay Timer handler.
3225 *
3226 * @remarks We only get here when the timer expires.
3227 *
3228 * @param pDevIns Pointer to device instance structure.
3229 * @param pTimer Pointer to the timer.
 3230 * @param pvUser Pointer to the device state structure.
3231 * @thread EMT
3232 */
3233static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3234{
3235 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3236
3237 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3238 /* Cancel absolute delay timer as we have already got attention */
3239#ifndef E1K_NO_TAD
3240 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
3241#endif /* E1K_NO_TAD */
3242 e1kRaiseInterrupt(pThis, ICR_TXDW);
3243}
3244
3245/**
3246 * Transmit Absolute Delay Timer handler.
3247 *
3248 * @remarks We only get here when the timer expires.
3249 *
3250 * @param pDevIns Pointer to device instance structure.
3251 * @param pTimer Pointer to the timer.
 3252 * @param pvUser Pointer to the device state structure.
3253 * @thread EMT
3254 */
3255static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3256{
3257 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3258
3259 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3260 /* Cancel interrupt delay timer as we have already got attention */
3261 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
3262 e1kRaiseInterrupt(pThis, ICR_TXDW);
3263}
3264
3265#endif /* E1K_USE_TX_TIMERS */
3266#ifdef E1K_USE_RX_TIMERS
3267
3268/**
3269 * Receive Interrupt Delay Timer handler.
3270 *
3271 * @remarks We only get here when the timer expires.
3272 *
3273 * @param pDevIns Pointer to device instance structure.
3274 * @param pTimer Pointer to the timer.
 3275 * @param pvUser Pointer to the device state structure.
3276 * @thread EMT
3277 */
3278static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3279{
3280 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3281
3282 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3283 /* Cancel absolute delay timer as we have already got attention */
3284 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3285 e1kRaiseInterrupt(pThis, ICR_RXT0);
3286}
3287
3288/**
3289 * Receive Absolute Delay Timer handler.
3290 *
3291 * @remarks We only get here when the timer expires.
3292 *
3293 * @param pDevIns Pointer to device instance structure.
3294 * @param pTimer Pointer to the timer.
 3295 * @param pvUser Pointer to the device state structure.
3296 * @thread EMT
3297 */
3298static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3299{
3300 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3301
3302 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3303 /* Cancel interrupt delay timer as we have already got attention */
3304 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3305 e1kRaiseInterrupt(pThis, ICR_RXT0);
3306}
3307
3308#endif /* E1K_USE_RX_TIMERS */
3309
3310/**
3311 * Late Interrupt Timer handler.
3312 *
3313 * @param pDevIns Pointer to device instance structure.
3314 * @param pTimer Pointer to the timer.
 3315 * @param pvUser Pointer to the device state structure.
3316 * @thread EMT
3317 */
3318static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3319{
3320 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3321
3322 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3323 STAM_COUNTER_INC(&pThis->StatLateInts);
3324 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3325#if 0
3326 if (pThis->iStatIntLost > -100)
3327 pThis->iStatIntLost--;
3328#endif
3329 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, 0);
3330 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3331}
3332
3333/**
3334 * Link Up Timer handler.
3335 *
3336 * @param pDevIns Pointer to device instance structure.
3337 * @param pTimer Pointer to the timer.
 3338 * @param pvUser Pointer to the device state structure.
3339 * @thread EMT
3340 */
3341static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3342{
3343 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3344
3345 /*
 3346 * This can happen if we set the link status to down while the link-up timer
 3347 * was already armed (shortly after e1kLoadDone(), or when the cable was
 3348 * disconnected and then reconnected and disconnected again very quickly).
3349 */
3350 if (!pThis->fCableConnected)
3351 return;
3352
3353 e1kR3LinkUp(pThis);
3354}
3355
3356#endif /* IN_RING3 */
3357
3358/**
3359 * Sets up the GSO context according to the TSE new context descriptor.
3360 *
3361 * @param pGso The GSO context to setup.
3362 * @param pCtx The context descriptor.
3363 */
3364DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3365{
3366 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3367
3368 /*
3369 * See if the context descriptor describes something that could be TCP or
3370 * UDP over IPv[46].
3371 */
3372 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3373 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3374 {
3375 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3376 return;
3377 }
3378 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3379 {
3380 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3381 return;
3382 }
3383 if (RT_UNLIKELY( pCtx->dw2.fTCP
3384 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3385 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3386 {
3387 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3388 return;
3389 }
3390
3391 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
3392 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3393 {
3394 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3395 return;
3396 }
3397
3398 /* IPv4 checksum offset. */
3399 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3400 {
3401 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3402 return;
3403 }
3404
3405 /* TCP/UDP checksum offsets. */
3406 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3407 != ( pCtx->dw2.fTCP
3408 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3409 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3410 {
 3411 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3412 return;
3413 }
3414
3415 /*
3416 * Because of internal networking using a 16-bit size field for GSO context
3417 * plus frame, we have to make sure we don't exceed this.
3418 */
3419 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3420 {
3421 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3422 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3423 return;
3424 }
3425
3426 /*
3427 * We're good for now - we'll do more checks when seeing the data.
3428 * So, figure the type of offloading and setup the context.
3429 */
3430 if (pCtx->dw2.fIP)
3431 {
3432 if (pCtx->dw2.fTCP)
3433 {
3434 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3435 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3436 }
3437 else
3438 {
3439 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3440 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3441 }
3442 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3443 * this yet it seems)... */
3444 }
3445 else
3446 {
 3447 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3448 if (pCtx->dw2.fTCP)
3449 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3450 else
3451 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3452 }
3453 pGso->offHdr1 = pCtx->ip.u8CSS;
3454 pGso->offHdr2 = pCtx->tu.u8CSS;
3455 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3456 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3457 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3458 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3459 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3460}
3461
3462/**
3463 * Checks if we can use GSO processing for the current TSE frame.
3464 *
3465 * @param pThis The device state structure.
3466 * @param pGso The GSO context.
3467 * @param pData The first data descriptor of the frame.
3468 * @param pCtx The TSO context descriptor.
3469 */
3470DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3471{
3472 if (!pData->cmd.fTSE)
3473 {
3474 E1kLog2(("e1kCanDoGso: !TSE\n"));
3475 return false;
3476 }
3477 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3478 {
3479 E1kLog(("e1kCanDoGso: VLE\n"));
3480 return false;
3481 }
3482 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3483 {
3484 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3485 return false;
3486 }
3487
3488 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3489 {
3490 case PDMNETWORKGSOTYPE_IPV4_TCP:
3491 case PDMNETWORKGSOTYPE_IPV4_UDP:
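            /* The GSO path expects to fill in both the IP and the TCP/UDP
               checksums itself, so the guest must have requested both offloads
               (IXSM and TXSM) for the frame to qualify. */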
3492 if (!pData->dw3.fIXSM)
3493 {
3494 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3495 return false;
3496 }
3497 if (!pData->dw3.fTXSM)
3498 {
3499 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3500 return false;
3501 }
3502 /** @todo what more check should we perform here? Ethernet frame type? */
3503 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3504 return true;
3505
3506 case PDMNETWORKGSOTYPE_IPV6_TCP:
3507 case PDMNETWORKGSOTYPE_IPV6_UDP:
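            /* IPv6 has no header checksum, so a request for IP checksum
               insertion (IXSM with a non-zero IPCSO) disqualifies the frame. */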
3508 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3509 {
3510 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3511 return false;
3512 }
3513 if (!pData->dw3.fTXSM)
3514 {
 3515 E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3516 return false;
3517 }
3518 /** @todo what more check should we perform here? Ethernet frame type? */
 3519 E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3520 return true;
3521
3522 default:
3523 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3524 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3525 return false;
3526 }
3527}
3528
3529/**
3530 * Frees the current xmit buffer.
3531 *
3532 * @param pThis The device state structure.
3533 */
3534static void e1kXmitFreeBuf(PE1KSTATE pThis)
3535{
3536 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3537 if (pSg)
3538 {
3539 pThis->CTX_SUFF(pTxSg) = NULL;
3540
3541 if (pSg->pvAllocator != pThis)
3542 {
3543 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3544 if (pDrv)
3545 pDrv->pfnFreeBuf(pDrv, pSg);
3546 }
3547 else
3548 {
3549 /* loopback */
3550 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3551 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3552 pSg->fFlags = 0;
3553 pSg->pvAllocator = NULL;
3554 }
3555 }
3556}
3557
3558#ifndef E1K_WITH_TXD_CACHE
3559/**
3560 * Allocates an xmit buffer.
3561 *
3562 * @returns See PDMINETWORKUP::pfnAllocBuf.
3563 * @param pThis The device state structure.
3564 * @param cbMin The minimum frame size.
3565 * @param fExactSize Whether cbMin is exact or if we have to max it
3566 * out to the max MTU size.
3567 * @param fGso Whether this is a GSO frame or not.
3568 */
3569DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, size_t cbMin, bool fExactSize, bool fGso)
3570{
3571 /* Adjust cbMin if necessary. */
3572 if (!fExactSize)
3573 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3574
3575 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3576 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3577 e1kXmitFreeBuf(pThis);
3578 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3579
3580 /*
3581 * Allocate the buffer.
3582 */
3583 PPDMSCATTERGATHER pSg;
3584 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3585 {
3586 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3587 if (RT_UNLIKELY(!pDrv))
3588 return VERR_NET_DOWN;
3589 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3590 if (RT_FAILURE(rc))
3591 {
3592 /* Suspend TX as we are out of buffers atm */
3593 STATUS |= STATUS_TXOFF;
3594 return rc;
3595 }
3596 }
3597 else
3598 {
3599 /* Create a loopback using the fallback buffer and preallocated SG. */
3600 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3601 pSg = &pThis->uTxFallback.Sg;
3602 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3603 pSg->cbUsed = 0;
3604 pSg->cbAvailable = 0;
3605 pSg->pvAllocator = pThis;
3606 pSg->pvUser = NULL; /* No GSO here. */
3607 pSg->cSegs = 1;
3608 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3609 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3610 }
3611
3612 pThis->CTX_SUFF(pTxSg) = pSg;
3613 return VINF_SUCCESS;
3614}
3615#else /* E1K_WITH_TXD_CACHE */
3616/**
3617 * Allocates an xmit buffer.
3618 *
3619 * @returns See PDMINETWORKUP::pfnAllocBuf.
3620 * @param pThis The device state structure.
 3621 * @param fGso Whether this is a GSO frame or not; the buffer size to
 3622 * allocate is taken from pThis->cbTxAlloc.
3625 */
3626DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, bool fGso)
3627{
3628 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3629 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3630 e1kXmitFreeBuf(pThis);
3631 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3632
3633 /*
3634 * Allocate the buffer.
3635 */
3636 PPDMSCATTERGATHER pSg;
3637 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3638 {
3639 if (pThis->cbTxAlloc == 0)
3640 {
3641 /* Zero packet, no need for the buffer */
3642 return VINF_SUCCESS;
3643 }
3644
3645 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3646 if (RT_UNLIKELY(!pDrv))
3647 return VERR_NET_DOWN;
3648 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3649 if (RT_FAILURE(rc))
3650 {
3651 /* Suspend TX as we are out of buffers atm */
3652 STATUS |= STATUS_TXOFF;
3653 return rc;
3654 }
3655 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3656 pThis->szPrf, pThis->cbTxAlloc,
3657 pThis->fVTag ? "VLAN " : "",
3658 pThis->fGSO ? "GSO " : ""));
3659 pThis->cbTxAlloc = 0;
3660 }
3661 else
3662 {
3663 /* Create a loopback using the fallback buffer and preallocated SG. */
3664 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3665 pSg = &pThis->uTxFallback.Sg;
3666 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3667 pSg->cbUsed = 0;
3668 pSg->cbAvailable = 0;
3669 pSg->pvAllocator = pThis;
3670 pSg->pvUser = NULL; /* No GSO here. */
3671 pSg->cSegs = 1;
3672 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3673 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3674 }
3675
3676 pThis->CTX_SUFF(pTxSg) = pSg;
3677 return VINF_SUCCESS;
3678}
3679#endif /* E1K_WITH_TXD_CACHE */
3680
3681/**
3682 * Checks if it's a GSO buffer or not.
3683 *
3684 * @returns true / false.
3685 * @param pTxSg The scatter / gather buffer.
3686 */
3687DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3688{
3689#if 0
3690 if (!pTxSg)
3691 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3692 if (pTxSg && pTxSg->pvUser)
3693 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3694#endif
3695 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3696}
3697
3698#ifndef E1K_WITH_TXD_CACHE
3699/**
3700 * Load transmit descriptor from guest memory.
3701 *
3702 * @param pThis The device state structure.
3703 * @param pDesc Pointer to descriptor union.
3704 * @param addr Physical address in guest context.
3705 * @thread E1000_TX
3706 */
3707DECLINLINE(void) e1kLoadDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3708{
3709 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3710}
3711#else /* E1K_WITH_TXD_CACHE */
3712/**
3713 * Load transmit descriptors from guest memory.
3714 *
3715 * We need two physical reads in case the tail wrapped around the end of TX
3716 * descriptor ring.
3717 *
3718 * @returns the actual number of descriptors fetched.
3719 * @param pThis The device state structure.
3722 * @thread E1000_TX
3723 */
3724DECLINLINE(unsigned) e1kTxDLoadMore(PE1KSTATE pThis)
3725{
3726 Assert(pThis->iTxDCurrent == 0);
3727 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
3728 unsigned nDescsAvailable = e1kGetTxLen(pThis) - pThis->nTxDFetched;
3729 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
3730 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3731 unsigned nFirstNotLoaded = (TDH + pThis->nTxDFetched) % nDescsTotal;
3732 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
3733 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
3734 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3735 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
3736 nFirstNotLoaded, nDescsInSingleRead));
3737 if (nDescsToFetch == 0)
3738 return 0;
3739 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
3740 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3741 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3742 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3743 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3744 pThis->szPrf, nDescsInSingleRead,
 3745 TDBAH, TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3746 nFirstNotLoaded, TDLEN, TDH, TDT));
3747 if (nDescsToFetch > nDescsInSingleRead)
3748 {
3749 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3750 ((uint64_t)TDBAH << 32) + TDBAL,
3751 pFirstEmptyDesc + nDescsInSingleRead,
3752 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3753 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3754 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
3755 TDBAH, TDBAL));
3756 }
3757 pThis->nTxDFetched += nDescsToFetch;
3758 return nDescsToFetch;
3759}
3760
3761/**
3762 * Load transmit descriptors from guest memory only if there are no loaded
3763 * descriptors.
3764 *
3765 * @returns true if there are descriptors in cache.
3766 * @param pThis The device state structure.
3769 * @thread E1000_TX
3770 */
3771DECLINLINE(bool) e1kTxDLazyLoad(PE1KSTATE pThis)
3772{
3773 if (pThis->nTxDFetched == 0)
3774 return e1kTxDLoadMore(pThis) != 0;
3775 return true;
3776}
3777#endif /* E1K_WITH_TXD_CACHE */
3778
3779/**
3780 * Write back transmit descriptor to guest memory.
3781 *
3782 * @param pThis The device state structure.
3783 * @param pDesc Pointer to descriptor union.
3784 * @param addr Physical address in guest context.
3785 * @thread E1000_TX
3786 */
3787DECLINLINE(void) e1kWriteBackDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3788{
 3789 /* Strictly only the last half of the descriptor (the status part) needs to be written back, but we write the whole descriptor for simplicity. */
3790 e1kPrintTDesc(pThis, pDesc, "^^^");
3791 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3792}
3793
3794/**
3795 * Transmit complete frame.
3796 *
3797 * @remarks We skip the FCS since we're not responsible for sending anything to
3798 * a real ethernet wire.
3799 *
3800 * @param pThis The device state structure.
3801 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3802 * @thread E1000_TX
3803 */
3804static void e1kTransmitFrame(PE1KSTATE pThis, bool fOnWorkerThread)
3805{
3806 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3807 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
3808 Assert(!pSg || pSg->cSegs == 1);
3809
3810 if (cbFrame > 70) /* unqualified guess */
3811 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
3812
3813#ifdef E1K_INT_STATS
3814 if (cbFrame <= 1514)
3815 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
3816 else if (cbFrame <= 2962)
3817 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
3818 else if (cbFrame <= 4410)
3819 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
3820 else if (cbFrame <= 5858)
3821 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
3822 else if (cbFrame <= 7306)
3823 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
3824 else if (cbFrame <= 8754)
3825 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
3826 else if (cbFrame <= 16384)
3827 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
3828 else if (cbFrame <= 32768)
3829 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
3830 else
3831 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
3832#endif /* E1K_INT_STATS */
3833
3834 /* Add VLAN tag */
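    /*
     * The 4-byte 802.1Q tag (TPID taken from VET, TCI from the descriptor) is
     * inserted right after the 12 bytes of destination and source MAC
     * addresses; the rest of the frame is shifted by 4 bytes to make room.
     */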
3835 if (cbFrame > 12 && pThis->fVTag)
3836 {
3837 E1kLog3(("%s Inserting VLAN tag %08x\n",
3838 pThis->szPrf, RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
3839 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
3840 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
3841 pSg->cbUsed += 4;
3842 cbFrame += 4;
3843 Assert(pSg->cbUsed == cbFrame);
3844 Assert(pSg->cbUsed <= pSg->cbAvailable);
3845 }
3846/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
3847 "%.*Rhxd\n"
3848 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
3849 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
3850
3851 /* Update the stats */
3852 E1K_INC_CNT32(TPT);
3853 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
3854 E1K_INC_CNT32(GPTC);
3855 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
3856 E1K_INC_CNT32(BPTC);
3857 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
3858 E1K_INC_CNT32(MPTC);
3859 /* Update octet transmit counter */
3860 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
3861 if (pThis->CTX_SUFF(pDrv))
3862 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
3863 if (cbFrame == 64)
3864 E1K_INC_CNT32(PTC64);
3865 else if (cbFrame < 128)
3866 E1K_INC_CNT32(PTC127);
3867 else if (cbFrame < 256)
3868 E1K_INC_CNT32(PTC255);
3869 else if (cbFrame < 512)
3870 E1K_INC_CNT32(PTC511);
3871 else if (cbFrame < 1024)
3872 E1K_INC_CNT32(PTC1023);
3873 else
3874 E1K_INC_CNT32(PTC1522);
3875
3876 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
3877
3878 /*
3879 * Dump and send the packet.
3880 */
3881 int rc = VERR_NET_DOWN;
3882 if (pSg && pSg->pvAllocator != pThis)
3883 {
3884 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
3885
3886 pThis->CTX_SUFF(pTxSg) = NULL;
3887 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3888 if (pDrv)
3889 {
3890 /* Release critical section to avoid deadlock in CanReceive */
3891 //e1kCsLeave(pThis);
3892 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3893 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
3894 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3895 //e1kCsEnter(pThis, RT_SRC_POS);
3896 }
3897 }
3898 else if (pSg)
3899 {
3900 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
3901 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
3902
3903 /** @todo do we actually need to check that we're in loopback mode here? */
3904 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
3905 {
3906 E1KRXDST status;
3907 RT_ZERO(status);
3908 status.fPIF = true;
3909 e1kHandleRxPacket(pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
3910 rc = VINF_SUCCESS;
3911 }
3912 e1kXmitFreeBuf(pThis);
3913 }
3914 else
3915 rc = VERR_NET_DOWN;
3916 if (RT_FAILURE(rc))
3917 {
3918 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
3919 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
3920 }
3921
3922 pThis->led.Actual.s.fWriting = 0;
3923}
3924
3925/**
3926 * Compute and write internet checksum (e1kCSum16) at the specified offset.
3927 *
3928 * @param pThis The device state structure.
3929 * @param pPkt Pointer to the packet.
3930 * @param u16PktLen Total length of the packet.
3931 * @param cso Offset in packet to write checksum at.
3932 * @param css Offset in packet to start computing
3933 * checksum from.
3934 * @param cse Offset in packet to stop computing
3935 * checksum at.
3936 * @thread E1000_TX
3937 */
3938static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
3939{
3940 RT_NOREF1(pThis);
3941
3942 if (css >= u16PktLen)
3943 {
 3944 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
 3945 pThis->szPrf, css, u16PktLen));
3946 return;
3947 }
3948
3949 if (cso >= u16PktLen - 1)
3950 {
3951 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
3952 pThis->szPrf, cso, u16PktLen));
3953 return;
3954 }
3955
3956 if (cse == 0)
3957 cse = u16PktLen - 1;
3958 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
3959 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
3960 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
3961 *(uint16_t*)(pPkt + cso) = u16ChkSum;
3962}
3963
3964/**
3965 * Add a part of descriptor's buffer to transmit frame.
3966 *
3967 * @remarks data.u64BufAddr is used unconditionally for both data
3968 * and legacy descriptors since it is identical to
3969 * legacy.u64BufAddr.
3970 *
3971 * @param pThis The device state structure.
3972 * @param pDesc Pointer to the descriptor to transmit.
3973 * @param u16Len Length of buffer to the end of segment.
3974 * @param fSend Force packet sending.
3975 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3976 * @thread E1000_TX
3977 */
3978#ifndef E1K_WITH_TXD_CACHE
3979static void e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
3980{
3981 /* TCP header being transmitted */
3982 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
3983 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
3984 /* IP header being transmitted */
3985 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
3986 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
3987
3988 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
3989 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
3990 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
3991
3992 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
3993 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
3994 E1kLog3(("%s Dump of the segment:\n"
3995 "%.*Rhxd\n"
3996 "%s --- End of dump ---\n",
3997 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
3998 pThis->u16TxPktLen += u16Len;
3999 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4000 pThis->szPrf, pThis->u16TxPktLen));
4001 if (pThis->u16HdrRemain > 0)
4002 {
4003 /* The header was not complete, check if it is now */
4004 if (u16Len >= pThis->u16HdrRemain)
4005 {
4006 /* The rest is payload */
4007 u16Len -= pThis->u16HdrRemain;
4008 pThis->u16HdrRemain = 0;
4009 /* Save partial checksum and flags */
4010 pThis->u32SavedCsum = pTcpHdr->chksum;
4011 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4012 /* Clear FIN and PSH flags now and set them only in the last segment */
4013 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4014 }
4015 else
4016 {
4017 /* Still not */
4018 pThis->u16HdrRemain -= u16Len;
4019 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4020 pThis->szPrf, pThis->u16HdrRemain));
4021 return;
4022 }
4023 }
4024
4025 pThis->u32PayRemain -= u16Len;
4026
4027 if (fSend)
4028 {
4029 /* Leave ethernet header intact */
4030 /* IP Total Length = payload + headers - ethernet header */
4031 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4032 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4033 pThis->szPrf, ntohs(pIpHdr->total_len)));
4034 /* Update IP Checksum */
4035 pIpHdr->chksum = 0;
4036 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4037 pThis->contextTSE.ip.u8CSO,
4038 pThis->contextTSE.ip.u8CSS,
4039 pThis->contextTSE.ip.u16CSE);
4040
4041 /* Update TCP flags */
4042 /* Restore original FIN and PSH flags for the last segment */
4043 if (pThis->u32PayRemain == 0)
4044 {
4045 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4046 E1K_INC_CNT32(TSCTC);
4047 }
4048 /* Add TCP length to partial pseudo header sum */
4049 uint32_t csum = pThis->u32SavedCsum
4050 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
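        /* Fold the carries back into the low 16 bits (end-around carry), as
           required by the Internet checksum algorithm. */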
4051 while (csum >> 16)
4052 csum = (csum >> 16) + (csum & 0xFFFF);
4053 pTcpHdr->chksum = csum;
4054 /* Compute final checksum */
4055 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4056 pThis->contextTSE.tu.u8CSO,
4057 pThis->contextTSE.tu.u8CSS,
4058 pThis->contextTSE.tu.u16CSE);
4059
4060 /*
 4061 * Transmit it. If we've used the SG already, allocate a new one before
 4062 * we copy the data.
4063 */
4064 if (!pThis->CTX_SUFF(pTxSg))
4065 e1kXmitAllocBuf(pThis, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4066 if (pThis->CTX_SUFF(pTxSg))
4067 {
4068 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4069 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4070 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4071 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4072 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4073 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4074 }
4075 e1kTransmitFrame(pThis, fOnWorkerThread);
4076
4077 /* Update Sequence Number */
4078 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4079 - pThis->contextTSE.dw3.u8HDRLEN);
4080 /* Increment IP identification */
4081 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4082 }
4083}
4084#else /* E1K_WITH_TXD_CACHE */
4085static int e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4086{
4087 int rc = VINF_SUCCESS;
4088 /* TCP header being transmitted */
4089 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4090 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4091 /* IP header being transmitted */
4092 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4093 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4094
4095 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4096 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4097 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4098
4099 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4100 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4101 E1kLog3(("%s Dump of the segment:\n"
4102 "%.*Rhxd\n"
4103 "%s --- End of dump ---\n",
4104 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4105 pThis->u16TxPktLen += u16Len;
4106 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4107 pThis->szPrf, pThis->u16TxPktLen));
4108 if (pThis->u16HdrRemain > 0)
4109 {
4110 /* The header was not complete, check if it is now */
4111 if (u16Len >= pThis->u16HdrRemain)
4112 {
4113 /* The rest is payload */
4114 u16Len -= pThis->u16HdrRemain;
4115 pThis->u16HdrRemain = 0;
4116 /* Save partial checksum and flags */
4117 pThis->u32SavedCsum = pTcpHdr->chksum;
4118 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4119 /* Clear FIN and PSH flags now and set them only in the last segment */
4120 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4121 }
4122 else
4123 {
4124 /* Still not */
4125 pThis->u16HdrRemain -= u16Len;
4126 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4127 pThis->szPrf, pThis->u16HdrRemain));
4128 return rc;
4129 }
4130 }
4131
4132 pThis->u32PayRemain -= u16Len;
4133
4134 if (fSend)
4135 {
4136 /* Leave ethernet header intact */
4137 /* IP Total Length = payload + headers - ethernet header */
4138 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4139 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4140 pThis->szPrf, ntohs(pIpHdr->total_len)));
4141 /* Update IP Checksum */
4142 pIpHdr->chksum = 0;
4143 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4144 pThis->contextTSE.ip.u8CSO,
4145 pThis->contextTSE.ip.u8CSS,
4146 pThis->contextTSE.ip.u16CSE);
4147
4148 /* Update TCP flags */
4149 /* Restore original FIN and PSH flags for the last segment */
4150 if (pThis->u32PayRemain == 0)
4151 {
4152 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4153 E1K_INC_CNT32(TSCTC);
4154 }
4155 /* Add TCP length to partial pseudo header sum */
4156 uint32_t csum = pThis->u32SavedCsum
4157 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
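        /* Fold the carries back into the low 16 bits (end-around carry), as
           required by the Internet checksum algorithm. */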
4158 while (csum >> 16)
4159 csum = (csum >> 16) + (csum & 0xFFFF);
4160 pTcpHdr->chksum = csum;
4161 /* Compute final checksum */
4162 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4163 pThis->contextTSE.tu.u8CSO,
4164 pThis->contextTSE.tu.u8CSS,
4165 pThis->contextTSE.tu.u16CSE);
4166
4167 /*
4168 * Transmit it.
4169 */
4170 if (pThis->CTX_SUFF(pTxSg))
4171 {
4172 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4173 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4174 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4175 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4176 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4177 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4178 }
4179 e1kTransmitFrame(pThis, fOnWorkerThread);
4180
4181 /* Update Sequence Number */
4182 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4183 - pThis->contextTSE.dw3.u8HDRLEN);
4184 /* Increment IP identification */
4185 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4186
4187 /* Allocate new buffer for the next segment. */
4188 if (pThis->u32PayRemain)
4189 {
4190 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4191 pThis->contextTSE.dw3.u16MSS)
4192 + pThis->contextTSE.dw3.u8HDRLEN
4193 + (pThis->fVTag ? 4 : 0);
4194 rc = e1kXmitAllocBuf(pThis, false /* fGSO */);
4195 }
4196 }
4197
4198 return rc;
4199}
4200#endif /* E1K_WITH_TXD_CACHE */
4201
4202#ifndef E1K_WITH_TXD_CACHE
4203/**
4204 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4205 * frame.
4206 *
 4207 * We construct the frame in the fallback buffer first and then copy it to the SG
4208 * buffer before passing it down to the network driver code.
4209 *
4210 * @returns true if the frame should be transmitted, false if not.
4211 *
4212 * @param pThis The device state structure.
4213 * @param pDesc Pointer to the descriptor to transmit.
4214 * @param cbFragment Length of descriptor's buffer.
4215 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4216 * @thread E1000_TX
4217 */
4218static bool e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4219{
4220 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4221 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4222 Assert(pDesc->data.cmd.fTSE);
4223 Assert(!e1kXmitIsGsoBuf(pTxSg));
4224
4225 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4226 Assert(u16MaxPktLen != 0);
4227 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4228
4229 /*
4230 * Carve out segments.
4231 */
4232 do
4233 {
4234 /* Calculate how many bytes we have left in this TCP segment */
4235 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4236 if (cb > cbFragment)
4237 {
4238 /* This descriptor fits completely into current segment */
4239 cb = cbFragment;
4240 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4241 }
4242 else
4243 {
4244 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4245 /*
4246 * Rewind the packet tail pointer to the beginning of payload,
4247 * so we continue writing right beyond the header.
4248 */
4249 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4250 }
4251
4252 pDesc->data.u64BufAddr += cb;
4253 cbFragment -= cb;
4254 } while (cbFragment > 0);
4255
4256 if (pDesc->data.cmd.fEOP)
4257 {
4258 /* End of packet, next segment will contain header. */
4259 if (pThis->u32PayRemain != 0)
4260 E1K_INC_CNT32(TSCTFC);
4261 pThis->u16TxPktLen = 0;
4262 e1kXmitFreeBuf(pThis);
4263 }
4264
4265 return false;
4266}
4267#else /* E1K_WITH_TXD_CACHE */
4268/**
4269 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4270 * frame.
4271 *
 4272 * We construct the frame in the fallback buffer first and then copy it to the SG
4273 * buffer before passing it down to the network driver code.
4274 *
 4275 * @returns VBox status code.
4276 *
4277 * @param pThis The device state structure.
4278 * @param pDesc Pointer to the descriptor to transmit.
4280 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4281 * @thread E1000_TX
4282 */
4283static int e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4284{
4285#ifdef VBOX_STRICT
4286 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4287 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4288 Assert(pDesc->data.cmd.fTSE);
4289 Assert(!e1kXmitIsGsoBuf(pTxSg));
4290#endif
4291
4292 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4293 Assert(u16MaxPktLen != 0);
4294 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4295
4296 /*
4297 * Carve out segments.
4298 */
4299 int rc;
4300 do
4301 {
4302 /* Calculate how many bytes we have left in this TCP segment */
4303 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4304 if (cb > pDesc->data.cmd.u20DTALEN)
4305 {
4306 /* This descriptor fits completely into current segment */
4307 cb = pDesc->data.cmd.u20DTALEN;
4308 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4309 }
4310 else
4311 {
4312 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4313 /*
4314 * Rewind the packet tail pointer to the beginning of payload,
4315 * so we continue writing right beyond the header.
4316 */
4317 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4318 }
4319
4320 pDesc->data.u64BufAddr += cb;
4321 pDesc->data.cmd.u20DTALEN -= cb;
4322 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4323
4324 if (pDesc->data.cmd.fEOP)
4325 {
4326 /* End of packet, next segment will contain header. */
4327 if (pThis->u32PayRemain != 0)
4328 E1K_INC_CNT32(TSCTFC);
4329 pThis->u16TxPktLen = 0;
4330 e1kXmitFreeBuf(pThis);
4331 }
4332
 4333 return rc;
4334}
4335#endif /* E1K_WITH_TXD_CACHE */
4336
4337
4338/**
4339 * Add descriptor's buffer to transmit frame.
4340 *
4341 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4342 * TSE frames we cannot handle as GSO.
4343 *
4344 * @returns true on success, false on failure.
4345 *
4346 * @param pThis The device state structure.
4347 * @param PhysAddr The physical address of the descriptor buffer.
4348 * @param cbFragment Length of descriptor's buffer.
4349 * @thread E1000_TX
4350 */
4351static bool e1kAddToFrame(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
4352{
4353 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4354 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4355 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4356
4357 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4358 {
4359 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4360 return false;
4361 }
4362 if (RT_UNLIKELY( fGso && cbNewPkt > pTxSg->cbAvailable ))
4363 {
4364 E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4365 return false;
4366 }
4367
4368 if (RT_LIKELY(pTxSg))
4369 {
4370 Assert(pTxSg->cSegs == 1);
4371 Assert(pTxSg->cbUsed == pThis->u16TxPktLen);
4372
4373 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4374 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4375
4376 pTxSg->cbUsed = cbNewPkt;
4377 }
4378 pThis->u16TxPktLen = cbNewPkt;
4379
4380 return true;
4381}
4382
4383
4384/**
4385 * Write the descriptor back to guest memory and notify the guest.
4386 *
4387 * @param pThis The device state structure.
 4388 * @param pDesc Pointer to the descriptor that has been transmitted.
4389 * @param addr Physical address of the descriptor in guest memory.
4390 * @thread E1000_TX
4391 */
4392static void e1kDescReport(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4393{
4394 /*
4395 * We fake descriptor write-back bursting. Descriptors are written back as they are
4396 * processed.
4397 */
4398 /* Let's pretend we process descriptors. Write back with DD set. */
4399 /*
 4400 * Prior to r71586 we tried to accommodate the case when write-back bursts
 4401 * are enabled without actually implementing bursting by writing back all
 4402 * descriptors, even the ones that do not have RS set. This caused kernel
 4403 * panics with Linux SMP kernels, as the e1000 driver tried to free up the skb
 4404 * associated with a written-back descriptor if it happened to be a context
 4405 * descriptor, since context descriptors do not have an skb associated with them.
 4406 * Starting from r71586 we write back only the descriptors with RS set,
 4407 * which is a little bit different from what the real hardware does in
 4408 * case there is a chain of data descriptors where some of them have RS set
 4409 * and others do not. It is a very uncommon scenario, imho.
4410 * We need to check RPS as well since some legacy drivers use it instead of
4411 * RS even with newer cards.
4412 */
4413 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4414 {
4415 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4416 e1kWriteBackDesc(pThis, pDesc, addr);
4417 if (pDesc->legacy.cmd.fEOP)
4418 {
4419#ifdef E1K_USE_TX_TIMERS
4420 if (pDesc->legacy.cmd.fIDE)
4421 {
4422 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4423 //if (pThis->fIntRaised)
4424 //{
4425 // /* Interrupt is already pending, no need for timers */
4426 // ICR |= ICR_TXDW;
4427 //}
4428 //else {
 4429 /* Arm the timer to fire in TIDV usec (discard .024) */
4430 e1kArmTimer(pThis, pThis->CTX_SUFF(pTIDTimer), TIDV);
4431# ifndef E1K_NO_TAD
4432 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4433 E1kLog2(("%s Checking if TAD timer is running\n",
4434 pThis->szPrf));
4435 if (TADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pTADTimer)))
4436 e1kArmTimer(pThis, pThis->CTX_SUFF(pTADTimer), TADV);
4437# endif /* E1K_NO_TAD */
4438 }
4439 else
4440 {
4441 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4442 pThis->szPrf));
4443# ifndef E1K_NO_TAD
4444 /* Cancel both timers if armed and fire immediately. */
4445 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
4446# endif /* E1K_NO_TAD */
4447#endif /* E1K_USE_TX_TIMERS */
4448 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4449 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXDW);
4450#ifdef E1K_USE_TX_TIMERS
4451 }
4452#endif /* E1K_USE_TX_TIMERS */
4453 }
4454 }
4455 else
4456 {
4457 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4458 }
4459}
4460
4461#ifndef E1K_WITH_TXD_CACHE
4462
4463/**
4464 * Process Transmit Descriptor.
4465 *
4466 * E1000 supports three types of transmit descriptors:
4467 * - legacy data descriptors of older format (context-less).
4468 * - data the same as legacy but providing new offloading capabilities.
4469 * - context sets up the context for following data descriptors.
4470 *
4471 * @param pThis The device state structure.
4472 * @param pDesc Pointer to descriptor union.
4473 * @param addr Physical address of descriptor in guest memory.
4474 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4475 * @thread E1000_TX
4476 */
4477static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr, bool fOnWorkerThread)
4478{
4479 int rc = VINF_SUCCESS;
4480 uint32_t cbVTag = 0;
4481
4482 e1kPrintTDesc(pThis, pDesc, "vvv");
4483
4484#ifdef E1K_USE_TX_TIMERS
4485 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4486#endif /* E1K_USE_TX_TIMERS */
4487
4488 switch (e1kGetDescType(pDesc))
4489 {
4490 case E1K_DTYP_CONTEXT:
4491 if (pDesc->context.dw2.fTSE)
4492 {
4493 pThis->contextTSE = pDesc->context;
4494 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4495 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4496 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4497 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4498 }
4499 else
4500 {
4501 pThis->contextNormal = pDesc->context;
4502 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4503 }
4504 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4505 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4506 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4507 pDesc->context.ip.u8CSS,
4508 pDesc->context.ip.u8CSO,
4509 pDesc->context.ip.u16CSE,
4510 pDesc->context.tu.u8CSS,
4511 pDesc->context.tu.u8CSO,
4512 pDesc->context.tu.u16CSE));
4513 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4514 e1kDescReport(pThis, pDesc, addr);
4515 break;
4516
4517 case E1K_DTYP_DATA:
4518 {
4519 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4520 {
 4521 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4522 /** @todo Same as legacy when !TSE. See below. */
4523 break;
4524 }
4525 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4526 &pThis->StatTxDescTSEData:
4527 &pThis->StatTxDescData);
4528 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4529 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4530
4531 /*
 4532 * The last descriptor of a non-TSE packet must contain the VLE flag.
 4533 * TSE packets have the VLE flag in the first descriptor. The latter
 4534 * case is taken care of a bit later when cbVTag gets assigned.
4535 *
4536 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4537 */
4538 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4539 {
4540 pThis->fVTag = pDesc->data.cmd.fVLE;
4541 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4542 }
4543 /*
4544 * First fragment: Allocate new buffer and save the IXSM and TXSM
4545 * packet options as these are only valid in the first fragment.
4546 */
4547 if (pThis->u16TxPktLen == 0)
4548 {
4549 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4550 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4551 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4552 pThis->fIPcsum ? " IP" : "",
4553 pThis->fTCPcsum ? " TCP/UDP" : ""));
4554 if (pDesc->data.cmd.fTSE)
4555 {
4556 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4557 pThis->fVTag = pDesc->data.cmd.fVLE;
4558 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4559 cbVTag = pThis->fVTag ? 4 : 0;
4560 }
4561 else if (pDesc->data.cmd.fEOP)
4562 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4563 else
4564 cbVTag = 4;
4565 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4566 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4567 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4568 true /*fExactSize*/, true /*fGso*/);
4569 else if (pDesc->data.cmd.fTSE)
4570 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4571 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4572 else
4573 rc = e1kXmitAllocBuf(pThis, pDesc->data.cmd.u20DTALEN + cbVTag,
4574 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4575
4576 /**
4577 * @todo: Perhaps it is not that simple for GSO packets! We may
4578 * need to unwind some changes.
4579 */
4580 if (RT_FAILURE(rc))
4581 {
4582 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4583 break;
4584 }
4585            /** @todo Is there any way of indicating errors other than collisions? Like
4586 * VERR_NET_DOWN. */
4587 }
4588
4589 /*
4590 * Add the descriptor data to the frame. If the frame is complete,
4591 * transmit it and reset the u16TxPktLen field.
4592 */
4593 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4594 {
4595 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4596 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4597 if (pDesc->data.cmd.fEOP)
4598 {
4599 if ( fRc
4600 && pThis->CTX_SUFF(pTxSg)
4601 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4602 {
4603 e1kTransmitFrame(pThis, fOnWorkerThread);
4604 E1K_INC_CNT32(TSCTC);
4605 }
4606 else
4607 {
4608 if (fRc)
4609 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4610 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4611 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4612 e1kXmitFreeBuf(pThis);
4613 E1K_INC_CNT32(TSCTFC);
4614 }
4615 pThis->u16TxPktLen = 0;
4616 }
4617 }
4618 else if (!pDesc->data.cmd.fTSE)
4619 {
4620 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4621 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4622 if (pDesc->data.cmd.fEOP)
4623 {
4624 if (fRc && pThis->CTX_SUFF(pTxSg))
4625 {
4626 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4627 if (pThis->fIPcsum)
4628 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4629 pThis->contextNormal.ip.u8CSO,
4630 pThis->contextNormal.ip.u8CSS,
4631 pThis->contextNormal.ip.u16CSE);
4632 if (pThis->fTCPcsum)
4633 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4634 pThis->contextNormal.tu.u8CSO,
4635 pThis->contextNormal.tu.u8CSS,
4636 pThis->contextNormal.tu.u16CSE);
4637 e1kTransmitFrame(pThis, fOnWorkerThread);
4638 }
4639 else
4640 e1kXmitFreeBuf(pThis);
4641 pThis->u16TxPktLen = 0;
4642 }
4643 }
4644 else
4645 {
4646 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4647 e1kFallbackAddToFrame(pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4648 }
4649
4650 e1kDescReport(pThis, pDesc, addr);
4651 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4652 break;
4653 }
4654
4655 case E1K_DTYP_LEGACY:
4656 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4657 {
4658 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4659 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4660 break;
4661 }
4662 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4663 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4664
4665 /* First fragment: allocate new buffer. */
4666 if (pThis->u16TxPktLen == 0)
4667 {
4668 if (pDesc->legacy.cmd.fEOP)
4669 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4670 else
4671 cbVTag = 4;
4672 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4673 /** @todo reset status bits? */
4674 rc = e1kXmitAllocBuf(pThis, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4675 if (RT_FAILURE(rc))
4676 {
4677 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4678 break;
4679 }
4680
4681            /** @todo Is there any way of indicating errors other than collisions? Like
4682 * VERR_NET_DOWN. */
4683 }
4684
4685 /* Add fragment to frame. */
4686 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4687 {
4688 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4689
4690 /* Last fragment: Transmit and reset the packet storage counter. */
4691 if (pDesc->legacy.cmd.fEOP)
4692 {
4693 pThis->fVTag = pDesc->legacy.cmd.fVLE;
4694 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4695 /** @todo Offload processing goes here. */
4696 e1kTransmitFrame(pThis, fOnWorkerThread);
4697 pThis->u16TxPktLen = 0;
4698 }
4699 }
4700 /* Last fragment + failure: free the buffer and reset the storage counter. */
4701 else if (pDesc->legacy.cmd.fEOP)
4702 {
4703 e1kXmitFreeBuf(pThis);
4704 pThis->u16TxPktLen = 0;
4705 }
4706
4707 e1kDescReport(pThis, pDesc, addr);
4708 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4709 break;
4710
4711 default:
4712 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4713 pThis->szPrf, e1kGetDescType(pDesc)));
4714 break;
4715 }
4716
4717 return rc;
4718}
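
/*
 * Illustrative sketch only, not part of the device code: how a checksum
 * context triple could be applied to a finished frame.  CSS is taken to be
 * the first byte to sum, CSO the offset where the 16-bit result is stored,
 * and CSE the last byte to include (0 meaning "up to the end of the frame"),
 * matching the way these fields are passed to e1kInsertChecksum above.  The
 * helper name e1kExampleApplyCsum and its simplifications (plain ones'
 * complement sum, no pseudo-header handling) are assumptions of this sketch.
 */
static void e1kExampleApplyCsum(uint8_t *pbFrame, uint32_t cbFrame,
                                uint8_t u8CSO, uint8_t u8CSS, uint16_t u16CSE)
{
    uint32_t uEnd = u16CSE ? (uint32_t)u16CSE + 1 : cbFrame;   /* one past the last byte to sum */
    if (u8CSS >= cbFrame || (uint32_t)u8CSO + 1 >= cbFrame || uEnd > cbFrame || uEnd <= u8CSS)
        return; /* Out-of-range offsets: leave the frame untouched. */

    uint32_t uSum = 0;
    for (uint32_t i = u8CSS; i + 1 < uEnd; i += 2)
        uSum += ((uint32_t)pbFrame[i] << 8) | pbFrame[i + 1];  /* 16-bit big-endian words */
    if ((uEnd - u8CSS) & 1)
        uSum += (uint32_t)pbFrame[uEnd - 1] << 8;              /* odd trailing byte */
    while (uSum >> 16)
        uSum = (uSum & 0xffff) + (uSum >> 16);                 /* fold the carries */

    uint16_t u16Csum = (uint16_t)~uSum;
    pbFrame[u8CSO]     = (uint8_t)(u16Csum >> 8);              /* store in network byte order */
    pbFrame[u8CSO + 1] = (uint8_t)u16Csum;
}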
4719
4720#else /* E1K_WITH_TXD_CACHE */
4721
4722/**
4723 * Process Transmit Descriptor.
4724 *
4725 * E1000 supports three types of transmit descriptors:
4726 * - legacy data descriptors of older format (context-less).
4727 * - data, the same as legacy but providing new offloading capabilities.
4728 * - context sets up the context for following data descriptors.
4729 *
4730 * @param pThis The device state structure.
4731 * @param pDesc Pointer to descriptor union.
4732 * @param addr Physical address of descriptor in guest memory.
4733 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4735 * @thread E1000_TX
4736 */
4737static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr,
4738 bool fOnWorkerThread)
4739{
4740 int rc = VINF_SUCCESS;
4741
4742 e1kPrintTDesc(pThis, pDesc, "vvv");
4743
4744#ifdef E1K_USE_TX_TIMERS
4745 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4746#endif /* E1K_USE_TX_TIMERS */
4747
4748 switch (e1kGetDescType(pDesc))
4749 {
4750 case E1K_DTYP_CONTEXT:
4751            /* The caller has already updated the context. */
4752 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4753 e1kDescReport(pThis, pDesc, addr);
4754 break;
4755
4756 case E1K_DTYP_DATA:
4757 {
4758 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4759 &pThis->StatTxDescTSEData:
4760 &pThis->StatTxDescData);
4761 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4762 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4763 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4764 {
4765                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4766 }
4767 else
4768 {
4769 /*
4770 * Add the descriptor data to the frame. If the frame is complete,
4771 * transmit it and reset the u16TxPktLen field.
4772 */
4773 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4774 {
4775 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4776 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4777 if (pDesc->data.cmd.fEOP)
4778 {
4779 if ( fRc
4780 && pThis->CTX_SUFF(pTxSg)
4781 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4782 {
4783 e1kTransmitFrame(pThis, fOnWorkerThread);
4784 E1K_INC_CNT32(TSCTC);
4785 }
4786 else
4787 {
4788 if (fRc)
4789 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4790 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4791 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4792 e1kXmitFreeBuf(pThis);
4793 E1K_INC_CNT32(TSCTFC);
4794 }
4795 pThis->u16TxPktLen = 0;
4796 }
4797 }
4798 else if (!pDesc->data.cmd.fTSE)
4799 {
4800 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4801 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4802 if (pDesc->data.cmd.fEOP)
4803 {
4804 if (fRc && pThis->CTX_SUFF(pTxSg))
4805 {
4806 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4807 if (pThis->fIPcsum)
4808 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4809 pThis->contextNormal.ip.u8CSO,
4810 pThis->contextNormal.ip.u8CSS,
4811 pThis->contextNormal.ip.u16CSE);
4812 if (pThis->fTCPcsum)
4813 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4814 pThis->contextNormal.tu.u8CSO,
4815 pThis->contextNormal.tu.u8CSS,
4816 pThis->contextNormal.tu.u16CSE);
4817 e1kTransmitFrame(pThis, fOnWorkerThread);
4818 }
4819 else
4820 e1kXmitFreeBuf(pThis);
4821 pThis->u16TxPktLen = 0;
4822 }
4823 }
4824 else
4825 {
4826 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4827 rc = e1kFallbackAddToFrame(pThis, pDesc, fOnWorkerThread);
4828 }
4829 }
4830 e1kDescReport(pThis, pDesc, addr);
4831 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4832 break;
4833 }
4834
4835 case E1K_DTYP_LEGACY:
4836 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4837 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4838 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4839 {
4840 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4841 }
4842 else
4843 {
4844 /* Add fragment to frame. */
4845 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4846 {
4847 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4848
4849 /* Last fragment: Transmit and reset the packet storage counter. */
4850 if (pDesc->legacy.cmd.fEOP)
4851 {
4852 if (pDesc->legacy.cmd.fIC)
4853 {
4854 e1kInsertChecksum(pThis,
4855 (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
4856 pThis->u16TxPktLen,
4857 pDesc->legacy.cmd.u8CSO,
4858 pDesc->legacy.dw3.u8CSS,
4859 0);
4860 }
4861 e1kTransmitFrame(pThis, fOnWorkerThread);
4862 pThis->u16TxPktLen = 0;
4863 }
4864 }
4865 /* Last fragment + failure: free the buffer and reset the storage counter. */
4866 else if (pDesc->legacy.cmd.fEOP)
4867 {
4868 e1kXmitFreeBuf(pThis);
4869 pThis->u16TxPktLen = 0;
4870 }
4871 }
4872 e1kDescReport(pThis, pDesc, addr);
4873 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4874 break;
4875
4876 default:
4877 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4878 pThis->szPrf, e1kGetDescType(pDesc)));
4879 break;
4880 }
4881
4882 return rc;
4883}
4884
4885DECLINLINE(void) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
4886{
4887 if (pDesc->context.dw2.fTSE)
4888 {
4889 pThis->contextTSE = pDesc->context;
4890 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4891 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4892 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4893 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4894 }
4895 else
4896 {
4897 pThis->contextNormal = pDesc->context;
4898 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4899 }
4900 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4901 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4902 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4903 pDesc->context.ip.u8CSS,
4904 pDesc->context.ip.u8CSO,
4905 pDesc->context.ip.u16CSE,
4906 pDesc->context.tu.u8CSS,
4907 pDesc->context.tu.u8CSO,
4908 pDesc->context.tu.u16CSE));
4909}
4910
4911static bool e1kLocateTxPacket(PE1KSTATE pThis)
4912{
4913 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
4914 pThis->szPrf, pThis->cbTxAlloc));
4915 /* Check if we have located the packet already. */
4916 if (pThis->cbTxAlloc)
4917 {
4918 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4919 pThis->szPrf, pThis->cbTxAlloc));
4920 return true;
4921 }
4922
4923 bool fTSE = false;
4924 uint32_t cbPacket = 0;
4925
4926 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
4927 {
4928 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
4929 switch (e1kGetDescType(pDesc))
4930 {
4931 case E1K_DTYP_CONTEXT:
4932 e1kUpdateTxContext(pThis, pDesc);
4933 continue;
4934 case E1K_DTYP_LEGACY:
4935 /* Skip empty descriptors. */
4936 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
4937 break;
4938 cbPacket += pDesc->legacy.cmd.u16Length;
4939 pThis->fGSO = false;
4940 break;
4941 case E1K_DTYP_DATA:
4942 /* Skip empty descriptors. */
4943 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
4944 break;
4945 if (cbPacket == 0)
4946 {
4947 /*
4948 * The first fragment: save IXSM and TXSM options
4949 * as these are only valid in the first fragment.
4950 */
4951 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4952 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4953 fTSE = pDesc->data.cmd.fTSE;
4954 /*
4955 * TSE descriptors have VLE bit properly set in
4956 * the first fragment.
4957 */
4958 if (fTSE)
4959 {
4960 pThis->fVTag = pDesc->data.cmd.fVLE;
4961 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4962 }
4963 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
4964 }
4965 cbPacket += pDesc->data.cmd.u20DTALEN;
4966 break;
4967 default:
4968 AssertMsgFailed(("Impossible descriptor type!"));
4969 }
4970 if (pDesc->legacy.cmd.fEOP)
4971 {
4972 /*
4973 * Non-TSE descriptors have VLE bit properly set in
4974 * the last fragment.
4975 */
4976 if (!fTSE)
4977 {
4978 pThis->fVTag = pDesc->data.cmd.fVLE;
4979 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4980 }
4981 /*
4982 * Compute the required buffer size. If we cannot do GSO but still
4983 * have to do segmentation we allocate the first segment only.
4984 */
4985 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
4986 cbPacket :
4987 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
4988 if (pThis->fVTag)
4989 pThis->cbTxAlloc += 4;
4990 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4991 pThis->szPrf, pThis->cbTxAlloc));
4992 return true;
4993 }
4994 }
4995
4996 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
4997 {
4998        /* All descriptors were empty; process them as a dummy packet. */
4999 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5000 pThis->szPrf, pThis->cbTxAlloc));
5001 return true;
5002 }
5003 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
5004 pThis->szPrf, pThis->cbTxAlloc));
5005 return false;
5006}
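
/*
 * Illustrative sketch only, not part of the device code: the allocation-size
 * rule applied by e1kLocateTxPacket above.  Non-TSE and GSO-capable packets
 * get a buffer for the whole packet; TSE packets that have to be segmented in
 * software get a buffer for the first segment only (MSS plus header), and four
 * extra bytes are reserved when an 802.1Q tag has to be inserted.  The helper
 * name e1kExampleTxAllocSize is hypothetical.
 */
static uint32_t e1kExampleTxAllocSize(uint32_t cbPacket, bool fTSE, bool fGSO, bool fVTag,
                                      uint16_t u16MSS, uint8_t u8HdrLen)
{
    uint32_t cbAlloc = (!fTSE || fGSO)
                     ? cbPacket                                       /* whole packet fits in one go */
                     : RT_MIN(cbPacket, (uint32_t)u16MSS + u8HdrLen); /* first software-built segment only */
    if (fVTag)
        cbAlloc += 4; /* room for the 802.1Q VLAN tag */
    return cbAlloc;
}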
5007
5008static int e1kXmitPacket(PE1KSTATE pThis, bool fOnWorkerThread)
5009{
5010 int rc = VINF_SUCCESS;
5011
5012 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5013 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5014
5015 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5016 {
5017 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5018 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5019 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
5020 rc = e1kXmitDesc(pThis, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5021 if (RT_FAILURE(rc))
5022 break;
5023 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
5024 TDH = 0;
5025 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5026 if (uLowThreshold != 0 && e1kGetTxLen(pThis) <= uLowThreshold)
5027 {
5028 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5029 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5030 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5031 }
5032 ++pThis->iTxDCurrent;
5033 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5034 break;
5035 }
5036
5037 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5038 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5039 return rc;
5040}
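
/*
 * Illustrative sketch only, not part of the device code: the descriptor-ring
 * arithmetic used by e1kXmitPacket above.  TDLEN is the ring size in bytes,
 * so the head index wraps to zero once head times descriptor size reaches it,
 * and the guest is warned with ICR.TXD_LOW when the number of unprocessed
 * descriptors drops to the low-water mark (LWTHRESH counts groups of eight
 * descriptors).  The helper names are hypothetical.
 */
static uint32_t e1kExampleAdvanceTxHead(uint32_t uHead, uint32_t cbRing, uint32_t cbDesc)
{
    if (++uHead * cbDesc >= cbRing)
        uHead = 0;                              /* wrap around to the start of the ring */
    return uHead;
}

static bool e1kExampleTxLowOnDescriptors(uint32_t cPending, uint32_t uLwThresh)
{
    uint32_t uLowThreshold = uLwThresh * 8;     /* LWTHRESH is in units of 8 descriptors */
    return uLowThreshold != 0 && cPending <= uLowThreshold;
}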
5041
5042#endif /* E1K_WITH_TXD_CACHE */
5043#ifndef E1K_WITH_TXD_CACHE
5044
5045/**
5046 * Transmit pending descriptors.
5047 *
5048 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5049 *
5050 * @param pThis The E1000 state.
5051 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5052 */
5053static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5054{
5055 int rc = VINF_SUCCESS;
5056
5057 /* Check if transmitter is enabled. */
5058 if (!(TCTL & TCTL_EN))
5059 return VINF_SUCCESS;
5060 /*
5061 * Grab the xmit lock of the driver as well as the E1K device state.
5062 */
5063 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5064 if (RT_LIKELY(rc == VINF_SUCCESS))
5065 {
5066 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5067 if (pDrv)
5068 {
5069 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5070 if (RT_FAILURE(rc))
5071 {
5072 e1kCsTxLeave(pThis);
5073 return rc;
5074 }
5075 }
5076 /*
5077 * Process all pending descriptors.
5078 * Note! Do not process descriptors in locked state
5079 */
5080 while (TDH != TDT && !pThis->fLocked)
5081 {
5082 E1KTXDESC desc;
5083 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5084 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5085
5086 e1kLoadDesc(pThis, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5087 rc = e1kXmitDesc(pThis, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5088 /* If we failed to transmit descriptor we will try it again later */
5089 if (RT_FAILURE(rc))
5090 break;
5091 if (++TDH * sizeof(desc) >= TDLEN)
5092 TDH = 0;
5093
5094 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5095 {
5096 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5097 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5098 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5099 }
5100
5101 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5102 }
5103
5104 /// @todo: uncomment: pThis->uStatIntTXQE++;
5105 /// @todo: uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5106 /*
5107 * Release the lock.
5108 */
5109 if (pDrv)
5110 pDrv->pfnEndXmit(pDrv);
5111 e1kCsTxLeave(pThis);
5112 }
5113
5114 return rc;
5115}
5116
5117#else /* E1K_WITH_TXD_CACHE */
5118
5119static void e1kDumpTxDCache(PE1KSTATE pThis)
5120{
5121 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5122 uint32_t tdh = TDH;
5123 LogRel(("-- Transmit Descriptors (%d total) --\n", cDescs));
5124 for (i = 0; i < cDescs; ++i)
5125 {
5126 E1KTXDESC desc;
5127 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(TDBAH, TDBAL, i),
5128 &desc, sizeof(desc));
5129 if (i == tdh)
5130 LogRel((">>> "));
5131 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5132 }
5133 LogRel(("-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5134 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
5135 if (tdh > pThis->iTxDCurrent)
5136 tdh -= pThis->iTxDCurrent;
5137 else
5138 tdh = cDescs + tdh - pThis->iTxDCurrent;
5139 for (i = 0; i < pThis->nTxDFetched; ++i)
5140 {
5141 if (i == pThis->iTxDCurrent)
5142 LogRel((">>> "));
5143 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5144 }
5145}
5146
5147/**
5148 * Transmit pending descriptors.
5149 *
5150 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5151 *
5152 * @param pThis The E1000 state.
5153 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5154 */
5155static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5156{
5157 int rc = VINF_SUCCESS;
5158
5159 /* Check if transmitter is enabled. */
5160 if (!(TCTL & TCTL_EN))
5161 return VINF_SUCCESS;
5162 /*
5163 * Grab the xmit lock of the driver as well as the E1K device state.
5164 */
5165 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5166 if (pDrv)
5167 {
5168 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5169 if (RT_FAILURE(rc))
5170 return rc;
5171 }
5172
5173 /*
5174 * Process all pending descriptors.
5175 * Note! Do not process descriptors in locked state
5176 */
5177 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5178 if (RT_LIKELY(rc == VINF_SUCCESS))
5179 {
5180 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5181 /*
5182 * fIncomplete is set whenever we try to fetch additional descriptors
5183         * for an incomplete packet. If we fail to locate a complete packet on
5184         * the next iteration, we need to reset the cache or we risk getting
5185         * stuck in this loop forever.
5186 */
5187 bool fIncomplete = false;
5188 while (!pThis->fLocked && e1kTxDLazyLoad(pThis))
5189 {
5190 while (e1kLocateTxPacket(pThis))
5191 {
5192 fIncomplete = false;
5193 /* Found a complete packet, allocate it. */
5194 rc = e1kXmitAllocBuf(pThis, pThis->fGSO);
5195 /* If we're out of bandwidth we'll come back later. */
5196 if (RT_FAILURE(rc))
5197 goto out;
5198 /* Copy the packet to allocated buffer and send it. */
5199 rc = e1kXmitPacket(pThis, fOnWorkerThread);
5200 /* If we're out of bandwidth we'll come back later. */
5201 if (RT_FAILURE(rc))
5202 goto out;
5203 }
5204 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5205 if (RT_UNLIKELY(fIncomplete))
5206 {
5207 static bool fTxDCacheDumped = false;
5208 /*
5209 * The descriptor cache is full, but we were unable to find
5210 * a complete packet in it. Drop the cache and hope that
5211                 * the guest driver can recover from the network card error.
5212 */
5213 LogRel(("%s No complete packets in%s TxD cache! "
5214 "Fetched=%d, current=%d, TX len=%d.\n",
5215 pThis->szPrf,
5216 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5217 pThis->nTxDFetched, pThis->iTxDCurrent,
5218 e1kGetTxLen(pThis)));
5219 if (!fTxDCacheDumped)
5220 {
5221 fTxDCacheDumped = true;
5222 e1kDumpTxDCache(pThis);
5223 }
5224 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5225 /*
5226 * Returning an error at this point means Guru in R0
5227 * (see @bugref{6428}).
5228 */
5229# ifdef IN_RING3
5230 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5231# else /* !IN_RING3 */
5232 rc = VINF_IOM_R3_MMIO_WRITE;
5233# endif /* !IN_RING3 */
5234 goto out;
5235 }
5236 if (u8Remain > 0)
5237 {
5238 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5239 "%d more are available\n",
5240 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5241 e1kGetTxLen(pThis) - u8Remain));
5242
5243 /*
5244 * A packet was partially fetched. Move incomplete packet to
5245 * the beginning of cache buffer, then load more descriptors.
5246 */
5247 memmove(pThis->aTxDescriptors,
5248 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5249 u8Remain * sizeof(E1KTXDESC));
5250 pThis->iTxDCurrent = 0;
5251 pThis->nTxDFetched = u8Remain;
5252 e1kTxDLoadMore(pThis);
5253 fIncomplete = true;
5254 }
5255 else
5256 pThis->nTxDFetched = 0;
5257 pThis->iTxDCurrent = 0;
5258 }
5259 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5260 {
5261 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5262 pThis->szPrf));
5263 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5264 }
5265out:
5266 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5267
5268 /// @todo: uncomment: pThis->uStatIntTXQE++;
5269 /// @todo: uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5270
5271 e1kCsTxLeave(pThis);
5272 }
5273
5274
5275 /*
5276 * Release the lock.
5277 */
5278 if (pDrv)
5279 pDrv->pfnEndXmit(pDrv);
5280 return rc;
5281}
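
/*
 * Illustrative sketch only, not part of the device code: the cache compaction
 * step performed in e1kXmitPending above when a packet has been fetched only
 * partially.  The descriptors belonging to the incomplete packet are moved to
 * the beginning of the cache so that more descriptors can be appended behind
 * them by the next fetch.  The helper name e1kExampleCompactTxdCache and its
 * parameter layout are assumptions of this sketch.
 */
static unsigned e1kExampleCompactTxdCache(E1KTXDESC *paCache, unsigned iCurrent, unsigned nFetched)
{
    unsigned cRemain = nFetched - iCurrent;         /* descriptors of the incomplete packet */
    if (cRemain > 0 && iCurrent > 0)
        memmove(paCache, &paCache[iCurrent], cRemain * sizeof(E1KTXDESC));
    return cRemain;                                 /* becomes the new nTxDFetched; iTxDCurrent is reset to 0 */
}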
5282
5283#endif /* E1K_WITH_TXD_CACHE */
5284#ifdef IN_RING3
5285
5286/**
5287 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5288 */
5289static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5290{
5291 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5292 /* Resume suspended transmission */
5293 STATUS &= ~STATUS_TXOFF;
5294 e1kXmitPending(pThis, true /*fOnWorkerThread*/);
5295}
5296
5297/**
5298 * Callback for consuming from transmit queue. It gets called in R3 whenever
5299 * we enqueue something in R0/GC.
5300 *
5301 * @returns true
5302 * @param pDevIns Pointer to device instance structure.
5303 * @param pItem Pointer to the element being dequeued (not used).
5304 * @thread ???
5305 */
5306static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5307{
5308 NOREF(pItem);
5309 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5310 E1kLog2(("%s e1kTxQueueConsumer:\n", pThis->szPrf));
5311
5312 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
5313#ifndef DEBUG_andy /** @todo r=andy Happens for me a lot, mute this for me. */
5314 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
5315#endif
5316 return true;
5317}
5318
5319/**
5320 * Handler for the wakeup signaller queue.
5321 */
5322static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5323{
5324    NOREF(pItem);
    e1kWakeupReceive(pDevIns);
5325 return true;
5326}
5327
5328#endif /* IN_RING3 */
5329
5330/**
5331 * Write handler for Transmit Descriptor Tail register.
5332 *
5333 * @param pThis The device state structure.
5334 * @param offset Register offset in memory-mapped frame.
5335 * @param index Register index in register array.
5336 * @param value The value to store.
5338 * @thread EMT
5339 */
5340static int e1kRegWriteTDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5341{
5342 int rc = e1kRegWriteDefault(pThis, offset, index, value);
5343
5344 /* All descriptors starting with head and not including tail belong to us. */
5345 /* Process them. */
5346 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5347 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5348
5349 /* Ignore TDT writes when the link is down. */
5350 if (TDH != TDT && (STATUS & STATUS_LU))
5351 {
5352 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
5353 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5354 pThis->szPrf, e1kGetTxLen(pThis)));
5355
5356 /* Transmit pending packets if possible, defer it if we cannot do it
5357 in the current context. */
5358#ifdef E1K_TX_DELAY
5359 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5360 if (RT_LIKELY(rc == VINF_SUCCESS))
5361 {
5362 if (!TMTimerIsActive(pThis->CTX_SUFF(pTXDTimer)))
5363 {
5364#ifdef E1K_INT_STATS
5365 pThis->u64ArmedAt = RTTimeNanoTS();
5366#endif
5367 e1kArmTimer(pThis, pThis->CTX_SUFF(pTXDTimer), E1K_TX_DELAY);
5368 }
5369 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5370 e1kCsTxLeave(pThis);
5371 return rc;
5372 }
5373 /* We failed to enter the TX critical section -- transmit as usual. */
5374#endif /* E1K_TX_DELAY */
5375#ifndef IN_RING3
5376 if (!pThis->CTX_SUFF(pDrv))
5377 {
5378 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pTxQueue));
5379 if (RT_UNLIKELY(pItem))
5380 PDMQueueInsert(pThis->CTX_SUFF(pTxQueue), pItem);
5381 }
5382 else
5383#endif
5384 {
5385 rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
5386 if (rc == VERR_TRY_AGAIN)
5387 rc = VINF_SUCCESS;
5388 else if (rc == VERR_SEM_BUSY)
5389 rc = VINF_IOM_R3_MMIO_WRITE;
5390 AssertRC(rc);
5391 }
5392 }
5393
5394 return rc;
5395}
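
/*
 * Illustrative sketch only, not part of the device code: how many descriptors
 * belong to the hardware after a TDT write, i.e. the "starting with head and
 * not including tail" rule mentioned above.  cDescsTotal is TDLEN divided by
 * the descriptor size.  The helper name e1kExampleTxRingOccupancy is
 * hypothetical; the device code obtains this value via e1kGetTxLen.
 */
static uint32_t e1kExampleTxRingOccupancy(uint32_t uHead, uint32_t uTail, uint32_t cDescsTotal)
{
    if (uTail >= uHead)
        return uTail - uHead;                   /* no wrap-around */
    return cDescsTotal - uHead + uTail;         /* tail has wrapped past the end of the ring */
}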
5396
5397/**
5398 * Write handler for Multicast Table Array registers.
5399 *
5400 * @param pThis The device state structure.
5401 * @param offset Register offset in memory-mapped frame.
5402 * @param index Register index in register array.
5403 * @param value The value to store.
5404 * @thread EMT
5405 */
5406static int e1kRegWriteMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5407{
5408 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5409 pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])] = value;
5410
5411 return VINF_SUCCESS;
5412}
5413
5414/**
5415 * Read handler for Multicast Table Array registers.
5416 *
5417 * @returns VBox status code.
5418 *
5419 * @param pThis The device state structure.
5420 * @param offset Register offset in memory-mapped frame.
5421 * @param index Register index in register array.
5422 * @thread EMT
5423 */
5424static int e1kRegReadMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5425{
5426    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5427 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5428
5429 return VINF_SUCCESS;
5430}
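
/*
 * Illustrative sketch only, not part of the device code: the offset-to-index
 * mapping shared by the MTA, RA and VFTA handlers in this area.  Each of these
 * "registers" is really an array of 32-bit entries starting at the base offset
 * recorded in the register map, so the entry index is simply the byte distance
 * from that base divided by four.  The helper name e1kExampleArrayRegIndex is
 * hypothetical.
 */
static uint32_t e1kExampleArrayRegIndex(uint32_t offReg, uint32_t offBase)
{
    return (offReg - offBase) / sizeof(uint32_t);   /* e.g. offBase + 8 selects entry 2 */
}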
5431
5432/**
5433 * Write handler for Receive Address registers.
5434 *
5435 * @param pThis The device state structure.
5436 * @param offset Register offset in memory-mapped frame.
5437 * @param index Register index in register array.
5438 * @param value The value to store.
5439 * @thread EMT
5440 */
5441static int e1kRegWriteRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5442{
5443 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5444 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5445
5446 return VINF_SUCCESS;
5447}
5448
5449/**
5450 * Read handler for Receive Address registers.
5451 *
5452 * @returns VBox status code.
5453 *
5454 * @param pThis The device state structure.
5455 * @param offset Register offset in memory-mapped frame.
5456 * @param index Register index in register array.
5457 * @thread EMT
5458 */
5459static int e1kRegReadRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5460{
5461    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5462 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5463
5464 return VINF_SUCCESS;
5465}
5466
5467/**
5468 * Write handler for VLAN Filter Table Array registers.
5469 *
5470 * @param pThis The device state structure.
5471 * @param offset Register offset in memory-mapped frame.
5472 * @param index Register index in register array.
5473 * @param value The value to store.
5474 * @thread EMT
5475 */
5476static int e1kRegWriteVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5477{
5478 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5479 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5480
5481 return VINF_SUCCESS;
5482}
5483
5484/**
5485 * Read handler for VLAN Filter Table Array registers.
5486 *
5487 * @returns VBox status code.
5488 *
5489 * @param pThis The device state structure.
5490 * @param offset Register offset in memory-mapped frame.
5491 * @param index Register index in register array.
5492 * @thread EMT
5493 */
5494static int e1kRegReadVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5495{
5496    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5497 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5498
5499 return VINF_SUCCESS;
5500}
5501
5502/**
5503 * Read handler for unimplemented registers.
5504 *
5505 * Merely reports reads from unimplemented registers.
5506 *
5507 * @returns VBox status code.
5508 *
5509 * @param pThis The device state structure.
5510 * @param offset Register offset in memory-mapped frame.
5511 * @param index Register index in register array.
5512 * @thread EMT
5513 */
5514static int e1kRegReadUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5515{
5516 RT_NOREF3(pThis, offset, index);
5517 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5518 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5519 *pu32Value = 0;
5520
5521 return VINF_SUCCESS;
5522}
5523
5524/**
5525 * Default register read handler with automatic clear operation.
5526 *
5527 * Retrieves the value of the register from the register array in the device state structure,
5528 * then clears all of its bits (read-to-clear semantics).
5529 *
5530 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5531 * done in the caller.
5532 *
5533 * @returns VBox status code.
5534 *
5535 * @param pThis The device state structure.
5536 * @param offset Register offset in memory-mapped frame.
5537 * @param index Register index in register array.
5538 * @thread EMT
5539 */
5540static int e1kRegReadAutoClear(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5541{
5542 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5543 int rc = e1kRegReadDefault(pThis, offset, index, pu32Value);
5544 pThis->auRegs[index] = 0;
5545
5546 return rc;
5547}
5548
5549/**
5550 * Default register read handler.
5551 *
5552 * Retrieves the value of the register from the register array in the device state structure.
5553 * Bits corresponding to 0s in the 'readable' mask always read as 0s.
5554 *
5555 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5556 * done in the caller.
5557 *
5558 * @returns VBox status code.
5559 *
5560 * @param pThis The device state structure.
5561 * @param offset Register offset in memory-mapped frame.
5562 * @param index Register index in register array.
5563 * @thread EMT
5564 */
5565static int e1kRegReadDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5566{
5567 RT_NOREF_PV(offset);
5568
5569 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5570 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
5571
5572 return VINF_SUCCESS;
5573}
5574
5575/**
5576 * Write handler for unimplemented registers.
5577 *
5578 * Merely reports writes to unimplemented registers.
5579 *
5580 * @param pThis The device state structure.
5581 * @param offset Register offset in memory-mapped frame.
5582 * @param index Register index in register array.
5583 * @param value The value to store.
5584 * @thread EMT
5585 */
5586
5587static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5588{
5589 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
5590
5591 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5592 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5593
5594 return VINF_SUCCESS;
5595}
5596
5597/**
5598 * Default register write handler.
5599 *
5600 * Stores the value to the register array in device state structure. Only bits
5601 * corresponding to 1s both in 'writable' and 'mask' will be stored.
5602 *
5603 * @returns VBox status code.
5604 *
5605 * @param pThis The device state structure.
5606 * @param offset Register offset in memory-mapped frame.
5607 * @param index Register index in register array.
5608 * @param value The value to store.
5610 * @thread EMT
5611 */
5612
5613static int e1kRegWriteDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5614{
5615 RT_NOREF_PV(offset);
5616
5617 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5618 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
5619 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
5620
5621 return VINF_SUCCESS;
5622}
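
/*
 * Illustrative sketch only, not part of the device code: the read-modify-write
 * merge performed by e1kRegWriteDefault above.  Bits marked writable in the
 * register map take the new value, all other bits keep their previous value.
 * The helper name e1kExampleMaskedWrite is hypothetical.
 */
static uint32_t e1kExampleMaskedWrite(uint32_t uOld, uint32_t uNew, uint32_t fWritable)
{
    return (uNew & fWritable) | (uOld & ~fWritable);
}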
5623
5624/**
5625 * Search register table for matching register.
5626 *
5627 * @returns Index in the register table or -1 if not found.
5628 *
5629 * @param offReg Register offset in memory-mapped region.
5630 * @thread EMT
5631 */
5632static int e1kRegLookup(uint32_t offReg)
5633{
5634
5635#if 0
5636 int index;
5637
5638 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5639 {
5640 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
5641 {
5642 return index;
5643 }
5644 }
5645#else
5646 int iStart = 0;
5647 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
5648 for (;;)
5649 {
5650 int i = (iEnd - iStart) / 2 + iStart;
5651 uint32_t offCur = g_aE1kRegMap[i].offset;
5652 if (offReg < offCur)
5653 {
5654 if (i == iStart)
5655 break;
5656 iEnd = i;
5657 }
5658 else if (offReg >= offCur + g_aE1kRegMap[i].size)
5659 {
5660 i++;
5661 if (i == iEnd)
5662 break;
5663 iStart = i;
5664 }
5665 else
5666 return i;
5667 Assert(iEnd > iStart);
5668 }
5669
5670 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5671 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
5672 return i;
5673
5674# ifdef VBOX_STRICT
5675 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5676 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
5677# endif
5678
5679#endif
5680
5681 return -1;
5682}
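
/*
 * Illustrative sketch only, not part of the device code: a plain binary search
 * over a table of non-overlapping, offset-sorted ranges, which is what the
 * first part of e1kRegLookup above performs on the searchable portion of
 * g_aE1kRegMap (the tail of that table is not sorted and is scanned linearly).
 * The structure E1KEXAMPLERANGE and the helper name are assumptions of this
 * sketch.
 */
typedef struct E1KEXAMPLERANGE
{
    uint32_t offStart;  /* first byte offset covered by the entry */
    uint32_t cb;        /* number of bytes covered */
} E1KEXAMPLERANGE;

static int e1kExampleRangeLookup(const E1KEXAMPLERANGE *paRanges, unsigned cRanges, uint32_t off)
{
    unsigned iLo = 0;
    unsigned iHi = cRanges;
    while (iLo < iHi)
    {
        unsigned i = iLo + (iHi - iLo) / 2;
        if (off < paRanges[i].offStart)
            iHi = i;                                        /* target lies below this entry */
        else if (off - paRanges[i].offStart >= paRanges[i].cb)
            iLo = i + 1;                                    /* target lies above this entry */
        else
            return (int)i;                                  /* off falls inside the entry */
    }
    return -1;                                              /* no entry covers the offset */
}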
5683
5684/**
5685 * Handle unaligned register read operation.
5686 *
5687 * Looks up and calls appropriate handler.
5688 *
5689 * @returns VBox status code.
5690 *
5691 * @param pThis The device state structure.
5692 * @param offReg Register offset in memory-mapped frame.
5693 * @param pv Where to store the result.
5694 * @param cb Number of bytes to read.
5695 * @thread EMT
5696 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
5697 * accesses we have to take care of that ourselves.
5698 */
5699static int e1kRegReadUnaligned(PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
5700{
5701 uint32_t u32 = 0;
5702 uint32_t shift;
5703 int rc = VINF_SUCCESS;
5704 int index = e1kRegLookup(offReg);
5705#ifdef LOG_ENABLED
5706 char buf[9];
5707#endif
5708
5709 /*
5710 * From the spec:
5711 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5712 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5713 */
5714
5715 /*
5716 * To be able to read bytes and short word we convert them to properly
5717 * shifted 32-bit words and masks. The idea is to keep register-specific
5718 * handlers simple. Most accesses will be 32-bit anyway.
5719 */
5720 uint32_t mask;
5721 switch (cb)
5722 {
5723 case 4: mask = 0xFFFFFFFF; break;
5724 case 2: mask = 0x0000FFFF; break;
5725 case 1: mask = 0x000000FF; break;
5726 default:
5727 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
5728 "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
5729 }
5730 if (index != -1)
5731 {
5732 if (g_aE1kRegMap[index].readable)
5733 {
5734 /* Make the mask correspond to the bits we are about to read. */
5735 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5736 mask <<= shift;
5737 if (!mask)
5738 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
5739 /*
5740 * Read it. Pass the mask so the handler knows what has to be read.
5741 * Mask out irrelevant bits.
5742 */
5743 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5744 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5745 return rc;
5746 //pThis->fDelayInts = false;
5747 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5748 //pThis->iStatIntLostOne = 0;
5749 rc = g_aE1kRegMap[index].pfnRead(pThis, offReg & 0xFFFFFFFC, index, &u32);
5750 u32 &= mask;
5751 //e1kCsLeave(pThis);
5752 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5753 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5754 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
5755 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5756 /* Shift back the result. */
5757 u32 >>= shift;
5758 }
5759 else
5760 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5761 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5762 if (IOM_SUCCESS(rc))
5763 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
5764 }
5765 else
5766 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5767 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
5768
5769 memcpy(pv, &u32, cb);
5770 return rc;
5771}
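
/*
 * Illustrative sketch only, not part of the device code: how a 1- or 2-byte
 * access is turned into a shifted 32-bit mask in e1kRegReadUnaligned above, so
 * that the register handlers only ever deal with aligned dwords.  The caller
 * masks the handler result with the returned value and shifts it right by
 * *puShift.  The helper name e1kExampleSubDwordMask is hypothetical.
 */
static uint32_t e1kExampleSubDwordMask(uint32_t offReg, uint32_t offRegBase, uint32_t cbAccess, uint32_t *puShift)
{
    uint32_t fMask = cbAccess == 4 ? UINT32_C(0xffffffff)
                   : cbAccess == 2 ? UINT32_C(0x0000ffff)
                   :                 UINT32_C(0x000000ff);
    *puShift = (offReg - offRegBase) % sizeof(uint32_t) * 8;  /* bit position of the accessed bytes */
    return fMask << *puShift;                                 /* bits the access actually covers */
}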
5772
5773/**
5774 * Handle 4 byte aligned and sized read operation.
5775 *
5776 * Looks up and calls appropriate handler.
5777 *
5778 * @returns VBox status code.
5779 *
5780 * @param pThis The device state structure.
5781 * @param offReg Register offset in memory-mapped frame.
5782 * @param pu32 Where to store the result.
5783 * @thread EMT
5784 */
5785static int e1kRegReadAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
5786{
5787 Assert(!(offReg & 3));
5788
5789 /*
5790 * Lookup the register and check that it's readable.
5791 */
5792 int rc = VINF_SUCCESS;
5793 int idxReg = e1kRegLookup(offReg);
5794 if (RT_LIKELY(idxReg != -1))
5795 {
5796 if (RT_UNLIKELY(g_aE1kRegMap[idxReg].readable))
5797 {
5798 /*
5799 * Read it. Pass the mask so the handler knows what has to be read.
5800 * Mask out irrelevant bits.
5801 */
5802 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5803 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5804 // return rc;
5805 //pThis->fDelayInts = false;
5806 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5807 //pThis->iStatIntLostOne = 0;
5808 rc = g_aE1kRegMap[idxReg].pfnRead(pThis, offReg & 0xFFFFFFFC, idxReg, pu32);
5809 //e1kCsLeave(pThis);
5810 Log6(("%s At %08X read %08X from %s (%s)\n",
5811 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5812 if (IOM_SUCCESS(rc))
5813 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
5814 }
5815 else
5816 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
5817 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5818 }
5819 else
5820 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
5821 return rc;
5822}
5823
5824/**
5825 * Handle 4 byte sized and aligned register write operation.
5826 *
5827 * Looks up and calls appropriate handler.
5828 *
5829 * @returns VBox status code.
5830 *
5831 * @param pThis The device state structure.
5832 * @param offReg Register offset in memory-mapped frame.
5833 * @param u32Value The value to write.
5834 * @thread EMT
5835 */
5836static int e1kRegWriteAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
5837{
5838 int rc = VINF_SUCCESS;
5839 int index = e1kRegLookup(offReg);
5840 if (RT_LIKELY(index != -1))
5841 {
5842 if (RT_LIKELY(g_aE1kRegMap[index].writable))
5843 {
5844 /*
5845 * Write it. Pass the mask so the handler knows what has to be written.
5846 * Mask out irrelevant bits.
5847 */
5848 Log6(("%s At %08X write %08X to %s (%s)\n",
5849 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5850 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5851 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5852 // return rc;
5853 //pThis->fDelayInts = false;
5854 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5855 //pThis->iStatIntLostOne = 0;
5856 rc = g_aE1kRegMap[index].pfnWrite(pThis, offReg, index, u32Value);
5857 //e1kCsLeave(pThis);
5858 }
5859 else
5860 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
5861 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5862 if (IOM_SUCCESS(rc))
5863 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
5864 }
5865 else
5866 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
5867 pThis->szPrf, offReg, u32Value));
5868 return rc;
5869}
5870
5871
5872/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
5873
5874/**
5875 * @callback_method_impl{FNIOMMMIOREAD}
5876 */
5877PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
5878{
5879 RT_NOREF2(pvUser, cb);
5880 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5881 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
5882
5883 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
5884 Assert(offReg < E1K_MM_SIZE);
5885 Assert(cb == 4);
5886 Assert(!(GCPhysAddr & 3));
5887
5888 int rc = e1kRegReadAlignedU32(pThis, offReg, (uint32_t *)pv);
5889
5890 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
5891 return rc;
5892}
5893
5894/**
5895 * @callback_method_impl{FNIOMMMIOWRITE}
5896 */
5897PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
5898{
5899 RT_NOREF2(pvUser, cb);
5900 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5901 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
5902
5903 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
5904 Assert(offReg < E1K_MM_SIZE);
5905 Assert(cb == 4);
5906 Assert(!(GCPhysAddr & 3));
5907
5908 int rc = e1kRegWriteAlignedU32(pThis, offReg, *(uint32_t const *)pv);
5909
5910 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
5911 return rc;
5912}
5913
5914/**
5915 * @callback_method_impl{FNIOMIOPORTIN}
5916 */
5917PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t *pu32, unsigned cb)
5918{
5919 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5920 int rc;
5921 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
5922 RT_NOREF_PV(pvUser);
5923
5924 uPort -= pThis->IOPortBase;
5925 if (RT_LIKELY(cb == 4))
5926 switch (uPort)
5927 {
5928 case 0x00: /* IOADDR */
5929 *pu32 = pThis->uSelectedReg;
5930 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
5931 rc = VINF_SUCCESS;
5932 break;
5933
5934 case 0x04: /* IODATA */
5935 if (!(pThis->uSelectedReg & 3))
5936 rc = e1kRegReadAlignedU32(pThis, pThis->uSelectedReg, pu32);
5937 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
5938 rc = e1kRegReadUnaligned(pThis, pThis->uSelectedReg, pu32, cb);
5939 if (rc == VINF_IOM_R3_MMIO_READ)
5940 rc = VINF_IOM_R3_IOPORT_READ;
5941 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
5942 break;
5943
5944 default:
5945 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, uPort));
5946 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
5947 rc = VINF_SUCCESS;
5948 }
5949 else
5950 {
5951        E1kLog(("%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
5952 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb);
5953 }
5954 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
5955 return rc;
5956}
5957
5958
5959/**
5960 * @callback_method_impl{FNIOMIOPORTOUT}
5961 */
5962PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t u32, unsigned cb)
5963{
5964 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5965 int rc;
5966 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
5967 RT_NOREF_PV(pvUser);
5968
5969 E1kLog2(("%s e1kIOPortOut: uPort=%RTiop value=%08x\n", pThis->szPrf, uPort, u32));
5970 if (RT_LIKELY(cb == 4))
5971 {
5972 uPort -= pThis->IOPortBase;
5973 switch (uPort)
5974 {
5975 case 0x00: /* IOADDR */
5976 pThis->uSelectedReg = u32;
5977 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
5978 rc = VINF_SUCCESS;
5979 break;
5980
5981 case 0x04: /* IODATA */
5982 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
5983 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
5984 {
5985 rc = e1kRegWriteAlignedU32(pThis, pThis->uSelectedReg, u32);
5986 if (rc == VINF_IOM_R3_MMIO_WRITE)
5987 rc = VINF_IOM_R3_IOPORT_WRITE;
5988 }
5989 else
5990 rc = PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
5991 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
5992 break;
5993
5994 default:
5995 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, uPort));
5996 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", uPort);
5997 }
5998 }
5999 else
6000 {
6001 E1kLog(("%s e1kIOPortOut: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
6002 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: uPort=%RTiop cb=%#x\n", pThis->szPrf, uPort, cb);
6003 }
6004
6005 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6006 return rc;
6007}
6008
6009#ifdef IN_RING3
6010
6011/**
6012 * Dump complete device state to log.
6013 *
6014 * @param pThis Pointer to device state.
6015 */
6016static void e1kDumpState(PE1KSTATE pThis)
6017{
6018 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6019 {
6020 E1kLog2(("%s %8.8s = %08x\n", pThis->szPrf,
6021 g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6022 }
6023# ifdef E1K_INT_STATS
6024 LogRel(("%s Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6025 LogRel(("%s Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6026 LogRel(("%s Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6027 LogRel(("%s Interrupts delayed: %d\n", pThis->szPrf, pThis->uStatIntDly));
6028 LogRel(("%s Disabled delayed: %d\n", pThis->szPrf, pThis->uStatDisDly));
6029 LogRel(("%s Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6030 LogRel(("%s Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6031 LogRel(("%s Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6032 LogRel(("%s Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6033 LogRel(("%s Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6034 LogRel(("%s Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6035 LogRel(("%s Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6036 LogRel(("%s Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6037 LogRel(("%s Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6038 LogRel(("%s Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6039 LogRel(("%s Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6040 LogRel(("%s TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6041 LogRel(("%s TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6042 LogRel(("%s TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6043 LogRel(("%s TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6044 LogRel(("%s TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6045 LogRel(("%s TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6046 LogRel(("%s RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6047 LogRel(("%s RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6048 LogRel(("%s TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6049 LogRel(("%s TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6050 LogRel(("%s TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6051 LogRel(("%s Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6052 LogRel(("%s Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6053 LogRel(("%s TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6054 LogRel(("%s TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6055 LogRel(("%s TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6056 LogRel(("%s TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6057 LogRel(("%s TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6058 LogRel(("%s TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6059 LogRel(("%s TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6060 LogRel(("%s TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6061 LogRel(("%s Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6062 LogRel(("%s Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6063# endif /* E1K_INT_STATS */
6064}
6065
6066/**
6067 * @callback_method_impl{FNPCIIOREGIONMAP}
6068 */
6069static DECLCALLBACK(int) e1kMap(PPCIDEVICE pPciDev, int iRegion, RTGCPHYS GCPhysAddress, uint32_t cb, PCIADDRESSSPACE enmType)
6070{
6071 PE1KSTATE pThis = PDMINS_2_DATA(pPciDev->pDevIns, E1KSTATE*);
6072 int rc;
6073
6074 switch (enmType)
6075 {
6076 case PCI_ADDRESS_SPACE_IO:
6077 pThis->IOPortBase = (RTIOPORT)GCPhysAddress;
6078 rc = PDMDevHlpIOPortRegister(pPciDev->pDevIns, pThis->IOPortBase, cb, NULL /*pvUser*/,
6079 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
6080 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6081 rc = PDMDevHlpIOPortRegisterR0(pPciDev->pDevIns, pThis->IOPortBase, cb, NIL_RTR0PTR /*pvUser*/,
6082 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6083 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6084 rc = PDMDevHlpIOPortRegisterRC(pPciDev->pDevIns, pThis->IOPortBase, cb, NIL_RTRCPTR /*pvUser*/,
6085 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6086 break;
6087
6088 case PCI_ADDRESS_SPACE_MEM:
6089 /*
6090 * From the spec:
6091 * For registers that should be accessed as 32-bit double words,
6092 * partial writes (less than a 32-bit double word) is ignored.
6093 * Partial reads return all 32 bits of data regardless of the
6094 * byte enables.
6095 */
6096 pThis->addrMMReg = GCPhysAddress; Assert(!(GCPhysAddress & 7));
6097 rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
6098 IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
6099 e1kMMIOWrite, e1kMMIORead, "E1000");
6100 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6101 rc = PDMDevHlpMMIORegisterR0(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
6102 "e1kMMIOWrite", "e1kMMIORead");
6103 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6104 rc = PDMDevHlpMMIORegisterRC(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
6105 "e1kMMIOWrite", "e1kMMIORead");
6106 break;
6107
6108 default:
6109 /* We should never get here */
6110 AssertMsgFailed(("Invalid PCI address space param in map callback"));
6111 rc = VERR_INTERNAL_ERROR;
6112 break;
6113 }
6114 return rc;
6115}
6116
6117
6118/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6119
6120/**
6121 * Check if the device can receive data now.
6122 * This must be called before the pfnReceive() method is called.
6123 *
6124 * @returns VINF_SUCCESS if the device can receive data, VERR_NET_NO_BUFFER_SPACE otherwise.
6125 * @param pThis The device state structure.
6126 * @thread EMT
6127 */
6128static int e1kCanReceive(PE1KSTATE pThis)
6129{
6130#ifndef E1K_WITH_RXD_CACHE
6131 size_t cb;
6132
6133 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6134 return VERR_NET_NO_BUFFER_SPACE;
6135
6136 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6137 {
6138 E1KRXDESC desc;
6139 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6140 &desc, sizeof(desc));
6141 if (desc.status.fDD)
6142 cb = 0;
6143 else
6144 cb = pThis->u16RxBSize;
6145 }
6146 else if (RDH < RDT)
6147 cb = (RDT - RDH) * pThis->u16RxBSize;
6148 else if (RDH > RDT)
6149 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6150 else
6151 {
6152 cb = 0;
6153 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6154 }
6155 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6156 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6157
6158 e1kCsRxLeave(pThis);
6159 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6160#else /* E1K_WITH_RXD_CACHE */
6161 int rc = VINF_SUCCESS;
6162
6163 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6164 return VERR_NET_NO_BUFFER_SPACE;
6165
6166 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6167 {
6168 E1KRXDESC desc;
6169 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6170 &desc, sizeof(desc));
6171 if (desc.status.fDD)
6172 rc = VERR_NET_NO_BUFFER_SPACE;
6173 }
6174 else if (e1kRxDIsCacheEmpty(pThis) && RDH == RDT)
6175 {
6176 /* Cache is empty, so is the RX ring. */
6177 rc = VERR_NET_NO_BUFFER_SPACE;
6178 }
6179 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6180 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6181 e1kRxDInCache(pThis), RDH, RDT, RDLEN, pThis->u16RxBSize, rc));
6182
6183 e1kCsRxLeave(pThis);
6184 return rc;
6185#endif /* E1K_WITH_RXD_CACHE */
6186}
6187
6188/**
6189 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6190 */
6191static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6192{
6193 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6194 int rc = e1kCanReceive(pThis);
6195
6196 if (RT_SUCCESS(rc))
6197 return VINF_SUCCESS;
6198 if (RT_UNLIKELY(cMillies == 0))
6199 return VERR_NET_NO_BUFFER_SPACE;
6200
6201 rc = VERR_INTERRUPTED;
6202 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6203 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6204 VMSTATE enmVMState;
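    /* Keep polling only while the VM is actually running (including the live-save state);
     * otherwise stop waiting and return. */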
6205 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pThis->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
6206 || enmVMState == VMSTATE_RUNNING_LS))
6207 {
6208 int rc2 = e1kCanReceive(pThis);
6209 if (RT_SUCCESS(rc2))
6210 {
6211 rc = VINF_SUCCESS;
6212 break;
6213 }
6214 E1kLogRel(("E1000 e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6215 E1kLog(("%s e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
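        /* Block until we get poked (see e1kWakeupReceive) or the timeout expires, then re-check. */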
6216 RTSemEventWait(pThis->hEventMoreRxDescAvail, cMillies);
6217 }
6218 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6219 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6220
6221 return rc;
6222}
6223
6224
6225/**
6226 * Matches the packet addresses against Receive Address table. Looks for
6227 * exact matches only.
6228 *
6229 * @returns true if address matches.
6230 * @param pThis Pointer to the state structure.
6231 * @param pvBuf The ethernet packet.
6233 * @thread EMT
6234 */
6235static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6236{
6237 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6238 {
6239 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6240
6241 /* Valid address? */
6242 if (ra->ctl & RA_CTL_AV)
6243 {
6244 Assert((ra->ctl & RA_CTL_AS) < 2);
6245 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6246 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6247 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6248 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6249 /*
6250 * Address Select:
6251 * 00b = Destination address
6252 * 01b = Source address
6253 * 10b = Reserved
6254 * 11b = Reserved
6255 * Since ethernet header is (DA, SA, len) we can use address
6256 * select as index.
6257 */
6258 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6259 ra->addr, sizeof(ra->addr)) == 0)
6260 return true;
6261 }
6262 }
6263
6264 return false;
6265}
6266
6267/**
6268 * Matches the packet addresses against Multicast Table Array.
6269 *
6270 * @remarks This is an imperfect match since it matches not an exact address
6271 *          but a subset of addresses.
6272 *
6273 * @returns true if address matches.
6274 * @param pThis Pointer to the state structure.
6275 * @param pvBuf The ethernet packet.
6277 * @thread EMT
6278 */
6279static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6280{
6281 /* Get bits 32..47 of destination address */
6282 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6283
6284 unsigned offset = GET_BITS(RCTL, MO);
6285 /*
6286 * offset means:
6287 * 00b = bits 36..47
6288 * 01b = bits 35..46
6289 * 10b = bits 34..45
6290 * 11b = bits 32..43
6291 */
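    /*
     * Shift the twelve selected bits into the low-order bits of u16Bit: MO=00b requires a
     * shift by 4, 01b by 3, 10b by 2, while 11b uses the value as is (masked to 12 bits below).
     */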
6292 if (offset < 3)
6293 u16Bit = u16Bit >> (4 - offset);
6294 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6295}
6296
6297/**
6298 * Determines if the packet is to be delivered to the upper layer.
6299 *
6300 * The following filters are supported:
6301 * - Exact Unicast/Multicast
6302 * - Promiscuous Unicast/Multicast
6303 * - Multicast
6304 * - VLAN
6305 *
6306 * @returns true if packet is intended for this node.
6307 * @param pThis Pointer to the state structure.
6308 * @param pvBuf The ethernet packet.
6309 * @param cb Number of bytes available in the packet.
6310 * @param pStatus Bit field to store status bits.
6311 * @thread EMT
6312 */
6313static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6314{
6315 Assert(cb > 14);
6316 /* Assume that we fail to pass exact filter. */
6317 pStatus->fPIF = false;
6318 pStatus->fVP = false;
6319 /* Discard oversized packets */
6320 if (cb > E1K_MAX_RX_PKT_SIZE)
6321 {
6322 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6323 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6324 E1K_INC_CNT32(ROC);
6325 return false;
6326 }
6327 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6328 {
6329             /* When long packet reception is disabled, packets over 1522 bytes are discarded */
6330 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6331 pThis->szPrf, cb));
6332 E1K_INC_CNT32(ROC);
6333 return false;
6334 }
6335
6336 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6337 /* Compare TPID with VLAN Ether Type */
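    /* u16Ptr[6] is the EtherType/TPID word at byte offset 12; u16Ptr[7] is the 802.1q TCI. */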
6338 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6339 {
6340 pStatus->fVP = true;
6341 /* Is VLAN filtering enabled? */
6342 if (RCTL & RCTL_VFE)
6343 {
6344             /* It is indeed an 802.1q packet, let's filter by VID */
6345 if (RCTL & RCTL_CFIEN)
6346 {
6347 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6348 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6349 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6350 !!(RCTL & RCTL_CFI)));
6351 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6352 {
6353 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6354 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6355 return false;
6356 }
6357 }
6358 else
6359 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6360 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6361 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6362 {
6363 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6364 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6365 return false;
6366 }
6367 }
6368 }
6369 /* Broadcast filtering */
6370 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6371 return true;
6372 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6373 if (e1kIsMulticast(pvBuf))
6374 {
6375 /* Is multicast promiscuous enabled? */
6376 if (RCTL & RCTL_MPE)
6377 return true;
6378 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6379 /* Try perfect matches first */
6380 if (e1kPerfectMatch(pThis, pvBuf))
6381 {
6382 pStatus->fPIF = true;
6383 return true;
6384 }
6385 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6386 if (e1kImperfectMatch(pThis, pvBuf))
6387 return true;
6388 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6389 }
6390 else {
6391 /* Is unicast promiscuous enabled? */
6392 if (RCTL & RCTL_UPE)
6393 return true;
6394 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6395 if (e1kPerfectMatch(pThis, pvBuf))
6396 {
6397 pStatus->fPIF = true;
6398 return true;
6399 }
6400 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6401 }
6402 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6403 return false;
6404}
6405
6406/**
6407 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6408 */
6409static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6410{
6411 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6412 int rc = VINF_SUCCESS;
6413
6414 /*
6415 * Drop packets if the VM is not running yet/anymore.
6416 */
6417 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pThis));
6418 if ( enmVMState != VMSTATE_RUNNING
6419 && enmVMState != VMSTATE_RUNNING_LS)
6420 {
6421 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6422 return VINF_SUCCESS;
6423 }
6424
6425 /* Discard incoming packets in locked state */
6426 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6427 {
6428 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6429 return VINF_SUCCESS;
6430 }
6431
6432 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6433
6434 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6435 // return VERR_PERMISSION_DENIED;
6436
6437 e1kPacketDump(pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6438
6439 /* Update stats */
6440 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6441 {
6442 E1K_INC_CNT32(TPR);
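        /* Total octets received are counted with short frames padded to the 64-byte minimum. */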
6443 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6444 e1kCsLeave(pThis);
6445 }
6446 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6447 E1KRXDST status;
6448 RT_ZERO(status);
6449 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6450 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6451 if (fPassed)
6452 {
6453 rc = e1kHandleRxPacket(pThis, pvBuf, cb, status);
6454 }
6455 //e1kCsLeave(pThis);
6456 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6457
6458 return rc;
6459}
6460
6461
6462/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6463
6464/**
6465 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6466 */
6467static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6468{
6469 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
6470 int rc = VERR_PDM_LUN_NOT_FOUND;
6471
6472 if (iLUN == 0)
6473 {
6474 *ppLed = &pThis->led;
6475 rc = VINF_SUCCESS;
6476 }
6477 return rc;
6478}
6479
6480
6481/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6482
6483/**
6484 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6485 */
6486static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6487{
6488 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6489 pThis->eeprom.getMac(pMac);
6490 return VINF_SUCCESS;
6491}
6492
6493/**
6494 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6495 */
6496static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6497{
6498 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6499 if (STATUS & STATUS_LU)
6500 return PDMNETWORKLINKSTATE_UP;
6501 return PDMNETWORKLINKSTATE_DOWN;
6502}
6503
6504/**
6505 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6506 */
6507static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6508{
6509 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6510
6511 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6512 switch (enmState)
6513 {
6514 case PDMNETWORKLINKSTATE_UP:
6515 pThis->fCableConnected = true;
6516 /* If link was down, bring it up after a while. */
6517 if (!(STATUS & STATUS_LU))
6518 e1kBringLinkUpDelayed(pThis);
6519 break;
6520 case PDMNETWORKLINKSTATE_DOWN:
6521 pThis->fCableConnected = false;
6522 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6523 * We might have to set the link state before the driver initializes us. */
6524 Phy::setLinkStatus(&pThis->phy, false);
6525 /* If link was up, bring it down. */
6526 if (STATUS & STATUS_LU)
6527 e1kR3LinkDown(pThis);
6528 break;
6529 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6530 /*
6531 * There is not much sense in bringing down the link if it has not come up yet.
6532              * If it is up though, we bring it down temporarily, then bring it up again.
6533 */
6534 if (STATUS & STATUS_LU)
6535 e1kR3LinkDownTemp(pThis);
6536 break;
6537 default:
6538 ;
6539 }
6540 return VINF_SUCCESS;
6541}
6542
6543
6544/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6545
6546/**
6547 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6548 */
6549static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6550{
6551 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
6552 Assert(&pThis->IBase == pInterface);
6553
6554 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
6555 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
6556 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
6557 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
6558 return NULL;
6559}
6560
6561
6562/* -=-=-=-=- Saved State -=-=-=-=- */
6563
6564/**
6565 * Saves the configuration.
6566 *
6567 * @param pThis The E1K state.
6568 * @param pSSM The handle to the saved state.
6569 */
6570static void e1kSaveConfig(PE1KSTATE pThis, PSSMHANDLE pSSM)
6571{
6572 SSMR3PutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
6573 SSMR3PutU32(pSSM, pThis->eChip);
6574}
6575
6576/**
6577 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6578 */
6579static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6580{
6581 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6582 e1kSaveConfig(pThis, pSSM);
6583 return VINF_SSM_DONT_CALL_AGAIN;
6584}
6585
6586/**
6587 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6588 */
6589static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6590{
6591 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6592
6593 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6594 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6595 return rc;
6596 e1kCsLeave(pThis);
6597 return VINF_SUCCESS;
6598#if 0
6599 /* 1) Prevent all threads from modifying the state and memory */
6600 //pThis->fLocked = true;
6601 /* 2) Cancel all timers */
6602#ifdef E1K_TX_DELAY
6603 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
6604#endif /* E1K_TX_DELAY */
6605#ifdef E1K_USE_TX_TIMERS
6606 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
6607#ifndef E1K_NO_TAD
6608 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
6609#endif /* E1K_NO_TAD */
6610#endif /* E1K_USE_TX_TIMERS */
6611#ifdef E1K_USE_RX_TIMERS
6612 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
6613 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
6614#endif /* E1K_USE_RX_TIMERS */
6615 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
6616 /* 3) Did I forget anything? */
6617 E1kLog(("%s Locked\n", pThis->szPrf));
6618 return VINF_SUCCESS;
6619#endif
6620}
6621
6622/**
6623 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6624 */
6625static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6626{
6627 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6628
6629 e1kSaveConfig(pThis, pSSM);
6630 pThis->eeprom.save(pSSM);
6631 e1kDumpState(pThis);
6632 SSMR3PutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
6633 SSMR3PutBool(pSSM, pThis->fIntRaised);
6634 Phy::saveState(pSSM, &pThis->phy);
6635 SSMR3PutU32(pSSM, pThis->uSelectedReg);
6636 SSMR3PutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
6637 SSMR3PutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6638 SSMR3PutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
6639 SSMR3PutU64(pSSM, pThis->u64AckedAt);
6640 SSMR3PutU16(pSSM, pThis->u16RxBSize);
6641 //SSMR3PutBool(pSSM, pThis->fDelayInts);
6642 //SSMR3PutBool(pSSM, pThis->fIntMaskUsed);
6643 SSMR3PutU16(pSSM, pThis->u16TxPktLen);
6644/** @todo State wrt the TSE buffer is incomplete, so there is little point in
6645 * saving this actually. */
6646 SSMR3PutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
6647 SSMR3PutBool(pSSM, pThis->fIPcsum);
6648 SSMR3PutBool(pSSM, pThis->fTCPcsum);
6649 SSMR3PutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6650 SSMR3PutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6651 SSMR3PutBool(pSSM, pThis->fVTag);
6652 SSMR3PutU16(pSSM, pThis->u16VTagTCI);
6653#ifdef E1K_WITH_TXD_CACHE
6654#if 0
6655 SSMR3PutU8(pSSM, pThis->nTxDFetched);
6656 SSMR3PutMem(pSSM, pThis->aTxDescriptors,
6657 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6658#else
6659 /*
6660 * There is no point in storing TX descriptor cache entries as we can simply
6661 * fetch them again. Moreover, normally the cache is always empty when we
6662 * save the state. Store zero entries for compatibility.
6663 */
6664 SSMR3PutU8(pSSM, 0);
6665#endif
6666#endif /* E1K_WITH_TXD_CACHE */
6667/** @todo GSO requires some more state here. */
6668 E1kLog(("%s State has been saved\n", pThis->szPrf));
6669 return VINF_SUCCESS;
6670}
6671
6672#if 0
6673/**
6674 * @callback_method_impl{FNSSMDEVSAVEDONE}
6675 */
6676static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6677{
6678 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6679
6680 /* If VM is being powered off unlocking will result in assertions in PGM */
6681 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6682 pThis->fLocked = false;
6683 else
6684 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
6685 E1kLog(("%s Unlocked\n", pThis->szPrf));
6686 return VINF_SUCCESS;
6687}
6688#endif
6689
6690/**
6691 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6692 */
6693static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6694{
6695 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6696
6697 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6698 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6699 return rc;
6700 e1kCsLeave(pThis);
6701 return VINF_SUCCESS;
6702}
6703
6704/**
6705 * @callback_method_impl{FNSSMDEVLOADEXEC}
6706 */
6707static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6708{
6709 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6710 int rc;
6711
6712 if ( uVersion != E1K_SAVEDSTATE_VERSION
6713#ifdef E1K_WITH_TXD_CACHE
6714 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6715#endif /* E1K_WITH_TXD_CACHE */
6716 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6717 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6718 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6719
6720 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6721 || uPass != SSM_PASS_FINAL)
6722 {
6723 /* config checks */
6724 RTMAC macConfigured;
6725 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6726 AssertRCReturn(rc, rc);
6727 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
6728 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6729 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
6730
6731 E1KCHIP eChip;
6732 rc = SSMR3GetU32(pSSM, &eChip);
6733 AssertRCReturn(rc, rc);
6734 if (eChip != pThis->eChip)
6735 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
6736 }
6737
6738 if (uPass == SSM_PASS_FINAL)
6739 {
6740 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6741 {
6742 rc = pThis->eeprom.load(pSSM);
6743 AssertRCReturn(rc, rc);
6744 }
6745 /* the state */
6746 SSMR3GetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
6747 SSMR3GetBool(pSSM, &pThis->fIntRaised);
6748 /** @todo: PHY could be made a separate device with its own versioning */
6749 Phy::loadState(pSSM, &pThis->phy);
6750 SSMR3GetU32(pSSM, &pThis->uSelectedReg);
6751 SSMR3GetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
6752 SSMR3GetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6753 SSMR3GetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
6754 SSMR3GetU64(pSSM, &pThis->u64AckedAt);
6755 SSMR3GetU16(pSSM, &pThis->u16RxBSize);
6756 //SSMR3GetBool(pSSM, pThis->fDelayInts);
6757 //SSMR3GetBool(pSSM, pThis->fIntMaskUsed);
6758 SSMR3GetU16(pSSM, &pThis->u16TxPktLen);
6759 SSMR3GetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
6760 SSMR3GetBool(pSSM, &pThis->fIPcsum);
6761 SSMR3GetBool(pSSM, &pThis->fTCPcsum);
6762 SSMR3GetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6763 rc = SSMR3GetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6764 AssertRCReturn(rc, rc);
6765 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6766 {
6767 SSMR3GetBool(pSSM, &pThis->fVTag);
6768 rc = SSMR3GetU16(pSSM, &pThis->u16VTagTCI);
6769 AssertRCReturn(rc, rc);
6770 }
6771 else
6772 {
6773 pThis->fVTag = false;
6774 pThis->u16VTagTCI = 0;
6775 }
6776#ifdef E1K_WITH_TXD_CACHE
6777 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
6778 {
6779 rc = SSMR3GetU8(pSSM, &pThis->nTxDFetched);
6780 AssertRCReturn(rc, rc);
6781 if (pThis->nTxDFetched)
6782 SSMR3GetMem(pSSM, pThis->aTxDescriptors,
6783 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6784 }
6785 else
6786 pThis->nTxDFetched = 0;
6787 /*
6788 * @todo: Perhaps we should not store TXD cache as the entries can be
6789 * simply fetched again from guest's memory. Or can't they?
6790 */
6791#endif /* E1K_WITH_TXD_CACHE */
6792#ifdef E1K_WITH_RXD_CACHE
6793 /*
6794 * There is no point in storing the RX descriptor cache in the saved
6795 * state, we just need to make sure it is empty.
6796 */
6797 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
6798#endif /* E1K_WITH_RXD_CACHE */
6799 /* derived state */
6800 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
6801
6802 E1kLog(("%s State has been restored\n", pThis->szPrf));
6803 e1kDumpState(pThis);
6804 }
6805 return VINF_SUCCESS;
6806}
6807
6808/**
6809 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
6810 */
6811static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6812{
6813 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6814
6815 /* Update promiscuous mode */
6816 if (pThis->pDrvR3)
6817 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3,
6818 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
6819
6820 /*
6821 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
6822 * passed to us. We go through all this stuff if the link was up and we
6823      * weren't teleported.
6824 */
6825 if ( (STATUS & STATUS_LU)
6826 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
6827 && pThis->cMsLinkUpDelay)
6828 {
6829 e1kR3LinkDownTemp(pThis);
6830 }
6831 return VINF_SUCCESS;
6832}
6833
6834
6835
6836/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
6837
6838/**
6839 * @callback_method_impl{FNRTSTRFORMATTYPE}
6840 */
6841static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
6842 void *pvArgOutput,
6843 const char *pszType,
6844 void const *pvValue,
6845 int cchWidth,
6846 int cchPrecision,
6847 unsigned fFlags,
6848 void *pvUser)
6849{
6850 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
6851 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
6852 if (!pDesc)
6853 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
6854
6855 size_t cbPrintf = 0;
6856 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
6857 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
6858 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
6859 pDesc->status.fPIF ? "PIF" : "pif",
6860 pDesc->status.fIPCS ? "IPCS" : "ipcs",
6861 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
6862 pDesc->status.fVP ? "VP" : "vp",
6863 pDesc->status.fIXSM ? "IXSM" : "ixsm",
6864 pDesc->status.fEOP ? "EOP" : "eop",
6865 pDesc->status.fDD ? "DD" : "dd",
6866 pDesc->status.fRXE ? "RXE" : "rxe",
6867 pDesc->status.fIPE ? "IPE" : "ipe",
6868 pDesc->status.fTCPE ? "TCPE" : "tcpe",
6869 pDesc->status.fCE ? "CE" : "ce",
6870 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
6871 E1K_SPEC_VLAN(pDesc->status.u16Special),
6872 E1K_SPEC_PRI(pDesc->status.u16Special));
6873 return cbPrintf;
6874}
6875
6876/**
6877 * @callback_method_impl{FNRTSTRFORMATTYPE}
6878 */
6879static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
6880 void *pvArgOutput,
6881 const char *pszType,
6882 void const *pvValue,
6883 int cchWidth,
6884 int cchPrecision,
6885 unsigned fFlags,
6886 void *pvUser)
6887{
6888 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
6889 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
6890 if (!pDesc)
6891 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
6892
6893 size_t cbPrintf = 0;
6894 switch (e1kGetDescType(pDesc))
6895 {
6896 case E1K_DTYP_CONTEXT:
6897 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
6898 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
6899 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
6900 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
6901 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
6902 pDesc->context.dw2.fIDE ? " IDE":"",
6903 pDesc->context.dw2.fRS ? " RS" :"",
6904 pDesc->context.dw2.fTSE ? " TSE":"",
6905 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
6906 pDesc->context.dw2.fTCP ? "TCP":"UDP",
6907 pDesc->context.dw2.u20PAYLEN,
6908 pDesc->context.dw3.u8HDRLEN,
6909 pDesc->context.dw3.u16MSS,
6910 pDesc->context.dw3.fDD?"DD":"");
6911 break;
6912 case E1K_DTYP_DATA:
6913 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
6914 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
6915 pDesc->data.u64BufAddr,
6916 pDesc->data.cmd.u20DTALEN,
6917 pDesc->data.cmd.fIDE ? " IDE" :"",
6918 pDesc->data.cmd.fVLE ? " VLE" :"",
6919 pDesc->data.cmd.fRPS ? " RPS" :"",
6920 pDesc->data.cmd.fRS ? " RS" :"",
6921 pDesc->data.cmd.fTSE ? " TSE" :"",
6922 pDesc->data.cmd.fIFCS? " IFCS":"",
6923 pDesc->data.cmd.fEOP ? " EOP" :"",
6924 pDesc->data.dw3.fDD ? " DD" :"",
6925 pDesc->data.dw3.fEC ? " EC" :"",
6926 pDesc->data.dw3.fLC ? " LC" :"",
6927 pDesc->data.dw3.fTXSM? " TXSM":"",
6928 pDesc->data.dw3.fIXSM? " IXSM":"",
6929 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
6930 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
6931 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
6932 break;
6933 case E1K_DTYP_LEGACY:
6934 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
6935 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
6936 pDesc->data.u64BufAddr,
6937 pDesc->legacy.cmd.u16Length,
6938 pDesc->legacy.cmd.fIDE ? " IDE" :"",
6939 pDesc->legacy.cmd.fVLE ? " VLE" :"",
6940 pDesc->legacy.cmd.fRPS ? " RPS" :"",
6941 pDesc->legacy.cmd.fRS ? " RS" :"",
6942 pDesc->legacy.cmd.fIC ? " IC" :"",
6943 pDesc->legacy.cmd.fIFCS? " IFCS":"",
6944 pDesc->legacy.cmd.fEOP ? " EOP" :"",
6945 pDesc->legacy.dw3.fDD ? " DD" :"",
6946 pDesc->legacy.dw3.fEC ? " EC" :"",
6947 pDesc->legacy.dw3.fLC ? " LC" :"",
6948 pDesc->legacy.cmd.u8CSO,
6949 pDesc->legacy.dw3.u8CSS,
6950 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
6951 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
6952 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
6953 break;
6954 default:
6955 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
6956 break;
6957 }
6958
6959 return cbPrintf;
6960}
6961
6962/** Initializes debug helpers (logging format types). */
6963static int e1kInitDebugHelpers(void)
6964{
6965 int rc = VINF_SUCCESS;
6966 static bool s_fHelpersRegistered = false;
6967 if (!s_fHelpersRegistered)
6968 {
6969 s_fHelpersRegistered = true;
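        /* Once registered, descriptors can be dumped with the %R[e1krxd] and %R[e1ktxd]
         * format specifiers (used by e1kInfo below). */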
6970 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
6971 AssertRCReturn(rc, rc);
6972 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
6973 AssertRCReturn(rc, rc);
6974 }
6975 return rc;
6976}
6977
6978/**
6979 * Status info callback.
6980 *
6981 * @param pDevIns The device instance.
6982 * @param pHlp The output helpers.
6983 * @param pszArgs The arguments.
6984 */
6985static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
6986{
6987 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6988 unsigned i;
6989 // bool fRcvRing = false;
6990 // bool fXmtRing = false;
6991
6992 /*
6993 * Parse args.
6994 if (pszArgs)
6995 {
6996 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
6997 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
6998 }
6999 */
7000
7001 /*
7002 * Show info.
7003 */
7004 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7005 pDevIns->iInstance, pThis->IOPortBase, pThis->addrMMReg,
7006 &pThis->macConfigured, g_Chips[pThis->eChip].pcszName,
7007 pThis->fRCEnabled ? " GC" : "", pThis->fR0Enabled ? " R0" : "");
7008
7009 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7010
7011 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7012 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7013
7014 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7015 {
7016 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7017 if (ra->ctl & RA_CTL_AV)
7018 {
7019 const char *pcszTmp;
7020 switch (ra->ctl & RA_CTL_AS)
7021 {
7022 case 0: pcszTmp = "DST"; break;
7023 case 1: pcszTmp = "SRC"; break;
7024 default: pcszTmp = "reserved";
7025 }
7026 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7027 }
7028 }
7029 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7030 uint32_t rdh = RDH;
7031 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7032 for (i = 0; i < cDescs; ++i)
7033 {
7034 E1KRXDESC desc;
7035 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7036 &desc, sizeof(desc));
7037 if (i == rdh)
7038 pHlp->pfnPrintf(pHlp, ">>> ");
7039 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7040 }
7041#ifdef E1K_WITH_RXD_CACHE
7042 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7043 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
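    /* The cache entry at iRxDCurrent corresponds to ring index RDH; back RDH up by
     * iRxDCurrent (with wrap-around) to obtain the ring index of the first cached entry. */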
7044 if (rdh > pThis->iRxDCurrent)
7045 rdh -= pThis->iRxDCurrent;
7046 else
7047 rdh = cDescs + rdh - pThis->iRxDCurrent;
7048 for (i = 0; i < pThis->nRxDFetched; ++i)
7049 {
7050 if (i == pThis->iRxDCurrent)
7051 pHlp->pfnPrintf(pHlp, ">>> ");
7052 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7053 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7054 &pThis->aRxDescriptors[i]);
7055 }
7056#endif /* E1K_WITH_RXD_CACHE */
7057
7058 cDescs = TDLEN / sizeof(E1KTXDESC);
7059 uint32_t tdh = TDH;
7060 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7061 for (i = 0; i < cDescs; ++i)
7062 {
7063 E1KTXDESC desc;
7064 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7065 &desc, sizeof(desc));
7066 if (i == tdh)
7067 pHlp->pfnPrintf(pHlp, ">>> ");
7068 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7069 }
7070#ifdef E1K_WITH_TXD_CACHE
7071 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7072 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
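    /* Same as for the RX cache above: the cache entry at iTxDCurrent corresponds to ring index TDH. */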
7073 if (tdh > pThis->iTxDCurrent)
7074 tdh -= pThis->iTxDCurrent;
7075 else
7076 tdh = cDescs + tdh - pThis->iTxDCurrent;
7077 for (i = 0; i < pThis->nTxDFetched; ++i)
7078 {
7079 if (i == pThis->iTxDCurrent)
7080 pHlp->pfnPrintf(pHlp, ">>> ");
7081 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7082 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7083 &pThis->aTxDescriptors[i]);
7084 }
7085#endif /* E1K_WITH_TXD_CACHE */
7086
7087
7088#ifdef E1K_INT_STATS
7089 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7090 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7091 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7092 pHlp->pfnPrintf(pHlp, "Interrupts delayed: %d\n", pThis->uStatIntDly);
7093 pHlp->pfnPrintf(pHlp, "Disabled delayed: %d\n", pThis->uStatDisDly);
7094 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7095 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7096 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7097 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7098 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7099 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7100 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7101 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7102 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7103 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7104 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7105 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7106 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7107 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pThis->uStatTxDelayExp);
7108 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7109 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7110 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7111 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7112 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7113 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7114 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7115 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7116 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7117 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7118 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7119 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7120 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7121 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7122 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7123 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7124 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7125 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7126 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7127#endif /* E1K_INT_STATS */
7128
7129 e1kCsLeave(pThis);
7130}
7131
7132
7133
7134/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7135
7136/**
7137 * Detach notification.
7138 *
7139 * One port on the network card has been disconnected from the network.
7140 *
7141 * @param pDevIns The device instance.
7142 * @param iLUN The logical unit which is being detached.
7143 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7144 */
7145static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7146{
7147 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7148 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7149
7150 AssertLogRelReturnVoid(iLUN == 0);
7151
7152 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7153
7154     /** @todo r=pritesh still need to check if I missed
7155      * cleaning something up in this function
7156 */
7157
7158 /*
7159 * Zero some important members.
7160 */
7161 pThis->pDrvBase = NULL;
7162 pThis->pDrvR3 = NULL;
7163 pThis->pDrvR0 = NIL_RTR0PTR;
7164 pThis->pDrvRC = NIL_RTRCPTR;
7165
7166 PDMCritSectLeave(&pThis->cs);
7167}
7168
7169/**
7170 * Attach notification.
7171 *
7172 * One port on the network card has been connected to a network.
7173 *
7174 * @returns VBox status code.
7175 * @param pDevIns The device instance.
7176 * @param iLUN The logical unit which is being attached.
7177 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7178 *
7179 * @remarks This code path is not used during construction.
7180 */
7181static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7182{
7183 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7184 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7185
7186 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7187
7188 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7189
7190 /*
7191 * Attach the driver.
7192 */
7193 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7194 if (RT_SUCCESS(rc))
7195 {
7196 if (rc == VINF_NAT_DNS)
7197 {
7198#ifdef RT_OS_LINUX
7199 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7200 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7201#else
7202 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7203 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7204#endif
7205 }
7206 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7207 AssertMsgStmt(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7208 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7209 if (RT_SUCCESS(rc))
7210 {
7211 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0);
7212 pThis->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
7213
7214 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC);
7215 pThis->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTR0PTR;
7216 }
7217 }
7218 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7219 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7220 {
7221 /* This should never happen because this function is not called
7222 * if there is no driver to attach! */
7223 Log(("%s No attached driver!\n", pThis->szPrf));
7224 }
7225
7226 /*
7227      * Temporarily set the link down if it was up so that the guest
7228      * will know that we have changed the configuration of the
7229      * network card.
7230 */
7231 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7232 e1kR3LinkDownTemp(pThis);
7233
7234 PDMCritSectLeave(&pThis->cs);
7235 return rc;
7236
7237}
7238
7239/**
7240 * @copydoc FNPDMDEVPOWEROFF
7241 */
7242static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7243{
7244 /* Poke thread waiting for buffer space. */
7245 e1kWakeupReceive(pDevIns);
7246}
7247
7248/**
7249 * @copydoc FNPDMDEVRESET
7250 */
7251static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7252{
7253 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
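    /* Cancel pending timers and drop any partially assembled TX state before doing the hard reset. */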
7254#ifdef E1K_TX_DELAY
7255 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
7256#endif /* E1K_TX_DELAY */
7257 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
7258 e1kCancelTimer(pThis, pThis->CTX_SUFF(pLUTimer));
7259 e1kXmitFreeBuf(pThis);
7260 pThis->u16TxPktLen = 0;
7261 pThis->fIPcsum = false;
7262 pThis->fTCPcsum = false;
7263 pThis->fIntMaskUsed = false;
7264 pThis->fDelayInts = false;
7265 pThis->fLocked = false;
7266 pThis->u64AckedAt = 0;
7267 e1kHardReset(pThis);
7268}
7269
7270/**
7271 * @copydoc FNPDMDEVSUSPEND
7272 */
7273static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7274{
7275 /* Poke thread waiting for buffer space. */
7276 e1kWakeupReceive(pDevIns);
7277}
7278
7279/**
7280 * Device relocation callback.
7281 *
7282 * When this callback is called, the device instance data, and if the
7283 * device has a GC component, is being relocated, and/or the selectors
7284 * have been changed. The device must use the chance to perform the
7285 * necessary pointer relocations and data updates.
7286 *
7287 * Before the GC code is executed for the first time, this function will be
7288 * called with a 0 delta so GC pointer calculations can be done in one place.
7289 *
7290 * @param pDevIns Pointer to the device instance.
7291 * @param offDelta The relocation delta relative to the old location.
7292 *
7293 * @remark A relocation CANNOT fail.
7294 */
7295static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7296{
7297 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7298 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7299 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7300 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7301#ifdef E1K_USE_RX_TIMERS
7302 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7303 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7304#endif /* E1K_USE_RX_TIMERS */
7305#ifdef E1K_USE_TX_TIMERS
7306 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7307# ifndef E1K_NO_TAD
7308 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7309# endif /* E1K_NO_TAD */
7310#endif /* E1K_USE_TX_TIMERS */
7311#ifdef E1K_TX_DELAY
7312 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7313#endif /* E1K_TX_DELAY */
7314 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7315 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7316}
7317
7318/**
7319 * Destruct a device instance.
7320 *
7321 * We need to free non-VM resources only.
7322 *
7323 * @returns VBox status code.
7324 * @param pDevIns The device instance data.
7325 * @thread EMT
7326 */
7327static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7328{
7329 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7330 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7331
7332 e1kDumpState(pThis);
7333 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7334 if (PDMCritSectIsInitialized(&pThis->cs))
7335 {
7336 if (pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
7337 {
7338 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
7339 RTSemEventDestroy(pThis->hEventMoreRxDescAvail);
7340 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7341 }
7342#ifdef E1K_WITH_TX_CS
7343 PDMR3CritSectDelete(&pThis->csTx);
7344#endif /* E1K_WITH_TX_CS */
7345 PDMR3CritSectDelete(&pThis->csRx);
7346 PDMR3CritSectDelete(&pThis->cs);
7347 }
7348 return VINF_SUCCESS;
7349}
7350
7351
7352/**
7353 * Set PCI configuration space registers.
7354 *
7355 * @param   pPciDev     Pointer to the PCI device structure.
7356 * @thread EMT
7357 */
7358static DECLCALLBACK(void) e1kConfigurePciDev(PPCIDEVICE pPciDev, E1KCHIP eChip)
7359{
7360 Assert(eChip < RT_ELEMENTS(g_Chips));
7361 /* Configure PCI Device, assume 32-bit mode ******************************/
7362 PCIDevSetVendorId(pPciDev, g_Chips[eChip].uPCIVendorId);
7363 PCIDevSetDeviceId(pPciDev, g_Chips[eChip].uPCIDeviceId);
7364 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_Chips[eChip].uPCISubsystemVendorId);
7365 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_Chips[eChip].uPCISubsystemId);
7366
7367 PCIDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7368 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7369 PCIDevSetWord( pPciDev, VBOX_PCI_STATUS,
7370 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7371 /* Stepping A2 */
7372 PCIDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7373 /* Ethernet adapter */
7374 PCIDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7375 PCIDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7376 /* normal single function Ethernet controller */
7377 PCIDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7378 /* Memory Register Base Address */
7379 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7380 /* Memory Flash Base Address */
7381 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7382 /* IO Register Base Address */
7383 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7384 /* Expansion ROM Base Address */
7385 PCIDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7386 /* Capabilities Pointer */
7387 PCIDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7388 /* Interrupt Pin: INTA# */
7389 PCIDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7390 /* Max_Lat/Min_Gnt: very high priority and time slice */
7391 PCIDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7392 PCIDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7393
7394 /* PCI Power Management Registers ****************************************/
7395 /* Capability ID: PCI Power Management Registers */
7396 PCIDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7397 /* Next Item Pointer: PCI-X */
7398 PCIDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7399 /* Power Management Capabilities: PM disabled, DSI */
7400 PCIDevSetWord( pPciDev, 0xDC + 2,
7401 0x0002 | VBOX_PCI_PM_CAP_DSI);
7402 /* Power Management Control / Status Register: PM disabled */
7403 PCIDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7404 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7405 PCIDevSetByte( pPciDev, 0xDC + 6, 0x00);
7406 /* Data Register: PM disabled, always 0 */
7407 PCIDevSetByte( pPciDev, 0xDC + 7, 0x00);
7408
7409 /* PCI-X Configuration Registers *****************************************/
7410 /* Capability ID: PCI-X Configuration Registers */
7411 PCIDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7412#ifdef E1K_WITH_MSI
7413 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7414#else
7415 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7416 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7417#endif
7418 /* PCI-X Command: Enable Relaxed Ordering */
7419 PCIDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7420 /* PCI-X Status: 32-bit, 66MHz*/
7421 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7422 PCIDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7423}
7424
7425/**
7426 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7427 */
7428static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7429{
7430 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7431 int rc;
7432 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7433
7434 /*
7435 * Initialize the instance data (state).
7436 * Note! Caller has initialized it to ZERO already.
7437 */
7438 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7439 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7440 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7441 pThis->pDevInsR3 = pDevIns;
7442 pThis->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
7443 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7444 pThis->u16TxPktLen = 0;
7445 pThis->fIPcsum = false;
7446 pThis->fTCPcsum = false;
7447 pThis->fIntMaskUsed = false;
7448 pThis->fDelayInts = false;
7449 pThis->fLocked = false;
7450 pThis->u64AckedAt = 0;
7451 pThis->led.u32Magic = PDMLED_MAGIC;
7452 pThis->u32PktNo = 1;
7453
7454 /* Interfaces */
7455 pThis->IBase.pfnQueryInterface = e1kR3QueryInterface;
7456
7457 pThis->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7458 pThis->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7459 pThis->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7460
7461 pThis->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7462
7463 pThis->INetworkConfig.pfnGetMac = e1kR3GetMac;
7464 pThis->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7465 pThis->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7466
7467 /*
7468 * Internal validations.
7469 */
7470 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7471 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7472 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7473 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7474 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7475 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7476 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7477 VERR_INTERNAL_ERROR_4);
7478
7479 /*
7480 * Validate configuration.
7481 */
7482 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
7483 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
7484 "ItrEnabled\0" "ItrRxEnabled\0"
7485 "EthernetCRC\0" "GSOEnabled\0" "LinkUpDelay\0"))
7486 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
7487 N_("Invalid configuration for E1000 device"));
7488
7489 /** @todo: LineSpeed unused! */
7490
7491 pThis->fR0Enabled = true;
7492 pThis->fRCEnabled = true;
7493 pThis->fEthernetCRC = true;
7494 pThis->fGSOEnabled = true;
7495 pThis->fItrEnabled = true;
7496 pThis->fItrRxEnabled = true;
7497
7498 /* Get config params */
7499 rc = CFGMR3QueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7500 if (RT_FAILURE(rc))
7501 return PDMDEV_SET_ERROR(pDevIns, rc,
7502 N_("Configuration error: Failed to get MAC address"));
7503 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7504 if (RT_FAILURE(rc))
7505 return PDMDEV_SET_ERROR(pDevIns, rc,
7506 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7507 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7508 if (RT_FAILURE(rc))
7509 return PDMDEV_SET_ERROR(pDevIns, rc,
7510 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7511 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7512 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pThis->fRCEnabled, true);
7513 if (RT_FAILURE(rc))
7514 return PDMDEV_SET_ERROR(pDevIns, rc,
7515 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
7516
7517 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pThis->fR0Enabled, true);
7518 if (RT_FAILURE(rc))
7519 return PDMDEV_SET_ERROR(pDevIns, rc,
7520 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
7521
7522 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7523 if (RT_FAILURE(rc))
7524 return PDMDEV_SET_ERROR(pDevIns, rc,
7525 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7526
7527 rc = CFGMR3QueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7528 if (RT_FAILURE(rc))
7529 return PDMDEV_SET_ERROR(pDevIns, rc,
7530 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7531
7532 rc = CFGMR3QueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, true);
7533 if (RT_FAILURE(rc))
7534 return PDMDEV_SET_ERROR(pDevIns, rc,
7535 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7536
7537 rc = CFGMR3QueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7538 if (RT_FAILURE(rc))
7539 return PDMDEV_SET_ERROR(pDevIns, rc,
7540 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7541
7542 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 5000); /* ms */
7543 if (RT_FAILURE(rc))
7544 return PDMDEV_SET_ERROR(pDevIns, rc,
7545 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7546 Assert(pThis->cMsLinkUpDelay <= 300000); /* less than 5 minutes */
7547 if (pThis->cMsLinkUpDelay > 5000)
7548 LogRel(("%s WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7549 else if (pThis->cMsLinkUpDelay == 0)
7550 LogRel(("%s WARNING! Link up delay is disabled!\n", pThis->szPrf));
7551
7552 LogRel(("%s Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s R0=%s GC=%s\n", pThis->szPrf,
7553 g_Chips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7554 pThis->fEthernetCRC ? "on" : "off",
7555 pThis->fGSOEnabled ? "enabled" : "disabled",
7556 pThis->fItrEnabled ? "enabled" : "disabled",
7557 pThis->fItrRxEnabled ? "enabled" : "disabled",
7558 pThis->fR0Enabled ? "enabled" : "disabled",
7559 pThis->fRCEnabled ? "enabled" : "disabled"));
7560
7561 /* Initialize the EEPROM. */
7562 pThis->eeprom.init(pThis->macConfigured);
7563
7564 /* Initialize internal PHY. */
7565 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
7566 Phy::setLinkStatus(&pThis->phy, pThis->fCableConnected);
7567
7568 /* Initialize critical sections. We do our own locking. */
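    /* Replace the default per-device critical section with a NOP section; the device uses its
     * own cs, csRx and (optionally) csTx sections created below. */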
7569 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7570 AssertRCReturn(rc, rc);
7571
7572 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
7573 if (RT_FAILURE(rc))
7574 return rc;
7575 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
7576 if (RT_FAILURE(rc))
7577 return rc;
7578#ifdef E1K_WITH_TX_CS
7579 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
7580 if (RT_FAILURE(rc))
7581 return rc;
7582#endif /* E1K_WITH_TX_CS */
7583
7584 /* Saved state registration. */
7585 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7586 NULL, e1kLiveExec, NULL,
7587 e1kSavePrep, e1kSaveExec, NULL,
7588 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7589 if (RT_FAILURE(rc))
7590 return rc;
7591
7592 /* Set PCI config registers and register ourselves with the PCI bus. */
7593 e1kConfigurePciDev(&pThis->pciDevice, pThis->eChip);
7594 rc = PDMDevHlpPCIRegister(pDevIns, &pThis->pciDevice);
7595 if (RT_FAILURE(rc))
7596 return rc;
7597
7598#ifdef E1K_WITH_MSI
7599 PDMMSIREG MsiReg;
7600 RT_ZERO(MsiReg);
7601 MsiReg.cMsiVectors = 1;
7602 MsiReg.iMsiCapOffset = 0x80;
7603 MsiReg.iMsiNextOffset = 0x0;
7604 MsiReg.fMsi64bit = false;
7605 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
7606 AssertRCReturn(rc, rc);
7607#endif
7608
7609
7610     /* Map our registers to memory space (region 0, see e1kConfigurePciDev) */
7611 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, e1kMap);
7612 if (RT_FAILURE(rc))
7613 return rc;
7614     /* Map our registers to IO space (region 2, see e1kConfigurePciDev) */
7615 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE, PCI_ADDRESS_SPACE_IO, e1kMap);
7616 if (RT_FAILURE(rc))
7617 return rc;
7618
7619 /* Create transmit queue */
7620 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7621 e1kTxQueueConsumer, true, "E1000-Xmit", &pThis->pTxQueueR3);
7622 if (RT_FAILURE(rc))
7623 return rc;
7624 pThis->pTxQueueR0 = PDMQueueR0Ptr(pThis->pTxQueueR3);
7625 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7626
7627 /* Create the RX notifier signaller. */
7628 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7629 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pThis->pCanRxQueueR3);
7630 if (RT_FAILURE(rc))
7631 return rc;
7632 pThis->pCanRxQueueR0 = PDMQueueR0Ptr(pThis->pCanRxQueueR3);
7633 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7634
7635#ifdef E1K_TX_DELAY
7636 /* Create Transmit Delay Timer */
7637 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxDelayTimer, pThis,
7638 TMTIMER_FLAGS_NO_CRIT_SECT,
7639 "E1000 Transmit Delay Timer", &pThis->pTXDTimerR3);
7640 if (RT_FAILURE(rc))
7641 return rc;
7642 pThis->pTXDTimerR0 = TMTimerR0Ptr(pThis->pTXDTimerR3);
7643 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7644 TMR3TimerSetCritSect(pThis->pTXDTimerR3, &pThis->csTx);
7645#endif /* E1K_TX_DELAY */
7646
7647#ifdef E1K_USE_TX_TIMERS
7648 /* Create Transmit Interrupt Delay Timer */
7649 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pThis,
7650 TMTIMER_FLAGS_NO_CRIT_SECT,
7651 "E1000 Transmit Interrupt Delay Timer", &pThis->pTIDTimerR3);
7652 if (RT_FAILURE(rc))
7653 return rc;
7654 pThis->pTIDTimerR0 = TMTimerR0Ptr(pThis->pTIDTimerR3);
7655 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7656
7657# ifndef E1K_NO_TAD
7658 /* Create Transmit Absolute Delay Timer */
7659 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pThis,
7660 TMTIMER_FLAGS_NO_CRIT_SECT,
7661 "E1000 Transmit Absolute Delay Timer", &pThis->pTADTimerR3);
7662 if (RT_FAILURE(rc))
7663 return rc;
7664 pThis->pTADTimerR0 = TMTimerR0Ptr(pThis->pTADTimerR3);
7665 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7666# endif /* E1K_NO_TAD */
7667#endif /* E1K_USE_TX_TIMERS */
7668
7669#ifdef E1K_USE_RX_TIMERS
7670 /* Create Receive Interrupt Delay Timer */
7671 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pThis,
7672 TMTIMER_FLAGS_NO_CRIT_SECT,
7673 "E1000 Receive Interrupt Delay Timer", &pThis->pRIDTimerR3);
7674 if (RT_FAILURE(rc))
7675 return rc;
7676 pThis->pRIDTimerR0 = TMTimerR0Ptr(pThis->pRIDTimerR3);
7677 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7678
7679 /* Create Receive Absolute Delay Timer */
7680 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pThis,
7681 TMTIMER_FLAGS_NO_CRIT_SECT,
7682 "E1000 Receive Absolute Delay Timer", &pThis->pRADTimerR3);
7683 if (RT_FAILURE(rc))
7684 return rc;
7685 pThis->pRADTimerR0 = TMTimerR0Ptr(pThis->pRADTimerR3);
7686 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7687#endif /* E1K_USE_RX_TIMERS */
7688
7689 /* Create Late Interrupt Timer */
7690 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pThis,
7691 TMTIMER_FLAGS_NO_CRIT_SECT,
7692 "E1000 Late Interrupt Timer", &pThis->pIntTimerR3);
7693 if (RT_FAILURE(rc))
7694 return rc;
7695 pThis->pIntTimerR0 = TMTimerR0Ptr(pThis->pIntTimerR3);
7696 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7697
7698 /* Create Link Up Timer */
7699 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pThis,
7700 TMTIMER_FLAGS_NO_CRIT_SECT,
7701 "E1000 Link Up Timer", &pThis->pLUTimerR3);
7702 if (RT_FAILURE(rc))
7703 return rc;
7704 pThis->pLUTimerR0 = TMTimerR0Ptr(pThis->pLUTimerR3);
7705 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7706
7707 /* Register the info item */
7708 char szTmp[20];
7709 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7710 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
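    /*
     * The handler registered here can be invoked from the VM debugger, e.g.
     * "info e1k0" in the debugger console or, presumably, via
     * "VBoxManage debugvm <vm> info e1k0" for the first instance; it dumps the
     * state collected by e1kInfo.
     */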
7711
7712 /* Status driver */
7713 PPDMIBASE pBase;
7714 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThis->IBase, &pBase, "Status Port");
7715 if (RT_FAILURE(rc))
7716 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7717 pThis->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7718
7719 /* Network driver */
7720 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7721 if (RT_SUCCESS(rc))
7722 {
7723 if (rc == VINF_NAT_DNS)
7724 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7725 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and will probably observe delays when trying to do so"));
7726 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7727 AssertMsgReturn(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
7728
7729 pThis->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7730 pThis->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7731 }
7732 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7733 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7734 {
7735 /* No error! */
7736 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
7737 }
7738 else
7739 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
7740
7741 rc = RTSemEventCreate(&pThis->hEventMoreRxDescAvail);
7742 if (RT_FAILURE(rc))
7743 return rc;
7744
7745 rc = e1kInitDebugHelpers();
7746 if (RT_FAILURE(rc))
7747 return rc;
7748
7749 e1kHardReset(pThis);
7750
7751 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Public/Net/E1k%u/BytesReceived", iInstance);
7752 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Public/Net/E1k%u/BytesTransmitted", iInstance);
7753
7754 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
7755 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
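    /*
     * Note that the byte counters are deliberately registered twice: once
     * under the public "/Public/Net/E1k*" tree, which the frontends
     * presumably use for per-adapter network statistics, and once under the
     * device's own "/Devices/E1k*" subtree.
     */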
7756
7757#if defined(VBOX_WITH_STATISTICS)
7758 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
7759 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
7760 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
7761 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
7762 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
7763 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
7764 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
7765 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
7766 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
7767 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
7768 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
7769 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
7770 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
7771 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
7772 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
7773 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
7774 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
7775 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
7776 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
7777 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
7778 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
7779 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
7780 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
7781 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
7782
7783 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContexNormal", iInstance);
7784 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
7785 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
7786 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
7787 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
7788 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
7789 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
7790 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
7791 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
7792 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
7793 {
7794 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7795 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Reads", iInstance, g_aE1kRegMap[iReg].abbrev);
7796 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7797 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Writes", iInstance, g_aE1kRegMap[iReg].abbrev);
7798 }
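    /*
     * The loop above gives every register in g_aE1kRegMap its own read and
     * write counter, named after the register's abbreviation from the map,
     * e.g. "/Devices/E1k0/Regs/RCTL-Reads" for the first instance.
     */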
7799#endif /* VBOX_WITH_STATISTICS */
7800
7801#ifdef E1K_INT_STATS
7802 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
7803 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
7804 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
7805 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
7806 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
7807 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntDly", "/Devices/E1k%d/uStatIntDly", iInstance);
7808 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
7809 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
7810 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDisDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDisDly", "/Devices/E1k%d/uStatDisDly", iInstance);
7811 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
7812 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
7813 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
7814 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
7815 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
7816 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
7817 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
7818 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
7819 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
7820 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
7821 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
7822 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
7823 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
7824 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
7825 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
7826 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
7827 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
7828 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
7829 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
7830 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
7831 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
7832 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
7833 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
7834 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
7835 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
7836 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
7837 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
7838 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
7839 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
7840 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
7841 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
7842 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
7843#endif /* E1K_INT_STATS */
7844
7845 return VINF_SUCCESS;
7846}
7847
7848/**
7849 * The device registration structure.
7850 */
7851const PDMDEVREG g_DeviceE1000 =
7852{
7853 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
7854 PDM_DEVREG_VERSION,
7855 /* Device name. */
7856 "e1000",
7857 /* Name of guest context module (no path).
7858 * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
7859 "VBoxDDRC.rc",
7860 /* Name of ring-0 module (no path).
7861 * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
7862 "VBoxDDR0.r0",
7863 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
7864 * remain unchanged from registration till VM destruction. */
7865 "Intel PRO/1000 MT Desktop Ethernet.\n",
7866
7867 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
7868 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
7869 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
7870 PDM_DEVREG_CLASS_NETWORK,
7871 /* Maximum number of instances (per VM). */
7872 ~0U,
7873 /* Size of the instance data. */
7874 sizeof(E1KSTATE),
7875
7876 /* pfnConstruct */
7877 e1kR3Construct,
7878 /* pfnDestruct */
7879 e1kR3Destruct,
7880 /* pfnRelocate */
7881 e1kR3Relocate,
7882 /* pfnMemSetup */
7883 NULL,
7884 /* pfnPowerOn */
7885 NULL,
7886 /* pfnReset */
7887 e1kR3Reset,
7888 /* pfnSuspend */
7889 e1kR3Suspend,
7890 /* pfnResume */
7891 NULL,
7892 /* pfnAttach */
7893 e1kR3Attach,
7894 /* pfnDetach */
7895 e1kR3Detach,
7896 /* pfnQueryInterface */
7897 NULL,
7898 /* pfnInitComplete */
7899 NULL,
7900 /* pfnPowerOff */
7901 e1kR3PowerOff,
7902 /* pfnSoftReset */
7903 NULL,
7904
7905 /* u32VersionEnd */
7906 PDM_DEVREG_VERSION
7907};
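/*
 * The device becomes known to PDM when the containing module's registration
 * entry point (VBoxDevicesRegister in VBoxDD.cpp for the in-tree build) passes
 * this structure to the register callback, roughly:
 *
 *     rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceE1000);
 */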
7908
7909#endif /* IN_RING3 */
7910#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */