1/* $Id: DevE1000.cpp 63562 2016-08-16 14:04:03Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2016 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/* Options *******************************************************************/
51/** @def E1K_INIT_RA0
52 * E1K_INIT_RA0 forces E1000 to set the first entry in the Receive Address filter
53 * table to the MAC address obtained from CFGM. Most guests read the MAC address from
54 * the EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on it
55 * being already set (see @bugref{4657}).
56 */
57#define E1K_INIT_RA0
58/** @def E1K_LSC_ON_SLU
59 * E1K_LSC_ON_SLU causes E1000 to generate a Link Status Change interrupt when
60 * the guest driver brings up the link via the STATUS.LU bit. Again, the only guest
61 * that requires it is Mac OS X (see @bugref{4657}).
62 */
63#define E1K_LSC_ON_SLU
64/** @def E1K_TX_DELAY
65 * E1K_TX_DELAY aims to improve the guest-to-host transfer rate for TCP streams by
66 * preventing packets from being sent immediately. It allows several
67 * packets to be sent in a batch, reducing the number of acknowledgments. Note that it
68 * effectively disables the R0 TX path, forcing sending in R3.
69 */
70//#define E1K_TX_DELAY 150
71/** @def E1K_USE_TX_TIMERS
72 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
73 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
74 * register. Enabling it showed no positive effects on existing guests, so it
75 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
76 * Ethernet Controllers Software Developer’s Manual" for more detailed
77 * explanation.
78 */
79//#define E1K_USE_TX_TIMERS
80/** @def E1K_NO_TAD
81 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
82 * Transmit Absolute Delay time. This timer sets the maximum time interval
83 * during which TX interrupts can be postponed (delayed). It has no effect
84 * if E1K_USE_TX_TIMERS is not defined.
85 */
86//#define E1K_NO_TAD
87/** @def E1K_REL_DEBUG
88 * E1K_REL_DEBUG enables debug logging (E1kLog, E1kLog2, E1kLog3) in release builds.
89 */
90//#define E1K_REL_DEBUG
91/** @def E1K_INT_STATS
92 * E1K_INT_STATS enables collection of internal statistics used for
93 * debugging of delayed interrupts, etc.
94 */
95//#define E1K_INT_STATS
96/** @def E1K_WITH_MSI
97 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
98 */
99//#define E1K_WITH_MSI
100/** @def E1K_WITH_TX_CS
101 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
102 */
103#define E1K_WITH_TX_CS
104/** @def E1K_WITH_TXD_CACHE
105 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
106 * single physical memory read (or two if it wraps around the end of the TX
107 * descriptor ring). It is required for proper functioning of bandwidth
108 * resource control, as it allows computing the exact sizes of packets prior
109 * to allocating their buffers (see @bugref{5582}).
110 */
111#define E1K_WITH_TXD_CACHE
112/** @def E1K_WITH_RXD_CACHE
113 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
114 * single physical memory read (or two if it wraps around the end of the RX
115 * descriptor ring). Intel's packet driver for DOS needs this option in
116 * order to work properly (see @bugref{6217}).
117 */
118#define E1K_WITH_RXD_CACHE
119/* End of Options ************************************************************/
120
121#ifdef E1K_WITH_TXD_CACHE
122/**
123 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
124 * in the state structure. It limits the number of descriptors loaded in one
125 * batch read. For example, a Linux guest may use up to 20 descriptors per
126 * TSE packet. The largest TSE packet seen (Windows guest) was 45 descriptors.
127 */
128# define E1K_TXD_CACHE_SIZE 64u
129#endif /* E1K_WITH_TXD_CACHE */
130
131#ifdef E1K_WITH_RXD_CACHE
132/**
133 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
134 * in the state structure. It limits the number of descriptors loaded in one
135 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
136 */
137# define E1K_RXD_CACHE_SIZE 16u
138#endif /* E1K_WITH_RXD_CACHE */
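
/*
 * Illustration only, not part of the device logic: both cache options above
 * describe fetching a batch of descriptors in one physical read, or two when
 * the batch wraps around the end of the ring.  A sketch of how such a batch
 * could be split (the helper name and parameters are made up for this example):
 */
#if 0
static void e1kExampleSplitBatch(unsigned iFirst, unsigned cAvail, unsigned cRingSize,
                                 unsigned cCacheSize, unsigned *pcFirstRead, unsigned *pcSecondRead)
{
    unsigned cToFetch = RT_MIN(cAvail, cCacheSize);        /* never fetch more than fits into the cache */
    *pcFirstRead  = RT_MIN(cToFetch, cRingSize - iFirst);  /* first read goes up to the end of the ring */
    *pcSecondRead = cToFetch - *pcFirstRead;               /* wrapped-around remainder, if any          */
}
#endif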
139
140
141/* Little helpers ************************************************************/
142#undef htons
143#undef ntohs
144#undef htonl
145#undef ntohl
146#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
147#define ntohs(x) htons(x)
148#define htonl(x) ASMByteSwapU32(x)
149#define ntohl(x) htonl(x)
150
151#ifndef DEBUG
152# ifdef E1K_REL_DEBUG
153# define DEBUG
154# define E1kLog(a) LogRel(a)
155# define E1kLog2(a) LogRel(a)
156# define E1kLog3(a) LogRel(a)
157# define E1kLogX(x, a) LogRel(a)
158//# define E1kLog3(a) do {} while (0)
159# else
160# define E1kLog(a) do {} while (0)
161# define E1kLog2(a) do {} while (0)
162# define E1kLog3(a) do {} while (0)
163# define E1kLogX(x, a) do {} while (0)
164# endif
165#else
166# define E1kLog(a) Log(a)
167# define E1kLog2(a) Log2(a)
168# define E1kLog3(a) Log3(a)
169# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
170//# define E1kLog(a) do {} while (0)
171//# define E1kLog2(a) do {} while (0)
172//# define E1kLog3(a) do {} while (0)
173#endif
174
175#if 0
176# define LOG_ENABLED
177# define E1kLogRel(a) LogRel(a)
178# undef Log6
179# define Log6(a) LogRel(a)
180#else
181# define E1kLogRel(a) do { } while (0)
182#endif
183
184//#undef DEBUG
185
186#define STATE_TO_DEVINS(pThis) (((PE1KSTATE )pThis)->CTX_SUFF(pDevIns))
187#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
188
189#define E1K_INC_CNT32(cnt) \
190do { \
191 if (cnt < UINT32_MAX) \
192 cnt++; \
193} while (0)
194
195#define E1K_ADD_CNT64(cntLo, cntHi, val) \
196do { \
197 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
198 uint64_t tmp = u64Cnt; \
199 u64Cnt += val; \
200 if (tmp > u64Cnt ) \
201 u64Cnt = UINT64_MAX; \
202 cntLo = (uint32_t)u64Cnt; \
203 cntHi = (uint32_t)(u64Cnt >> 32); \
204} while (0)
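
/*
 * E1K_ADD_CNT64 maintains a 64-bit statistics counter exposed as a low/high
 * register pair, saturating at UINT64_MAX instead of wrapping.  Illustration
 * only -- accounting cbFrame received octets (cbFrame is a made-up variable)
 * in the Good Octets Received pair, whose GORCL/GORCH aliases are defined
 * further down, could look like this:
 */
#if 0
    E1K_ADD_CNT64(GORCL, GORCH, cbFrame);   /* GORCH:GORCL += cbFrame, clamped to UINT64_MAX */
#endif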
205
206#ifdef E1K_INT_STATS
207# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
208#else /* E1K_INT_STATS */
209# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
210#endif /* E1K_INT_STATS */
211
212
213/*****************************************************************************/
214
215typedef uint32_t E1KCHIP;
216#define E1K_CHIP_82540EM 0
217#define E1K_CHIP_82543GC 1
218#define E1K_CHIP_82545EM 2
219
220#ifdef IN_RING3
221/** Different E1000 chips. */
222static const struct E1kChips
223{
224 uint16_t uPCIVendorId;
225 uint16_t uPCIDeviceId;
226 uint16_t uPCISubsystemVendorId;
227 uint16_t uPCISubsystemId;
228 const char *pcszName;
229} g_aChips[] =
230{
231 /* Vendor Device SSVendor SubSys Name */
232 { 0x8086,
233 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
234# ifdef E1K_WITH_MSI
235 0x105E,
236# else
237 0x100E,
238# endif
239 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
240 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
241 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
242};
243#endif /* IN_RING3 */
244
245
246/* The size of register area mapped to I/O space */
247#define E1K_IOPORT_SIZE 0x8
248/* The size of memory-mapped register area */
249#define E1K_MM_SIZE 0x20000
250
251#define E1K_MAX_TX_PKT_SIZE 16288
252#define E1K_MAX_RX_PKT_SIZE 16384
253
254/*****************************************************************************/
255
256/** Gets the specified bits from the register. */
257#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
258#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
259#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
260#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
261#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
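
/*
 * These macros rely on token pasting: GET_BITS(EERD, ADDR) expands to
 * ((EERD & EERD_ADDR_MASK) >> EERD_ADDR_SHIFT), extracting the word address
 * from an EEPROM read request, and SET_BITS(EERD, DATA, u16Val) replaces the
 * DATA field of EERD with u16Val.  Illustration only; the register aliases,
 * masks and shifts are defined below, and u16Val is a made-up variable.
 */
#if 0
    uint32_t uWordAddr = GET_BITS(EERD, ADDR);  /* which EEPROM word the guest asked for */
    SET_BITS(EERD, DATA, u16Val);               /* store the result into the DATA field  */
#endif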
262
263#define CTRL_SLU UINT32_C(0x00000040)
264#define CTRL_MDIO UINT32_C(0x00100000)
265#define CTRL_MDC UINT32_C(0x00200000)
266#define CTRL_MDIO_DIR UINT32_C(0x01000000)
267#define CTRL_MDC_DIR UINT32_C(0x02000000)
268#define CTRL_RESET UINT32_C(0x04000000)
269#define CTRL_VME UINT32_C(0x40000000)
270
271#define STATUS_LU UINT32_C(0x00000002)
272#define STATUS_TXOFF UINT32_C(0x00000010)
273
274#define EECD_EE_WIRES UINT32_C(0x0F)
275#define EECD_EE_REQ UINT32_C(0x40)
276#define EECD_EE_GNT UINT32_C(0x80)
277
278#define EERD_START UINT32_C(0x00000001)
279#define EERD_DONE UINT32_C(0x00000010)
280#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
281#define EERD_DATA_SHIFT 16
282#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
283#define EERD_ADDR_SHIFT 8
284
285#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
286#define MDIC_DATA_SHIFT 0
287#define MDIC_REG_MASK UINT32_C(0x001F0000)
288#define MDIC_REG_SHIFT 16
289#define MDIC_PHY_MASK UINT32_C(0x03E00000)
290#define MDIC_PHY_SHIFT 21
291#define MDIC_OP_WRITE UINT32_C(0x04000000)
292#define MDIC_OP_READ UINT32_C(0x08000000)
293#define MDIC_READY UINT32_C(0x10000000)
294#define MDIC_INT_EN UINT32_C(0x20000000)
295#define MDIC_ERROR UINT32_C(0x40000000)
296
297#define TCTL_EN UINT32_C(0x00000002)
298#define TCTL_PSP UINT32_C(0x00000008)
299
300#define RCTL_EN UINT32_C(0x00000002)
301#define RCTL_UPE UINT32_C(0x00000008)
302#define RCTL_MPE UINT32_C(0x00000010)
303#define RCTL_LPE UINT32_C(0x00000020)
304#define RCTL_LBM_MASK UINT32_C(0x000000C0)
305#define RCTL_LBM_SHIFT 6
306#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
307#define RCTL_RDMTS_SHIFT 8
308#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
309#define RCTL_MO_MASK UINT32_C(0x00003000)
310#define RCTL_MO_SHIFT 12
311#define RCTL_BAM UINT32_C(0x00008000)
312#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
313#define RCTL_BSIZE_SHIFT 16
314#define RCTL_VFE UINT32_C(0x00040000)
315#define RCTL_CFIEN UINT32_C(0x00080000)
316#define RCTL_CFI UINT32_C(0x00100000)
317#define RCTL_BSEX UINT32_C(0x02000000)
318#define RCTL_SECRC UINT32_C(0x04000000)
319
320#define ICR_TXDW UINT32_C(0x00000001)
321#define ICR_TXQE UINT32_C(0x00000002)
322#define ICR_LSC UINT32_C(0x00000004)
323#define ICR_RXDMT0 UINT32_C(0x00000010)
324#define ICR_RXT0 UINT32_C(0x00000080)
325#define ICR_TXD_LOW UINT32_C(0x00008000)
326#define RDTR_FPD UINT32_C(0x80000000)
327
328#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
329typedef struct
330{
331 unsigned rxa : 7;
332 unsigned rxa_r : 9;
333 unsigned txa : 16;
334} PBAST;
335AssertCompileSize(PBAST, 4);
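
/*
 * PBA_st overlays the PBAST bit-field on the raw PBA register storage, so the
 * receive/transmit packet buffer split can be read without manual shifting.
 * Illustration only:
 */
#if 0
    E1kLog(("%s PBA: rxa=%u txa=%u\n", pThis->szPrf, PBA_st->rxa, PBA_st->txa));
#endif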
336
337#define TXDCTL_WTHRESH_MASK 0x003F0000
338#define TXDCTL_WTHRESH_SHIFT 16
339#define TXDCTL_LWTHRESH_MASK 0xFE000000
340#define TXDCTL_LWTHRESH_SHIFT 25
341
342#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
343#define RXCSUM_PCSS_SHIFT 0
344
345/** @name Register access macros
346 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
347 * @{ */
348#define CTRL pThis->auRegs[CTRL_IDX]
349#define STATUS pThis->auRegs[STATUS_IDX]
350#define EECD pThis->auRegs[EECD_IDX]
351#define EERD pThis->auRegs[EERD_IDX]
352#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
353#define FLA pThis->auRegs[FLA_IDX]
354#define MDIC pThis->auRegs[MDIC_IDX]
355#define FCAL pThis->auRegs[FCAL_IDX]
356#define FCAH pThis->auRegs[FCAH_IDX]
357#define FCT pThis->auRegs[FCT_IDX]
358#define VET pThis->auRegs[VET_IDX]
359#define ICR pThis->auRegs[ICR_IDX]
360#define ITR pThis->auRegs[ITR_IDX]
361#define ICS pThis->auRegs[ICS_IDX]
362#define IMS pThis->auRegs[IMS_IDX]
363#define IMC pThis->auRegs[IMC_IDX]
364#define RCTL pThis->auRegs[RCTL_IDX]
365#define FCTTV pThis->auRegs[FCTTV_IDX]
366#define TXCW pThis->auRegs[TXCW_IDX]
367#define RXCW pThis->auRegs[RXCW_IDX]
368#define TCTL pThis->auRegs[TCTL_IDX]
369#define TIPG pThis->auRegs[TIPG_IDX]
370#define AIFS pThis->auRegs[AIFS_IDX]
371#define LEDCTL pThis->auRegs[LEDCTL_IDX]
372#define PBA pThis->auRegs[PBA_IDX]
373#define FCRTL pThis->auRegs[FCRTL_IDX]
374#define FCRTH pThis->auRegs[FCRTH_IDX]
375#define RDFH pThis->auRegs[RDFH_IDX]
376#define RDFT pThis->auRegs[RDFT_IDX]
377#define RDFHS pThis->auRegs[RDFHS_IDX]
378#define RDFTS pThis->auRegs[RDFTS_IDX]
379#define RDFPC pThis->auRegs[RDFPC_IDX]
380#define RDBAL pThis->auRegs[RDBAL_IDX]
381#define RDBAH pThis->auRegs[RDBAH_IDX]
382#define RDLEN pThis->auRegs[RDLEN_IDX]
383#define RDH pThis->auRegs[RDH_IDX]
384#define RDT pThis->auRegs[RDT_IDX]
385#define RDTR pThis->auRegs[RDTR_IDX]
386#define RXDCTL pThis->auRegs[RXDCTL_IDX]
387#define RADV pThis->auRegs[RADV_IDX]
388#define RSRPD pThis->auRegs[RSRPD_IDX]
389#define TXDMAC pThis->auRegs[TXDMAC_IDX]
390#define TDFH pThis->auRegs[TDFH_IDX]
391#define TDFT pThis->auRegs[TDFT_IDX]
392#define TDFHS pThis->auRegs[TDFHS_IDX]
393#define TDFTS pThis->auRegs[TDFTS_IDX]
394#define TDFPC pThis->auRegs[TDFPC_IDX]
395#define TDBAL pThis->auRegs[TDBAL_IDX]
396#define TDBAH pThis->auRegs[TDBAH_IDX]
397#define TDLEN pThis->auRegs[TDLEN_IDX]
398#define TDH pThis->auRegs[TDH_IDX]
399#define TDT pThis->auRegs[TDT_IDX]
400#define TIDV pThis->auRegs[TIDV_IDX]
401#define TXDCTL pThis->auRegs[TXDCTL_IDX]
402#define TADV pThis->auRegs[TADV_IDX]
403#define TSPMT pThis->auRegs[TSPMT_IDX]
404#define CRCERRS pThis->auRegs[CRCERRS_IDX]
405#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
406#define SYMERRS pThis->auRegs[SYMERRS_IDX]
407#define RXERRC pThis->auRegs[RXERRC_IDX]
408#define MPC pThis->auRegs[MPC_IDX]
409#define SCC pThis->auRegs[SCC_IDX]
410#define ECOL pThis->auRegs[ECOL_IDX]
411#define MCC pThis->auRegs[MCC_IDX]
412#define LATECOL pThis->auRegs[LATECOL_IDX]
413#define COLC pThis->auRegs[COLC_IDX]
414#define DC pThis->auRegs[DC_IDX]
415#define TNCRS pThis->auRegs[TNCRS_IDX]
416/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
417#define CEXTERR pThis->auRegs[CEXTERR_IDX]
418#define RLEC pThis->auRegs[RLEC_IDX]
419#define XONRXC pThis->auRegs[XONRXC_IDX]
420#define XONTXC pThis->auRegs[XONTXC_IDX]
421#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
422#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
423#define FCRUC pThis->auRegs[FCRUC_IDX]
424#define PRC64 pThis->auRegs[PRC64_IDX]
425#define PRC127 pThis->auRegs[PRC127_IDX]
426#define PRC255 pThis->auRegs[PRC255_IDX]
427#define PRC511 pThis->auRegs[PRC511_IDX]
428#define PRC1023 pThis->auRegs[PRC1023_IDX]
429#define PRC1522 pThis->auRegs[PRC1522_IDX]
430#define GPRC pThis->auRegs[GPRC_IDX]
431#define BPRC pThis->auRegs[BPRC_IDX]
432#define MPRC pThis->auRegs[MPRC_IDX]
433#define GPTC pThis->auRegs[GPTC_IDX]
434#define GORCL pThis->auRegs[GORCL_IDX]
435#define GORCH pThis->auRegs[GORCH_IDX]
436#define GOTCL pThis->auRegs[GOTCL_IDX]
437#define GOTCH pThis->auRegs[GOTCH_IDX]
438#define RNBC pThis->auRegs[RNBC_IDX]
439#define RUC pThis->auRegs[RUC_IDX]
440#define RFC pThis->auRegs[RFC_IDX]
441#define ROC pThis->auRegs[ROC_IDX]
442#define RJC pThis->auRegs[RJC_IDX]
443#define MGTPRC pThis->auRegs[MGTPRC_IDX]
444#define MGTPDC pThis->auRegs[MGTPDC_IDX]
445#define MGTPTC pThis->auRegs[MGTPTC_IDX]
446#define TORL pThis->auRegs[TORL_IDX]
447#define TORH pThis->auRegs[TORH_IDX]
448#define TOTL pThis->auRegs[TOTL_IDX]
449#define TOTH pThis->auRegs[TOTH_IDX]
450#define TPR pThis->auRegs[TPR_IDX]
451#define TPT pThis->auRegs[TPT_IDX]
452#define PTC64 pThis->auRegs[PTC64_IDX]
453#define PTC127 pThis->auRegs[PTC127_IDX]
454#define PTC255 pThis->auRegs[PTC255_IDX]
455#define PTC511 pThis->auRegs[PTC511_IDX]
456#define PTC1023 pThis->auRegs[PTC1023_IDX]
457#define PTC1522 pThis->auRegs[PTC1522_IDX]
458#define MPTC pThis->auRegs[MPTC_IDX]
459#define BPTC pThis->auRegs[BPTC_IDX]
460#define TSCTC pThis->auRegs[TSCTC_IDX]
461#define TSCTFC pThis->auRegs[TSCTFC_IDX]
462#define RXCSUM pThis->auRegs[RXCSUM_IDX]
463#define WUC pThis->auRegs[WUC_IDX]
464#define WUFC pThis->auRegs[WUFC_IDX]
465#define WUS pThis->auRegs[WUS_IDX]
466#define MANC pThis->auRegs[MANC_IDX]
467#define IPAV pThis->auRegs[IPAV_IDX]
468#define WUPL pThis->auRegs[WUPL_IDX]
469/** @} */
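
/*
 * The aliases above keep register handlers readable.  Schematic illustration
 * only (with a local pThis in scope); the real code raises interrupt causes
 * through dedicated helpers rather than poking ICR directly:
 */
#if 0
    if (RCTL & RCTL_EN)         /* expands to pThis->auRegs[RCTL_IDX] & RCTL_EN */
        ICR |= ICR_RXT0;        /* receiver timer interrupt cause               */
#endif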
470
471/**
472 * Indices of memory-mapped registers in register table.
473 */
474typedef enum
475{
476 CTRL_IDX,
477 STATUS_IDX,
478 EECD_IDX,
479 EERD_IDX,
480 CTRL_EXT_IDX,
481 FLA_IDX,
482 MDIC_IDX,
483 FCAL_IDX,
484 FCAH_IDX,
485 FCT_IDX,
486 VET_IDX,
487 ICR_IDX,
488 ITR_IDX,
489 ICS_IDX,
490 IMS_IDX,
491 IMC_IDX,
492 RCTL_IDX,
493 FCTTV_IDX,
494 TXCW_IDX,
495 RXCW_IDX,
496 TCTL_IDX,
497 TIPG_IDX,
498 AIFS_IDX,
499 LEDCTL_IDX,
500 PBA_IDX,
501 FCRTL_IDX,
502 FCRTH_IDX,
503 RDFH_IDX,
504 RDFT_IDX,
505 RDFHS_IDX,
506 RDFTS_IDX,
507 RDFPC_IDX,
508 RDBAL_IDX,
509 RDBAH_IDX,
510 RDLEN_IDX,
511 RDH_IDX,
512 RDT_IDX,
513 RDTR_IDX,
514 RXDCTL_IDX,
515 RADV_IDX,
516 RSRPD_IDX,
517 TXDMAC_IDX,
518 TDFH_IDX,
519 TDFT_IDX,
520 TDFHS_IDX,
521 TDFTS_IDX,
522 TDFPC_IDX,
523 TDBAL_IDX,
524 TDBAH_IDX,
525 TDLEN_IDX,
526 TDH_IDX,
527 TDT_IDX,
528 TIDV_IDX,
529 TXDCTL_IDX,
530 TADV_IDX,
531 TSPMT_IDX,
532 CRCERRS_IDX,
533 ALGNERRC_IDX,
534 SYMERRS_IDX,
535 RXERRC_IDX,
536 MPC_IDX,
537 SCC_IDX,
538 ECOL_IDX,
539 MCC_IDX,
540 LATECOL_IDX,
541 COLC_IDX,
542 DC_IDX,
543 TNCRS_IDX,
544 SEC_IDX,
545 CEXTERR_IDX,
546 RLEC_IDX,
547 XONRXC_IDX,
548 XONTXC_IDX,
549 XOFFRXC_IDX,
550 XOFFTXC_IDX,
551 FCRUC_IDX,
552 PRC64_IDX,
553 PRC127_IDX,
554 PRC255_IDX,
555 PRC511_IDX,
556 PRC1023_IDX,
557 PRC1522_IDX,
558 GPRC_IDX,
559 BPRC_IDX,
560 MPRC_IDX,
561 GPTC_IDX,
562 GORCL_IDX,
563 GORCH_IDX,
564 GOTCL_IDX,
565 GOTCH_IDX,
566 RNBC_IDX,
567 RUC_IDX,
568 RFC_IDX,
569 ROC_IDX,
570 RJC_IDX,
571 MGTPRC_IDX,
572 MGTPDC_IDX,
573 MGTPTC_IDX,
574 TORL_IDX,
575 TORH_IDX,
576 TOTL_IDX,
577 TOTH_IDX,
578 TPR_IDX,
579 TPT_IDX,
580 PTC64_IDX,
581 PTC127_IDX,
582 PTC255_IDX,
583 PTC511_IDX,
584 PTC1023_IDX,
585 PTC1522_IDX,
586 MPTC_IDX,
587 BPTC_IDX,
588 TSCTC_IDX,
589 TSCTFC_IDX,
590 RXCSUM_IDX,
591 WUC_IDX,
592 WUFC_IDX,
593 WUS_IDX,
594 MANC_IDX,
595 IPAV_IDX,
596 WUPL_IDX,
597 MTA_IDX,
598 RA_IDX,
599 VFTA_IDX,
600 IP4AT_IDX,
601 IP6AT_IDX,
602 WUPM_IDX,
603 FFLT_IDX,
604 FFMT_IDX,
605 FFVT_IDX,
606 PBM_IDX,
607 RA_82542_IDX,
608 MTA_82542_IDX,
609 VFTA_82542_IDX,
610 E1K_NUM_OF_REGS
611} E1kRegIndex;
612
613#define E1K_NUM_OF_32BIT_REGS MTA_IDX
614/** The number of registers with strictly increasing offset. */
615#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
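
/*
 * Because the first E1K_NUM_OF_BINARY_SEARCHABLE entries of the register map
 * (g_aE1kRegMap, defined below) have strictly increasing offsets, an MMIO
 * offset can be resolved to a register index with a binary search.  A minimal
 * sketch; the real lookup must also handle the table-backed registers such as
 * MTA/RA/VFTA that follow the searchable part:
 */
#if 0
static int e1kExampleLookUpRegister(uint32_t offReg)
{
    int iLow  = 0;
    int iHigh = E1K_NUM_OF_BINARY_SEARCHABLE - 1;
    while (iLow <= iHigh)
    {
        int iMid = (iLow + iHigh) / 2;
        if (offReg < g_aE1kRegMap[iMid].offset)
            iHigh = iMid - 1;
        else if (offReg >= g_aE1kRegMap[iMid].offset + g_aE1kRegMap[iMid].size)
            iLow = iMid + 1;
        else
            return iMid;        /* offReg falls within this register */
    }
    return -1;                  /* no match among the searchable registers */
}
#endif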
616
617
618/**
619 * Define E1000-specific EEPROM layout.
620 */
621struct E1kEEPROM
622{
623 public:
624 EEPROM93C46 eeprom;
625
626#ifdef IN_RING3
627 /**
628 * Initialize EEPROM content.
629 *
630 * @param macAddr MAC address of E1000.
631 */
632 void init(RTMAC &macAddr)
633 {
634 eeprom.init();
635 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
636 eeprom.m_au16Data[0x04] = 0xFFFF;
637 /*
638 * bit 3 - full support for power management
639 * bit 10 - full duplex
640 */
641 eeprom.m_au16Data[0x0A] = 0x4408;
642 eeprom.m_au16Data[0x0B] = 0x001E;
643 eeprom.m_au16Data[0x0C] = 0x8086;
644 eeprom.m_au16Data[0x0D] = 0x100E;
645 eeprom.m_au16Data[0x0E] = 0x8086;
646 eeprom.m_au16Data[0x0F] = 0x3040;
647 eeprom.m_au16Data[0x21] = 0x7061;
648 eeprom.m_au16Data[0x22] = 0x280C;
649 eeprom.m_au16Data[0x23] = 0x00C8;
650 eeprom.m_au16Data[0x24] = 0x00C8;
651 eeprom.m_au16Data[0x2F] = 0x0602;
652 updateChecksum();
653 };
654
655 /**
656 * Compute the checksum as required by E1000 and store it
657 * in the last word.
658 */
659 void updateChecksum()
660 {
661 uint16_t u16Checksum = 0;
662
663 for (int i = 0; i < eeprom.SIZE-1; i++)
664 u16Checksum += eeprom.m_au16Data[i];
665 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
666 };
667
668 /**
669 * The first 6 bytes of the EEPROM contain the MAC address.
670 *
671 * @param pMac Where to store the MAC address of the E1000.
672 */
673 void getMac(PRTMAC pMac)
674 {
675 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
676 };
677
678 uint32_t read()
679 {
680 return eeprom.read();
681 }
682
683 void write(uint32_t u32Wires)
684 {
685 eeprom.write(u32Wires);
686 }
687
688 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
689 {
690 return eeprom.readWord(u32Addr, pu16Value);
691 }
692
693 int load(PSSMHANDLE pSSM)
694 {
695 return eeprom.load(pSSM);
696 }
697
698 void save(PSSMHANDLE pSSM)
699 {
700 eeprom.save(pSSM);
701 }
702#endif /* IN_RING3 */
703};
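
/*
 * The checksum convention implemented by updateChecksum() above is the usual
 * one for this controller family: all 16-bit EEPROM words must sum to 0xBABA
 * (modulo 2^16), and the last word is chosen to make that so.  Illustration
 * only -- a consistency check could be written as:
 */
#if 0
static bool e1kExampleEepromChecksumOk(const uint16_t *pau16Words, size_t cWords)
{
    uint16_t u16Sum = 0;
    for (size_t i = 0; i < cWords; i++)
        u16Sum += pau16Words[i];    /* 16-bit wrap-around is intended */
    return u16Sum == 0xBABA;
}
#endif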
704
705
706#define E1K_SPEC_VLAN(s) (s & 0xFFF)
707#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
708#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
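
/*
 * The "special" field of RX/TX descriptors carries an 802.1Q tag control word:
 * VLAN ID in bits 0-11, CFI in bit 12, priority in bits 13-15.  Illustration
 * only -- for a TCI value of 0x6123 the macros above yield:
 */
#if 0
    E1K_SPEC_VLAN(0x6123);  /* 0x123 -- VLAN ID 291   */
    E1K_SPEC_CFI(0x6123);   /* 0     -- canonical MAC */
    E1K_SPEC_PRI(0x6123);   /* 3     -- priority      */
#endif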
709
710struct E1kRxDStatus
711{
712 /** @name Descriptor Status field (3.2.3.1)
713 * @{ */
714 unsigned fDD : 1; /**< Descriptor Done. */
715 unsigned fEOP : 1; /**< End of packet. */
716 unsigned fIXSM : 1; /**< Ignore checksum indication. */
717 unsigned fVP : 1; /**< VLAN, matches VET. */
718 unsigned : 1;
719 unsigned fTCPCS : 1; /**< TCP Checksum calculated on the packet. */
720 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
721 unsigned fPIF : 1; /**< Passed in-exact filter */
722 /** @} */
723 /** @name Descriptor Errors field (3.2.3.2)
724 * (Only valid when fEOP and fDD are set.)
725 * @{ */
726 unsigned fCE : 1; /**< CRC or alignment error. */
727 unsigned : 4; /**< Reserved, varies with different models... */
728 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
729 unsigned fIPE : 1; /**< IP Checksum error. */
730 unsigned fRXE : 1; /**< RX Data error. */
731 /** @} */
732 /** @name Descriptor Special field (3.2.3.3)
733 * @{ */
734 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
735 /** @} */
736};
737typedef struct E1kRxDStatus E1KRXDST;
738
739struct E1kRxDesc_st
740{
741 uint64_t u64BufAddr; /**< Address of data buffer */
742 uint16_t u16Length; /**< Length of data in buffer */
743 uint16_t u16Checksum; /**< Packet checksum */
744 E1KRXDST status;
745};
746typedef struct E1kRxDesc_st E1KRXDESC;
747AssertCompileSize(E1KRXDESC, 16);
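
/*
 * Illustration only: when the device has stored a received frame into the
 * guest buffer it writes the descriptor back with Descriptor Done and End Of
 * Packet set.  Schematically (cbFrame is a made-up variable; the real
 * write-back also handles checksums, VLAN stripping and multi-buffer frames):
 */
#if 0
    E1KRXDESC Desc;                     /* descriptor previously fetched from guest memory */
    Desc.u16Length   = (uint16_t)cbFrame;
    Desc.status.fDD  = 1;               /* descriptor done                                 */
    Desc.status.fEOP = 1;               /* the frame fits into a single buffer             */
    /* ... write Desc back to guest memory and advance RDH ... */
#endif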
748
749#define E1K_DTYP_LEGACY -1
750#define E1K_DTYP_CONTEXT 0
751#define E1K_DTYP_DATA 1
752
753struct E1kTDLegacy
754{
755 uint64_t u64BufAddr; /**< Address of data buffer */
756 struct TDLCmd_st
757 {
758 unsigned u16Length : 16;
759 unsigned u8CSO : 8;
760 /* CMD field : 8 */
761 unsigned fEOP : 1;
762 unsigned fIFCS : 1;
763 unsigned fIC : 1;
764 unsigned fRS : 1;
765 unsigned fRPS : 1;
766 unsigned fDEXT : 1;
767 unsigned fVLE : 1;
768 unsigned fIDE : 1;
769 } cmd;
770 struct TDLDw3_st
771 {
772 /* STA field */
773 unsigned fDD : 1;
774 unsigned fEC : 1;
775 unsigned fLC : 1;
776 unsigned fTURSV : 1;
777 /* RSV field */
778 unsigned u4RSV : 4;
779 /* CSS field */
780 unsigned u8CSS : 8;
781 /* Special field*/
782 unsigned u16Special: 16;
783 } dw3;
784};
785
786/**
787 * TCP/IP Context Transmit Descriptor, section 3.3.6.
788 */
789struct E1kTDContext
790{
791 struct CheckSum_st
792 {
793 /** TSE: Header start. !TSE: Checksum start. */
794 unsigned u8CSS : 8;
795 /** Checksum offset - where to store it. */
796 unsigned u8CSO : 8;
797 /** Checksum ending (inclusive) offset, 0 = end of packet. */
798 unsigned u16CSE : 16;
799 } ip;
800 struct CheckSum_st tu;
801 struct TDCDw2_st
802 {
803 /** TSE: The total number of payload bytes for this context. Sans header. */
804 unsigned u20PAYLEN : 20;
805 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
806 unsigned u4DTYP : 4;
807 /** TUCMD field, 8 bits
808 * @{ */
809 /** TSE: TCP (set) or UDP (clear). */
810 unsigned fTCP : 1;
811 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
812 * the IP header. Does not affect the checksumming.
813 * @remarks 82544GC/EI interprets a cleared field differently. */
814 unsigned fIP : 1;
815 /** TSE: TCP segmentation enable. When clear, the context only describes checksum offloading. */
816 unsigned fTSE : 1;
817 /** Report status (here it only applies to dw3.fDD). */
818 unsigned fRS : 1;
819 /** Reserved, MBZ. */
820 unsigned fRSV1 : 1;
821 /** Descriptor extension, must be set for this descriptor type. */
822 unsigned fDEXT : 1;
823 /** Reserved, MBZ. */
824 unsigned fRSV2 : 1;
825 /** Interrupt delay enable. */
826 unsigned fIDE : 1;
827 /** @} */
828 } dw2;
829 struct TDCDw3_st
830 {
831 /** Descriptor Done. */
832 unsigned fDD : 1;
833 /** Reserved, MBZ. */
834 unsigned u7RSV : 7;
835 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
836 unsigned u8HDRLEN : 8;
837 /** TSO: Maximum segment size. */
838 unsigned u16MSS : 16;
839 } dw3;
840};
841typedef struct E1kTDContext E1KTXCTX;
842
843/**
844 * TCP/IP Data Transmit Descriptor, section 3.3.7.
845 */
846struct E1kTDData
847{
848 uint64_t u64BufAddr; /**< Address of data buffer */
849 struct TDDCmd_st
850 {
851 /** The total length of data pointed to by this descriptor. */
852 unsigned u20DTALEN : 20;
853 /** The descriptor type - E1K_DTYP_DATA (1). */
854 unsigned u4DTYP : 4;
855 /** @name DCMD field, 8 bits (3.3.7.1).
856 * @{ */
857 /** End of packet. Note TSCTFC update. */
858 unsigned fEOP : 1;
859 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
860 unsigned fIFCS : 1;
861 /** Use the TSE context when set and the normal when clear. */
862 unsigned fTSE : 1;
863 /** Report status (dw3.STA). */
864 unsigned fRS : 1;
865 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
866 unsigned fRPS : 1;
867 /** Descriptor extension, must be set for this descriptor type. */
868 unsigned fDEXT : 1;
869 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
870 * Insert dw3.SPECIAL after ethernet header. */
871 unsigned fVLE : 1;
872 /** Interrupt delay enable. */
873 unsigned fIDE : 1;
874 /** @} */
875 } cmd;
876 struct TDDDw3_st
877 {
878 /** @name STA field (3.3.7.2)
879 * @{ */
880 unsigned fDD : 1; /**< Descriptor done. */
881 unsigned fEC : 1; /**< Excess collision. */
882 unsigned fLC : 1; /**< Late collision. */
883 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
884 unsigned fTURSV : 1;
885 /** @} */
886 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
887 /** @name POPTS (Packet Option) field (3.3.7.3)
888 * @{ */
889 unsigned fIXSM : 1; /**< Insert IP checksum. */
890 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
891 unsigned u6RSV : 6; /**< Reserved, MBZ. */
892 /** @} */
893 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
894 * Requires fEOP, fVLE and CTRL.VME to be set.
895 * @{ */
896 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
897 /** @} */
898 } dw3;
899};
900typedef struct E1kTDData E1KTXDAT;
901
902union E1kTxDesc
903{
904 struct E1kTDLegacy legacy;
905 struct E1kTDContext context;
906 struct E1kTDData data;
907};
908typedef union E1kTxDesc E1KTXDESC;
909AssertCompileSize(E1KTXDESC, 16);
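
/*
 * A TX descriptor is in legacy format when its DEXT bit is clear; otherwise
 * the DTYP field distinguishes context (0) from data (1) descriptors.  A
 * minimal classification sketch using the E1K_DTYP_* values defined above:
 */
#if 0
static int e1kExampleGetDescType(const E1KTXDESC *pDesc)
{
    if (!pDesc->legacy.cmd.fDEXT)
        return E1K_DTYP_LEGACY;         /* extension bit clear: legacy format        */
    return (int)pDesc->data.cmd.u4DTYP; /* E1K_DTYP_CONTEXT (0) or E1K_DTYP_DATA (1) */
}
#endif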
910
911#define RA_CTL_AS 0x0003
912#define RA_CTL_AV 0x8000
913
914union E1kRecAddr
915{
916 uint32_t au32[32];
917 struct RAArray
918 {
919 uint8_t addr[6];
920 uint16_t ctl;
921 } array[16];
922};
923typedef struct E1kRecAddr::RAArray E1KRAELEM;
924typedef union E1kRecAddr E1KRA;
925AssertCompileSize(E1KRA, 8*16);
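
/*
 * Illustration only: unicast filtering walks the 16 receive-address entries
 * and accepts a frame whose destination MAC matches a valid (RA_CTL_AV)
 * entry.  A schematic match loop; the real filter additionally honours the
 * address-select bits, promiscuous mode, broadcast/multicast rules and the
 * multicast table array:
 */
#if 0
static bool e1kExampleMatchesRA(const E1KRA *pRA, const uint8_t *pbDstMac)
{
    for (unsigned i = 0; i < RT_ELEMENTS(pRA->array); i++)
        if (   (pRA->array[i].ctl & RA_CTL_AV)
            && memcmp(pRA->array[i].addr, pbDstMac, sizeof(pRA->array[i].addr)) == 0)
            return true;
    return false;
}
#endif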
926
927#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
928#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
929#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
930#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
931
932/** @todo use+extend RTNETIPV4 */
933struct E1kIpHeader
934{
935 /* type of service / version / header length */
936 uint16_t tos_ver_hl;
937 /* total length */
938 uint16_t total_len;
939 /* identification */
940 uint16_t ident;
941 /* fragment offset field */
942 uint16_t offset;
943 /* time to live / protocol*/
944 uint16_t ttl_proto;
945 /* checksum */
946 uint16_t chksum;
947 /* source IP address */
948 uint32_t src;
949 /* destination IP address */
950 uint32_t dest;
951};
952AssertCompileSize(struct E1kIpHeader, 20);
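
/*
 * For IP checksum offload (the IXSM packet option in data descriptors) the
 * device has to produce the standard one's-complement sum over the IP header.
 * Illustration only -- a minimal checksum routine over 16-bit words (IP
 * headers are always a multiple of two bytes long):
 */
#if 0
static uint16_t e1kExampleIpChecksum(const uint16_t *pu16Hdr, size_t cbHdr)
{
    uint32_t u32Sum = 0;
    for (size_t i = 0; i < cbHdr / 2; i++)
        u32Sum += pu16Hdr[i];                           /* checksum field must be 0 on input */
    while (u32Sum >> 16)
        u32Sum = (u32Sum & 0xFFFF) + (u32Sum >> 16);    /* fold the carries back in          */
    return (uint16_t)~u32Sum;
}
#endif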
953
954#define E1K_TCP_FIN UINT16_C(0x01)
955#define E1K_TCP_SYN UINT16_C(0x02)
956#define E1K_TCP_RST UINT16_C(0x04)
957#define E1K_TCP_PSH UINT16_C(0x08)
958#define E1K_TCP_ACK UINT16_C(0x10)
959#define E1K_TCP_URG UINT16_C(0x20)
960#define E1K_TCP_ECE UINT16_C(0x40)
961#define E1K_TCP_CWR UINT16_C(0x80)
962#define E1K_TCP_FLAGS UINT16_C(0x3f)
963
964/** @todo use+extend RTNETTCP */
965struct E1kTcpHeader
966{
967 uint16_t src;
968 uint16_t dest;
969 uint32_t seqno;
970 uint32_t ackno;
971 uint16_t hdrlen_flags;
972 uint16_t wnd;
973 uint16_t chksum;
974 uint16_t urgp;
975};
976AssertCompileSize(struct E1kTcpHeader, 20);
977
978
979#ifdef E1K_WITH_TXD_CACHE
980/** The current Saved state version. */
981# define E1K_SAVEDSTATE_VERSION 4
982/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
983# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
984#else /* !E1K_WITH_TXD_CACHE */
985/** The current Saved state version. */
986# define E1K_SAVEDSTATE_VERSION 3
987#endif /* !E1K_WITH_TXD_CACHE */
988/** Saved state version for VirtualBox 4.1 and earlier.
989 * These did not include VLAN tag fields. */
990#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
991/** Saved state version for VirtualBox 3.0 and earlier.
992 * This did not include the configuration part nor the E1kEEPROM. */
993#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
994
995/**
996 * Device state structure.
997 *
998 * Holds the current state of the device.
999 *
1000 * @implements PDMINETWORKDOWN
1001 * @implements PDMINETWORKCONFIG
1002 * @implements PDMILEDPORTS
1003 */
1004struct E1kState_st
1005{
1006 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1007 PDMIBASE IBase;
1008 PDMINETWORKDOWN INetworkDown;
1009 PDMINETWORKCONFIG INetworkConfig;
1010 PDMILEDPORTS ILeds; /**< LED interface */
1011 R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
1012 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1013
1014 PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
1015 R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
1016 R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
1017 PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
1018 PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
1019 PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
1020 PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
1021 PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
1022 PTMTIMERR3 pTXDTimerR3; /**< Transmit Delay Timer - R3. */
1023 PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
1024 PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
1025 /** The scatter / gather buffer used for the current outgoing packet - R3. */
1026 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1027
1028 PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
1029 R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
1030 R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
1031 PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
1032 PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
1033 PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
1034 PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
1035 PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
1036 PTMTIMERR0 pTXDTimerR0; /**< Transmit Delay Timer - R0. */
1037 PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
1038 PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
1039 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1040 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1041
1042 PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
1043 RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
1044 RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
1045 PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
1046 PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
1047 PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
1048 PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
1049 PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
1050 PTMTIMERRC pTXDTimerRC; /**< Transmit Delay Timer - RC. */
1051 PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
1052 PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
1053 /** The scatter / gather buffer used for the current outgoing packet - RC. */
1054 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1055 RTRCPTR RCPtrAlignment;
1056
1057#if HC_ARCH_BITS != 32
1058 uint32_t Alignment1;
1059#endif
1060 PDMCRITSECT cs; /**< Critical section - what is it protecting? */
1061 PDMCRITSECT csRx; /**< RX Critical section. */
1062#ifdef E1K_WITH_TX_CS
1063 PDMCRITSECT csTx; /**< TX Critical section. */
1064#endif /* E1K_WITH_TX_CS */
1065 /** Base address of memory-mapped registers. */
1066 RTGCPHYS addrMMReg;
1067 /** MAC address obtained from the configuration. */
1068 RTMAC macConfigured;
1069 /** Base port of I/O space region. */
1070 RTIOPORT IOPortBase;
1071 /** EMT: */
1072 PCIDEVICE pciDevice;
1073 /** EMT: Last time the interrupt was acknowledged. */
1074 uint64_t u64AckedAt;
1075 /** All: Used for eliminating spurious interrupts. */
1076 bool fIntRaised;
1077 /** EMT: false if the cable is disconnected by the GUI. */
1078 bool fCableConnected;
1079 /** EMT: */
1080 bool fR0Enabled;
1081 /** EMT: */
1082 bool fRCEnabled;
1083 /** EMT: Compute Ethernet CRC for RX packets. */
1084 bool fEthernetCRC;
1085 /** All: throttle interrupts. */
1086 bool fItrEnabled;
1087 /** All: throttle RX interrupts. */
1088 bool fItrRxEnabled;
1089
1090 bool Alignment2;
1091 /** Link up delay (in milliseconds). */
1092 uint32_t cMsLinkUpDelay;
1093
1094 /** All: Device register storage. */
1095 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1096 /** TX/RX: Status LED. */
1097 PDMLED led;
1098 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1099 uint32_t u32PktNo;
1100
1101 /** EMT: Offset of the register to be read via IO. */
1102 uint32_t uSelectedReg;
1103 /** EMT: Multicast Table Array. */
1104 uint32_t auMTA[128];
1105 /** EMT: Receive Address registers. */
1106 E1KRA aRecAddr;
1107 /** EMT: VLAN filter table array. */
1108 uint32_t auVFTA[128];
1109 /** EMT: Receive buffer size. */
1110 uint16_t u16RxBSize;
1111 /** EMT: Locked state -- no state alteration possible. */
1112 bool fLocked;
1113 /** EMT: */
1114 bool fDelayInts;
1115 /** All: */
1116 bool fIntMaskUsed;
1117
1118 /** N/A: */
1119 bool volatile fMaybeOutOfSpace;
1120 /** EMT: Gets signalled when more RX descriptors become available. */
1121 RTSEMEVENT hEventMoreRxDescAvail;
1122#ifdef E1K_WITH_RXD_CACHE
1123 /** RX: Fetched RX descriptors. */
1124 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1125 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1126 /** RX: Actual number of fetched RX descriptors. */
1127 uint32_t nRxDFetched;
1128 /** RX: Index in cache of RX descriptor being processed. */
1129 uint32_t iRxDCurrent;
1130#endif /* E1K_WITH_RXD_CACHE */
1131
1132 /** TX: Context used for TCP segmentation packets. */
1133 E1KTXCTX contextTSE;
1134 /** TX: Context used for ordinary packets. */
1135 E1KTXCTX contextNormal;
1136#ifdef E1K_WITH_TXD_CACHE
1137 /** TX: Fetched TX descriptors. */
1138 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1139 /** TX: Actual number of fetched TX descriptors. */
1140 uint8_t nTxDFetched;
1141 /** TX: Index in cache of TX descriptor being processed. */
1142 uint8_t iTxDCurrent;
1143 /** TX: Will this frame be sent as GSO. */
1144 bool fGSO;
1145 /** Alignment padding. */
1146 bool fReserved;
1147 /** TX: Number of bytes in next packet. */
1148 uint32_t cbTxAlloc;
1149
1150#endif /* E1K_WITH_TXD_CACHE */
1151 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1152 * applicable to the current TSE mode. */
1153 PDMNETWORKGSO GsoCtx;
1154 /** Scratch space for holding the loopback / fallback scatter / gather
1155 * descriptor. */
1156 union
1157 {
1158 PDMSCATTERGATHER Sg;
1159 uint8_t padding[8 * sizeof(RTUINTPTR)];
1160 } uTxFallback;
1161 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1162 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1163 /** TX: Number of bytes assembled in TX packet buffer. */
1164 uint16_t u16TxPktLen;
1165 /** TX: When false, forces segmentation in E1000 instead of sending frames as GSO. */
1166 bool fGSOEnabled;
1167 /** TX: IP checksum has to be inserted if true. */
1168 bool fIPcsum;
1169 /** TX: TCP/UDP checksum has to be inserted if true. */
1170 bool fTCPcsum;
1171 /** TX: VLAN tag has to be inserted if true. */
1172 bool fVTag;
1173 /** TX: TCI part of VLAN tag to be inserted. */
1174 uint16_t u16VTagTCI;
1175 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1176 uint32_t u32PayRemain;
1177 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1178 uint16_t u16HdrRemain;
1179 /** TX TSE fallback: Flags from template header. */
1180 uint16_t u16SavedFlags;
1181 /** TX TSE fallback: Partial checksum from template header. */
1182 uint32_t u32SavedCsum;
1183 /** ?: Emulated controller type. */
1184 E1KCHIP eChip;
1185
1186 /** EMT: EEPROM emulation */
1187 E1kEEPROM eeprom;
1188 /** EMT: Physical interface emulation. */
1189 PHY phy;
1190
1191#if 0
1192 /** Alignment padding. */
1193 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1194#endif
1195
1196 STAMCOUNTER StatReceiveBytes;
1197 STAMCOUNTER StatTransmitBytes;
1198#if defined(VBOX_WITH_STATISTICS)
1199 STAMPROFILEADV StatMMIOReadRZ;
1200 STAMPROFILEADV StatMMIOReadR3;
1201 STAMPROFILEADV StatMMIOWriteRZ;
1202 STAMPROFILEADV StatMMIOWriteR3;
1203 STAMPROFILEADV StatEEPROMRead;
1204 STAMPROFILEADV StatEEPROMWrite;
1205 STAMPROFILEADV StatIOReadRZ;
1206 STAMPROFILEADV StatIOReadR3;
1207 STAMPROFILEADV StatIOWriteRZ;
1208 STAMPROFILEADV StatIOWriteR3;
1209 STAMPROFILEADV StatLateIntTimer;
1210 STAMCOUNTER StatLateInts;
1211 STAMCOUNTER StatIntsRaised;
1212 STAMCOUNTER StatIntsPrevented;
1213 STAMPROFILEADV StatReceive;
1214 STAMPROFILEADV StatReceiveCRC;
1215 STAMPROFILEADV StatReceiveFilter;
1216 STAMPROFILEADV StatReceiveStore;
1217 STAMPROFILEADV StatTransmitRZ;
1218 STAMPROFILEADV StatTransmitR3;
1219 STAMPROFILE StatTransmitSendRZ;
1220 STAMPROFILE StatTransmitSendR3;
1221 STAMPROFILE StatRxOverflow;
1222 STAMCOUNTER StatRxOverflowWakeup;
1223 STAMCOUNTER StatTxDescCtxNormal;
1224 STAMCOUNTER StatTxDescCtxTSE;
1225 STAMCOUNTER StatTxDescLegacy;
1226 STAMCOUNTER StatTxDescData;
1227 STAMCOUNTER StatTxDescTSEData;
1228 STAMCOUNTER StatTxPathFallback;
1229 STAMCOUNTER StatTxPathGSO;
1230 STAMCOUNTER StatTxPathRegular;
1231 STAMCOUNTER StatPHYAccesses;
1232 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1233 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1234#endif /* VBOX_WITH_STATISTICS */
1235
1236#ifdef E1K_INT_STATS
1237 /* Internal stats */
1238 uint64_t u64ArmedAt;
1239 uint64_t uStatMaxTxDelay;
1240 uint32_t uStatInt;
1241 uint32_t uStatIntTry;
1242 uint32_t uStatIntLower;
1243 uint32_t uStatIntDly;
1244 int32_t iStatIntLost;
1245 int32_t iStatIntLostOne;
1246 uint32_t uStatDisDly;
1247 uint32_t uStatIntSkip;
1248 uint32_t uStatIntLate;
1249 uint32_t uStatIntMasked;
1250 uint32_t uStatIntEarly;
1251 uint32_t uStatIntRx;
1252 uint32_t uStatIntTx;
1253 uint32_t uStatIntICS;
1254 uint32_t uStatIntRDTR;
1255 uint32_t uStatIntRXDMT0;
1256 uint32_t uStatIntTXQE;
1257 uint32_t uStatTxNoRS;
1258 uint32_t uStatTxIDE;
1259 uint32_t uStatTxDelayed;
1260 uint32_t uStatTxDelayExp;
1261 uint32_t uStatTAD;
1262 uint32_t uStatTID;
1263 uint32_t uStatRAD;
1264 uint32_t uStatRID;
1265 uint32_t uStatRxFrm;
1266 uint32_t uStatTxFrm;
1267 uint32_t uStatDescCtx;
1268 uint32_t uStatDescDat;
1269 uint32_t uStatDescLeg;
1270 uint32_t uStatTx1514;
1271 uint32_t uStatTx2962;
1272 uint32_t uStatTx4410;
1273 uint32_t uStatTx5858;
1274 uint32_t uStatTx7306;
1275 uint32_t uStatTx8754;
1276 uint32_t uStatTx16384;
1277 uint32_t uStatTx32768;
1278 uint32_t uStatTxLarge;
1279 uint32_t uStatAlign;
1280#endif /* E1K_INT_STATS */
1281};
1282typedef struct E1kState_st E1KSTATE;
1283/** Pointer to the E1000 device state. */
1284typedef E1KSTATE *PE1KSTATE;
1285
1286#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1287
1288/* Forward declarations ******************************************************/
1289static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread);
1290
1291static int e1kRegReadUnimplemented (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1292static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1293static int e1kRegReadAutoClear (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1294static int e1kRegReadDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1295static int e1kRegWriteDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1296#if 0 /* unused */
1297static int e1kRegReadCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1298#endif
1299static int e1kRegWriteCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1300static int e1kRegReadEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1301static int e1kRegWriteEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1302static int e1kRegWriteEERD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1303static int e1kRegWriteMDIC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1304static int e1kRegReadICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1305static int e1kRegWriteICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1306static int e1kRegWriteICS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1307static int e1kRegWriteIMS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1308static int e1kRegWriteIMC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1309static int e1kRegWriteRCTL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1310static int e1kRegWritePBA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1311static int e1kRegWriteRDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1312static int e1kRegWriteRDTR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1313static int e1kRegWriteTDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1314static int e1kRegReadMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1315static int e1kRegWriteMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1316static int e1kRegReadRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1317static int e1kRegWriteRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1318static int e1kRegReadVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1319static int e1kRegWriteVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1320
1321/**
1322 * Register map table.
1323 *
1324 * Override pfnRead and pfnWrite to get register-specific behavior.
1325 */
1326static const struct E1kRegMap_st
1327{
1328 /** Register offset in the register space. */
1329 uint32_t offset;
1330 /** Size in bytes. Registers of size > 4 are in fact tables. */
1331 uint32_t size;
1332 /** Readable bits. */
1333 uint32_t readable;
1334 /** Writable bits. */
1335 uint32_t writable;
1336 /** Read callback. */
1337 int (*pfnRead)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1338 /** Write callback. */
1339 int (*pfnWrite)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1340 /** Abbreviated name. */
1341 const char *abbrev;
1342 /** Full name. */
1343 const char *name;
1344} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1345{
1346 /* offset size read mask write mask read callback write callback abbrev full name */
1347 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1348 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1349 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1350 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1351 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1352 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1353 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1354 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1355 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1356 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1357 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1358 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1359 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1360 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1361 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1362 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1363 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1364 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1365 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1366 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1367 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1368 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1369 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1370 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1371 { 0x00e00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LEDCTL" , "LED Control" },
1372 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1373 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1374 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1375 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1376 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1377 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1378 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1379 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1380 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1381 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1382 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1383 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1384 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1385 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1386 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1387 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1388 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1389 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1390 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1391 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1392 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1393 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1394 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1395 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1396 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1397 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1398 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1399 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1400 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1401 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1402 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1403 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1404 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1405 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1406 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1407 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1408 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1409 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1410 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1411 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1412 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1413 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1414 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1415 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1416 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1417 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1418 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1419 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1420 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1421 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1422 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1423 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1424 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1425 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1426 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1427 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1428 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1429 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1430 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1431 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1432 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1433 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1434 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1435 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1436 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1437 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1438 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1439 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1440 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1441 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1442 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1443 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1444 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1445 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1446 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1447 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1448 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1449 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1450 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1451 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1452 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1453 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1454 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1455 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1456 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1457 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1458 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1459 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1460 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1461 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1462 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1463 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1464 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1465 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1466 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1467 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1468 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1469 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1470 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1471 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1472 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1473 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1474 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1475 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1476 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1477 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1478 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1479 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1480 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1481 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1482};
1483
1484#ifdef LOG_ENABLED
1485
1486/**
1487 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1488 *
1489 * @remarks The mask has byte (not bit) granularity (e.g. 000000FF).
1490 *
1491 * @returns The buffer.
1492 *
1493 * @param u32 The word to convert into string.
1494 * @param mask Selects which bytes to convert.
1495 * @param buf Where to put the result.
1496 */
1497static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1498{
1499 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1500 {
1501 if (mask & 0xF)
1502 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1503 else
1504 *ptr = '.';
1505 }
1506 buf[8] = 0;
1507 return buf;
1508}
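/*
 * Illustrative usage sketch (not part of the device code and never compiled;
 * shown as it would appear inside a caller): with the byte-granular mask
 * 0x00FF00FF only the second and fourth bytes of the value are rendered,
 * the remaining positions become dots.
 */
#if 0
    char szBuf[9];                              /* 8 hex digits + terminator */
    e1kU32toHex(0x12AB34CD, 0x00FF00FF, szBuf); /* szBuf now holds "..AB..CD" */
#endif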
1509
1510/**
1511 * Returns timer name for debug purposes.
1512 *
1513 * @returns The timer name.
1514 *
1515 * @param pThis The device state structure.
1516 * @param pTimer The timer to get the name for.
1517 */
1518DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, PTMTIMER pTimer)
1519{
1520 if (pTimer == pThis->CTX_SUFF(pTIDTimer))
1521 return "TID";
1522 if (pTimer == pThis->CTX_SUFF(pTADTimer))
1523 return "TAD";
1524 if (pTimer == pThis->CTX_SUFF(pRIDTimer))
1525 return "RID";
1526 if (pTimer == pThis->CTX_SUFF(pRADTimer))
1527 return "RAD";
1528 if (pTimer == pThis->CTX_SUFF(pIntTimer))
1529 return "Int";
1530 if (pTimer == pThis->CTX_SUFF(pTXDTimer))
1531 return "TXD";
1532 if (pTimer == pThis->CTX_SUFF(pLUTimer))
1533 return "LinkUp";
1534 return "unknown";
1535}
1536
1537#endif /* LOG_ENABLED */
1538
1539/**
1540 * Arm a timer.
1541 *
1542 * @param pThis Pointer to the device state structure.
1543 * @param pTimer Pointer to the timer.
1544 * @param uExpireIn Expiration interval in microseconds.
1545 */
1546DECLINLINE(void) e1kArmTimer(PE1KSTATE pThis, PTMTIMER pTimer, uint32_t uExpireIn)
1547{
1548 if (pThis->fLocked)
1549 return;
1550
1551 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1552 pThis->szPrf, e1kGetTimerName(pThis, pTimer), uExpireIn));
1553 TMTimerSetMicro(pTimer, uExpireIn);
1554}
1555
1556#ifdef IN_RING3
1557/**
1558 * Cancel a timer.
1559 *
1560 * @param pThis Pointer to the device state structure.
1561 * @param pTimer Pointer to the timer.
1562 */
1563DECLINLINE(void) e1kCancelTimer(PE1KSTATE pThis, PTMTIMER pTimer)
1564{
1565 E1kLog2(("%s Stopping %s timer...\n",
1566 pThis->szPrf, e1kGetTimerName(pThis, pTimer)));
1567 int rc = TMTimerStop(pTimer);
1568 if (RT_FAILURE(rc))
1569 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1570 pThis->szPrf, rc));
1571 RT_NOREF1(pThis);
1572}
1573#endif /* IN_RING3 */
1574
1575#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1576#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1577
1578#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1579#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1580#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
1581
1582#ifndef E1K_WITH_TX_CS
1583# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1584# define e1kCsTxLeave(ps) do { } while (0)
1585#else /* E1K_WITH_TX_CS */
1586# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1587# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1588#endif /* E1K_WITH_TX_CS */
1589
1590#ifdef IN_RING3
1591
1592/**
1593 * Wakeup the RX thread.
1594 */
1595static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1596{
1597 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
1598 if ( pThis->fMaybeOutOfSpace
1599 && pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1600 {
1601 STAM_COUNTER_INC(&pThis->StatRxOverflowWakeup);
1602 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1603 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
1604 }
1605}
1606
1607/**
1608 * Hardware reset. Revert all registers to initial values.
1609 *
1610 * @param pThis The device state structure.
1611 */
1612static void e1kHardReset(PE1KSTATE pThis)
1613{
1614 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1615 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1616 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1617#ifdef E1K_INIT_RA0
1618 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1619 sizeof(pThis->macConfigured.au8));
1620 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1621#endif /* E1K_INIT_RA0 */
1622 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1623 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1624 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1625 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1626 Assert(GET_BITS(RCTL, BSIZE) == 0);
1627 pThis->u16RxBSize = 2048;
1628
1629 /* Reset promiscuous mode */
1630 if (pThis->pDrvR3)
1631 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, false);
1632
1633#ifdef E1K_WITH_TXD_CACHE
1634 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1635 if (RT_LIKELY(rc == VINF_SUCCESS))
1636 {
1637 pThis->nTxDFetched = 0;
1638 pThis->iTxDCurrent = 0;
1639 pThis->fGSO = false;
1640 pThis->cbTxAlloc = 0;
1641 e1kCsTxLeave(pThis);
1642 }
1643#endif /* E1K_WITH_TXD_CACHE */
1644#ifdef E1K_WITH_RXD_CACHE
1645 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1646 {
1647 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1648 e1kCsRxLeave(pThis);
1649 }
1650#endif /* E1K_WITH_RXD_CACHE */
1651}
1652
1653#endif /* IN_RING3 */
1654
1655/**
1656 * Compute Internet checksum.
1657 *
1658 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1659 *
1660 * @param pvBuf The buffer to compute the checksum over.
1661 * @param cb The size of the buffer in bytes.
1664 *
1665 * @return The 1's complement of the 1's complement sum.
1666 *
1667 * @thread E1000_TX
1668 */
1669static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1670{
1671 uint32_t csum = 0;
1672 uint16_t *pu16 = (uint16_t *)pvBuf;
1673
1674 while (cb > 1)
1675 {
1676 csum += *pu16++;
1677 cb -= 2;
1678 }
1679 if (cb)
1680 csum += *(uint8_t*)pu16;
1681 while (csum >> 16)
1682 csum = (csum >> 16) + (csum & 0xFFFF);
1683 return ~csum;
1684}
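/*
 * Worked example (illustration only, never compiled; assumes a little-endian
 * host): for the 4-byte buffer 45 00 00 1C the loop reads the 16-bit words
 * 0x0045 and 0x1C00, sums them to 0x1C45 (no carry to fold) and returns the
 * one's complement 0xE3BA. An odd trailing byte is added as the low byte of
 * a word, and any carry out of bit 15 is folded back in by the final loop.
 */
#if 0
    static const uint8_t s_abHdr[] = { 0x45, 0x00, 0x00, 0x1C };
    uint16_t u16Csum = e1kCSum16(s_abHdr, sizeof(s_abHdr)); /* 0xE3BA */
#endif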
1685
1686/**
1687 * Dump a packet to debug log.
1688 *
1689 * @param pThis The device state structure.
1690 * @param cpPacket The packet.
1691 * @param cb The size of the packet.
1692 * @param pszText A string denoting direction of packet transfer.
1693 * @thread E1000_TX
1694 */
1695DECLINLINE(void) e1kPacketDump(PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1696{
1697#ifdef DEBUG
1698 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1699 {
1700 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1701 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1702 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1703 {
1704 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1705 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1706 if (*(cpPacket+14+6) == 0x6)
1707 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1708 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1709 }
1710 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1711 {
1712 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1713 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1714 if (*(cpPacket+14+6) == 0x6)
1715 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1716 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1717 }
1718 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1719 e1kCsLeave(pThis);
1720 }
1721#else
1722 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1723 {
1724 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1725 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1726 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1727 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1728 else
1729 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1730 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1731 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1732 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1733 e1kCsLeave(pThis);
1734 }
1735 RT_NOREF2(cb, pszText);
1736#endif
1737}
1738
1739/**
1740 * Determine the type of transmit descriptor.
1741 *
1742 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1743 *
1744 * @param pDesc Pointer to descriptor union.
1745 * @thread E1000_TX
1746 */
1747DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1748{
1749 if (pDesc->legacy.cmd.fDEXT)
1750 return pDesc->context.dw2.u4DTYP;
1751 return E1K_DTYP_LEGACY;
1752}
1753
1754
1755#if defined(E1K_WITH_RXD_CACHE) && defined(IN_RING3) /* currently only used in ring-3 due to stack space requirements of the caller */
1756/**
1757 * Dump receive descriptor to debug log.
1758 *
1759 * @param pThis The device state structure.
1760 * @param pDesc Pointer to the descriptor.
1761 * @thread E1000_RX
1762 */
1763static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
1764{
1765 RT_NOREF2(pThis, pDesc);
1766 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
1767 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1768 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1769 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1770 pDesc->status.fPIF ? "PIF" : "pif",
1771 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1772 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1773 pDesc->status.fVP ? "VP" : "vp",
1774 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1775 pDesc->status.fEOP ? "EOP" : "eop",
1776 pDesc->status.fDD ? "DD" : "dd",
1777 pDesc->status.fRXE ? "RXE" : "rxe",
1778 pDesc->status.fIPE ? "IPE" : "ipe",
1779 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1780 pDesc->status.fCE ? "CE" : "ce",
1781 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1782 E1K_SPEC_VLAN(pDesc->status.u16Special),
1783 E1K_SPEC_PRI(pDesc->status.u16Special)));
1784}
1785#endif /* E1K_WITH_RXD_CACHE && IN_RING3 */
1786
1787/**
1788 * Dump transmit descriptor to debug log.
1789 *
1790 * @param pThis The device state structure.
1791 * @param pDesc Pointer to descriptor union.
1792 * @param pszDir A string denoting direction of descriptor transfer
1793 * @thread E1000_TX
1794 */
1795static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
1796 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1797{
1798 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
1799
1800 /*
1801 * Unfortunately we cannot use our format handler here, we want R0 logging
1802 * as well.
1803 */
1804 switch (e1kGetDescType(pDesc))
1805 {
1806 case E1K_DTYP_CONTEXT:
1807 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
1808 pThis->szPrf, pszDir, pszDir));
1809 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1810 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1811 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1812 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1813 pDesc->context.dw2.fIDE ? " IDE":"",
1814 pDesc->context.dw2.fRS ? " RS" :"",
1815 pDesc->context.dw2.fTSE ? " TSE":"",
1816 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1817 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1818 pDesc->context.dw2.u20PAYLEN,
1819 pDesc->context.dw3.u8HDRLEN,
1820 pDesc->context.dw3.u16MSS,
1821 pDesc->context.dw3.fDD?"DD":""));
1822 break;
1823 case E1K_DTYP_DATA:
1824 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1825 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
1826 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1827 pDesc->data.u64BufAddr,
1828 pDesc->data.cmd.u20DTALEN));
1829 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1830 pDesc->data.cmd.fIDE ? " IDE" :"",
1831 pDesc->data.cmd.fVLE ? " VLE" :"",
1832 pDesc->data.cmd.fRPS ? " RPS" :"",
1833 pDesc->data.cmd.fRS ? " RS" :"",
1834 pDesc->data.cmd.fTSE ? " TSE" :"",
1835 pDesc->data.cmd.fIFCS? " IFCS":"",
1836 pDesc->data.cmd.fEOP ? " EOP" :"",
1837 pDesc->data.dw3.fDD ? " DD" :"",
1838 pDesc->data.dw3.fEC ? " EC" :"",
1839 pDesc->data.dw3.fLC ? " LC" :"",
1840 pDesc->data.dw3.fTXSM? " TXSM":"",
1841 pDesc->data.dw3.fIXSM? " IXSM":"",
1842 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1843 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1844 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1845 break;
1846 case E1K_DTYP_LEGACY:
1847 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1848 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
1849 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1850 pDesc->data.u64BufAddr,
1851 pDesc->legacy.cmd.u16Length));
1852 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1853 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1854 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1855 pDesc->legacy.cmd.fRPS ? " RPS" :"",
1856 pDesc->legacy.cmd.fRS ? " RS" :"",
1857 pDesc->legacy.cmd.fIC ? " IC" :"",
1858 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1859 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1860 pDesc->legacy.dw3.fDD ? " DD" :"",
1861 pDesc->legacy.dw3.fEC ? " EC" :"",
1862 pDesc->legacy.dw3.fLC ? " LC" :"",
1863 pDesc->legacy.cmd.u8CSO,
1864 pDesc->legacy.dw3.u8CSS,
1865 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1866 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1867 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1868 break;
1869 default:
1870 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
1871 pThis->szPrf, pszDir, pszDir));
1872 break;
1873 }
1874}
1875
1876/**
1877 * Raise an interrupt later.
1878 *
1879 * @param pThis The device state structure.
 * @param uNanoseconds The delay before raising the interrupt, in nanoseconds.
1880 */
1881inline void e1kPostponeInterrupt(PE1KSTATE pThis, uint64_t uNanoseconds)
1882{
1883 if (!TMTimerIsActive(pThis->CTX_SUFF(pIntTimer)))
1884 TMTimerSetNano(pThis->CTX_SUFF(pIntTimer), uNanoseconds);
1885}
1886
1887/**
1888 * Raise interrupt if not masked.
1889 *
1890 * @param pThis The device state structure.
 * @param rcBusy The status code to return when the critical section is busy.
 * @param u32IntCause The interrupt cause bits to set in ICR.
1891 */
1892static int e1kRaiseInterrupt(PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause = 0)
1893{
1894 int rc = e1kCsEnter(pThis, rcBusy);
1895 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1896 return rc;
1897
1898 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
1899 ICR |= u32IntCause;
1900 if (ICR & IMS)
1901 {
1902 if (pThis->fIntRaised)
1903 {
1904 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
1905 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
1906 pThis->szPrf, ICR & IMS));
1907 }
1908 else
1909 {
1910 uint64_t tsNow = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
1911 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
1912 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
1913 {
1914 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
1915 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
1916 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
1917 e1kPostponeInterrupt(pThis, ITR * 256);
1918 }
1919 else
1920 {
1921
1922 /* Since we are delivering the interrupt now
1923 * there is no need to do it later -- stop the timer.
1924 */
1925 TMTimerStop(pThis->CTX_SUFF(pIntTimer));
1926 E1K_INC_ISTAT_CNT(pThis->uStatInt);
1927 STAM_COUNTER_INC(&pThis->StatIntsRaised);
1928 /* Got at least one unmasked interrupt cause */
1929 pThis->fIntRaised = true;
1930 /* Raise(1) INTA(0) */
1931 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
1932 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 1);
1933 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
1934 pThis->szPrf, ICR & IMS));
1935 }
1936 }
1937 }
1938 else
1939 {
1940 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
1941 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
1942 pThis->szPrf, ICR, IMS));
1943 }
1944 e1kCsLeave(pThis);
1945 return VINF_SUCCESS;
1946}
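/*
 * Throttling arithmetic, for illustration (example values only): ITR holds
 * the minimum inter-interrupt interval in 256 ns units, which is what the
 * "tsNow - u64AckedAt < ITR * 256" check above enforces. With ITR = 1000 the
 * device raises at most one interrupt per 1000 * 256 ns = 256 us, roughly
 * 3906 interrupts per second; an attempt that comes too early is postponed
 * via the interrupt timer instead of being delivered immediately.
 */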
1947
1948/**
1949 * Compute the physical address of the descriptor.
1950 *
1951 * @returns the physical address of the descriptor.
1952 *
1953 * @param baseHigh High-order 32 bits of descriptor table address.
1954 * @param baseLow Low-order 32 bits of descriptor table address.
1955 * @param idxDesc The descriptor index in the table.
1956 */
1957DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
1958{
1959 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
1960 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
1961}
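/*
 * Example with illustrative values: for a descriptor table at
 * RDBAH = 0x00000001, RDBAL = 0x00020000 and 16-byte descriptors,
 * descriptor index 4 resolves to guest-physical address
 * 0x0000000100020000 + 4 * 16 = 0x0000000100020040.
 */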
1962
1963#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
1964/**
1965 * Advance the head pointer of the receive descriptor queue.
1966 *
1967 * @remarks RDH always points to the next available RX descriptor.
1968 *
1969 * @param pThis The device state structure.
1970 */
1971DECLINLINE(void) e1kAdvanceRDH(PE1KSTATE pThis)
1972{
1973 Assert(e1kCsRxIsOwner(pThis));
1974 //e1kCsEnter(pThis, RT_SRC_POS);
1975 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
1976 RDH = 0;
1977 /*
1978 * Compute current receive queue length and fire RXDMT0 interrupt
1979 * if we are low on receive buffers
1980 */
1981 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
1982 /*
1983 * The minimum threshold is controlled by RDMTS bits of RCTL:
1984 * 00 = 1/2 of RDLEN
1985 * 01 = 1/4 of RDLEN
1986 * 10 = 1/8 of RDLEN
1987 * 11 = reserved
1988 */
1989 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
1990 if (uRQueueLen <= uMinRQThreshold)
1991 {
1992 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
1993 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
1994 pThis->szPrf, RDH, RDT, uRQueueLen, uMinRQThreshold));
1995 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
1996 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXDMT0);
1997 }
1998 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
1999 pThis->szPrf, RDH, RDT, uRQueueLen));
2000 //e1kCsLeave(pThis);
2001}
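/*
 * Worked example of the RXDMT0 threshold computed above (illustrative
 * numbers): with RDLEN = 4096 bytes the ring holds 4096 / 16 = 256
 * descriptors. RDMTS = 00b yields 2 << 0 = 2, i.e. a threshold of 128
 * descriptors (1/2 of the ring); RDMTS = 01b yields 64 (1/4) and
 * RDMTS = 10b yields 32 (1/8), matching the table in the comment above.
 */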
2002#endif /* IN_RING3 */
2003
2004#ifdef E1K_WITH_RXD_CACHE
2005
2006/**
2007 * Return the number of RX descriptors that belong to the hardware.
2008 *
2009 * @returns the number of available descriptors in RX ring.
2010 * @param pThis The device state structure.
2011 * @thread ???
2012 */
2013DECLINLINE(uint32_t) e1kGetRxLen(PE1KSTATE pThis)
2014{
2015 /*
2016 * Make sure RDT won't change during computation. EMT may modify RDT at
2017 * any moment.
2018 */
2019 uint32_t rdt = RDT;
2020 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
2021}
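/*
 * Example of the wrap-around handling above (illustrative numbers): with a
 * 256-descriptor ring, RDH = 250 and RDT = 10 the hardware owns
 * 256 + 10 - 250 = 16 descriptors; with RDH = 10 and RDT = 250 it owns
 * 250 - 10 = 240. The descriptor at RDT itself is never counted as
 * hardware-owned.
 */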
2022
2023DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
2024{
2025 return pThis->nRxDFetched > pThis->iRxDCurrent ?
2026 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
2027}
2028
2029DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
2030{
2031 return pThis->iRxDCurrent >= pThis->nRxDFetched;
2032}
2033
2034/**
2035 * Load receive descriptors from guest memory. The caller needs to be in the
2036 * RX critical section.
2037 *
2038 * We need two physical reads in case the tail wrapped around the end of the
2039 * RX descriptor ring.
2040 *
2041 * @returns the actual number of descriptors fetched.
2042 * @param pThis The device state structure.
2045 * @thread EMT, RX
2046 */
2047DECLINLINE(unsigned) e1kRxDPrefetch(PE1KSTATE pThis)
2048{
2049 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
2050 unsigned nDescsAvailable = e1kGetRxLen(pThis) - e1kRxDInCache(pThis);
2051 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
2052 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
2053 Assert(nDescsTotal != 0);
2054 if (nDescsTotal == 0)
2055 return 0;
2056 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pThis)) % nDescsTotal;
2057 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
2058 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
2059 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
2060 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
2061 nFirstNotLoaded, nDescsInSingleRead));
2062 if (nDescsToFetch == 0)
2063 return 0;
2064 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
2065 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2066 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
2067 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
2068 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
2069 // unsigned i, j;
2070 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
2071 // {
2072 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
2073 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2074 // }
2075 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
2076 pThis->szPrf, nDescsInSingleRead,
2077 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
2078 nFirstNotLoaded, RDLEN, RDH, RDT));
2079 if (nDescsToFetch > nDescsInSingleRead)
2080 {
2081 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2082 ((uint64_t)RDBAH << 32) + RDBAL,
2083 pFirstEmptyDesc + nDescsInSingleRead,
2084 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
2085 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
2086 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
2087 // {
2088 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
2089 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2090 // }
2091 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
2092 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
2093 RDBAH, RDBAL));
2094 }
2095 pThis->nRxDFetched += nDescsToFetch;
2096 return nDescsToFetch;
2097}
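/*
 * Example of the split read above (illustrative numbers, assuming an RXD
 * cache of 16 entries): with a 256-descriptor ring, an empty cache,
 * RDH = 250 and RDT = 10 there are 16 descriptors to fetch. The first
 * PDMDevHlpPhysRead() covers descriptors 250..255 (6 entries, up to the end
 * of the ring), the second wraps to the ring base and covers 0..9 (the
 * remaining 10 entries).
 */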
2098
2099# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2100
2101/**
2102 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2103 * RX ring if the cache is empty.
2104 *
2105 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2106 * go out of sync with RDH which will cause trouble when EMT checks if the
2107 * cache is empty to do pre-fetch (see @bugref{6217}).
2108 *
2109 * @param pThis The device state structure.
2110 * @thread RX
2111 */
2112DECLINLINE(E1KRXDESC*) e1kRxDGet(PE1KSTATE pThis)
2113{
2114 Assert(e1kCsRxIsOwner(pThis));
2115 /* Check the cache first. */
2116 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2117 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2118 /* Cache is empty, reset it and check if we can fetch more. */
2119 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2120 if (e1kRxDPrefetch(pThis))
2121 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2122 /* Out of Rx descriptors. */
2123 return NULL;
2124}
2125
2126
2127/**
2128 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2129 * pointer. The descriptor gets written back to the RXD ring.
2130 *
2131 * @param pThis The device state structure.
2132 * @param pDesc The descriptor being "returned" to the RX ring.
2133 * @thread RX
2134 */
2135DECLINLINE(void) e1kRxDPut(PE1KSTATE pThis, E1KRXDESC* pDesc)
2136{
2137 Assert(e1kCsRxIsOwner(pThis));
2138 pThis->iRxDCurrent++;
2139 // Assert(pDesc >= pThis->aRxDescriptors);
2140 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2141 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2142 // uint32_t rdh = RDH;
2143 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2144 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2145 e1kDescAddr(RDBAH, RDBAL, RDH),
2146 pDesc, sizeof(E1KRXDESC));
2147 e1kAdvanceRDH(pThis);
2148 e1kPrintRDesc(pThis, pDesc);
2149}
2150
2151/**
2152 * Store a fragment of a received packet at the specified address.
2153 *
2154 * @param pThis The device state structure.
2155 * @param pDesc The next available RX descriptor.
2156 * @param pvBuf The fragment.
2157 * @param cb The size of the fragment.
2158 */
2159static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2160{
2161 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2162 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2163 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2164 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2165 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2166 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2167}
2168
2169# endif
2170
2171#else /* !E1K_WITH_RXD_CACHE */
2172
2173/**
2174 * Store a fragment of a received packet that fits into the next available RX
2175 * buffer.
2176 *
2177 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2178 *
2179 * @param pThis The device state structure.
2180 * @param pDesc The next available RX descriptor.
2181 * @param pvBuf The fragment.
2182 * @param cb The size of the fragment.
2183 */
2184static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2185{
2186 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2187 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2188 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2189 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2190 /* Write back the descriptor */
2191 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2192 e1kPrintRDesc(pThis, pDesc);
2193 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2194 /* Advance head */
2195 e1kAdvanceRDH(pThis);
2196 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2197 if (pDesc->status.fEOP)
2198 {
2199 /* Complete packet has been stored -- it is time to let the guest know. */
2200#ifdef E1K_USE_RX_TIMERS
2201 if (RDTR)
2202 {
2203 /* Arm the timer to fire in RDTR usec (discard .024) */
2204 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2205 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2206 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2207 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2208 }
2209 else
2210 {
2211#endif
2212 /* 0 delay means immediate interrupt */
2213 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2214 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2215#ifdef E1K_USE_RX_TIMERS
2216 }
2217#endif
2218 }
2219 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2220}
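/*
 * Interrupt moderation sketch for the timers armed above (illustrative
 * values; strictly speaking a tick is 1.024 us, the .024 is deliberately
 * ignored here): with RDTR = 100 the RXT0 interrupt fires about 100 us
 * after the last received packet, each new packet re-arming the delay,
 * while a non-zero RADV puts an absolute upper bound on how long a packet
 * may wait for its interrupt.
 */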
2221
2222#endif /* !E1K_WITH_RXD_CACHE */
2223
2224/**
2225 * Returns true if it is a broadcast packet.
2226 *
2227 * @returns true if destination address indicates broadcast.
2228 * @param pvBuf The ethernet packet.
2229 */
2230DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2231{
2232 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2233 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2234}
2235
2236/**
2237 * Returns true if it is a multicast packet.
2238 *
2239 * @remarks returns true for broadcast packets as well.
2240 * @returns true if destination address indicates multicast.
2241 * @param pvBuf The ethernet packet.
2242 */
2243DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2244{
2245 return (*(char*)pvBuf) & 1;
2246}
2247
2248#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2249/**
2250 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2251 *
2252 * @remarks We emulate checksum offloading for major packet types only.
2253 *
2254 * @returns VBox status code.
2255 * @param pThis The device state structure.
2256 * @param pFrame The available data.
2257 * @param cb Number of bytes available in the buffer.
2258 * @param pStatus Pointer to the bit fields containing status info.
2259 */
2260static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2261{
2262 /** @todo
2263 * It is not safe to bypass checksum verification for packets coming
2264 * from the real wire. We are currently unable to tell where packets are
2265 * coming from, so we tell the driver to ignore our checksum flags
2266 * and do verification in software.
2267 */
2268# if 0
2269 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2270
2271 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2272
2273 switch (uEtherType)
2274 {
2275 case 0x800: /* IPv4 */
2276 {
2277 pStatus->fIXSM = false;
2278 pStatus->fIPCS = true;
2279 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2280 /* TCP/UDP checksum offloading works with TCP and UDP only */
2281 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2282 break;
2283 }
2284 case 0x86DD: /* IPv6 */
2285 pStatus->fIXSM = false;
2286 pStatus->fIPCS = false;
2287 pStatus->fTCPCS = true;
2288 break;
2289 default: /* ARP, VLAN, etc. */
2290 pStatus->fIXSM = true;
2291 break;
2292 }
2293# else
2294 pStatus->fIXSM = true;
2295 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2296# endif
2297 return VINF_SUCCESS;
2298}
2299#endif /* IN_RING3 */
2300
2301/**
2302 * Pad and store received packet.
2303 *
2304 * @remarks Make sure that the packet appears to the upper layer as one coming
2305 * from real Ethernet: pad it and insert FCS.
2306 *
2307 * @returns VBox status code.
2308 * @param pThis The device state structure.
2309 * @param pvBuf The available data.
2310 * @param cb Number of bytes available in the buffer.
2311 * @param status Bit fields containing status info.
2312 */
2313static int e1kHandleRxPacket(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2314{
2315#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2316 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2317 uint8_t *ptr = rxPacket;
2318
2319 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2320 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2321 return rc;
2322
2323 if (cb > 70) /* unqualified guess */
2324 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2325
2326 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2327 Assert(cb > 16);
2328 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2329 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2330 if (status.fVP)
2331 {
2332 /* VLAN packet -- strip VLAN tag in VLAN mode */
2333 if ((CTRL & CTRL_VME) && cb > 16)
2334 {
2335 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2336 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2337 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2338 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2339 cb -= 4;
2340 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2341 pThis->szPrf, status.u16Special, cb));
2342 }
2343 else
2344 status.fVP = false; /* Set VP only if we stripped the tag */
2345 }
2346 else
2347 memcpy(rxPacket, pvBuf, cb);
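    /*
     * Frame layout handled by the VLAN stripping above, for reference
     * (standard 802.1Q framing, not specific to this device): bytes 0..11
     * are the destination and source MAC addresses, bytes 12..13 hold the
     * 0x8100 tag protocol identifier, bytes 14..15 the 16-bit tag
     * (priority/CFI/VLAN id), and the encapsulated EtherType starts at
     * byte 16. Stripping thus copies bytes 0..11, takes the tag from
     * offset 14 and moves the rest of the frame down by 4 bytes.
     */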
2348 /* Pad short packets */
2349 if (cb < 60)
2350 {
2351 memset(rxPacket + cb, 0, 60 - cb);
2352 cb = 60;
2353 }
2354 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2355 {
2356 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2357 /*
2358 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2359 * is ignored by most of drivers we may as well save us the trouble
2360 * of calculating it (see EthernetCRC CFGM parameter).
2361 */
2362 if (pThis->fEthernetCRC)
2363 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2364 cb += sizeof(uint32_t);
2365 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2366 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2367 }
2368 /* Compute checksum of complete packet */
2369 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2370 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2371
2372 /* Update stats */
2373 E1K_INC_CNT32(GPRC);
2374 if (e1kIsBroadcast(pvBuf))
2375 E1K_INC_CNT32(BPRC);
2376 else if (e1kIsMulticast(pvBuf))
2377 E1K_INC_CNT32(MPRC);
2378 /* Update octet receive counter */
2379 E1K_ADD_CNT64(GORCL, GORCH, cb);
2380 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2381 if (cb == 64)
2382 E1K_INC_CNT32(PRC64);
2383 else if (cb < 128)
2384 E1K_INC_CNT32(PRC127);
2385 else if (cb < 256)
2386 E1K_INC_CNT32(PRC255);
2387 else if (cb < 512)
2388 E1K_INC_CNT32(PRC511);
2389 else if (cb < 1024)
2390 E1K_INC_CNT32(PRC1023);
2391 else
2392 E1K_INC_CNT32(PRC1522);
2393
2394 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2395
2396# ifdef E1K_WITH_RXD_CACHE
2397 while (cb > 0)
2398 {
2399 E1KRXDESC *pDesc = e1kRxDGet(pThis);
2400
2401 if (pDesc == NULL)
2402 {
2403 E1kLog(("%s Out of receive buffers, dropping the packet "
2404 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2405 pThis->szPrf, cb, e1kRxDInCache(pThis), RDH, RDT));
2406 break;
2407 }
2408# else /* !E1K_WITH_RXD_CACHE */
2409 if (RDH == RDT)
2410 {
2411 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2412 pThis->szPrf));
2413 }
2414 /* Store the packet to receive buffers */
2415 while (RDH != RDT)
2416 {
2417 /* Load the descriptor pointed by head */
2418 E1KRXDESC desc, *pDesc = &desc;
2419 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2420 &desc, sizeof(desc));
2421# endif /* !E1K_WITH_RXD_CACHE */
2422 if (pDesc->u64BufAddr)
2423 {
2424 /* Update descriptor */
2425 pDesc->status = status;
2426 pDesc->u16Checksum = checksum;
2427 pDesc->status.fDD = true;
2428
2429 /*
2430 * We need to leave Rx critical section here or we risk deadlocking
2431 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2432 * page or has an access handler associated with it.
2433 * Note that it is safe to leave the critical section here since
2434 * e1kRegWriteRDT() never modifies RDH. It never touches already
2435 * fetched RxD cache entries either.
2436 */
2437 if (cb > pThis->u16RxBSize)
2438 {
2439 pDesc->status.fEOP = false;
2440 e1kCsRxLeave(pThis);
2441 e1kStoreRxFragment(pThis, pDesc, ptr, pThis->u16RxBSize);
2442 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2443 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2444 return rc;
2445 ptr += pThis->u16RxBSize;
2446 cb -= pThis->u16RxBSize;
2447 }
2448 else
2449 {
2450 pDesc->status.fEOP = true;
2451 e1kCsRxLeave(pThis);
2452 e1kStoreRxFragment(pThis, pDesc, ptr, cb);
2453# ifdef E1K_WITH_RXD_CACHE
2454 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2455 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2456 return rc;
2457 cb = 0;
2458# else /* !E1K_WITH_RXD_CACHE */
2459 pThis->led.Actual.s.fReading = 0;
2460 return VINF_SUCCESS;
2461# endif /* !E1K_WITH_RXD_CACHE */
2462 }
2463 /*
2464 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2465 * is not defined.
2466 */
2467 }
2468# ifdef E1K_WITH_RXD_CACHE
2469 /* Write back the descriptor. */
2470 pDesc->status.fDD = true;
2471 e1kRxDPut(pThis, pDesc);
2472# else /* !E1K_WITH_RXD_CACHE */
2473 else
2474 {
2475 /* Write back the descriptor. */
2476 pDesc->status.fDD = true;
2477 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2478 e1kDescAddr(RDBAH, RDBAL, RDH),
2479 pDesc, sizeof(E1KRXDESC));
2480 e1kAdvanceRDH(pThis);
2481 }
2482# endif /* !E1K_WITH_RXD_CACHE */
2483 }
2484
2485 if (cb > 0)
2486 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2487
2488 pThis->led.Actual.s.fReading = 0;
2489
2490 e1kCsRxLeave(pThis);
2491# ifdef E1K_WITH_RXD_CACHE
2492 /* Complete packet has been stored -- it is time to let the guest know. */
2493# ifdef E1K_USE_RX_TIMERS
2494 if (RDTR)
2495 {
2496 /* Arm the timer to fire in RDTR usec (discard .024) */
2497 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2498 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2499 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2500 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2501 }
2502 else
2503 {
2504# endif /* E1K_USE_RX_TIMERS */
2505 /* 0 delay means immediate interrupt */
2506 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2507 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2508# ifdef E1K_USE_RX_TIMERS
2509 }
2510# endif /* E1K_USE_RX_TIMERS */
2511# endif /* E1K_WITH_RXD_CACHE */
2512
2513 return VINF_SUCCESS;
2514#else /* !IN_RING3 */
2515 RT_NOREF_PV(pThis); RT_NOREF_PV(pvBuf); RT_NOREF_PV(cb); RT_NOREF_PV(status);
2516 return VERR_INTERNAL_ERROR_2;
2517#endif /* !IN_RING3 */
2518}
2519
2520
2521/**
2522 * Bring the link up after the configured delay, 5 seconds by default.
2523 *
2524 * @param pThis The device state structure.
2525 * @thread any
2526 */
2527DECLINLINE(void) e1kBringLinkUpDelayed(PE1KSTATE pThis)
2528{
2529 E1kLog(("%s Will bring up the link in %d seconds...\n",
2530 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2531 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), pThis->cMsLinkUpDelay * 1000);
2532}
2533
2534#ifdef IN_RING3
2535/**
2536 * Bring up the link immediately.
2537 *
2538 * @param pThis The device state structure.
2539 */
2540DECLINLINE(void) e1kR3LinkUp(PE1KSTATE pThis)
2541{
2542 E1kLog(("%s Link is up\n", pThis->szPrf));
2543 STATUS |= STATUS_LU;
2544 Phy::setLinkStatus(&pThis->phy, true);
2545 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2546 if (pThis->pDrvR3)
2547 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_UP);
2548}
2549
2550/**
2551 * Bring down the link immediately.
2552 *
2553 * @param pThis The device state structure.
2554 */
2555DECLINLINE(void) e1kR3LinkDown(PE1KSTATE pThis)
2556{
2557 E1kLog(("%s Link is down\n", pThis->szPrf));
2558 STATUS &= ~STATUS_LU;
2559 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2560 if (pThis->pDrvR3)
2561 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2562}
2563
2564/**
2565 * Bring down the link temporarily.
2566 *
2567 * @param pThis The device state structure.
2568 */
2569DECLINLINE(void) e1kR3LinkDownTemp(PE1KSTATE pThis)
2570{
2571 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2572 STATUS &= ~STATUS_LU;
2573 Phy::setLinkStatus(&pThis->phy, false);
2574 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2575 /*
2576 * Notifying the associated driver that the link went down (even temporarily)
2577 * seems to be the right thing, but it was not done before. This may cause
2578 * a regression if the driver does not expect the link to go down as a result
2579 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2580 * of code notified the driver that the link was up! See @bugref{7057}.
2581 */
2582 if (pThis->pDrvR3)
2583 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2584 e1kBringLinkUpDelayed(pThis);
2585}
2586#endif /* IN_RING3 */
2587
2588#if 0 /* unused */
2589/**
2590 * Read handler for Device Status register.
2591 *
2592 * Get the link status from PHY.
2593 *
2594 * @returns VBox status code.
2595 *
2596 * @param pThis The device state structure.
2597 * @param offset Register offset in memory-mapped frame.
2598 * @param index Register index in register array.
2599 * @param mask Used to implement partial reads (8 and 16-bit).
2600 */
2601static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2602{
2603 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2604 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2605 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2606 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2607 {
2608 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2609 if (Phy::readMDIO(&pThis->phy))
2610 *pu32Value = CTRL | CTRL_MDIO;
2611 else
2612 *pu32Value = CTRL & ~CTRL_MDIO;
2613 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2614 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2615 }
2616 else
2617 {
2618 /* MDIO pin is used for output, ignore it */
2619 *pu32Value = CTRL;
2620 }
2621 return VINF_SUCCESS;
2622}
2623#endif /* unused */
2624
2625/**
2626 * Write handler for Device Control register.
2627 *
2628 * Handles reset.
2629 *
2630 * @param pThis The device state structure.
2631 * @param offset Register offset in memory-mapped frame.
2632 * @param index Register index in register array.
2633 * @param value The value to store.
2634 * @param mask Used to implement partial writes (8 and 16-bit).
2635 * @thread EMT
2636 */
2637static int e1kRegWriteCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2638{
2639 int rc = VINF_SUCCESS;
2640
2641 if (value & CTRL_RESET)
2642 { /* RST */
2643#ifndef IN_RING3
2644 return VINF_IOM_R3_MMIO_WRITE;
2645#else
2646 e1kHardReset(pThis);
2647#endif
2648 }
2649 else
2650 {
2651 if ( (value & CTRL_SLU)
2652 && pThis->fCableConnected
2653 && !(STATUS & STATUS_LU))
2654 {
2655 /* The driver indicates that we should bring up the link */
2656 /* Do so in 5 seconds (by default). */
2657 e1kBringLinkUpDelayed(pThis);
2658 /*
2659 * Change the status (but not PHY status) anyway as Windows expects
2660 * it for 82543GC.
2661 */
2662 STATUS |= STATUS_LU;
2663 }
2664 if (value & CTRL_VME)
2665 {
2666 E1kLog(("%s VLAN Mode Enabled\n", pThis->szPrf));
2667 }
2668 E1kLog(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2669 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2670 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2671 if (value & CTRL_MDC)
2672 {
2673 if (value & CTRL_MDIO_DIR)
2674 {
2675 E1kLog(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2676 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2677 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO));
2678 }
2679 else
2680 {
2681 if (Phy::readMDIO(&pThis->phy))
2682 value |= CTRL_MDIO;
2683 else
2684 value &= ~CTRL_MDIO;
2685 E1kLog(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n",
2686 pThis->szPrf, !!(value & CTRL_MDIO)));
2687 }
2688 }
2689 rc = e1kRegWriteDefault(pThis, offset, index, value);
2690 }
2691
2692 return rc;
2693}
2694
2695/**
2696 * Write handler for EEPROM/Flash Control/Data register.
2697 *
2698 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2699 *
2700 * @param pThis The device state structure.
2701 * @param offset Register offset in memory-mapped frame.
2702 * @param index Register index in register array.
2703 * @param value The value to store.
2704 * @param mask Used to implement partial writes (8 and 16-bit).
2705 * @thread EMT
2706 */
2707static int e1kRegWriteEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2708{
2709 RT_NOREF(offset, index);
2710#ifdef IN_RING3
2711 /* So far we are concerned with lower byte only */
2712 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2713 {
2714 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2715 /* Note: 82543GC does not need to request EEPROM access */
2716 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
2717 pThis->eeprom.write(value & EECD_EE_WIRES);
2718 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
2719 }
2720 if (value & EECD_EE_REQ)
2721 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2722 else
2723 EECD &= ~EECD_EE_GNT;
2724 //e1kRegWriteDefault(pThis, offset, index, value );
2725
2726 return VINF_SUCCESS;
2727#else /* !IN_RING3 */
2728 RT_NOREF(pThis, value);
2729 return VINF_IOM_R3_MMIO_WRITE;
2730#endif /* !IN_RING3 */
2731}
2732
2733/**
2734 * Read handler for EEPROM/Flash Control/Data register.
2735 *
2736 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2737 *
2738 * @returns VBox status code.
2739 *
2740 * @param pThis The device state structure.
2741 * @param offset Register offset in memory-mapped frame.
2742 * @param index Register index in register array.
2743 * @param mask Used to implement partial reads (8 and 16-bit).
2744 * @thread EMT
2745 */
2746static int e1kRegReadEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2747{
2748#ifdef IN_RING3
2749 uint32_t value;
2750 int rc = e1kRegReadDefault(pThis, offset, index, &value);
2751 if (RT_SUCCESS(rc))
2752 {
2753 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2754 {
2755 /* Note: 82543GC does not need to request EEPROM access */
2756 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2757 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2758 value |= pThis->eeprom.read();
2759 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2760 }
2761 *pu32Value = value;
2762 }
2763
2764 return rc;
2765#else /* !IN_RING3 */
2766 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
2767 return VINF_IOM_R3_MMIO_READ;
2768#endif /* !IN_RING3 */
2769}
2770
2771/**
2772 * Write handler for EEPROM Read register.
2773 *
2774 * Handles EEPROM word access requests, reads EEPROM and stores the result
2775 * into DATA field.
2776 *
2777 * @param pThis The device state structure.
2778 * @param offset Register offset in memory-mapped frame.
2779 * @param index Register index in register array.
2780 * @param value The value to store.
2781 * @param mask Used to implement partial writes (8 and 16-bit).
2782 * @thread EMT
2783 */
2784static int e1kRegWriteEERD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2785{
2786#ifdef IN_RING3
2787 /* Make use of 'writable' and 'readable' masks. */
2788 e1kRegWriteDefault(pThis, offset, index, value);
2789 /* DONE and DATA are set only if read was triggered by START. */
2790 if (value & EERD_START)
2791 {
2792 uint16_t tmp;
2793 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2794 if (pThis->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2795 SET_BITS(EERD, DATA, tmp);
2796 EERD |= EERD_DONE;
2797 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2798 }
2799
2800 return VINF_SUCCESS;
2801#else /* !IN_RING3 */
2802 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
2803 return VINF_IOM_R3_MMIO_WRITE;
2804#endif /* !IN_RING3 */
2805}
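/*
 * EERD usage sketch (register protocol only, illustrative): to read, say,
 * EEPROM word 0 the guest writes EERD with ADDR = 0 and the START bit set;
 * the handler above performs the read synchronously via DevEEPROM, stores
 * the word in the DATA field and sets DONE, so a driver polling for DONE
 * succeeds on its first read of EERD.
 */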
2806
2807
2808/**
2809 * Write handler for MDI Control register.
2810 *
2811 * Handles PHY read/write requests; forwards requests to internal PHY device.
2812 *
2813 * @param pThis The device state structure.
2814 * @param offset Register offset in memory-mapped frame.
2815 * @param index Register index in register array.
2816 * @param value The value to store.
2817 * @param mask Used to implement partial writes (8 and 16-bit).
2818 * @thread EMT
2819 */
2820static int e1kRegWriteMDIC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2821{
2822 if (value & MDIC_INT_EN)
2823 {
2824 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2825 pThis->szPrf));
2826 }
2827 else if (value & MDIC_READY)
2828 {
2829 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
2830 pThis->szPrf));
2831 }
2832 else if (GET_BITS_V(value, MDIC, PHY) != 1)
2833 {
2834 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
2835 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
2836 /*
2837 * Some drivers scan the MDIO bus for a PHY. We can work with these
2838 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
2839 * at the requested address, see @bugref{7346}.
2840 */
2841 MDIC = MDIC_READY | MDIC_ERROR;
2842 }
2843 else
2844 {
2845 /* Store the value */
2846 e1kRegWriteDefault(pThis, offset, index, value);
2847 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
2848 /* Forward op to PHY */
2849 if (value & MDIC_OP_READ)
2850 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG)));
2851 else
2852 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
2853 /* Let software know that we are done */
2854 MDIC |= MDIC_READY;
2855 }
2856
2857 return VINF_SUCCESS;
2858}
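/*
 * Typical MDIC access sequence handled above (a sketch of the register
 * protocol, not code from any particular driver): to read a PHY register
 * the guest writes MDIC with PHY address 1, the register number in REG and
 * the read opcode, READY cleared; the handler performs Phy::readRegister()
 * synchronously and sets READY together with DATA, so the driver's
 * subsequent polling of MDIC.READY succeeds immediately. Writes work the
 * same way with the write opcode and the value in the DATA field.
 */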
2859
2860/**
2861 * Write handler for Interrupt Cause Read register.
2862 *
2863 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2864 *
2865 * @param pThis The device state structure.
2866 * @param offset Register offset in memory-mapped frame.
2867 * @param index Register index in register array.
2868 * @param value The value to store.
2869 * @param mask Used to implement partial writes (8 and 16-bit).
2870 * @thread EMT
2871 */
2872static int e1kRegWriteICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2873{
2874 ICR &= ~value;
2875
2876 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
2877 return VINF_SUCCESS;
2878}
2879
2880/**
2881 * Read handler for Interrupt Cause Read register.
2882 *
2883 * Reading this register acknowledges all interrupts.
2884 *
2885 * @returns VBox status code.
2886 *
2887 * @param pThis The device state structure.
2888 * @param offset Register offset in memory-mapped frame.
2889 * @param index Register index in register array.
2890 * @param mask Not used.
2891 * @thread EMT
2892 */
2893static int e1kRegReadICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2894{
2895 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
2896 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2897 return rc;
2898
2899 uint32_t value = 0;
2900 rc = e1kRegReadDefault(pThis, offset, index, &value);
2901 if (RT_SUCCESS(rc))
2902 {
2903 /* Do not return masked bits. */
2904 value &= IMS;
2905 if (value)
2906 {
2907 /*
2908 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
2909 * with disabled interrupts.
2910 */
2911 //if (IMS)
2912 if (1)
2913 {
2914 /*
2915 * Interrupts were enabled -- we are supposedly at the very
2916 * beginning of interrupt handler
2917 */
2918 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
2919 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
2920 /* Clear all pending interrupts */
2921 ICR = 0;
2922 pThis->fIntRaised = false;
2923 /* Lower(0) INTA(0) */
2924 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
2925
2926 pThis->u64AckedAt = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
2927 if (pThis->fIntMaskUsed)
2928 pThis->fDelayInts = true;
2929 }
2930 else
2931 {
2932 /*
2933 * Interrupts are disabled -- in windows guests ICR read is done
2934 * just before re-enabling interrupts
2935 */
2936 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
2937 }
2938 }
2939 *pu32Value = value;
2940 }
2941 e1kCsLeave(pThis);
2942
2943 return rc;
2944}
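/*
 * For reference, the read-to-clear behaviour above matches what guest ISRs
 * expect: a single ICR read at the top of the interrupt handler returns the
 * pending unmasked cause bits and, when any are set, clears ICR, deasserts
 * INTA, records the acknowledge time in u64AckedAt and, if fIntMaskUsed is
 * set, arms interrupt delaying via fDelayInts.
 */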
2945
2946/**
2947 * Write handler for Interrupt Cause Set register.
2948 *
2949 * Bits corresponding to 1s in 'value' will be set in ICR register.
2950 *
2951 * @param pThis The device state structure.
2952 * @param offset Register offset in memory-mapped frame.
2953 * @param index Register index in register array.
2954 * @param value The value to store.
2956 * @thread EMT
2957 */
2958static int e1kRegWriteICS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2959{
2960 RT_NOREF_PV(offset); RT_NOREF_PV(index);
2961 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
2962 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
2963}
2964
2965/**
2966 * Write handler for Interrupt Mask Set register.
2967 *
2968 * Will trigger pending interrupts.
2969 *
2970 * @param pThis The device state structure.
2971 * @param offset Register offset in memory-mapped frame.
2972 * @param index Register index in register array.
2973 * @param value The value to store.
2975 * @thread EMT
2976 */
2977static int e1kRegWriteIMS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2978{
2979 RT_NOREF_PV(offset); RT_NOREF_PV(index);
2980
2981 IMS |= value;
2982 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
2983 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
2984 e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, 0);
2985
2986 return VINF_SUCCESS;
2987}
2988
2989/**
2990 * Write handler for Interrupt Mask Clear register.
2991 *
2992 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
2993 *
2994 * @param pThis The device state structure.
2995 * @param offset Register offset in memory-mapped frame.
2996 * @param index Register index in register array.
2997 * @param value The value to store.
2999 * @thread EMT
3000 */
3001static int e1kRegWriteIMC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3002{
3003 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3004
3005 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3006 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3007 return rc;
3008 if (pThis->fIntRaised)
3009 {
3010 /*
 3011 * Technically we should reset fIntRaised in the ICR read handler, but doing so causes
 3012 * Windows to freeze since it may receive an interrupt while still at the very beginning
 3013 * of its interrupt handler.
3014 */
3015 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3016 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3017 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3018 /* Lower(0) INTA(0) */
3019 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
3020 pThis->fIntRaised = false;
3021 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3022 }
3023 IMS &= ~value;
3024 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3025 e1kCsLeave(pThis);
3026
3027 return VINF_SUCCESS;
3028}
3029
3030/**
3031 * Write handler for Receive Control register.
3032 *
3033 * @param pThis The device state structure.
3034 * @param offset Register offset in memory-mapped frame.
3035 * @param index Register index in register array.
3036 * @param value The value to store.
3038 * @thread EMT
3039 */
3040static int e1kRegWriteRCTL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3041{
3042 /* Update promiscuous mode */
3043 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
3044 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3045 {
3046 /* Promiscuity has changed, pass the knowledge on. */
3047#ifndef IN_RING3
3048 return VINF_IOM_R3_MMIO_WRITE;
3049#else
3050 if (pThis->pDrvR3)
3051 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, fBecomePromiscous);
3052#endif
3053 }
3054
3055 /* Adjust receive buffer size */
3056 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3057 if (value & RCTL_BSEX)
3058 cbRxBuf *= 16;
3059 if (cbRxBuf != pThis->u16RxBSize)
3060 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3061 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3062 pThis->u16RxBSize = cbRxBuf;
3063
3064 /* Update the register */
3065 e1kRegWriteDefault(pThis, offset, index, value);
3066
3067 return VINF_SUCCESS;
3068}
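/*
 * Worked example of the receive buffer sizing above (values follow directly
 * from the BSIZE/BSEX formula): BSIZE=00b, BSEX=0 yields 2048 >> 0 = 2048
 * bytes; BSIZE=11b, BSEX=0 yields 2048 >> 3 = 256 bytes; BSIZE=01b, BSEX=1
 * yields (2048 >> 1) * 16 = 16384 bytes.
 */
#if 0 /* Minimal illustrative sketch of the same computation; hypothetical helper, never compiled. */
static unsigned e1kIllustrateRxBufSize(uint32_t uBSize, bool fBSex)
{
    unsigned cb = 2048 >> uBSize;   /* BSIZE selects 2048, 1024, 512 or 256 bytes. */
    if (fBSex)
        cb *= 16;                   /* BSEX scales the selection up by 16. */
    return cb;
}
#endif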
3069
3070/**
3071 * Write handler for Packet Buffer Allocation register.
3072 *
3073 * TXA = 64 - RXA.
3074 *
3075 * @param pThis The device state structure.
3076 * @param offset Register offset in memory-mapped frame.
3077 * @param index Register index in register array.
3078 * @param value The value to store.
3080 * @thread EMT
3081 */
3082static int e1kRegWritePBA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3083{
3084 e1kRegWriteDefault(pThis, offset, index, value);
3085 PBA_st->txa = 64 - PBA_st->rxa;
3086
3087 return VINF_SUCCESS;
3088}
3089
3090/**
3091 * Write handler for Receive Descriptor Tail register.
3092 *
3093 * @remarks Write into RDT forces switch to HC and signal to
3094 * e1kR3NetworkDown_WaitReceiveAvail().
3095 *
3096 * @returns VBox status code.
3097 *
3098 * @param pThis The device state structure.
3099 * @param offset Register offset in memory-mapped frame.
3100 * @param index Register index in register array.
3101 * @param value The value to store.
3103 * @thread EMT
3104 */
3105static int e1kRegWriteRDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3106{
3107#ifndef IN_RING3
3108 /* XXX */
3109// return VINF_IOM_R3_MMIO_WRITE;
3110#endif
3111 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3112 if (RT_LIKELY(rc == VINF_SUCCESS))
3113 {
3114 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3115 /*
3116 * Some drivers advance RDT too far, so that it equals RDH. This
3117 * somehow manages to work with real hardware but not with this
3118 * emulated device. We can work with these drivers if we just
3119 * write 1 less when we see a driver writing RDT equal to RDH,
3120 * see @bugref{7346}.
3121 */
3122 if (value == RDH)
3123 {
3124 if (RDH == 0)
3125 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3126 else
3127 value = RDH - 1;
3128 }
3129 rc = e1kRegWriteDefault(pThis, offset, index, value);
3130#ifdef E1K_WITH_RXD_CACHE
 3131 /*
 3132 * We need to fetch descriptors now as RDT may wrap all the way around
 3133 * before we attempt to store a received packet. For example,
 3134 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
 3135 * size being only 8 descriptors! Note that we fetch descriptors
 3136 * only when the cache is empty to reduce the number of memory reads
 3137 * in case of frequent RDT writes. Don't fetch anything when the
 3138 * receiver is disabled either, as RDH, RDT and RDLEN can be in some
 3139 * messed up state.
 3140 * Note that even though the cache may seem empty, meaning that there
 3141 * are no more available descriptors in it, it may still be in use by
 3142 * the RX thread, which has not yet written the last descriptor back
 3143 * but has temporarily released the RX lock in order to write the
 3144 * packet body to the descriptor's buffer. At this point we are still
 3145 * going to prefetch, but it won't actually fetch anything if there are
 3146 * no unused slots in our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE).
 3147 * We must not reset the cache here even if it appears empty. It will
 3148 * be reset at a later point in e1kRxDGet().
 3149 */
3150 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3151 e1kRxDPrefetch(pThis);
3152#endif /* E1K_WITH_RXD_CACHE */
3153 e1kCsRxLeave(pThis);
3154 if (RT_SUCCESS(rc))
3155 {
3156/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
3157 * without requiring any context switches. We should also check the
3158 * wait condition before bothering to queue the item as we're currently
3159 * queuing thousands of items per second here in a normal transmit
3160 * scenario. Expect performance changes when fixing this! */
3161#ifdef IN_RING3
3162 /* Signal that we have more receive descriptors available. */
3163 e1kWakeupReceive(pThis->CTX_SUFF(pDevIns));
3164#else
3165 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pCanRxQueue));
3166 if (pItem)
3167 PDMQueueInsert(pThis->CTX_SUFF(pCanRxQueue), pItem);
3168#endif
3169 }
3170 }
3171 return rc;
3172}
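/*
 * Example of the RDT==RDH workaround above: with a ring of 8 descriptors
 * (RDLEN == 8 * sizeof(E1KRXDESC)), a guest writing RDT=3 while RDH=3 gets
 * RDT adjusted to 2, and writing RDT=0 while RDH=0 gets RDT adjusted to 7,
 * keeping the tail one descriptor behind the head.
 */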
3173
3174/**
3175 * Write handler for Receive Delay Timer register.
3176 *
3177 * @param pThis The device state structure.
3178 * @param offset Register offset in memory-mapped frame.
3179 * @param index Register index in register array.
3180 * @param value The value to store.
3182 * @thread EMT
3183 */
3184static int e1kRegWriteRDTR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3185{
3186 e1kRegWriteDefault(pThis, offset, index, value);
3187 if (value & RDTR_FPD)
3188 {
3189 /* Flush requested, cancel both timers and raise interrupt */
3190#ifdef E1K_USE_RX_TIMERS
3191 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3192 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3193#endif
3194 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3195 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3196 }
3197
3198 return VINF_SUCCESS;
3199}
3200
3201DECLINLINE(uint32_t) e1kGetTxLen(PE1KSTATE pThis)
3202{
3203 /**
3204 * Make sure TDT won't change during computation. EMT may modify TDT at
3205 * any moment.
3206 */
3207 uint32_t tdt = TDT;
3208 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3209}
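/*
 * A worked example of the wrap-around arithmetic above: with a ring of
 * 8 descriptors (TDLEN == 8 * sizeof(E1KTXDESC)), TDH=2 and TDT=6 yield
 * 0 + 6 - 2 = 4 descriptors pending, while TDH=6 and TDT=2 (the tail has
 * wrapped) yield 8 + 2 - 6 = 4 as well.
 */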
3210
3211#ifdef IN_RING3
3212
3213# ifdef E1K_TX_DELAY
3214/**
3215 * Transmit Delay Timer handler.
3216 *
3217 * @remarks We only get here when the timer expires.
3218 *
3219 * @param pDevIns Pointer to device instance structure.
3220 * @param pTimer Pointer to the timer.
 3221 * @param pvUser Pointer to the device state structure.
3222 * @thread EMT
3223 */
3224static DECLCALLBACK(void) e1kTxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3225{
3226 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3227 Assert(PDMCritSectIsOwner(&pThis->csTx));
3228
3229 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3230# ifdef E1K_INT_STATS
3231 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3232 if (u64Elapsed > pThis->uStatMaxTxDelay)
3233 pThis->uStatMaxTxDelay = u64Elapsed;
3234# endif
3235 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
3236 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3237}
3238# endif /* E1K_TX_DELAY */
3239
3240# ifdef E1K_USE_TX_TIMERS
3241
3242/**
3243 * Transmit Interrupt Delay Timer handler.
3244 *
3245 * @remarks We only get here when the timer expires.
3246 *
3247 * @param pDevIns Pointer to device instance structure.
3248 * @param pTimer Pointer to the timer.
 3249 * @param pvUser Pointer to the device state structure.
3250 * @thread EMT
3251 */
3252static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3253{
3254 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3255
3256 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3257 /* Cancel absolute delay timer as we have already got attention */
3258# ifndef E1K_NO_TAD
3259 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
3260# endif
3261 e1kRaiseInterrupt(pThis, ICR_TXDW);
3262}
3263
3264/**
3265 * Transmit Absolute Delay Timer handler.
3266 *
3267 * @remarks We only get here when the timer expires.
3268 *
3269 * @param pDevIns Pointer to device instance structure.
3270 * @param pTimer Pointer to the timer.
 3271 * @param pvUser Pointer to the device state structure.
3272 * @thread EMT
3273 */
3274static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3275{
3276 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3277
3278 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3279 /* Cancel interrupt delay timer as we have already got attention */
3280 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
3281 e1kRaiseInterrupt(pThis, ICR_TXDW);
3282}
3283
3284# endif /* E1K_USE_TX_TIMERS */
3285# ifdef E1K_USE_RX_TIMERS
3286
3287/**
3288 * Receive Interrupt Delay Timer handler.
3289 *
3290 * @remarks We only get here when the timer expires.
3291 *
3292 * @param pDevIns Pointer to device instance structure.
3293 * @param pTimer Pointer to the timer.
 3294 * @param pvUser Pointer to the device state structure.
3295 * @thread EMT
3296 */
3297static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3298{
3299 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3300
3301 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3302 /* Cancel absolute delay timer as we have already got attention */
3303 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3304 e1kRaiseInterrupt(pThis, ICR_RXT0);
3305}
3306
3307/**
3308 * Receive Absolute Delay Timer handler.
3309 *
3310 * @remarks We only get here when the timer expires.
3311 *
3312 * @param pDevIns Pointer to device instance structure.
3313 * @param pTimer Pointer to the timer.
 3314 * @param pvUser Pointer to the device state structure.
3315 * @thread EMT
3316 */
3317static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3318{
3319 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3320
3321 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3322 /* Cancel interrupt delay timer as we have already got attention */
3323 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3324 e1kRaiseInterrupt(pThis, ICR_RXT0);
3325}
3326
3327# endif /* E1K_USE_RX_TIMERS */
3328
3329/**
3330 * Late Interrupt Timer handler.
3331 *
3332 * @param pDevIns Pointer to device instance structure.
3333 * @param pTimer Pointer to the timer.
 3334 * @param pvUser Pointer to the device state structure.
3335 * @thread EMT
3336 */
3337static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3338{
3339 RT_NOREF(pDevIns, pTimer);
3340 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3341
3342 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3343 STAM_COUNTER_INC(&pThis->StatLateInts);
3344 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3345# if 0
3346 if (pThis->iStatIntLost > -100)
3347 pThis->iStatIntLost--;
3348# endif
3349 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, 0);
3350 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3351}
3352
3353/**
3354 * Link Up Timer handler.
3355 *
3356 * @param pDevIns Pointer to device instance structure.
3357 * @param pTimer Pointer to the timer.
 3358 * @param pvUser Pointer to the device state structure.
3359 * @thread EMT
3360 */
3361static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3362{
3363 RT_NOREF(pDevIns, pTimer);
3364 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3365
3366 /*
 3367 * This can happen if we set the link status to down while the link-up timer was
 3368 * already armed (shortly after e1kLoadDone()), or when the cable is connected
 3369 * and then disconnected again very quickly.
3370 */
3371 if (!pThis->fCableConnected)
3372 return;
3373
3374 e1kR3LinkUp(pThis);
3375}
3376
3377#endif /* IN_RING3 */
3378
3379/**
3380 * Sets up the GSO context according to the TSE new context descriptor.
3381 *
3382 * @param pGso The GSO context to setup.
3383 * @param pCtx The context descriptor.
3384 */
3385DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3386{
3387 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3388
3389 /*
3390 * See if the context descriptor describes something that could be TCP or
3391 * UDP over IPv[46].
3392 */
3393 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3394 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3395 {
3396 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3397 return;
3398 }
3399 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3400 {
3401 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3402 return;
3403 }
3404 if (RT_UNLIKELY( pCtx->dw2.fTCP
3405 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3406 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3407 {
3408 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3409 return;
3410 }
3411
 3412 /* The TCP/UDP checksum range must end at the end of the packet (CSE=0) or at least past the headers. */
3413 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3414 {
3415 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3416 return;
3417 }
3418
3419 /* IPv4 checksum offset. */
3420 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3421 {
3422 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3423 return;
3424 }
3425
3426 /* TCP/UDP checksum offsets. */
3427 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3428 != ( pCtx->dw2.fTCP
3429 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3430 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3431 {
3432 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS, pCtx->dw2.fTCP));
3433 return;
3434 }
3435
3436 /*
3437 * Because of internal networking using a 16-bit size field for GSO context
3438 * plus frame, we have to make sure we don't exceed this.
3439 */
3440 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3441 {
3442 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3443 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3444 return;
3445 }
3446
3447 /*
3448 * We're good for now - we'll do more checks when seeing the data.
3449 * So, figure the type of offloading and setup the context.
3450 */
3451 if (pCtx->dw2.fIP)
3452 {
3453 if (pCtx->dw2.fTCP)
3454 {
3455 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3456 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3457 }
3458 else
3459 {
3460 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3461 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3462 }
3463 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3464 * this yet it seems)... */
3465 }
3466 else
3467 {
3468 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3469 if (pCtx->dw2.fTCP)
3470 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3471 else
3472 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3473 }
3474 pGso->offHdr1 = pCtx->ip.u8CSS;
3475 pGso->offHdr2 = pCtx->tu.u8CSS;
3476 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3477 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3478 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3479 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3480 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3481}
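/*
 * For reference, a context descriptor for a typical TCP/IPv4 TSE frame
 * (minimal headers, no VLAN, no IP/TCP options) passes the checks above with
 * IPCSS=14, IPCSO=24, TUCSS=34, TUCSO=50, HDRLEN=54 and MSS=1460: the
 * Ethernet header occupies bytes 0..13, the IPv4 header bytes 14..33 with
 * its checksum at offset 24, and the TCP header bytes 34..53 with its
 * checksum at offset 50.
 */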
3482
3483/**
3484 * Checks if we can use GSO processing for the current TSE frame.
3485 *
3486 * @param pThis The device state structure.
3487 * @param pGso The GSO context.
3488 * @param pData The first data descriptor of the frame.
3489 * @param pCtx The TSO context descriptor.
3490 */
3491DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3492{
3493 if (!pData->cmd.fTSE)
3494 {
3495 E1kLog2(("e1kCanDoGso: !TSE\n"));
3496 return false;
3497 }
3498 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3499 {
3500 E1kLog(("e1kCanDoGso: VLE\n"));
3501 return false;
3502 }
3503 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3504 {
3505 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3506 return false;
3507 }
3508
3509 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3510 {
3511 case PDMNETWORKGSOTYPE_IPV4_TCP:
3512 case PDMNETWORKGSOTYPE_IPV4_UDP:
3513 if (!pData->dw3.fIXSM)
3514 {
3515 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3516 return false;
3517 }
3518 if (!pData->dw3.fTXSM)
3519 {
3520 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3521 return false;
3522 }
3523 /** @todo what more check should we perform here? Ethernet frame type? */
3524 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3525 return true;
3526
3527 case PDMNETWORKGSOTYPE_IPV6_TCP:
3528 case PDMNETWORKGSOTYPE_IPV6_UDP:
3529 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3530 {
3531 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3532 return false;
3533 }
3534 if (!pData->dw3.fTXSM)
3535 {
3536 E1kLog(("e1kCanDoGso: TXSM (IPv6)\n"));
3537 return false;
3538 }
3539 /** @todo what more check should we perform here? Ethernet frame type? */
3540 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3541 return true;
3542
3543 default:
3544 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3545 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3546 return false;
3547 }
3548}
3549
3550/**
3551 * Frees the current xmit buffer.
3552 *
3553 * @param pThis The device state structure.
3554 */
3555static void e1kXmitFreeBuf(PE1KSTATE pThis)
3556{
3557 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3558 if (pSg)
3559 {
3560 pThis->CTX_SUFF(pTxSg) = NULL;
3561
3562 if (pSg->pvAllocator != pThis)
3563 {
3564 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3565 if (pDrv)
3566 pDrv->pfnFreeBuf(pDrv, pSg);
3567 }
3568 else
3569 {
3570 /* loopback */
3571 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3572 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3573 pSg->fFlags = 0;
3574 pSg->pvAllocator = NULL;
3575 }
3576 }
3577}
3578
3579#ifndef E1K_WITH_TXD_CACHE
3580/**
3581 * Allocates an xmit buffer.
3582 *
3583 * @returns See PDMINETWORKUP::pfnAllocBuf.
3584 * @param pThis The device state structure.
3585 * @param cbMin The minimum frame size.
3586 * @param fExactSize Whether cbMin is exact or if we have to max it
3587 * out to the max MTU size.
3588 * @param fGso Whether this is a GSO frame or not.
3589 */
3590DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, size_t cbMin, bool fExactSize, bool fGso)
3591{
3592 /* Adjust cbMin if necessary. */
3593 if (!fExactSize)
3594 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3595
3596 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3597 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3598 e1kXmitFreeBuf(pThis);
3599 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3600
3601 /*
3602 * Allocate the buffer.
3603 */
3604 PPDMSCATTERGATHER pSg;
3605 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3606 {
3607 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3608 if (RT_UNLIKELY(!pDrv))
3609 return VERR_NET_DOWN;
3610 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3611 if (RT_FAILURE(rc))
3612 {
3613 /* Suspend TX as we are out of buffers atm */
3614 STATUS |= STATUS_TXOFF;
3615 return rc;
3616 }
3617 }
3618 else
3619 {
3620 /* Create a loopback using the fallback buffer and preallocated SG. */
3621 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3622 pSg = &pThis->uTxFallback.Sg;
3623 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3624 pSg->cbUsed = 0;
3625 pSg->cbAvailable = 0;
3626 pSg->pvAllocator = pThis;
3627 pSg->pvUser = NULL; /* No GSO here. */
3628 pSg->cSegs = 1;
3629 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3630 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3631 }
3632
3633 pThis->CTX_SUFF(pTxSg) = pSg;
3634 return VINF_SUCCESS;
3635}
3636#else /* E1K_WITH_TXD_CACHE */
3637/**
3638 * Allocates an xmit buffer.
3639 *
3640 * @returns See PDMINETWORKUP::pfnAllocBuf.
3641 * @param pThis The device state structure.
3645 * @param fGso Whether this is a GSO frame or not.
3646 */
3647DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, bool fGso)
3648{
3649 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3650 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3651 e1kXmitFreeBuf(pThis);
3652 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3653
3654 /*
3655 * Allocate the buffer.
3656 */
3657 PPDMSCATTERGATHER pSg;
3658 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3659 {
3660 if (pThis->cbTxAlloc == 0)
3661 {
3662 /* Zero packet, no need for the buffer */
3663 return VINF_SUCCESS;
3664 }
3665
3666 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3667 if (RT_UNLIKELY(!pDrv))
3668 return VERR_NET_DOWN;
3669 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3670 if (RT_FAILURE(rc))
3671 {
3672 /* Suspend TX as we are out of buffers atm */
3673 STATUS |= STATUS_TXOFF;
3674 return rc;
3675 }
3676 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3677 pThis->szPrf, pThis->cbTxAlloc,
3678 pThis->fVTag ? "VLAN " : "",
3679 pThis->fGSO ? "GSO " : ""));
3680 pThis->cbTxAlloc = 0;
3681 }
3682 else
3683 {
3684 /* Create a loopback using the fallback buffer and preallocated SG. */
3685 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3686 pSg = &pThis->uTxFallback.Sg;
3687 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3688 pSg->cbUsed = 0;
3689 pSg->cbAvailable = 0;
3690 pSg->pvAllocator = pThis;
3691 pSg->pvUser = NULL; /* No GSO here. */
3692 pSg->cSegs = 1;
3693 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3694 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3695 }
3696
3697 pThis->CTX_SUFF(pTxSg) = pSg;
3698 return VINF_SUCCESS;
3699}
3700#endif /* E1K_WITH_TXD_CACHE */
3701
3702/**
3703 * Checks if it's a GSO buffer or not.
3704 *
3705 * @returns true / false.
3706 * @param pTxSg The scatter / gather buffer.
3707 */
3708DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3709{
3710#if 0
3711 if (!pTxSg)
3712 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3713 if (pTxSg && !pTxSg->pvUser)
3714 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3715#endif
3716 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3717}
3718
3719#ifndef E1K_WITH_TXD_CACHE
3720/**
3721 * Load transmit descriptor from guest memory.
3722 *
3723 * @param pThis The device state structure.
3724 * @param pDesc Pointer to descriptor union.
3725 * @param addr Physical address in guest context.
3726 * @thread E1000_TX
3727 */
3728DECLINLINE(void) e1kLoadDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3729{
3730 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3731}
3732#else /* E1K_WITH_TXD_CACHE */
3733/**
3734 * Load transmit descriptors from guest memory.
3735 *
3736 * We need two physical reads in case the tail wrapped around the end of TX
3737 * descriptor ring.
3738 *
3739 * @returns the actual number of descriptors fetched.
3740 * @param pThis The device state structure.
3743 * @thread E1000_TX
3744 */
3745DECLINLINE(unsigned) e1kTxDLoadMore(PE1KSTATE pThis)
3746{
3747 Assert(pThis->iTxDCurrent == 0);
3748 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
3749 unsigned nDescsAvailable = e1kGetTxLen(pThis) - pThis->nTxDFetched;
3750 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
3751 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3752 unsigned nFirstNotLoaded = (TDH + pThis->nTxDFetched) % nDescsTotal;
3753 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
3754 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
3755 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3756 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
3757 nFirstNotLoaded, nDescsInSingleRead));
3758 if (nDescsToFetch == 0)
3759 return 0;
3760 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
3761 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3762 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3763 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3764 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3765 pThis->szPrf, nDescsInSingleRead,
3766 TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
3767 nFirstNotLoaded, TDLEN, TDH, TDT));
3768 if (nDescsToFetch > nDescsInSingleRead)
3769 {
3770 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3771 ((uint64_t)TDBAH << 32) + TDBAL,
3772 pFirstEmptyDesc + nDescsInSingleRead,
3773 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3774 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3775 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
3776 TDBAH, TDBAL));
3777 }
3778 pThis->nTxDFetched += nDescsToFetch;
3779 return nDescsToFetch;
3780}
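/*
 * Example of the two-read split above: with a ring of 8 descriptors, TDH=5,
 * nTxDFetched=1 and 4 descriptors to fetch, nFirstNotLoaded is (5 + 1) % 8 = 6,
 * so the first PDMDevHlpPhysRead() covers descriptors 6..7 and the second one
 * wraps around to the ring base and covers descriptors 0..1.
 */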
3781
3782/**
3783 * Load transmit descriptors from guest memory only if there are no loaded
3784 * descriptors.
3785 *
3786 * @returns true if there are descriptors in cache.
3787 * @param pThis The device state structure.
3790 * @thread E1000_TX
3791 */
3792DECLINLINE(bool) e1kTxDLazyLoad(PE1KSTATE pThis)
3793{
3794 if (pThis->nTxDFetched == 0)
3795 return e1kTxDLoadMore(pThis) != 0;
3796 return true;
3797}
3798#endif /* E1K_WITH_TXD_CACHE */
3799
3800/**
3801 * Write back transmit descriptor to guest memory.
3802 *
3803 * @param pThis The device state structure.
3804 * @param pDesc Pointer to descriptor union.
3805 * @param addr Physical address in guest context.
3806 * @thread E1000_TX
3807 */
3808DECLINLINE(void) e1kWriteBackDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3809{
 3810 /* Strictly speaking only the second (write-back) half of the descriptor changes, but we write back the whole descriptor for simplicity. */
3811 e1kPrintTDesc(pThis, pDesc, "^^^");
3812 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3813}
3814
3815/**
3816 * Transmit complete frame.
3817 *
3818 * @remarks We skip the FCS since we're not responsible for sending anything to
3819 * a real ethernet wire.
3820 *
3821 * @param pThis The device state structure.
3822 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3823 * @thread E1000_TX
3824 */
3825static void e1kTransmitFrame(PE1KSTATE pThis, bool fOnWorkerThread)
3826{
3827 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3828 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
3829 Assert(!pSg || pSg->cSegs == 1);
3830
3831 if (cbFrame > 70) /* unqualified guess */
3832 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
3833
3834#ifdef E1K_INT_STATS
3835 if (cbFrame <= 1514)
3836 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
3837 else if (cbFrame <= 2962)
3838 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
3839 else if (cbFrame <= 4410)
3840 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
3841 else if (cbFrame <= 5858)
3842 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
3843 else if (cbFrame <= 7306)
3844 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
3845 else if (cbFrame <= 8754)
3846 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
3847 else if (cbFrame <= 16384)
3848 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
3849 else if (cbFrame <= 32768)
3850 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
3851 else
3852 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
3853#endif /* E1K_INT_STATS */
3854
3855 /* Add VLAN tag */
3856 if (cbFrame > 12 && pThis->fVTag)
3857 {
3858 E1kLog3(("%s Inserting VLAN tag %08x\n",
3859 pThis->szPrf, RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
3860 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
3861 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
3862 pSg->cbUsed += 4;
3863 cbFrame += 4;
3864 Assert(pSg->cbUsed == cbFrame);
3865 Assert(pSg->cbUsed <= pSg->cbAvailable);
3866 }
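    /*
     * The insertion above follows the 802.1Q layout: the 4-byte tag (TPID taken
     * from VET followed by the TCI) lands right after the source MAC address at
     * byte offset 12, and the original EtherType plus payload are shifted up by
     * four bytes, growing the frame from cbFrame to cbFrame + 4.
     */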
3867/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
3868 "%.*Rhxd\n"
3869 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
3870 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
3871
3872 /* Update the stats */
3873 E1K_INC_CNT32(TPT);
3874 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
3875 E1K_INC_CNT32(GPTC);
3876 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
3877 E1K_INC_CNT32(BPTC);
3878 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
3879 E1K_INC_CNT32(MPTC);
3880 /* Update octet transmit counter */
3881 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
3882 if (pThis->CTX_SUFF(pDrv))
3883 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
3884 if (cbFrame == 64)
3885 E1K_INC_CNT32(PTC64);
3886 else if (cbFrame < 128)
3887 E1K_INC_CNT32(PTC127);
3888 else if (cbFrame < 256)
3889 E1K_INC_CNT32(PTC255);
3890 else if (cbFrame < 512)
3891 E1K_INC_CNT32(PTC511);
3892 else if (cbFrame < 1024)
3893 E1K_INC_CNT32(PTC1023);
3894 else
3895 E1K_INC_CNT32(PTC1522);
3896
3897 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
3898
3899 /*
3900 * Dump and send the packet.
3901 */
3902 int rc = VERR_NET_DOWN;
3903 if (pSg && pSg->pvAllocator != pThis)
3904 {
3905 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
3906
3907 pThis->CTX_SUFF(pTxSg) = NULL;
3908 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3909 if (pDrv)
3910 {
3911 /* Release critical section to avoid deadlock in CanReceive */
3912 //e1kCsLeave(pThis);
3913 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3914 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
3915 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3916 //e1kCsEnter(pThis, RT_SRC_POS);
3917 }
3918 }
3919 else if (pSg)
3920 {
3921 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
3922 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
3923
3924 /** @todo do we actually need to check that we're in loopback mode here? */
3925 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
3926 {
3927 E1KRXDST status;
3928 RT_ZERO(status);
3929 status.fPIF = true;
3930 e1kHandleRxPacket(pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
3931 rc = VINF_SUCCESS;
3932 }
3933 e1kXmitFreeBuf(pThis);
3934 }
3935 else
3936 rc = VERR_NET_DOWN;
3937 if (RT_FAILURE(rc))
3938 {
3939 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
3940 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
3941 }
3942
3943 pThis->led.Actual.s.fWriting = 0;
3944}
3945
3946/**
3947 * Compute and write internet checksum (e1kCSum16) at the specified offset.
3948 *
3949 * @param pThis The device state structure.
3950 * @param pPkt Pointer to the packet.
3951 * @param u16PktLen Total length of the packet.
3952 * @param cso Offset in packet to write checksum at.
3953 * @param css Offset in packet to start computing
3954 * checksum from.
3955 * @param cse Offset in packet to stop computing
3956 * checksum at.
3957 * @thread E1000_TX
3958 */
3959static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
3960{
3961 RT_NOREF1(pThis);
3962
3963 if (css >= u16PktLen)
3964 {
3965 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
 3966 pThis->szPrf, css, u16PktLen));
3967 return;
3968 }
3969
3970 if (cso >= u16PktLen - 1)
3971 {
3972 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
3973 pThis->szPrf, cso, u16PktLen));
3974 return;
3975 }
3976
3977 if (cse == 0)
3978 cse = u16PktLen - 1;
3979 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
3980 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
3981 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
3982 *(uint16_t*)(pPkt + cso) = u16ChkSum;
3983}
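/*
 * Example of the cso/css/cse semantics above: for the IPv4 header checksum of
 * a frame with a 14-byte Ethernet header and a 20-byte IP header the context
 * supplies css=14, cso=24 and cse=33, so the sum covers bytes 14..33 and the
 * result is stored at offset 24. A cse of 0 extends the summed range to the
 * end of the packet, which is what the TCP/UDP checksum normally uses.
 */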
3984
3985/**
3986 * Add a part of descriptor's buffer to transmit frame.
3987 *
3988 * @remarks data.u64BufAddr is used unconditionally for both data
3989 * and legacy descriptors since it is identical to
3990 * legacy.u64BufAddr.
3991 *
3992 * @param pThis The device state structure.
 3993 * @param PhysAddr The physical address of the descriptor's data buffer.
3994 * @param u16Len Length of buffer to the end of segment.
3995 * @param fSend Force packet sending.
3996 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3997 * @thread E1000_TX
3998 */
3999#ifndef E1K_WITH_TXD_CACHE
4000static void e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4001{
4002 /* TCP header being transmitted */
4003 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4004 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4005 /* IP header being transmitted */
4006 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4007 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4008
4009 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4010 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4011 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4012
4013 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4014 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4015 E1kLog3(("%s Dump of the segment:\n"
4016 "%.*Rhxd\n"
4017 "%s --- End of dump ---\n",
4018 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4019 pThis->u16TxPktLen += u16Len;
4020 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4021 pThis->szPrf, pThis->u16TxPktLen));
4022 if (pThis->u16HdrRemain > 0)
4023 {
4024 /* The header was not complete, check if it is now */
4025 if (u16Len >= pThis->u16HdrRemain)
4026 {
4027 /* The rest is payload */
4028 u16Len -= pThis->u16HdrRemain;
4029 pThis->u16HdrRemain = 0;
4030 /* Save partial checksum and flags */
4031 pThis->u32SavedCsum = pTcpHdr->chksum;
4032 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4033 /* Clear FIN and PSH flags now and set them only in the last segment */
4034 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4035 }
4036 else
4037 {
4038 /* Still not */
4039 pThis->u16HdrRemain -= u16Len;
4040 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4041 pThis->szPrf, pThis->u16HdrRemain));
4042 return;
4043 }
4044 }
4045
4046 pThis->u32PayRemain -= u16Len;
4047
4048 if (fSend)
4049 {
4050 /* Leave ethernet header intact */
4051 /* IP Total Length = payload + headers - ethernet header */
4052 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4053 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4054 pThis->szPrf, ntohs(pIpHdr->total_len)));
4055 /* Update IP Checksum */
4056 pIpHdr->chksum = 0;
4057 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4058 pThis->contextTSE.ip.u8CSO,
4059 pThis->contextTSE.ip.u8CSS,
4060 pThis->contextTSE.ip.u16CSE);
4061
4062 /* Update TCP flags */
4063 /* Restore original FIN and PSH flags for the last segment */
4064 if (pThis->u32PayRemain == 0)
4065 {
4066 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4067 E1K_INC_CNT32(TSCTC);
4068 }
4069 /* Add TCP length to partial pseudo header sum */
4070 uint32_t csum = pThis->u32SavedCsum
4071 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4072 while (csum >> 16)
4073 csum = (csum >> 16) + (csum & 0xFFFF);
4074 pTcpHdr->chksum = csum;
4075 /* Compute final checksum */
4076 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4077 pThis->contextTSE.tu.u8CSO,
4078 pThis->contextTSE.tu.u8CSS,
4079 pThis->contextTSE.tu.u16CSE);
4080
4081 /*
 4082 * Transmit it. If we've used the SG already, allocate a new one before
 4083 * we copy the data.
4084 */
4085 if (!pThis->CTX_SUFF(pTxSg))
4086 e1kXmitAllocBuf(pThis, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4087 if (pThis->CTX_SUFF(pTxSg))
4088 {
4089 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4090 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4091 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4092 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4093 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4094 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4095 }
4096 e1kTransmitFrame(pThis, fOnWorkerThread);
4097
4098 /* Update Sequence Number */
4099 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4100 - pThis->contextTSE.dw3.u8HDRLEN);
4101 /* Increment IP identification */
4102 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4103 }
4104}
4105#else /* E1K_WITH_TXD_CACHE */
4106static int e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4107{
4108 int rc = VINF_SUCCESS;
4109 /* TCP header being transmitted */
4110 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4111 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4112 /* IP header being transmitted */
4113 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4114 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4115
4116 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4117 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4118 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4119
4120 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4121 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4122 E1kLog3(("%s Dump of the segment:\n"
4123 "%.*Rhxd\n"
4124 "%s --- End of dump ---\n",
4125 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4126 pThis->u16TxPktLen += u16Len;
4127 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4128 pThis->szPrf, pThis->u16TxPktLen));
4129 if (pThis->u16HdrRemain > 0)
4130 {
4131 /* The header was not complete, check if it is now */
4132 if (u16Len >= pThis->u16HdrRemain)
4133 {
4134 /* The rest is payload */
4135 u16Len -= pThis->u16HdrRemain;
4136 pThis->u16HdrRemain = 0;
4137 /* Save partial checksum and flags */
4138 pThis->u32SavedCsum = pTcpHdr->chksum;
4139 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4140 /* Clear FIN and PSH flags now and set them only in the last segment */
4141 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4142 }
4143 else
4144 {
4145 /* Still not */
4146 pThis->u16HdrRemain -= u16Len;
4147 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4148 pThis->szPrf, pThis->u16HdrRemain));
4149 return rc;
4150 }
4151 }
4152
4153 pThis->u32PayRemain -= u16Len;
4154
4155 if (fSend)
4156 {
4157 /* Leave ethernet header intact */
4158 /* IP Total Length = payload + headers - ethernet header */
4159 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4160 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4161 pThis->szPrf, ntohs(pIpHdr->total_len)));
4162 /* Update IP Checksum */
4163 pIpHdr->chksum = 0;
4164 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4165 pThis->contextTSE.ip.u8CSO,
4166 pThis->contextTSE.ip.u8CSS,
4167 pThis->contextTSE.ip.u16CSE);
4168
4169 /* Update TCP flags */
4170 /* Restore original FIN and PSH flags for the last segment */
4171 if (pThis->u32PayRemain == 0)
4172 {
4173 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4174 E1K_INC_CNT32(TSCTC);
4175 }
4176 /* Add TCP length to partial pseudo header sum */
4177 uint32_t csum = pThis->u32SavedCsum
4178 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4179 while (csum >> 16)
4180 csum = (csum >> 16) + (csum & 0xFFFF);
4181 pTcpHdr->chksum = csum;
4182 /* Compute final checksum */
4183 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4184 pThis->contextTSE.tu.u8CSO,
4185 pThis->contextTSE.tu.u8CSS,
4186 pThis->contextTSE.tu.u16CSE);
4187
4188 /*
4189 * Transmit it.
4190 */
4191 if (pThis->CTX_SUFF(pTxSg))
4192 {
4193 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4194 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4195 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4196 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4197 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4198 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4199 }
4200 e1kTransmitFrame(pThis, fOnWorkerThread);
4201
4202 /* Update Sequence Number */
4203 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4204 - pThis->contextTSE.dw3.u8HDRLEN);
4205 /* Increment IP identification */
4206 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4207
4208 /* Allocate new buffer for the next segment. */
4209 if (pThis->u32PayRemain)
4210 {
4211 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4212 pThis->contextTSE.dw3.u16MSS)
4213 + pThis->contextTSE.dw3.u8HDRLEN
4214 + (pThis->fVTag ? 4 : 0);
4215 rc = e1kXmitAllocBuf(pThis, false /* fGSO */);
4216 }
4217 }
4218
4219 return rc;
4220}
4221#endif /* E1K_WITH_TXD_CACHE */
4222
4223#ifndef E1K_WITH_TXD_CACHE
4224/**
4225 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4226 * frame.
4227 *
 4228 * We construct the frame in the fallback buffer first and then copy it to the SG
4229 * buffer before passing it down to the network driver code.
4230 *
4231 * @returns true if the frame should be transmitted, false if not.
4232 *
4233 * @param pThis The device state structure.
4234 * @param pDesc Pointer to the descriptor to transmit.
4235 * @param cbFragment Length of descriptor's buffer.
4236 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4237 * @thread E1000_TX
4238 */
4239static bool e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4240{
4241 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4242 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4243 Assert(pDesc->data.cmd.fTSE);
4244 Assert(!e1kXmitIsGsoBuf(pTxSg));
4245
4246 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4247 Assert(u16MaxPktLen != 0);
4248 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4249
4250 /*
4251 * Carve out segments.
4252 */
4253 do
4254 {
4255 /* Calculate how many bytes we have left in this TCP segment */
4256 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4257 if (cb > cbFragment)
4258 {
4259 /* This descriptor fits completely into current segment */
4260 cb = cbFragment;
4261 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4262 }
4263 else
4264 {
4265 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4266 /*
4267 * Rewind the packet tail pointer to the beginning of payload,
4268 * so we continue writing right beyond the header.
4269 */
4270 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4271 }
4272
4273 pDesc->data.u64BufAddr += cb;
4274 cbFragment -= cb;
4275 } while (cbFragment > 0);
4276
4277 if (pDesc->data.cmd.fEOP)
4278 {
4279 /* End of packet, next segment will contain header. */
4280 if (pThis->u32PayRemain != 0)
4281 E1K_INC_CNT32(TSCTFC);
4282 pThis->u16TxPktLen = 0;
4283 e1kXmitFreeBuf(pThis);
4284 }
4285
4286 return false;
4287}
4288#else /* E1K_WITH_TXD_CACHE */
4289/**
4290 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4291 * frame.
4292 *
 4293 * We construct the frame in the fallback buffer first and then copy it to the SG
4294 * buffer before passing it down to the network driver code.
4295 *
 4296 * @returns VBox status code.
4297 *
4298 * @param pThis The device state structure.
4299 * @param pDesc Pointer to the descriptor to transmit.
4301 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4302 * @thread E1000_TX
4303 */
4304static int e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4305{
4306#ifdef VBOX_STRICT
4307 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4308 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4309 Assert(pDesc->data.cmd.fTSE);
4310 Assert(!e1kXmitIsGsoBuf(pTxSg));
4311#endif
4312
4313 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4314 Assert(u16MaxPktLen != 0);
4315 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4316
4317 /*
4318 * Carve out segments.
4319 */
4320 int rc;
4321 do
4322 {
4323 /* Calculate how many bytes we have left in this TCP segment */
4324 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4325 if (cb > pDesc->data.cmd.u20DTALEN)
4326 {
4327 /* This descriptor fits completely into current segment */
4328 cb = pDesc->data.cmd.u20DTALEN;
4329 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4330 }
4331 else
4332 {
4333 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4334 /*
4335 * Rewind the packet tail pointer to the beginning of payload,
4336 * so we continue writing right beyond the header.
4337 */
4338 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4339 }
4340
4341 pDesc->data.u64BufAddr += cb;
4342 pDesc->data.cmd.u20DTALEN -= cb;
4343 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4344
4345 if (pDesc->data.cmd.fEOP)
4346 {
4347 /* End of packet, next segment will contain header. */
4348 if (pThis->u32PayRemain != 0)
4349 E1K_INC_CNT32(TSCTFC);
4350 pThis->u16TxPktLen = 0;
4351 e1kXmitFreeBuf(pThis);
4352 }
4353
 4354 return rc;
4355}
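/*
 * TSE accounting example for the fallback path above: with HDRLEN=54,
 * MSS=1460 and PAYLEN=4000 the frame is cut into segments of 54+1460,
 * 54+1460 and 54+1080 bytes; after each transmitted segment the packet
 * length is rewound to HDRLEN and u32PayRemain drops by the payload
 * carried, reaching 0 on the last segment (which also restores the
 * original FIN/PSH flags).
 */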
4356#endif /* E1K_WITH_TXD_CACHE */
4357
4358
4359/**
4360 * Add descriptor's buffer to transmit frame.
4361 *
4362 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4363 * TSE frames we cannot handle as GSO.
4364 *
4365 * @returns true on success, false on failure.
4366 *
4367 * @param pThis The device state structure.
4368 * @param PhysAddr The physical address of the descriptor buffer.
4369 * @param cbFragment Length of descriptor's buffer.
4370 * @thread E1000_TX
4371 */
4372static bool e1kAddToFrame(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
4373{
4374 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4375 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4376 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4377
4378 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4379 {
4380 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4381 return false;
4382 }
4383 if (RT_UNLIKELY( fGso && cbNewPkt > pTxSg->cbAvailable ))
4384 {
4385 E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4386 return false;
4387 }
4388
4389 if (RT_LIKELY(pTxSg))
4390 {
4391 Assert(pTxSg->cSegs == 1);
4392 Assert(pTxSg->cbUsed == pThis->u16TxPktLen);
4393
4394 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4395 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4396
4397 pTxSg->cbUsed = cbNewPkt;
4398 }
4399 pThis->u16TxPktLen = cbNewPkt;
4400
4401 return true;
4402}
4403
4404
4405/**
4406 * Write the descriptor back to guest memory and notify the guest.
4407 *
4408 * @param pThis The device state structure.
 4409 * @param pDesc Pointer to the descriptor that has been transmitted.
4410 * @param addr Physical address of the descriptor in guest memory.
4411 * @thread E1000_TX
4412 */
4413static void e1kDescReport(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4414{
4415 /*
4416 * We fake descriptor write-back bursting. Descriptors are written back as they are
4417 * processed.
4418 */
4419 /* Let's pretend we process descriptors. Write back with DD set. */
4420 /*
 4421 * Prior to r71586 we tried to accommodate the case when write-back bursts
 4422 * are enabled without actually implementing bursting, by writing back all
 4423 * descriptors, even the ones that do not have RS set. This caused kernel
 4424 * panics with Linux SMP kernels, as the e1000 driver tried to free the skb
 4425 * associated with a written-back descriptor even if it happened to be a
 4426 * context descriptor, and context descriptors have no skb associated with them.
 4427 * Starting from r71586 we write back only the descriptors with RS set,
 4428 * which is a little bit different from what the real hardware does in
 4429 * case there is a chain of data descriptors where some of them have RS set
 4430 * and others do not. That is a very uncommon scenario, imho.
 4431 * We need to check RPS as well since some legacy drivers use it instead of
 4432 * RS even with newer cards.
4433 */
4434 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4435 {
4436 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4437 e1kWriteBackDesc(pThis, pDesc, addr);
4438 if (pDesc->legacy.cmd.fEOP)
4439 {
4440#ifdef E1K_USE_TX_TIMERS
4441 if (pDesc->legacy.cmd.fIDE)
4442 {
4443 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4444 //if (pThis->fIntRaised)
4445 //{
4446 // /* Interrupt is already pending, no need for timers */
4447 // ICR |= ICR_TXDW;
4448 //}
4449 //else {
 4450 /* Arm the timer to fire in TIDV usec (discard .024) */
4451 e1kArmTimer(pThis, pThis->CTX_SUFF(pTIDTimer), TIDV);
4452# ifndef E1K_NO_TAD
4453 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4454 E1kLog2(("%s Checking if TAD timer is running\n",
4455 pThis->szPrf));
4456 if (TADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pTADTimer)))
4457 e1kArmTimer(pThis, pThis->CTX_SUFF(pTADTimer), TADV);
4458# endif /* E1K_NO_TAD */
4459 }
4460 else
4461 {
4462 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4463 pThis->szPrf));
4464# ifndef E1K_NO_TAD
4465 /* Cancel both timers if armed and fire immediately. */
4466 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
4467# endif
4468#endif /* E1K_USE_TX_TIMERS */
4469 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4470 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXDW);
4471#ifdef E1K_USE_TX_TIMERS
4472 }
4473#endif /* E1K_USE_TX_TIMERS */
4474 }
4475 }
4476 else
4477 {
4478 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4479 }
4480}
4481
4482#ifndef E1K_WITH_TXD_CACHE
4483
4484/**
4485 * Process Transmit Descriptor.
4486 *
4487 * E1000 supports three types of transmit descriptors:
 4488 * - legacy: data descriptors of the older, context-less format;
 4489 * - data: the same as legacy but providing new offloading capabilities;
 4490 * - context: sets up the context for the following data descriptors.
4491 *
4492 * @param pThis The device state structure.
4493 * @param pDesc Pointer to descriptor union.
4494 * @param addr Physical address of descriptor in guest memory.
4495 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4496 * @thread E1000_TX
4497 */
4498static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr, bool fOnWorkerThread)
4499{
4500 int rc = VINF_SUCCESS;
4501 uint32_t cbVTag = 0;
4502
4503 e1kPrintTDesc(pThis, pDesc, "vvv");
4504
4505#ifdef E1K_USE_TX_TIMERS
4506 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4507#endif /* E1K_USE_TX_TIMERS */
4508
4509 switch (e1kGetDescType(pDesc))
4510 {
4511 case E1K_DTYP_CONTEXT:
4512 if (pDesc->context.dw2.fTSE)
4513 {
4514 pThis->contextTSE = pDesc->context;
4515 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4516 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4517 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4518 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4519 }
4520 else
4521 {
4522 pThis->contextNormal = pDesc->context;
4523 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4524 }
4525 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4526 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4527 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4528 pDesc->context.ip.u8CSS,
4529 pDesc->context.ip.u8CSO,
4530 pDesc->context.ip.u16CSE,
4531 pDesc->context.tu.u8CSS,
4532 pDesc->context.tu.u8CSO,
4533 pDesc->context.tu.u16CSE));
4534 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4535 e1kDescReport(pThis, pDesc, addr);
4536 break;
4537
4538 case E1K_DTYP_DATA:
4539 {
4540 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4541 {
4542                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4543 /** @todo Same as legacy when !TSE. See below. */
4544 break;
4545 }
4546 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4547 &pThis->StatTxDescTSEData:
4548 &pThis->StatTxDescData);
4549 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4550 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4551
4552 /*
4553             * The last descriptor of a non-TSE packet must contain the VLE flag.
4554             * TSE packets have the VLE flag in the first descriptor. The latter
4555             * case is taken care of a bit later, when cbVTag gets assigned.
4556 *
4557 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4558 */
4559 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4560 {
4561 pThis->fVTag = pDesc->data.cmd.fVLE;
4562 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4563 }
4564 /*
4565 * First fragment: Allocate new buffer and save the IXSM and TXSM
4566 * packet options as these are only valid in the first fragment.
4567 */
4568 if (pThis->u16TxPktLen == 0)
4569 {
4570 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4571 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4572 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4573 pThis->fIPcsum ? " IP" : "",
4574 pThis->fTCPcsum ? " TCP/UDP" : ""));
4575 if (pDesc->data.cmd.fTSE)
4576 {
4577 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4578 pThis->fVTag = pDesc->data.cmd.fVLE;
4579 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4580 cbVTag = pThis->fVTag ? 4 : 0;
4581 }
4582 else if (pDesc->data.cmd.fEOP)
4583 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4584 else
4585 cbVTag = 4;
4586 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4587 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4588 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4589 true /*fExactSize*/, true /*fGso*/);
4590 else if (pDesc->data.cmd.fTSE)
4591 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4592 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4593 else
4594 rc = e1kXmitAllocBuf(pThis, pDesc->data.cmd.u20DTALEN + cbVTag,
4595 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4596
4597 /**
4598             * @todo Perhaps it is not that simple for GSO packets! We may
4599 * need to unwind some changes.
4600 */
4601 if (RT_FAILURE(rc))
4602 {
4603 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4604 break;
4605 }
4606            /** @todo Is there any way of indicating errors other than collisions? Like
4607 * VERR_NET_DOWN. */
4608 }
4609
4610 /*
4611 * Add the descriptor data to the frame. If the frame is complete,
4612 * transmit it and reset the u16TxPktLen field.
4613 */
4614 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4615 {
4616 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4617 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4618 if (pDesc->data.cmd.fEOP)
4619 {
4620 if ( fRc
4621 && pThis->CTX_SUFF(pTxSg)
4622 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4623 {
4624 e1kTransmitFrame(pThis, fOnWorkerThread);
4625 E1K_INC_CNT32(TSCTC);
4626 }
4627 else
4628 {
4629 if (fRc)
4630 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4631 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4632 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4633 e1kXmitFreeBuf(pThis);
4634 E1K_INC_CNT32(TSCTFC);
4635 }
4636 pThis->u16TxPktLen = 0;
4637 }
4638 }
4639 else if (!pDesc->data.cmd.fTSE)
4640 {
4641 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4642 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4643 if (pDesc->data.cmd.fEOP)
4644 {
4645 if (fRc && pThis->CTX_SUFF(pTxSg))
4646 {
4647 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4648 if (pThis->fIPcsum)
4649 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4650 pThis->contextNormal.ip.u8CSO,
4651 pThis->contextNormal.ip.u8CSS,
4652 pThis->contextNormal.ip.u16CSE);
4653 if (pThis->fTCPcsum)
4654 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4655 pThis->contextNormal.tu.u8CSO,
4656 pThis->contextNormal.tu.u8CSS,
4657 pThis->contextNormal.tu.u16CSE);
4658 e1kTransmitFrame(pThis, fOnWorkerThread);
4659 }
4660 else
4661 e1kXmitFreeBuf(pThis);
4662 pThis->u16TxPktLen = 0;
4663 }
4664 }
4665 else
4666 {
4667 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4668 e1kFallbackAddToFrame(pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4669 }
4670
4671 e1kDescReport(pThis, pDesc, addr);
4672 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4673 break;
4674 }
4675
4676 case E1K_DTYP_LEGACY:
4677 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4678 {
4679 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4680 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4681 break;
4682 }
4683 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4684 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4685
4686 /* First fragment: allocate new buffer. */
4687 if (pThis->u16TxPktLen == 0)
4688 {
4689 if (pDesc->legacy.cmd.fEOP)
4690 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4691 else
4692 cbVTag = 4;
4693 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4694 /** @todo reset status bits? */
4695 rc = e1kXmitAllocBuf(pThis, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4696 if (RT_FAILURE(rc))
4697 {
4698 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4699 break;
4700 }
4701
4702            /** @todo Is there any way of indicating errors other than collisions? Like
4703 * VERR_NET_DOWN. */
4704 }
4705
4706 /* Add fragment to frame. */
4707 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4708 {
4709 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4710
4711 /* Last fragment: Transmit and reset the packet storage counter. */
4712 if (pDesc->legacy.cmd.fEOP)
4713 {
4714 pThis->fVTag = pDesc->legacy.cmd.fVLE;
4715 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4716 /** @todo Offload processing goes here. */
4717 e1kTransmitFrame(pThis, fOnWorkerThread);
4718 pThis->u16TxPktLen = 0;
4719 }
4720 }
4721 /* Last fragment + failure: free the buffer and reset the storage counter. */
4722 else if (pDesc->legacy.cmd.fEOP)
4723 {
4724 e1kXmitFreeBuf(pThis);
4725 pThis->u16TxPktLen = 0;
4726 }
4727
4728 e1kDescReport(pThis, pDesc, addr);
4729 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4730 break;
4731
4732 default:
4733 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4734 pThis->szPrf, e1kGetDescType(pDesc)));
4735 break;
4736 }
4737
4738 return rc;
4739}
4740
4741#else /* E1K_WITH_TXD_CACHE */
4742
4743/**
4744 * Process Transmit Descriptor.
4745 *
4746 * E1000 supports three types of transmit descriptors:
4747 * - legacy: data descriptors in the older, context-less format.
4748 * - data: same as legacy, but with the new offloading capabilities.
4749 * - context: sets up the context for the following data descriptors.
4750 *
4751 * @param pThis The device state structure.
4752 * @param pDesc Pointer to descriptor union.
4753 * @param addr Physical address of descriptor in guest memory.
4754 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4756 * @thread E1000_TX
4757 */
4758static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr,
4759 bool fOnWorkerThread)
4760{
4761 int rc = VINF_SUCCESS;
4762
4763 e1kPrintTDesc(pThis, pDesc, "vvv");
4764
4765#ifdef E1K_USE_TX_TIMERS
4766 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4767#endif /* E1K_USE_TX_TIMERS */
4768
4769 switch (e1kGetDescType(pDesc))
4770 {
4771 case E1K_DTYP_CONTEXT:
4772            /* The caller has already updated the context. */
4773 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4774 e1kDescReport(pThis, pDesc, addr);
4775 break;
4776
4777 case E1K_DTYP_DATA:
4778 {
4779 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4780 &pThis->StatTxDescTSEData:
4781 &pThis->StatTxDescData);
4782 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4783 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4784 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4785 {
4786                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4787 }
4788 else
4789 {
4790 /*
4791 * Add the descriptor data to the frame. If the frame is complete,
4792 * transmit it and reset the u16TxPktLen field.
4793 */
4794 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4795 {
4796 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4797 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4798 if (pDesc->data.cmd.fEOP)
4799 {
4800 if ( fRc
4801 && pThis->CTX_SUFF(pTxSg)
4802 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4803 {
4804 e1kTransmitFrame(pThis, fOnWorkerThread);
4805 E1K_INC_CNT32(TSCTC);
4806 }
4807 else
4808 {
4809 if (fRc)
4810 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4811 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4812 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4813 e1kXmitFreeBuf(pThis);
4814 E1K_INC_CNT32(TSCTFC);
4815 }
4816 pThis->u16TxPktLen = 0;
4817 }
4818 }
4819 else if (!pDesc->data.cmd.fTSE)
4820 {
4821 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4822 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4823 if (pDesc->data.cmd.fEOP)
4824 {
4825 if (fRc && pThis->CTX_SUFF(pTxSg))
4826 {
4827 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4828 if (pThis->fIPcsum)
4829 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4830 pThis->contextNormal.ip.u8CSO,
4831 pThis->contextNormal.ip.u8CSS,
4832 pThis->contextNormal.ip.u16CSE);
4833 if (pThis->fTCPcsum)
4834 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4835 pThis->contextNormal.tu.u8CSO,
4836 pThis->contextNormal.tu.u8CSS,
4837 pThis->contextNormal.tu.u16CSE);
4838 e1kTransmitFrame(pThis, fOnWorkerThread);
4839 }
4840 else
4841 e1kXmitFreeBuf(pThis);
4842 pThis->u16TxPktLen = 0;
4843 }
4844 }
4845 else
4846 {
4847 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4848 rc = e1kFallbackAddToFrame(pThis, pDesc, fOnWorkerThread);
4849 }
4850 }
4851 e1kDescReport(pThis, pDesc, addr);
4852 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4853 break;
4854 }
4855
4856 case E1K_DTYP_LEGACY:
4857 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4858 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4859 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4860 {
4861 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4862 }
4863 else
4864 {
4865 /* Add fragment to frame. */
4866 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4867 {
4868 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4869
4870 /* Last fragment: Transmit and reset the packet storage counter. */
4871 if (pDesc->legacy.cmd.fEOP)
4872 {
4873 if (pDesc->legacy.cmd.fIC)
4874 {
4875 e1kInsertChecksum(pThis,
4876 (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
4877 pThis->u16TxPktLen,
4878 pDesc->legacy.cmd.u8CSO,
4879 pDesc->legacy.dw3.u8CSS,
4880 0);
4881 }
4882 e1kTransmitFrame(pThis, fOnWorkerThread);
4883 pThis->u16TxPktLen = 0;
4884 }
4885 }
4886 /* Last fragment + failure: free the buffer and reset the storage counter. */
4887 else if (pDesc->legacy.cmd.fEOP)
4888 {
4889 e1kXmitFreeBuf(pThis);
4890 pThis->u16TxPktLen = 0;
4891 }
4892 }
4893 e1kDescReport(pThis, pDesc, addr);
4894 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4895 break;
4896
4897 default:
4898 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4899 pThis->szPrf, e1kGetDescType(pDesc)));
4900 break;
4901 }
4902
4903 return rc;
4904}
4905
4906DECLINLINE(void) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
4907{
4908 if (pDesc->context.dw2.fTSE)
4909 {
4910 pThis->contextTSE = pDesc->context;
4911 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4912 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4913 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4914 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4915 }
4916 else
4917 {
4918 pThis->contextNormal = pDesc->context;
4919 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4920 }
4921 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4922 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4923 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4924 pDesc->context.ip.u8CSS,
4925 pDesc->context.ip.u8CSO,
4926 pDesc->context.ip.u16CSE,
4927 pDesc->context.tu.u8CSS,
4928 pDesc->context.tu.u8CSO,
4929 pDesc->context.tu.u16CSE));
4930}
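
/*
 * Illustrative sketch (not compiled, hence the #if 0): roughly what the
 * CSS/CSO/CSE fields logged above amount to when a checksum is inserted.
 * The real work is done by e1kInsertChecksum() elsewhere in this file; the
 * field interpretation below (CSS = start offset, CSO = where the result is
 * stored, CSE = last byte, 0 meaning "to the end of the packet") follows the
 * 8254x manual and is an approximation, not this device's exact code path.
 */
#if 0
static uint16_t e1kSketchChecksum(const uint8_t *pbPkt, uint32_t cbPkt,
                                  uint8_t css, uint8_t cso, uint16_t cse)
{
    uint32_t const end = (cse != 0 && cse + 1U < cbPkt) ? cse + 1U : cbPkt;
    uint32_t sum = 0;
    uint32_t i = css;
    for (; i + 1 < end; i += 2)                     /* 16-bit big-endian words */
        sum += ((uint32_t)pbPkt[i] << 8) | pbPkt[i + 1];
    if (i < end)                                    /* odd trailing byte       */
        sum += (uint32_t)pbPkt[i] << 8;
    while (sum >> 16)                               /* fold the carries        */
        sum = (sum & 0xffff) + (sum >> 16);
    (void)cso;     /* the caller would store ~sum big-endian at offset CSO     */
    return (uint16_t)~sum;
}
#endif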
4931
4932static bool e1kLocateTxPacket(PE1KSTATE pThis)
4933{
4934 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
4935 pThis->szPrf, pThis->cbTxAlloc));
4936 /* Check if we have located the packet already. */
4937 if (pThis->cbTxAlloc)
4938 {
4939 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4940 pThis->szPrf, pThis->cbTxAlloc));
4941 return true;
4942 }
4943
4944 bool fTSE = false;
4945 uint32_t cbPacket = 0;
4946
4947 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
4948 {
4949 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
4950 switch (e1kGetDescType(pDesc))
4951 {
4952 case E1K_DTYP_CONTEXT:
4953 e1kUpdateTxContext(pThis, pDesc);
4954 continue;
4955 case E1K_DTYP_LEGACY:
4956 /* Skip empty descriptors. */
4957 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
4958 break;
4959 cbPacket += pDesc->legacy.cmd.u16Length;
4960 pThis->fGSO = false;
4961 break;
4962 case E1K_DTYP_DATA:
4963 /* Skip empty descriptors. */
4964 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
4965 break;
4966 if (cbPacket == 0)
4967 {
4968 /*
4969 * The first fragment: save IXSM and TXSM options
4970 * as these are only valid in the first fragment.
4971 */
4972 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4973 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4974 fTSE = pDesc->data.cmd.fTSE;
4975 /*
4976 * TSE descriptors have VLE bit properly set in
4977 * the first fragment.
4978 */
4979 if (fTSE)
4980 {
4981 pThis->fVTag = pDesc->data.cmd.fVLE;
4982 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4983 }
4984 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
4985 }
4986 cbPacket += pDesc->data.cmd.u20DTALEN;
4987 break;
4988 default:
4989 AssertMsgFailed(("Impossible descriptor type!"));
4990 }
4991 if (pDesc->legacy.cmd.fEOP)
4992 {
4993 /*
4994 * Non-TSE descriptors have VLE bit properly set in
4995 * the last fragment.
4996 */
4997 if (!fTSE)
4998 {
4999 pThis->fVTag = pDesc->data.cmd.fVLE;
5000 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5001 }
5002 /*
5003 * Compute the required buffer size. If we cannot do GSO but still
5004 * have to do segmentation we allocate the first segment only.
5005 */
5006 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5007 cbPacket :
5008 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
5009 if (pThis->fVTag)
5010 pThis->cbTxAlloc += 4;
5011 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5012 pThis->szPrf, pThis->cbTxAlloc));
5013 return true;
5014 }
5015 }
5016
5017 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5018 {
5019 /* All descriptors were empty, we need to process them as a dummy packet */
5020 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5021 pThis->szPrf, pThis->cbTxAlloc));
5022 return true;
5023 }
5024 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
5025 pThis->szPrf, pThis->cbTxAlloc));
5026 return false;
5027}
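
/*
 * Worked example for the cbTxAlloc computation in e1kLocateTxPacket above
 * (illustrative numbers, not taken from a real trace): a TSE packet with
 * HDRLEN=54, PAYLEN=8000 and MSS=1460 gives cbPacket = 54 + 8000 = 8054.
 * If GSO is possible the whole packet is allocated, cbTxAlloc = 8054;
 * otherwise only the first segment is, cbTxAlloc = min(8054, 1460 + 54) = 1514.
 * A VLAN-tagged packet adds 4 bytes in either case.
 */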
5028
5029static int e1kXmitPacket(PE1KSTATE pThis, bool fOnWorkerThread)
5030{
5031 int rc = VINF_SUCCESS;
5032
5033 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5034 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5035
5036 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5037 {
5038 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5039 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5040 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
5041 rc = e1kXmitDesc(pThis, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5042 if (RT_FAILURE(rc))
5043 break;
5044 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
5045 TDH = 0;
5046 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5047 if (uLowThreshold != 0 && e1kGetTxLen(pThis) <= uLowThreshold)
5048 {
5049 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5050 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5051 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5052 }
5053 ++pThis->iTxDCurrent;
5054 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5055 break;
5056 }
5057
5058 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5059 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5060 return rc;
5061}
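
/*
 * Illustrative sketch (not compiled) of the ring arithmetic used above, with
 * plain variables standing in for the TDH/TDT/TDLEN register macros: the head
 * and tail count 16-byte descriptors while the ring length is in bytes, and
 * the number of descriptors owned by the device is the head-to-tail distance
 * modulo the ring size.
 */
#if 0
static uint32_t sketchTxDescsPending(uint32_t tdh, uint32_t tdt, uint32_t cbRing)
{
    uint32_t const cDescs = cbRing / 16;            /* sizeof(E1KTXDESC) == 16 */
    return (tdt + cDescs - tdh) % cDescs;           /* descriptors to process  */
}

static uint32_t sketchTxAdvanceHead(uint32_t tdh, uint32_t cbRing)
{
    if (++tdh * 16 >= cbRing)                       /* wrap at the ring's end  */
        tdh = 0;
    return tdh;
}
#endif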
5062
5063#endif /* E1K_WITH_TXD_CACHE */
5064#ifndef E1K_WITH_TXD_CACHE
5065
5066/**
5067 * Transmit pending descriptors.
5068 *
5069 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5070 *
5071 * @param pThis The E1000 state.
5072 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5073 */
5074static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5075{
5076 int rc = VINF_SUCCESS;
5077
5078 /* Check if transmitter is enabled. */
5079 if (!(TCTL & TCTL_EN))
5080 return VINF_SUCCESS;
5081 /*
5082 * Grab the xmit lock of the driver as well as the E1K device state.
5083 */
5084 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5085 if (RT_LIKELY(rc == VINF_SUCCESS))
5086 {
5087 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5088 if (pDrv)
5089 {
5090 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5091 if (RT_FAILURE(rc))
5092 {
5093 e1kCsTxLeave(pThis);
5094 return rc;
5095 }
5096 }
5097 /*
5098 * Process all pending descriptors.
5099 * Note! Do not process descriptors in locked state
5100 */
5101 while (TDH != TDT && !pThis->fLocked)
5102 {
5103 E1KTXDESC desc;
5104 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5105 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5106
5107 e1kLoadDesc(pThis, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5108 rc = e1kXmitDesc(pThis, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5109 /* If we failed to transmit descriptor we will try it again later */
5110 if (RT_FAILURE(rc))
5111 break;
5112 if (++TDH * sizeof(desc) >= TDLEN)
5113 TDH = 0;
5114
5115 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5116 {
5117 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5118 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5119 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5120 }
5121
5122 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5123 }
5124
5125 /// @todo uncomment: pThis->uStatIntTXQE++;
5126 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5127 /*
5128 * Release the lock.
5129 */
5130 if (pDrv)
5131 pDrv->pfnEndXmit(pDrv);
5132 e1kCsTxLeave(pThis);
5133 }
5134
5135 return rc;
5136}
5137
5138#else /* E1K_WITH_TXD_CACHE */
5139
5140static void e1kDumpTxDCache(PE1KSTATE pThis)
5141{
5142 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5143 uint32_t tdh = TDH;
5144 LogRel(("-- Transmit Descriptors (%d total) --\n", cDescs));
5145 for (i = 0; i < cDescs; ++i)
5146 {
5147 E1KTXDESC desc;
5148 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(TDBAH, TDBAL, i),
5149 &desc, sizeof(desc));
5150 if (i == tdh)
5151 LogRel((">>> "));
5152 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5153 }
5154 LogRel(("-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5155 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
5156 if (tdh > pThis->iTxDCurrent)
5157 tdh -= pThis->iTxDCurrent;
5158 else
5159 tdh = cDescs + tdh - pThis->iTxDCurrent;
5160 for (i = 0; i < pThis->nTxDFetched; ++i)
5161 {
5162 if (i == pThis->iTxDCurrent)
5163 LogRel((">>> "));
5164 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5165 }
5166}
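
/*
 * Worked example for the cache-to-ring index mapping in e1kDumpTxDCache above
 * (illustrative numbers): with 256 descriptors in the ring, TDH=2 and
 * iTxDCurrent=5, the first cached entry corresponds to ring index
 * 256 + 2 - 5 = 253, so the second loop prints ring slots 253, 254, 255, 0, 1
 * and so on, modulo the ring size.
 */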
5167
5168/**
5169 * Transmit pending descriptors.
5170 *
5171 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5172 *
5173 * @param pThis The E1000 state.
5174 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5175 */
5176static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5177{
5178 int rc = VINF_SUCCESS;
5179
5180 /* Check if transmitter is enabled. */
5181 if (!(TCTL & TCTL_EN))
5182 return VINF_SUCCESS;
5183 /*
5184 * Grab the xmit lock of the driver as well as the E1K device state.
5185 */
5186 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5187 if (pDrv)
5188 {
5189 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5190 if (RT_FAILURE(rc))
5191 return rc;
5192 }
5193
5194 /*
5195 * Process all pending descriptors.
5196 * Note! Do not process descriptors in locked state
5197 */
5198 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5199 if (RT_LIKELY(rc == VINF_SUCCESS))
5200 {
5201 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5202 /*
5203 * fIncomplete is set whenever we try to fetch additional descriptors
5204         * for an incomplete packet. If we fail to locate a complete packet
5205         * on the next iteration we need to reset the cache or we risk
5206         * getting stuck in this loop forever.
5207 */
5208 bool fIncomplete = false;
5209 while (!pThis->fLocked && e1kTxDLazyLoad(pThis))
5210 {
5211 while (e1kLocateTxPacket(pThis))
5212 {
5213 fIncomplete = false;
5214 /* Found a complete packet, allocate it. */
5215 rc = e1kXmitAllocBuf(pThis, pThis->fGSO);
5216 /* If we're out of bandwidth we'll come back later. */
5217 if (RT_FAILURE(rc))
5218 goto out;
5219 /* Copy the packet to allocated buffer and send it. */
5220 rc = e1kXmitPacket(pThis, fOnWorkerThread);
5221 /* If we're out of bandwidth we'll come back later. */
5222 if (RT_FAILURE(rc))
5223 goto out;
5224 }
5225 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5226 if (RT_UNLIKELY(fIncomplete))
5227 {
5228 static bool fTxDCacheDumped = false;
5229 /*
5230 * The descriptor cache is full, but we were unable to find
5231 * a complete packet in it. Drop the cache and hope that
5232 * the guest driver can recover from network card error.
5233                 * the guest driver can recover from the network card error.
5234 LogRel(("%s No complete packets in%s TxD cache! "
5235 "Fetched=%d, current=%d, TX len=%d.\n",
5236 pThis->szPrf,
5237 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5238 pThis->nTxDFetched, pThis->iTxDCurrent,
5239 e1kGetTxLen(pThis)));
5240 if (!fTxDCacheDumped)
5241 {
5242 fTxDCacheDumped = true;
5243 e1kDumpTxDCache(pThis);
5244 }
5245 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5246 /*
5247 * Returning an error at this point means Guru in R0
5248 * (see @bugref{6428}).
5249 */
5250# ifdef IN_RING3
5251 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5252# else /* !IN_RING3 */
5253 rc = VINF_IOM_R3_MMIO_WRITE;
5254# endif /* !IN_RING3 */
5255 goto out;
5256 }
5257 if (u8Remain > 0)
5258 {
5259 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5260 "%d more are available\n",
5261 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5262 e1kGetTxLen(pThis) - u8Remain));
5263
5264 /*
5265 * A packet was partially fetched. Move incomplete packet to
5266 * the beginning of cache buffer, then load more descriptors.
5267 */
5268 memmove(pThis->aTxDescriptors,
5269 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5270 u8Remain * sizeof(E1KTXDESC));
5271 pThis->iTxDCurrent = 0;
5272 pThis->nTxDFetched = u8Remain;
5273 e1kTxDLoadMore(pThis);
5274 fIncomplete = true;
5275 }
5276 else
5277 pThis->nTxDFetched = 0;
5278 pThis->iTxDCurrent = 0;
5279 }
5280 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5281 {
5282 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5283 pThis->szPrf));
5284 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5285 }
5286out:
5287 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5288
5289 /// @todo uncomment: pThis->uStatIntTXQE++;
5290 /// @todo uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5291
5292 e1kCsTxLeave(pThis);
5293 }
5294
5295
5296 /*
5297 * Release the lock.
5298 */
5299 if (pDrv)
5300 pDrv->pfnEndXmit(pDrv);
5301 return rc;
5302}
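
/*
 * Worked example for the cache refill path above (illustrative numbers): with
 * nTxDFetched=8 and iTxDCurrent=5 an incomplete packet leaves u8Remain=3
 * descriptors; they are moved to slots 0..2 of aTxDescriptors, iTxDCurrent is
 * reset to 0, nTxDFetched to 3, and e1kTxDLoadMore() appends fresh descriptors
 * behind them before the packet search is retried.
 */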
5303
5304#endif /* E1K_WITH_TXD_CACHE */
5305#ifdef IN_RING3
5306
5307/**
5308 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5309 */
5310static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5311{
5312 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5313 /* Resume suspended transmission */
5314 STATUS &= ~STATUS_TXOFF;
5315 e1kXmitPending(pThis, true /*fOnWorkerThread*/);
5316}
5317
5318/**
5319 * Callback for consuming from transmit queue. It gets called in R3 whenever
5320 * we enqueue something in R0/GC.
5321 *
5322 * @returns true
5323 * @param pDevIns Pointer to device instance structure.
5324 * @param pItem Pointer to the element being dequeued (not used).
5325 * @thread ???
5326 */
5327static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5328{
5329 NOREF(pItem);
5330 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5331 E1kLog2(("%s e1kTxQueueConsumer:\n", pThis->szPrf));
5332
5333 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/); NOREF(rc);
5334#ifndef DEBUG_andy /** @todo r=andy Happens for me a lot, mute this for me. */
5335 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
5336#endif
5337 return true;
5338}
5339
5340/**
5341 * Handler for the wakeup signaller queue.
5342 */
5343static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5344{
5345 RT_NOREF(pItem);
5346 e1kWakeupReceive(pDevIns);
5347 return true;
5348}
5349
5350#endif /* IN_RING3 */
5351
5352/**
5353 * Write handler for Transmit Descriptor Tail register.
5354 *
5355 * @param pThis The device state structure.
5356 * @param offset Register offset in memory-mapped frame.
5357 * @param index Register index in register array.
5358 * @param value The value to store.
5360 * @thread EMT
5361 */
5362static int e1kRegWriteTDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5363{
5364 int rc = e1kRegWriteDefault(pThis, offset, index, value);
5365
5366 /* All descriptors starting with head and not including tail belong to us. */
5367 /* Process them. */
5368 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5369 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5370
5371 /* Ignore TDT writes when the link is down. */
5372 if (TDH != TDT && (STATUS & STATUS_LU))
5373 {
5374 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
5375 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5376 pThis->szPrf, e1kGetTxLen(pThis)));
5377
5378 /* Transmit pending packets if possible, defer it if we cannot do it
5379 in the current context. */
5380#ifdef E1K_TX_DELAY
5381 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5382 if (RT_LIKELY(rc == VINF_SUCCESS))
5383 {
5384 if (!TMTimerIsActive(pThis->CTX_SUFF(pTXDTimer)))
5385 {
5386#ifdef E1K_INT_STATS
5387 pThis->u64ArmedAt = RTTimeNanoTS();
5388#endif
5389 e1kArmTimer(pThis, pThis->CTX_SUFF(pTXDTimer), E1K_TX_DELAY);
5390 }
5391 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5392 e1kCsTxLeave(pThis);
5393 return rc;
5394 }
5395 /* We failed to enter the TX critical section -- transmit as usual. */
5396#endif /* E1K_TX_DELAY */
5397#ifndef IN_RING3
5398 if (!pThis->CTX_SUFF(pDrv))
5399 {
5400 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pTxQueue));
5401 if (RT_UNLIKELY(pItem))
5402 PDMQueueInsert(pThis->CTX_SUFF(pTxQueue), pItem);
5403 }
5404 else
5405#endif
5406 {
5407 rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
5408 if (rc == VERR_TRY_AGAIN)
5409 rc = VINF_SUCCESS;
5410 else if (rc == VERR_SEM_BUSY)
5411 rc = VINF_IOM_R3_MMIO_WRITE;
5412 AssertRC(rc);
5413 }
5414 }
5415
5416 return rc;
5417}
5418
5419/**
5420 * Write handler for Multicast Table Array registers.
5421 *
5422 * @param pThis The device state structure.
5423 * @param offset Register offset in memory-mapped frame.
5424 * @param index Register index in register array.
5425 * @param value The value to store.
5426 * @thread EMT
5427 */
5428static int e1kRegWriteMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5429{
5430 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5431 pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])] = value;
5432
5433 return VINF_SUCCESS;
5434}
5435
5436/**
5437 * Read handler for Multicast Table Array registers.
5438 *
5439 * @returns VBox status code.
5440 *
5441 * @param pThis The device state structure.
5442 * @param offset Register offset in memory-mapped frame.
5443 * @param index Register index in register array.
5444 * @thread EMT
5445 */
5446static int e1kRegReadMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5447{
5448    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5449 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5450
5451 return VINF_SUCCESS;
5452}
5453
5454/**
5455 * Write handler for Receive Address registers.
5456 *
5457 * @param pThis The device state structure.
5458 * @param offset Register offset in memory-mapped frame.
5459 * @param index Register index in register array.
5460 * @param value The value to store.
5461 * @thread EMT
5462 */
5463static int e1kRegWriteRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5464{
5465 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5466 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5467
5468 return VINF_SUCCESS;
5469}
5470
5471/**
5472 * Read handler for Receive Address registers.
5473 *
5474 * @returns VBox status code.
5475 *
5476 * @param pThis The device state structure.
5477 * @param offset Register offset in memory-mapped frame.
5478 * @param index Register index in register array.
5479 * @thread EMT
5480 */
5481static int e1kRegReadRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5482{
5483    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5484 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5485
5486 return VINF_SUCCESS;
5487}
5488
5489/**
5490 * Write handler for VLAN Filter Table Array registers.
5491 *
5492 * @param pThis The device state structure.
5493 * @param offset Register offset in memory-mapped frame.
5494 * @param index Register index in register array.
5495 * @param value The value to store.
5496 * @thread EMT
5497 */
5498static int e1kRegWriteVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5499{
5500 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5501 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5502
5503 return VINF_SUCCESS;
5504}
5505
5506/**
5507 * Read handler for VLAN Filter Table Array registers.
5508 *
5509 * @returns VBox status code.
5510 *
5511 * @param pThis The device state structure.
5512 * @param offset Register offset in memory-mapped frame.
5513 * @param index Register index in register array.
5514 * @thread EMT
5515 */
5516static int e1kRegReadVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5517{
5518    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5519 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5520
5521 return VINF_SUCCESS;
5522}
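
/*
 * Worked example for the MTA/RA/VFTA handlers above (illustrative): all three
 * map a register offset to an array slot the same way. With 4-byte elements,
 * an access 0x0C bytes past the start of the bank recorded in g_aE1kRegMap
 * selects element 0x0C / 4 = 3, e.g. auMTA[3] for the multicast table.
 */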
5523
5524/**
5525 * Read handler for unimplemented registers.
5526 *
5527 * Merely reports reads from unimplemented registers.
5528 *
5529 * @returns VBox status code.
5530 *
5531 * @param pThis The device state structure.
5532 * @param offset Register offset in memory-mapped frame.
5533 * @param index Register index in register array.
5534 * @thread EMT
5535 */
5536static int e1kRegReadUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5537{
5538 RT_NOREF3(pThis, offset, index);
5539 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5540 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5541 *pu32Value = 0;
5542
5543 return VINF_SUCCESS;
5544}
5545
5546/**
5547 * Default register read handler with automatic clear operation.
5548 *
5549 * Retrieves the value of register from register array in device state structure.
5550 * Then resets all bits.
5551 *
5552 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5553 * done in the caller.
5554 *
5555 * @returns VBox status code.
5556 *
5557 * @param pThis The device state structure.
5558 * @param offset Register offset in memory-mapped frame.
5559 * @param index Register index in register array.
5560 * @thread EMT
5561 */
5562static int e1kRegReadAutoClear(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5563{
5564 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5565 int rc = e1kRegReadDefault(pThis, offset, index, pu32Value);
5566 pThis->auRegs[index] = 0;
5567
5568 return rc;
5569}
5570
5571/**
5572 * Default register read handler.
5573 *
5574 * Retrieves the value of register from register array in device state structure.
5575 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
5576 *
5577 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5578 * done in the caller.
5579 *
5580 * @returns VBox status code.
5581 *
5582 * @param pThis The device state structure.
5583 * @param offset Register offset in memory-mapped frame.
5584 * @param index Register index in register array.
5585 * @thread EMT
5586 */
5587static int e1kRegReadDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5588{
5589 RT_NOREF_PV(offset);
5590
5591 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5592 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
5593
5594 return VINF_SUCCESS;
5595}
5596
5597/**
5598 * Write handler for unimplemented registers.
5599 *
5600 * Merely reports writes to unimplemented registers.
5601 *
5602 * @param pThis The device state structure.
5603 * @param offset Register offset in memory-mapped frame.
5604 * @param index Register index in register array.
5605 * @param value The value to store.
5606 * @thread EMT
5607 */
5608
5609static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5610{
5611 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
5612
5613 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5614 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5615
5616 return VINF_SUCCESS;
5617}
5618
5619/**
5620 * Default register write handler.
5621 *
5622 * Stores the value to the register array in the device state structure. Only
5623 * bits corresponding to 1s in the register's 'writable' mask will be stored.
5624 *
5625 * @returns VBox status code.
5626 *
5627 * @param pThis The device state structure.
5628 * @param offset Register offset in memory-mapped frame.
5629 * @param index Register index in register array.
5630 * @param value The value to store.
5632 * @thread EMT
5633 */
5634
5635static int e1kRegWriteDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5636{
5637 RT_NOREF_PV(offset);
5638
5639 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5640 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
5641 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
5642
5643 return VINF_SUCCESS;
5644}
5645
5646/**
5647 * Search register table for matching register.
5648 *
5649 * @returns Index in the register table or -1 if not found.
5650 *
5651 * @param offReg Register offset in memory-mapped region.
5652 * @thread EMT
5653 */
5654static int e1kRegLookup(uint32_t offReg)
5655{
5656
5657#if 0
5658 int index;
5659
5660 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5661 {
5662 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
5663 {
5664 return index;
5665 }
5666 }
5667#else
5668 int iStart = 0;
5669 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
5670 for (;;)
5671 {
5672 int i = (iEnd - iStart) / 2 + iStart;
5673 uint32_t offCur = g_aE1kRegMap[i].offset;
5674 if (offReg < offCur)
5675 {
5676 if (i == iStart)
5677 break;
5678 iEnd = i;
5679 }
5680 else if (offReg >= offCur + g_aE1kRegMap[i].size)
5681 {
5682 i++;
5683 if (i == iEnd)
5684 break;
5685 iStart = i;
5686 }
5687 else
5688 return i;
5689 Assert(iEnd > iStart);
5690 }
5691
5692 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5693 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
5694 return i;
5695
5696# ifdef VBOX_STRICT
5697 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5698 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
5699# endif
5700
5701#endif
5702
5703 return -1;
5704}
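
/*
 * Illustrative sketch (not compiled) of the range binary search above in a
 * generic form: entries are sorted by offset and each one covers
 * [offset, offset + size). SKETCHRANGE, aRanges and cRanges are invented
 * names; the real table is g_aE1kRegMap with E1K_NUM_OF_BINARY_SEARCHABLE
 * sorted entries followed by a linear tail.
 */
#if 0
typedef struct SKETCHRANGE { uint32_t offset, size; } SKETCHRANGE;

static int sketchRangeLookup(const SKETCHRANGE *aRanges, int cRanges, uint32_t offReg)
{
    int iStart = 0, iEnd = cRanges;
    while (iStart < iEnd)
    {
        int i = iStart + (iEnd - iStart) / 2;
        if (offReg < aRanges[i].offset)
            iEnd = i;                                           /* go left  */
        else if (offReg >= aRanges[i].offset + aRanges[i].size)
            iStart = i + 1;                                     /* go right */
        else
            return i;                                           /* found    */
    }
    return -1;
}
#endif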
5705
5706/**
5707 * Handle unaligned register read operation.
5708 *
5709 * Looks up and calls appropriate handler.
5710 *
5711 * @returns VBox status code.
5712 *
5713 * @param pThis The device state structure.
5714 * @param offReg Register offset in memory-mapped frame.
5715 * @param pv Where to store the result.
5716 * @param cb Number of bytes to read.
5717 * @thread EMT
5718 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
5719 * accesses we have to take care of that ourselves.
5720 */
5721static int e1kRegReadUnaligned(PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
5722{
5723 uint32_t u32 = 0;
5724 uint32_t shift;
5725 int rc = VINF_SUCCESS;
5726 int index = e1kRegLookup(offReg);
5727#ifdef LOG_ENABLED
5728 char buf[9];
5729#endif
5730
5731 /*
5732 * From the spec:
5733 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5734     * double word) are ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5735 */
5736
5737 /*
5738 * To be able to read bytes and short word we convert them to properly
5739 * shifted 32-bit words and masks. The idea is to keep register-specific
5740 * handlers simple. Most accesses will be 32-bit anyway.
5741 */
5742 uint32_t mask;
5743 switch (cb)
5744 {
5745 case 4: mask = 0xFFFFFFFF; break;
5746 case 2: mask = 0x0000FFFF; break;
5747 case 1: mask = 0x000000FF; break;
5748 default:
5749 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
5750 "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
5751 }
5752 if (index != -1)
5753 {
5754 if (g_aE1kRegMap[index].readable)
5755 {
5756 /* Make the mask correspond to the bits we are about to read. */
5757 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5758 mask <<= shift;
5759 if (!mask)
5760 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
5761 /*
5762 * Read it. Pass the mask so the handler knows what has to be read.
5763 * Mask out irrelevant bits.
5764 */
5765 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5766 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5767 return rc;
5768 //pThis->fDelayInts = false;
5769 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5770 //pThis->iStatIntLostOne = 0;
5771 rc = g_aE1kRegMap[index].pfnRead(pThis, offReg & 0xFFFFFFFC, index, &u32);
5772 u32 &= mask;
5773 //e1kCsLeave(pThis);
5774 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5775 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5776 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
5777 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5778 /* Shift back the result. */
5779 u32 >>= shift;
5780 }
5781 else
5782 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5783 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5784 if (IOM_SUCCESS(rc))
5785 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
5786 }
5787 else
5788 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5789 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
5790
5791 memcpy(pv, &u32, cb);
5792 return rc;
5793}
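
/*
 * Worked example for the mask/shift handling above (assuming STATUS sits at
 * offset 0x08 as on real 8254x hardware): a one-byte read at 0x0A yields
 * shift = (0x0A - 0x08) * 8 = 16 and mask = 0x000000FF << 16 = 0x00FF0000.
 * The handler reads the whole 32-bit register, the mask keeps bits 16..23,
 * and the result is shifted right by 16 before the single byte is copied
 * back to the caller.
 */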
5794
5795/**
5796 * Handle 4 byte aligned and sized read operation.
5797 *
5798 * Looks up and calls appropriate handler.
5799 *
5800 * @returns VBox status code.
5801 *
5802 * @param pThis The device state structure.
5803 * @param offReg Register offset in memory-mapped frame.
5804 * @param pu32 Where to store the result.
5805 * @thread EMT
5806 */
5807static int e1kRegReadAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
5808{
5809 Assert(!(offReg & 3));
5810
5811 /*
5812 * Lookup the register and check that it's readable.
5813 */
5814 int rc = VINF_SUCCESS;
5815 int idxReg = e1kRegLookup(offReg);
5816 if (RT_LIKELY(idxReg != -1))
5817 {
5818 if (RT_UNLIKELY(g_aE1kRegMap[idxReg].readable))
5819 {
5820 /*
5821 * Read it. Pass the mask so the handler knows what has to be read.
5822 * Mask out irrelevant bits.
5823 */
5824 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5825 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5826 // return rc;
5827 //pThis->fDelayInts = false;
5828 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5829 //pThis->iStatIntLostOne = 0;
5830 rc = g_aE1kRegMap[idxReg].pfnRead(pThis, offReg & 0xFFFFFFFC, idxReg, pu32);
5831 //e1kCsLeave(pThis);
5832 Log6(("%s At %08X read %08X from %s (%s)\n",
5833 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5834 if (IOM_SUCCESS(rc))
5835 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
5836 }
5837 else
5838 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
5839 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5840 }
5841 else
5842 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
5843 return rc;
5844}
5845
5846/**
5847 * Handle 4 byte sized and aligned register write operation.
5848 *
5849 * Looks up and calls appropriate handler.
5850 *
5851 * @returns VBox status code.
5852 *
5853 * @param pThis The device state structure.
5854 * @param offReg Register offset in memory-mapped frame.
5855 * @param u32Value The value to write.
5856 * @thread EMT
5857 */
5858static int e1kRegWriteAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
5859{
5860 int rc = VINF_SUCCESS;
5861 int index = e1kRegLookup(offReg);
5862 if (RT_LIKELY(index != -1))
5863 {
5864 if (RT_LIKELY(g_aE1kRegMap[index].writable))
5865 {
5866 /*
5867 * Write it. Pass the mask so the handler knows what has to be written.
5868 * Mask out irrelevant bits.
5869 */
5870 Log6(("%s At %08X write %08X to %s (%s)\n",
5871 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5872 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5873 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5874 // return rc;
5875 //pThis->fDelayInts = false;
5876 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5877 //pThis->iStatIntLostOne = 0;
5878 rc = g_aE1kRegMap[index].pfnWrite(pThis, offReg, index, u32Value);
5879 //e1kCsLeave(pThis);
5880 }
5881 else
5882 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
5883 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5884 if (IOM_SUCCESS(rc))
5885 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
5886 }
5887 else
5888 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
5889 pThis->szPrf, offReg, u32Value));
5890 return rc;
5891}
5892
5893
5894/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
5895
5896/**
5897 * @callback_method_impl{FNIOMMMIOREAD}
5898 */
5899PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
5900{
5901 RT_NOREF2(pvUser, cb);
5902 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5903 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
5904
5905 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
5906 Assert(offReg < E1K_MM_SIZE);
5907 Assert(cb == 4);
5908 Assert(!(GCPhysAddr & 3));
5909
5910 int rc = e1kRegReadAlignedU32(pThis, offReg, (uint32_t *)pv);
5911
5912 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
5913 return rc;
5914}
5915
5916/**
5917 * @callback_method_impl{FNIOMMMIOWRITE}
5918 */
5919PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
5920{
5921 RT_NOREF2(pvUser, cb);
5922 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5923 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
5924
5925 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
5926 Assert(offReg < E1K_MM_SIZE);
5927 Assert(cb == 4);
5928 Assert(!(GCPhysAddr & 3));
5929
5930 int rc = e1kRegWriteAlignedU32(pThis, offReg, *(uint32_t const *)pv);
5931
5932 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
5933 return rc;
5934}
5935
5936/**
5937 * @callback_method_impl{FNIOMIOPORTIN}
5938 */
5939PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t *pu32, unsigned cb)
5940{
5941 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5942 int rc;
5943 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
5944 RT_NOREF_PV(pvUser);
5945
5946 uPort -= pThis->IOPortBase;
5947 if (RT_LIKELY(cb == 4))
5948 switch (uPort)
5949 {
5950 case 0x00: /* IOADDR */
5951 *pu32 = pThis->uSelectedReg;
5952 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
5953 rc = VINF_SUCCESS;
5954 break;
5955
5956 case 0x04: /* IODATA */
5957 if (!(pThis->uSelectedReg & 3))
5958 rc = e1kRegReadAlignedU32(pThis, pThis->uSelectedReg, pu32);
5959 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
5960 rc = e1kRegReadUnaligned(pThis, pThis->uSelectedReg, pu32, cb);
5961 if (rc == VINF_IOM_R3_MMIO_READ)
5962 rc = VINF_IOM_R3_IOPORT_READ;
5963 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
5964 break;
5965
5966 default:
5967 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, uPort));
5968 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
5969 rc = VINF_SUCCESS;
5970 }
5971 else
5972 {
5973 E1kLog(("%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x", pThis->szPrf, uPort, cb));
5974 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb);
5975 }
5976 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
5977 return rc;
5978}
5979
5980
5981/**
5982 * @callback_method_impl{FNIOMIOPORTOUT}
5983 */
5984PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t u32, unsigned cb)
5985{
5986 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5987 int rc;
5988 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
5989 RT_NOREF_PV(pvUser);
5990
5991 E1kLog2(("%s e1kIOPortOut: uPort=%RTiop value=%08x\n", pThis->szPrf, uPort, u32));
5992 if (RT_LIKELY(cb == 4))
5993 {
5994 uPort -= pThis->IOPortBase;
5995 switch (uPort)
5996 {
5997 case 0x00: /* IOADDR */
5998 pThis->uSelectedReg = u32;
5999 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6000 rc = VINF_SUCCESS;
6001 break;
6002
6003 case 0x04: /* IODATA */
6004 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6005 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6006 {
6007 rc = e1kRegWriteAlignedU32(pThis, pThis->uSelectedReg, u32);
6008 if (rc == VINF_IOM_R3_MMIO_WRITE)
6009 rc = VINF_IOM_R3_IOPORT_WRITE;
6010 }
6011 else
6012 rc = PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
6013 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6014 break;
6015
6016 default:
6017 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, uPort));
6018 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", uPort);
6019 }
6020 }
6021 else
6022 {
6023 E1kLog(("%s e1kIOPortOut: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
6024 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: uPort=%RTiop cb=%#x\n", pThis->szPrf, uPort, cb);
6025 }
6026
6027 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6028 return rc;
6029}
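
/*
 * Illustrative sketch (not compiled) of the guest-side access pattern for the
 * two I/O ports above: the guest writes a register offset to IOADDR at
 * BAR base + 0 and then reads or writes the register contents through IODATA
 * at BAR base + 4. outl()/inl() stand for whatever port I/O primitives the
 * guest OS provides; they are assumptions of this sketch, not part of this
 * device code.
 */
#if 0
static uint32_t sketchGuestIoReadReg(uint16_t uIoBase, uint32_t offReg)
{
    outl(uIoBase + 0, offReg);          /* IOADDR: select the register        */
    return inl(uIoBase + 4);            /* IODATA: read the selected register */
}

static void sketchGuestIoWriteReg(uint16_t uIoBase, uint32_t offReg, uint32_t uValue)
{
    outl(uIoBase + 0, offReg);          /* IOADDR: select the register        */
    outl(uIoBase + 4, uValue);          /* IODATA: write the selected value   */
}
#endif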
6030
6031#ifdef IN_RING3
6032
6033/**
6034 * Dump complete device state to log.
6035 *
6036 * @param pThis Pointer to device state.
6037 */
6038static void e1kDumpState(PE1KSTATE pThis)
6039{
6040 RT_NOREF(pThis);
6041 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6042 E1kLog2(("%s %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6043# ifdef E1K_INT_STATS
6044 LogRel(("%s Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6045 LogRel(("%s Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6046 LogRel(("%s Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6047 LogRel(("%s Interrupts delayed: %d\n", pThis->szPrf, pThis->uStatIntDly));
6048 LogRel(("%s Disabled delayed: %d\n", pThis->szPrf, pThis->uStatDisDly));
6049 LogRel(("%s Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6050 LogRel(("%s Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6051 LogRel(("%s Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6052 LogRel(("%s Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6053 LogRel(("%s Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6054 LogRel(("%s Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6055 LogRel(("%s Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6056 LogRel(("%s Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6057 LogRel(("%s Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6058 LogRel(("%s Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6059 LogRel(("%s Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6060 LogRel(("%s TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6061 LogRel(("%s TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6062 LogRel(("%s TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6063 LogRel(("%s TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6064 LogRel(("%s TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6065 LogRel(("%s TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6066 LogRel(("%s RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6067 LogRel(("%s RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6068 LogRel(("%s TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6069 LogRel(("%s TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6070 LogRel(("%s TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6071 LogRel(("%s Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6072 LogRel(("%s Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6073 LogRel(("%s TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6074 LogRel(("%s TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6075 LogRel(("%s TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6076 LogRel(("%s TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6077 LogRel(("%s TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6078 LogRel(("%s TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6079 LogRel(("%s TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6080 LogRel(("%s TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6081 LogRel(("%s Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6082 LogRel(("%s Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6083# endif /* E1K_INT_STATS */
6084}
6085
6086/**
6087 * @callback_method_impl{FNPCIIOREGIONMAP}
6088 */
6089static DECLCALLBACK(int) e1kMap(PPCIDEVICE pPciDev, int iRegion, RTGCPHYS GCPhysAddress, uint32_t cb, PCIADDRESSSPACE enmType)
6090{
6091 RT_NOREF(iRegion);
6092 PE1KSTATE pThis = PDMINS_2_DATA(pPciDev->pDevIns, E1KSTATE*);
6093 int rc;
6094
6095 switch (enmType)
6096 {
6097 case PCI_ADDRESS_SPACE_IO:
6098 pThis->IOPortBase = (RTIOPORT)GCPhysAddress;
6099 rc = PDMDevHlpIOPortRegister(pPciDev->pDevIns, pThis->IOPortBase, cb, NULL /*pvUser*/,
6100 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
6101 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6102 rc = PDMDevHlpIOPortRegisterR0(pPciDev->pDevIns, pThis->IOPortBase, cb, NIL_RTR0PTR /*pvUser*/,
6103 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6104 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6105 rc = PDMDevHlpIOPortRegisterRC(pPciDev->pDevIns, pThis->IOPortBase, cb, NIL_RTRCPTR /*pvUser*/,
6106 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6107 break;
6108
6109 case PCI_ADDRESS_SPACE_MEM:
6110 /*
6111 * From the spec:
6112 * For registers that should be accessed as 32-bit double words,
6113             * partial writes (less than a 32-bit double word) are ignored.
6114 * Partial reads return all 32 bits of data regardless of the
6115 * byte enables.
6116 */
6117 pThis->addrMMReg = GCPhysAddress; Assert(!(GCPhysAddress & 7));
6118 rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
6119 IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
6120 e1kMMIOWrite, e1kMMIORead, "E1000");
6121 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6122 rc = PDMDevHlpMMIORegisterR0(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
6123 "e1kMMIOWrite", "e1kMMIORead");
6124 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6125 rc = PDMDevHlpMMIORegisterRC(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
6126 "e1kMMIOWrite", "e1kMMIORead");
6127 break;
6128
6129 default:
6130 /* We should never get here */
6131 AssertMsgFailed(("Invalid PCI address space param in map callback"));
6132 rc = VERR_INTERNAL_ERROR;
6133 break;
6134 }
6135 return rc;
6136}
6137
6138
6139/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6140
6141/**
6142 * Check if the device can receive data now.
6143 * This must be called before the pfnReceive() method is called.
6144 *
6145 * @returns VBox status code (VERR_NET_NO_BUFFER_SPACE if no buffers are available).
6146 * @param   pThis           The device state structure.
6147 * @thread EMT
6148 */
6149static int e1kCanReceive(PE1KSTATE pThis)
6150{
6151#ifndef E1K_WITH_RXD_CACHE
6152 size_t cb;
6153
6154 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6155 return VERR_NET_NO_BUFFER_SPACE;
6156
6157 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6158 {
6159 E1KRXDESC desc;
6160 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6161 &desc, sizeof(desc));
6162 if (desc.status.fDD)
6163 cb = 0;
6164 else
6165 cb = pThis->u16RxBSize;
6166 }
6167 else if (RDH < RDT)
6168 cb = (RDT - RDH) * pThis->u16RxBSize;
6169 else if (RDH > RDT)
6170 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6171 else
6172 {
6173 cb = 0;
6174 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6175 }
6176 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6177 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6178
6179 e1kCsRxLeave(pThis);
6180 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6181#else /* E1K_WITH_RXD_CACHE */
6182 int rc = VINF_SUCCESS;
6183
6184 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6185 return VERR_NET_NO_BUFFER_SPACE;
6186
6187 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6188 {
6189 E1KRXDESC desc;
6190 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6191 &desc, sizeof(desc));
6192 if (desc.status.fDD)
6193 rc = VERR_NET_NO_BUFFER_SPACE;
6194 }
6195 else if (e1kRxDIsCacheEmpty(pThis) && RDH == RDT)
6196 {
6197 /* Cache is empty, so is the RX ring. */
6198 rc = VERR_NET_NO_BUFFER_SPACE;
6199 }
6200 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6201 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6202 e1kRxDInCache(pThis), RDH, RDT, RDLEN, pThis->u16RxBSize, rc));
6203
6204 e1kCsRxLeave(pThis);
6205 return rc;
6206#endif /* E1K_WITH_RXD_CACHE */
6207}
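/*
 * Worked example for the legacy (non-cache) path in e1kCanReceive: with a
 * ring of RDLEN / sizeof(E1KRXDESC) = 16 descriptors, RDH = 14 and RDT = 3,
 * the head has wrapped past the end of the ring, so the device still owns
 * 16 - 14 + 3 = 5 descriptors, i.e. 5 * u16RxBSize bytes of buffer space.
 */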
6208
6209/**
6210 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6211 */
6212static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6213{
6214 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6215 int rc = e1kCanReceive(pThis);
6216
6217 if (RT_SUCCESS(rc))
6218 return VINF_SUCCESS;
6219 if (RT_UNLIKELY(cMillies == 0))
6220 return VERR_NET_NO_BUFFER_SPACE;
6221
6222 rc = VERR_INTERRUPTED;
6223 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6224 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6225 VMSTATE enmVMState;
6226 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pThis->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
6227 || enmVMState == VMSTATE_RUNNING_LS))
6228 {
6229 int rc2 = e1kCanReceive(pThis);
6230 if (RT_SUCCESS(rc2))
6231 {
6232 rc = VINF_SUCCESS;
6233 break;
6234 }
6235 E1kLogRel(("E1000 e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6236 E1kLog(("%s e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6237 RTSemEventWait(pThis->hEventMoreRxDescAvail, cMillies);
6238 }
6239 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6240 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6241
6242 return rc;
6243}
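/*
 * Note: hEventMoreRxDescAvail is presumably signalled whenever more receive
 * descriptors may have become available (and unconditionally on shutdown,
 * see e1kR3Destruct), which makes the loop above re-evaluate e1kCanReceive()
 * instead of sleeping for the full cMillies interval.
 */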
6244
6245
6246/**
6247 * Matches the packet addresses against Receive Address table. Looks for
6248 * exact matches only.
6249 *
6250 * @returns true if address matches.
6251 * @param pThis Pointer to the state structure.
6252 * @param pvBuf The ethernet packet.
6254 * @thread EMT
6255 */
6256static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6257{
6258 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6259 {
6260 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6261
6262 /* Valid address? */
6263 if (ra->ctl & RA_CTL_AV)
6264 {
6265 Assert((ra->ctl & RA_CTL_AS) < 2);
6266 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6267 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6268 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6269 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6270 /*
6271 * Address Select:
6272 * 00b = Destination address
6273 * 01b = Source address
6274 * 10b = Reserved
6275 * 11b = Reserved
6276 * Since ethernet header is (DA, SA, len) we can use address
6277 * select as index.
6278 */
6279 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6280 ra->addr, sizeof(ra->addr)) == 0)
6281 return true;
6282 }
6283 }
6284
6285 return false;
6286}
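/*
 * Illustration: since an Ethernet header starts with the 6-byte destination
 * address followed by the 6-byte source address, AS = 00b compares against
 * pvBuf + 0 (DA) and AS = 01b against pvBuf + 6 (SA), which is exactly what
 * the sizeof(ra->addr) * (ra->ctl & RA_CTL_AS) offset above computes.
 */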
6287
6288/**
6289 * Matches the packet addresses against Multicast Table Array.
6290 *
6291 * @remarks This is an imperfect match since it matches a whole subset of
6292 *          addresses rather than a single exact address.
6293 *
6294 * @returns true if address matches.
6295 * @param pThis Pointer to the state structure.
6296 * @param pvBuf The ethernet packet.
6298 * @thread EMT
6299 */
6300static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6301{
6302 /* Get bits 32..47 of destination address */
6303 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6304
6305 unsigned offset = GET_BITS(RCTL, MO);
6306 /*
6307 * offset means:
6308 * 00b = bits 36..47
6309 * 01b = bits 35..46
6310 * 10b = bits 34..45
6311 * 11b = bits 32..43
6312 */
6313 if (offset < 3)
6314 u16Bit = u16Bit >> (4 - offset);
6315 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6316}
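/*
 * Illustration: with the default MO = 00b the 16-bit word holding bits
 * 32..47 of the destination address is shifted right by 4 and masked to
 * 12 bits, selecting bits 36..47 as the index into the 4096-bit MTA;
 * with MO = 11b no shift is applied and bits 32..43 are used instead,
 * matching the table above.
 */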
6317
6318/**
6319 * Determines if the packet is to be delivered to upper layer.
6320 *
6321 * The following filters are supported:
6322 * - Exact Unicast/Multicast
6323 * - Promiscuous Unicast/Multicast
6324 * - Multicast
6325 * - VLAN
6326 *
6327 * @returns true if packet is intended for this node.
6328 * @param pThis Pointer to the state structure.
6329 * @param pvBuf The ethernet packet.
6330 * @param cb Number of bytes available in the packet.
6331 * @param pStatus Bit field to store status bits.
6332 * @thread EMT
6333 */
6334static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6335{
6336 Assert(cb > 14);
6337 /* Assume that we fail to pass exact filter. */
6338 pStatus->fPIF = false;
6339 pStatus->fVP = false;
6340 /* Discard oversized packets */
6341 if (cb > E1K_MAX_RX_PKT_SIZE)
6342 {
6343 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6344 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6345 E1K_INC_CNT32(ROC);
6346 return false;
6347 }
6348 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6349 {
6350 /* When long packet reception is disabled packets over 1522 are discarded */
6351 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6352 pThis->szPrf, cb));
6353 E1K_INC_CNT32(ROC);
6354 return false;
6355 }
6356
6357 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6358 /* Compare TPID with VLAN Ether Type */
6359 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6360 {
6361 pStatus->fVP = true;
6362 /* Is VLAN filtering enabled? */
6363 if (RCTL & RCTL_VFE)
6364 {
6365 /* It is 802.1q packet indeed, let's filter by VID */
6366 if (RCTL & RCTL_CFIEN)
6367 {
6368 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6369 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6370 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6371 !!(RCTL & RCTL_CFI)));
6372 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6373 {
6374 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6375 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6376 return false;
6377 }
6378 }
6379 else
6380 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6381 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6382 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6383 {
6384 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6385 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6386 return false;
6387 }
6388 }
6389 }
6390 /* Broadcast filtering */
6391 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6392 return true;
6393 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6394 if (e1kIsMulticast(pvBuf))
6395 {
6396 /* Is multicast promiscuous enabled? */
6397 if (RCTL & RCTL_MPE)
6398 return true;
6399 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6400 /* Try perfect matches first */
6401 if (e1kPerfectMatch(pThis, pvBuf))
6402 {
6403 pStatus->fPIF = true;
6404 return true;
6405 }
6406 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6407 if (e1kImperfectMatch(pThis, pvBuf))
6408 return true;
6409 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6410 }
6411 else {
6412 /* Is unicast promiscuous enabled? */
6413 if (RCTL & RCTL_UPE)
6414 return true;
6415 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6416 if (e1kPerfectMatch(pThis, pvBuf))
6417 {
6418 pStatus->fPIF = true;
6419 return true;
6420 }
6421 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6422 }
6423 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6424 return false;
6425}
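/*
 * Example of the resulting policy: a multicast frame received while
 * RCTL.MPE is clear is delivered only if it matches a valid RA entry
 * exactly (which also sets the PIF status bit) or hits its bit in the MTA;
 * broadcasts are accepted earlier, as soon as RCTL.BAM is set, and VLAN
 * tagged frames must additionally pass the VFTA check when RCTL.VFE is set.
 */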
6426
6427/**
6428 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6429 */
6430static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6431{
6432 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6433 int rc = VINF_SUCCESS;
6434
6435 /*
6436 * Drop packets if the VM is not running yet/anymore.
6437 */
6438 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pThis));
6439 if ( enmVMState != VMSTATE_RUNNING
6440 && enmVMState != VMSTATE_RUNNING_LS)
6441 {
6442 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6443 return VINF_SUCCESS;
6444 }
6445
6446 /* Discard incoming packets in locked state */
6447 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6448 {
6449 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6450 return VINF_SUCCESS;
6451 }
6452
6453 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6454
6455 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6456 // return VERR_PERMISSION_DENIED;
6457
6458 e1kPacketDump(pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6459
6460 /* Update stats */
6461 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6462 {
6463 E1K_INC_CNT32(TPR);
6464 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6465 e1kCsLeave(pThis);
6466 }
6467 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6468 E1KRXDST status;
6469 RT_ZERO(status);
6470 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6471 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6472 if (fPassed)
6473 {
6474 rc = e1kHandleRxPacket(pThis, pvBuf, cb, status);
6475 }
6476 //e1kCsLeave(pThis);
6477 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6478
6479 return rc;
6480}
6481
6482
6483/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6484
6485/**
6486 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6487 */
6488static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6489{
6490 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
6491 int rc = VERR_PDM_LUN_NOT_FOUND;
6492
6493 if (iLUN == 0)
6494 {
6495 *ppLed = &pThis->led;
6496 rc = VINF_SUCCESS;
6497 }
6498 return rc;
6499}
6500
6501
6502/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6503
6504/**
6505 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6506 */
6507static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6508{
6509 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6510 pThis->eeprom.getMac(pMac);
6511 return VINF_SUCCESS;
6512}
6513
6514/**
6515 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6516 */
6517static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6518{
6519 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6520 if (STATUS & STATUS_LU)
6521 return PDMNETWORKLINKSTATE_UP;
6522 return PDMNETWORKLINKSTATE_DOWN;
6523}
6524
6525/**
6526 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6527 */
6528static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6529{
6530 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6531
6532 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6533 switch (enmState)
6534 {
6535 case PDMNETWORKLINKSTATE_UP:
6536 pThis->fCableConnected = true;
6537 /* If link was down, bring it up after a while. */
6538 if (!(STATUS & STATUS_LU))
6539 e1kBringLinkUpDelayed(pThis);
6540 break;
6541 case PDMNETWORKLINKSTATE_DOWN:
6542 pThis->fCableConnected = false;
6543 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6544 * We might have to set the link state before the driver initializes us. */
6545 Phy::setLinkStatus(&pThis->phy, false);
6546 /* If link was up, bring it down. */
6547 if (STATUS & STATUS_LU)
6548 e1kR3LinkDown(pThis);
6549 break;
6550 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6551 /*
6552 * There is not much sense in bringing down the link if it has not come up yet.
6553 * If it is up though, we bring it down temporarily, then bring it up again.
6554 */
6555 if (STATUS & STATUS_LU)
6556 e1kR3LinkDownTemp(pThis);
6557 break;
6558 default:
6559 ;
6560 }
6561 return VINF_SUCCESS;
6562}
6563
6564
6565/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6566
6567/**
6568 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6569 */
6570static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6571{
6572 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
6573 Assert(&pThis->IBase == pInterface);
6574
6575 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
6576 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
6577 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
6578 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
6579 return NULL;
6580}
6581
6582
6583/* -=-=-=-=- Saved State -=-=-=-=- */
6584
6585/**
6586 * Saves the configuration.
6587 *
6588 * @param pThis The E1K state.
6589 * @param pSSM The handle to the saved state.
6590 */
6591static void e1kSaveConfig(PE1KSTATE pThis, PSSMHANDLE pSSM)
6592{
6593 SSMR3PutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
6594 SSMR3PutU32(pSSM, pThis->eChip);
6595}
6596
6597/**
6598 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6599 */
6600static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6601{
6602 RT_NOREF(uPass);
6603 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6604 e1kSaveConfig(pThis, pSSM);
6605 return VINF_SSM_DONT_CALL_AGAIN;
6606}
6607
6608/**
6609 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6610 */
6611static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6612{
6613 RT_NOREF(pSSM);
6614 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6615
6616 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6617 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6618 return rc;
6619 e1kCsLeave(pThis);
6620 return VINF_SUCCESS;
6621#if 0
6622 /* 1) Prevent all threads from modifying the state and memory */
6623 //pThis->fLocked = true;
6624 /* 2) Cancel all timers */
6625#ifdef E1K_TX_DELAY
6626 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
6627#endif /* E1K_TX_DELAY */
6628#ifdef E1K_USE_TX_TIMERS
6629 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
6630#ifndef E1K_NO_TAD
6631 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
6632#endif /* E1K_NO_TAD */
6633#endif /* E1K_USE_TX_TIMERS */
6634#ifdef E1K_USE_RX_TIMERS
6635 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
6636 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
6637#endif /* E1K_USE_RX_TIMERS */
6638 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
6639 /* 3) Did I forget anything? */
6640 E1kLog(("%s Locked\n", pThis->szPrf));
6641 return VINF_SUCCESS;
6642#endif
6643}
6644
6645/**
6646 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6647 */
6648static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6649{
6650 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6651
6652 e1kSaveConfig(pThis, pSSM);
6653 pThis->eeprom.save(pSSM);
6654 e1kDumpState(pThis);
6655 SSMR3PutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
6656 SSMR3PutBool(pSSM, pThis->fIntRaised);
6657 Phy::saveState(pSSM, &pThis->phy);
6658 SSMR3PutU32(pSSM, pThis->uSelectedReg);
6659 SSMR3PutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
6660 SSMR3PutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6661 SSMR3PutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
6662 SSMR3PutU64(pSSM, pThis->u64AckedAt);
6663 SSMR3PutU16(pSSM, pThis->u16RxBSize);
6664 //SSMR3PutBool(pSSM, pThis->fDelayInts);
6665 //SSMR3PutBool(pSSM, pThis->fIntMaskUsed);
6666 SSMR3PutU16(pSSM, pThis->u16TxPktLen);
6667/** @todo State wrt the TSE buffer is incomplete, so there is little point in
6668 * saving this actually. */
6669 SSMR3PutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
6670 SSMR3PutBool(pSSM, pThis->fIPcsum);
6671 SSMR3PutBool(pSSM, pThis->fTCPcsum);
6672 SSMR3PutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6673 SSMR3PutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6674 SSMR3PutBool(pSSM, pThis->fVTag);
6675 SSMR3PutU16(pSSM, pThis->u16VTagTCI);
6676#ifdef E1K_WITH_TXD_CACHE
6677#if 0
6678 SSMR3PutU8(pSSM, pThis->nTxDFetched);
6679 SSMR3PutMem(pSSM, pThis->aTxDescriptors,
6680 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6681#else
6682 /*
6683 * There is no point in storing TX descriptor cache entries as we can simply
6684 * fetch them again. Moreover, normally the cache is always empty when we
6685 * save the state. Store zero entries for compatibility.
6686 */
6687 SSMR3PutU8(pSSM, 0);
6688#endif
6689#endif /* E1K_WITH_TXD_CACHE */
6690/** @todo GSO requires some more state here. */
6691 E1kLog(("%s State has been saved\n", pThis->szPrf));
6692 return VINF_SUCCESS;
6693}
6694
6695#if 0
6696/**
6697 * @callback_method_impl{FNSSMDEVSAVEDONE}
6698 */
6699static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6700{
6701 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6702
6703 /* If VM is being powered off unlocking will result in assertions in PGM */
6704 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6705 pThis->fLocked = false;
6706 else
6707 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
6708 E1kLog(("%s Unlocked\n", pThis->szPrf));
6709 return VINF_SUCCESS;
6710}
6711#endif
6712
6713/**
6714 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6715 */
6716static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6717{
6718 RT_NOREF(pSSM);
6719 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6720
6721 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6722 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6723 return rc;
6724 e1kCsLeave(pThis);
6725 return VINF_SUCCESS;
6726}
6727
6728/**
6729 * @callback_method_impl{FNSSMDEVLOADEXEC}
6730 */
6731static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6732{
6733 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6734 int rc;
6735
6736 if ( uVersion != E1K_SAVEDSTATE_VERSION
6737#ifdef E1K_WITH_TXD_CACHE
6738 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6739#endif /* E1K_WITH_TXD_CACHE */
6740 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6741 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6742 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6743
6744 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6745 || uPass != SSM_PASS_FINAL)
6746 {
6747 /* config checks */
6748 RTMAC macConfigured;
6749 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6750 AssertRCReturn(rc, rc);
6751 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
6752 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6753 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
6754
6755 E1KCHIP eChip;
6756 rc = SSMR3GetU32(pSSM, &eChip);
6757 AssertRCReturn(rc, rc);
6758 if (eChip != pThis->eChip)
6759 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
6760 }
6761
6762 if (uPass == SSM_PASS_FINAL)
6763 {
6764 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6765 {
6766 rc = pThis->eeprom.load(pSSM);
6767 AssertRCReturn(rc, rc);
6768 }
6769 /* the state */
6770 SSMR3GetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
6771 SSMR3GetBool(pSSM, &pThis->fIntRaised);
6772 /** @todo PHY could be made a separate device with its own versioning */
6773 Phy::loadState(pSSM, &pThis->phy);
6774 SSMR3GetU32(pSSM, &pThis->uSelectedReg);
6775 SSMR3GetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
6776 SSMR3GetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6777 SSMR3GetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
6778 SSMR3GetU64(pSSM, &pThis->u64AckedAt);
6779 SSMR3GetU16(pSSM, &pThis->u16RxBSize);
6780 //SSMR3GetBool(pSSM, pThis->fDelayInts);
6781 //SSMR3GetBool(pSSM, pThis->fIntMaskUsed);
6782 SSMR3GetU16(pSSM, &pThis->u16TxPktLen);
6783 SSMR3GetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
6784 SSMR3GetBool(pSSM, &pThis->fIPcsum);
6785 SSMR3GetBool(pSSM, &pThis->fTCPcsum);
6786 SSMR3GetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6787 rc = SSMR3GetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6788 AssertRCReturn(rc, rc);
6789 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6790 {
6791 SSMR3GetBool(pSSM, &pThis->fVTag);
6792 rc = SSMR3GetU16(pSSM, &pThis->u16VTagTCI);
6793 AssertRCReturn(rc, rc);
6794 }
6795 else
6796 {
6797 pThis->fVTag = false;
6798 pThis->u16VTagTCI = 0;
6799 }
6800#ifdef E1K_WITH_TXD_CACHE
6801 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
6802 {
6803 rc = SSMR3GetU8(pSSM, &pThis->nTxDFetched);
6804 AssertRCReturn(rc, rc);
6805 if (pThis->nTxDFetched)
6806 SSMR3GetMem(pSSM, pThis->aTxDescriptors,
6807 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6808 }
6809 else
6810 pThis->nTxDFetched = 0;
6811 /*
6812 * @todo Perhaps we should not store the TXD cache as the entries can be
6813 * simply fetched again from the guest's memory. Or can't they?
6814 */
6815#endif /* E1K_WITH_TXD_CACHE */
6816#ifdef E1K_WITH_RXD_CACHE
6817 /*
6818 * There is no point in storing the RX descriptor cache in the saved
6819 * state, we just need to make sure it is empty.
6820 */
6821 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
6822#endif /* E1K_WITH_RXD_CACHE */
6823 /* derived state */
6824 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
6825
6826 E1kLog(("%s State has been restored\n", pThis->szPrf));
6827 e1kDumpState(pThis);
6828 }
6829 return VINF_SUCCESS;
6830}
6831
6832/**
6833 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
6834 */
6835static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6836{
6837 RT_NOREF(pSSM);
6838 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6839
6840 /* Update promiscuous mode */
6841 if (pThis->pDrvR3)
6842 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3,
6843 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
6844
6845 /*
6846 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
6847 * passed to us. We go through all this stuff if the link was up and we
6848 * weren't teleported.
6849 */
6850 if ( (STATUS & STATUS_LU)
6851 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
6852 && pThis->cMsLinkUpDelay)
6853 {
6854 e1kR3LinkDownTemp(pThis);
6855 }
6856 return VINF_SUCCESS;
6857}
6858
6859
6860
6861/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
6862
6863/**
6864 * @callback_method_impl{FNRTSTRFORMATTYPE}
6865 */
6866static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
6867 void *pvArgOutput,
6868 const char *pszType,
6869 void const *pvValue,
6870 int cchWidth,
6871 int cchPrecision,
6872 unsigned fFlags,
6873 void *pvUser)
6874{
6875 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
6876 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
6877 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
6878 if (!pDesc)
6879 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
6880
6881 size_t cbPrintf = 0;
6882 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
6883 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
6884 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
6885 pDesc->status.fPIF ? "PIF" : "pif",
6886 pDesc->status.fIPCS ? "IPCS" : "ipcs",
6887 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
6888 pDesc->status.fVP ? "VP" : "vp",
6889 pDesc->status.fIXSM ? "IXSM" : "ixsm",
6890 pDesc->status.fEOP ? "EOP" : "eop",
6891 pDesc->status.fDD ? "DD" : "dd",
6892 pDesc->status.fRXE ? "RXE" : "rxe",
6893 pDesc->status.fIPE ? "IPE" : "ipe",
6894 pDesc->status.fTCPE ? "TCPE" : "tcpe",
6895 pDesc->status.fCE ? "CE" : "ce",
6896 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
6897 E1K_SPEC_VLAN(pDesc->status.u16Special),
6898 E1K_SPEC_PRI(pDesc->status.u16Special));
6899 return cbPrintf;
6900}
6901
6902/**
6903 * @callback_method_impl{FNRTSTRFORMATTYPE}
6904 */
6905static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
6906 void *pvArgOutput,
6907 const char *pszType,
6908 void const *pvValue,
6909 int cchWidth,
6910 int cchPrecision,
6911 unsigned fFlags,
6912 void *pvUser)
6913{
6914 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
6915 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
6916 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
6917 if (!pDesc)
6918 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
6919
6920 size_t cbPrintf = 0;
6921 switch (e1kGetDescType(pDesc))
6922 {
6923 case E1K_DTYP_CONTEXT:
6924 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
6925 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
6926 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
6927 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
6928 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
6929 pDesc->context.dw2.fIDE ? " IDE":"",
6930 pDesc->context.dw2.fRS ? " RS" :"",
6931 pDesc->context.dw2.fTSE ? " TSE":"",
6932 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
6933 pDesc->context.dw2.fTCP ? "TCP":"UDP",
6934 pDesc->context.dw2.u20PAYLEN,
6935 pDesc->context.dw3.u8HDRLEN,
6936 pDesc->context.dw3.u16MSS,
6937 pDesc->context.dw3.fDD?"DD":"");
6938 break;
6939 case E1K_DTYP_DATA:
6940 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
6941 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
6942 pDesc->data.u64BufAddr,
6943 pDesc->data.cmd.u20DTALEN,
6944 pDesc->data.cmd.fIDE ? " IDE" :"",
6945 pDesc->data.cmd.fVLE ? " VLE" :"",
6946 pDesc->data.cmd.fRPS ? " RPS" :"",
6947 pDesc->data.cmd.fRS ? " RS" :"",
6948 pDesc->data.cmd.fTSE ? " TSE" :"",
6949 pDesc->data.cmd.fIFCS? " IFCS":"",
6950 pDesc->data.cmd.fEOP ? " EOP" :"",
6951 pDesc->data.dw3.fDD ? " DD" :"",
6952 pDesc->data.dw3.fEC ? " EC" :"",
6953 pDesc->data.dw3.fLC ? " LC" :"",
6954 pDesc->data.dw3.fTXSM? " TXSM":"",
6955 pDesc->data.dw3.fIXSM? " IXSM":"",
6956 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
6957 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
6958 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
6959 break;
6960 case E1K_DTYP_LEGACY:
6961 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
6962 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
6963 pDesc->data.u64BufAddr,
6964 pDesc->legacy.cmd.u16Length,
6965 pDesc->legacy.cmd.fIDE ? " IDE" :"",
6966 pDesc->legacy.cmd.fVLE ? " VLE" :"",
6967 pDesc->legacy.cmd.fRPS ? " RPS" :"",
6968 pDesc->legacy.cmd.fRS ? " RS" :"",
6969 pDesc->legacy.cmd.fIC ? " IC" :"",
6970 pDesc->legacy.cmd.fIFCS? " IFCS":"",
6971 pDesc->legacy.cmd.fEOP ? " EOP" :"",
6972 pDesc->legacy.dw3.fDD ? " DD" :"",
6973 pDesc->legacy.dw3.fEC ? " EC" :"",
6974 pDesc->legacy.dw3.fLC ? " LC" :"",
6975 pDesc->legacy.cmd.u8CSO,
6976 pDesc->legacy.dw3.u8CSS,
6977 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
6978 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
6979 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
6980 break;
6981 default:
6982 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
6983 break;
6984 }
6985
6986 return cbPrintf;
6987}
6988
6989/** Initializes debug helpers (logging format types). */
6990static int e1kInitDebugHelpers(void)
6991{
6992 int rc = VINF_SUCCESS;
6993 static bool s_fHelpersRegistered = false;
6994 if (!s_fHelpersRegistered)
6995 {
6996 s_fHelpersRegistered = true;
6997 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
6998 AssertRCReturn(rc, rc);
6999 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
7000 AssertRCReturn(rc, rc);
7001 }
7002 return rc;
7003}
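/*
 * Once registered, these custom format types work with any IPRT format API;
 * for instance e1kInfo() below dumps a receive descriptor with
 *     pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", GCPhysDesc, &desc);
 * (the GCPhysDesc name is just for illustration, the real call sites pass
 * e1kDescAddr(RDBAH, RDBAL, i)).
 */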
7004
7005/**
7006 * Status info callback.
7007 *
7008 * @param pDevIns The device instance.
7009 * @param pHlp The output helpers.
7010 * @param pszArgs The arguments.
7011 */
7012static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7013{
7014 RT_NOREF(pszArgs);
7015 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7016 unsigned i;
7017 // bool fRcvRing = false;
7018 // bool fXmtRing = false;
7019
7020 /*
7021 * Parse args.
7022 if (pszArgs)
7023 {
7024 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7025 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7026 }
7027 */
7028
7029 /*
7030 * Show info.
7031 */
7032 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7033 pDevIns->iInstance, pThis->IOPortBase, pThis->addrMMReg,
7034 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7035 pThis->fRCEnabled ? " GC" : "", pThis->fR0Enabled ? " R0" : "");
7036
7037 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7038
7039 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7040 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7041
7042 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7043 {
7044 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7045 if (ra->ctl & RA_CTL_AV)
7046 {
7047 const char *pcszTmp;
7048 switch (ra->ctl & RA_CTL_AS)
7049 {
7050 case 0: pcszTmp = "DST"; break;
7051 case 1: pcszTmp = "SRC"; break;
7052 default: pcszTmp = "reserved";
7053 }
7054 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7055 }
7056 }
7057 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7058 uint32_t rdh = RDH;
7059 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7060 for (i = 0; i < cDescs; ++i)
7061 {
7062 E1KRXDESC desc;
7063 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7064 &desc, sizeof(desc));
7065 if (i == rdh)
7066 pHlp->pfnPrintf(pHlp, ">>> ");
7067 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7068 }
7069#ifdef E1K_WITH_RXD_CACHE
7070 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7071 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
7072 if (rdh > pThis->iRxDCurrent)
7073 rdh -= pThis->iRxDCurrent;
7074 else
7075 rdh = cDescs + rdh - pThis->iRxDCurrent;
7076 for (i = 0; i < pThis->nRxDFetched; ++i)
7077 {
7078 if (i == pThis->iRxDCurrent)
7079 pHlp->pfnPrintf(pHlp, ">>> ");
7080 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7081 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7082 &pThis->aRxDescriptors[i]);
7083 }
7084#endif /* E1K_WITH_RXD_CACHE */
7085
7086 cDescs = TDLEN / sizeof(E1KTXDESC);
7087 uint32_t tdh = TDH;
7088 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7089 for (i = 0; i < cDescs; ++i)
7090 {
7091 E1KTXDESC desc;
7092 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7093 &desc, sizeof(desc));
7094 if (i == tdh)
7095 pHlp->pfnPrintf(pHlp, ">>> ");
7096 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7097 }
7098#ifdef E1K_WITH_TXD_CACHE
7099 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7100 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7101 if (tdh > pThis->iTxDCurrent)
7102 tdh -= pThis->iTxDCurrent;
7103 else
7104 tdh = cDescs + tdh - pThis->iTxDCurrent;
7105 for (i = 0; i < pThis->nTxDFetched; ++i)
7106 {
7107 if (i == pThis->iTxDCurrent)
7108 pHlp->pfnPrintf(pHlp, ">>> ");
7109 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7110 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7111 &pThis->aTxDescriptors[i]);
7112 }
7113#endif /* E1K_WITH_TXD_CACHE */
7114
7115
7116#ifdef E1K_INT_STATS
7117 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7118 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7119 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7120 pHlp->pfnPrintf(pHlp, "Interrupts delayed: %d\n", pThis->uStatIntDly);
7121 pHlp->pfnPrintf(pHlp, "Disabled delayed: %d\n", pThis->uStatDisDly);
7122 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7123 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7124 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7125 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7126 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7127 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7128 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7129 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7130 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7131 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7132 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7133 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7134 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7135 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pThis->uStatTxDelayExp);
7136 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7137 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7138 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7139 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7140 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7141 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7142 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7143 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7144 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7145 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7146 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7147 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7148 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7149 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7150 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7151 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7152 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7153 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7154 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7155#endif /* E1K_INT_STATS */
7156
7157 e1kCsLeave(pThis);
7158}
7159
7160
7161
7162/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7163
7164/**
7165 * Detach notification.
7166 *
7167 * One port on the network card has been disconnected from the network.
7168 *
7169 * @param pDevIns The device instance.
7170 * @param iLUN The logical unit which is being detached.
7171 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7172 */
7173static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7174{
7175 RT_NOREF(fFlags);
7176 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7177 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7178
7179 AssertLogRelReturnVoid(iLUN == 0);
7180
7181 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7182
7183 /** @todo r=pritesh still need to check whether I missed
7184 * cleaning something up in this function
7185 */
7186
7187 /*
7188 * Zero some important members.
7189 */
7190 pThis->pDrvBase = NULL;
7191 pThis->pDrvR3 = NULL;
7192 pThis->pDrvR0 = NIL_RTR0PTR;
7193 pThis->pDrvRC = NIL_RTRCPTR;
7194
7195 PDMCritSectLeave(&pThis->cs);
7196}
7197
7198/**
7199 * Attach the Network attachment.
7200 *
7201 * One port on the network card has been connected to a network.
7202 *
7203 * @returns VBox status code.
7204 * @param pDevIns The device instance.
7205 * @param iLUN The logical unit which is being attached.
7206 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7207 *
7208 * @remarks This code path is not used during construction.
7209 */
7210static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7211{
7212 RT_NOREF(fFlags);
7213 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7214 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7215
7216 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7217
7218 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7219
7220 /*
7221 * Attach the driver.
7222 */
7223 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7224 if (RT_SUCCESS(rc))
7225 {
7226 if (rc == VINF_NAT_DNS)
7227 {
7228#ifdef RT_OS_LINUX
7229 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7230 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7231#else
7232 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7233 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7234#endif
7235 }
7236 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7237 AssertMsgStmt(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7238 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7239 if (RT_SUCCESS(rc))
7240 {
7241 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0);
7242 pThis->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
7243
7244 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC);
7245 pThis->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTRCPTR;
7246 }
7247 }
7248 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7249 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7250 {
7251 /* This should never happen because this function is not called
7252 * if there is no driver to attach! */
7253 Log(("%s No attached driver!\n", pThis->szPrf));
7254 }
7255
7256 /*
7257 * Temporarily set the link down if it was up, so that the guest
7258 * will know that we have changed the configuration of the
7259 * network card.
7260 */
7261 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7262 e1kR3LinkDownTemp(pThis);
7263
7264 PDMCritSectLeave(&pThis->cs);
7265 return rc;
7266
7267}
7268
7269/**
7270 * @copydoc FNPDMDEVPOWEROFF
7271 */
7272static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7273{
7274 /* Poke thread waiting for buffer space. */
7275 e1kWakeupReceive(pDevIns);
7276}
7277
7278/**
7279 * @copydoc FNPDMDEVRESET
7280 */
7281static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7282{
7283 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7284#ifdef E1K_TX_DELAY
7285 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
7286#endif /* E1K_TX_DELAY */
7287 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
7288 e1kCancelTimer(pThis, pThis->CTX_SUFF(pLUTimer));
7289 e1kXmitFreeBuf(pThis);
7290 pThis->u16TxPktLen = 0;
7291 pThis->fIPcsum = false;
7292 pThis->fTCPcsum = false;
7293 pThis->fIntMaskUsed = false;
7294 pThis->fDelayInts = false;
7295 pThis->fLocked = false;
7296 pThis->u64AckedAt = 0;
7297 e1kHardReset(pThis);
7298}
7299
7300/**
7301 * @copydoc FNPDMDEVSUSPEND
7302 */
7303static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7304{
7305 /* Poke thread waiting for buffer space. */
7306 e1kWakeupReceive(pDevIns);
7307}
7308
7309/**
7310 * Device relocation callback.
7311 *
7312 * When this callback is called the device instance data, and if the
7313 * device has a GC component, is being relocated, and/or the selectors
7314 * have been changed. The device must use the chance to perform the
7315 * necessary pointer relocations and data updates.
7316 *
7317 * Before the GC code is executed the first time, this function will be
7318 * called with a 0 delta so GC pointer calculations can be done in one place.
7319 *
7320 * @param pDevIns Pointer to the device instance.
7321 * @param offDelta The relocation delta relative to the old location.
7322 *
7323 * @remark A relocation CANNOT fail.
7324 */
7325static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7326{
7327 RT_NOREF(offDelta);
7328 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7329 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7330 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7331 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7332#ifdef E1K_USE_RX_TIMERS
7333 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7334 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7335#endif /* E1K_USE_RX_TIMERS */
7336#ifdef E1K_USE_TX_TIMERS
7337 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7338# ifndef E1K_NO_TAD
7339 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7340# endif /* E1K_NO_TAD */
7341#endif /* E1K_USE_TX_TIMERS */
7342#ifdef E1K_TX_DELAY
7343 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7344#endif /* E1K_TX_DELAY */
7345 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7346 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7347}
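/*
 * Note: every RC-context pointer cached in the instance data must be
 * refreshed in this callback; when a new timer or queue with an RC
 * counterpart is added to the device, a matching TMTimerRCPtr() /
 * PDMQueueRCPtr() assignment has to be added above as well.
 */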
7348
7349/**
7350 * Destruct a device instance.
7351 *
7352 * We need to free non-VM resources only.
7353 *
7354 * @returns VBox status code.
7355 * @param pDevIns The device instance data.
7356 * @thread EMT
7357 */
7358static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7359{
7360 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7361 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7362
7363 e1kDumpState(pThis);
7364 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7365 if (PDMCritSectIsInitialized(&pThis->cs))
7366 {
7367 if (pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
7368 {
7369 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
7370 RTSemEventDestroy(pThis->hEventMoreRxDescAvail);
7371 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7372 }
7373#ifdef E1K_WITH_TX_CS
7374 PDMR3CritSectDelete(&pThis->csTx);
7375#endif /* E1K_WITH_TX_CS */
7376 PDMR3CritSectDelete(&pThis->csRx);
7377 PDMR3CritSectDelete(&pThis->cs);
7378 }
7379 return VINF_SUCCESS;
7380}
7381
7382
7383/**
7384 * Sets the PCI configuration space registers.
7385 * @param pPciDev Pointer to the PCI device structure.
7386 * @param eChip The emulated chip type (selects the PCI vendor/device IDs).
7387 * @thread EMT
7388 */
7389static DECLCALLBACK(void) e1kConfigurePciDev(PPCIDEVICE pPciDev, E1KCHIP eChip)
7390{
7391 Assert(eChip < RT_ELEMENTS(g_aChips));
7392 /* Configure PCI Device, assume 32-bit mode ******************************/
7393 PCIDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7394 PCIDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7395 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7396 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7397
7398 PCIDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7399 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7400 PCIDevSetWord( pPciDev, VBOX_PCI_STATUS,
7401 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7402 /* Stepping A2 */
7403 PCIDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7404 /* Ethernet adapter */
7405 PCIDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7406 PCIDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7407 /* normal single function Ethernet controller */
7408 PCIDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7409 /* Memory Register Base Address */
7410 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7411 /* Memory Flash Base Address */
7412 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7413 /* IO Register Base Address */
7414 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7415 /* Expansion ROM Base Address */
7416 PCIDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7417 /* Capabilities Pointer */
7418 PCIDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7419 /* Interrupt Pin: INTA# */
7420 PCIDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7421 /* Max_Lat/Min_Gnt: very high priority and time slice */
7422 PCIDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7423 PCIDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7424
7425 /* PCI Power Management Registers ****************************************/
7426 /* Capability ID: PCI Power Management Registers */
7427 PCIDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7428 /* Next Item Pointer: PCI-X */
7429 PCIDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7430 /* Power Management Capabilities: PM disabled, DSI */
7431 PCIDevSetWord( pPciDev, 0xDC + 2,
7432 0x0002 | VBOX_PCI_PM_CAP_DSI);
7433 /* Power Management Control / Status Register: PM disabled */
7434 PCIDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7435 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7436 PCIDevSetByte( pPciDev, 0xDC + 6, 0x00);
7437 /* Data Register: PM disabled, always 0 */
7438 PCIDevSetByte( pPciDev, 0xDC + 7, 0x00);
7439
7440 /* PCI-X Configuration Registers *****************************************/
7441 /* Capability ID: PCI-X Configuration Registers */
7442 PCIDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7443#ifdef E1K_WITH_MSI
7444 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7445#else
7446 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7447 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7448#endif
7449 /* PCI-X Command: Enable Relaxed Ordering */
7450 PCIDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7451 /* PCI-X Status: 32-bit, 66MHz*/
7452 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7453 PCIDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7454}
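/*
 * Resulting PCI capability chain (sketch derived from the assignments
 * above):
 *
 *   0x34 (VBOX_PCI_CAPABILITY_LIST)      --> 0xDC
 *   0xDC: Power Management capability    --> next = 0xE4
 *   0xE4: PCI-X capability               --> next = 0x00 (end of list),
 *         or next = 0x80 (MSI capability) when E1K_WITH_MSI is defined.
 */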
7455
7456/**
7457 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7458 */
7459static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7460{
7461 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7462 int rc;
7463 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7464
7465 /*
7466 * Initialize the instance data (state).
7467 * Note! Caller has initialized it to ZERO already.
7468 */
7469 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7470 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7471 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7472 pThis->pDevInsR3 = pDevIns;
7473 pThis->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
7474 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7475 pThis->u16TxPktLen = 0;
7476 pThis->fIPcsum = false;
7477 pThis->fTCPcsum = false;
7478 pThis->fIntMaskUsed = false;
7479 pThis->fDelayInts = false;
7480 pThis->fLocked = false;
7481 pThis->u64AckedAt = 0;
7482 pThis->led.u32Magic = PDMLED_MAGIC;
7483 pThis->u32PktNo = 1;
7484
7485 /* Interfaces */
7486 pThis->IBase.pfnQueryInterface = e1kR3QueryInterface;
7487
7488 pThis->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7489 pThis->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7490 pThis->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7491
7492 pThis->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7493
7494 pThis->INetworkConfig.pfnGetMac = e1kR3GetMac;
7495 pThis->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7496 pThis->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7497
7498 /*
7499 * Internal validations.
7500 */
7501 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7502 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7503 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7504 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7505 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7506 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7507 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7508 VERR_INTERNAL_ERROR_4);
7509
7510 /*
7511 * Validate configuration.
7512 */
7513 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
7514 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
7515 "ItrEnabled\0" "ItrRxEnabled\0"
7516 "EthernetCRC\0" "GSOEnabled\0" "LinkUpDelay\0"))
7517 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
7518 N_("Invalid configuration for E1000 device"));
7519
7520 /** @todo LineSpeed unused! */
7521
7522 pThis->fR0Enabled = true;
7523 pThis->fRCEnabled = true;
7524 pThis->fEthernetCRC = true;
7525 pThis->fGSOEnabled = true;
7526 pThis->fItrEnabled = true;
7527 pThis->fItrRxEnabled = true;
7528
7529 /* Get config params */
7530 rc = CFGMR3QueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7531 if (RT_FAILURE(rc))
7532 return PDMDEV_SET_ERROR(pDevIns, rc,
7533 N_("Configuration error: Failed to get MAC address"));
7534 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7535 if (RT_FAILURE(rc))
7536 return PDMDEV_SET_ERROR(pDevIns, rc,
7537 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7538 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7539 if (RT_FAILURE(rc))
7540 return PDMDEV_SET_ERROR(pDevIns, rc,
7541 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7542 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7543 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pThis->fRCEnabled, true);
7544 if (RT_FAILURE(rc))
7545 return PDMDEV_SET_ERROR(pDevIns, rc,
7546 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
7547
7548 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pThis->fR0Enabled, true);
7549 if (RT_FAILURE(rc))
7550 return PDMDEV_SET_ERROR(pDevIns, rc,
7551 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
7552
7553 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7554 if (RT_FAILURE(rc))
7555 return PDMDEV_SET_ERROR(pDevIns, rc,
7556 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7557
7558 rc = CFGMR3QueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7559 if (RT_FAILURE(rc))
7560 return PDMDEV_SET_ERROR(pDevIns, rc,
7561 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7562
7563 rc = CFGMR3QueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, true);
7564 if (RT_FAILURE(rc))
7565 return PDMDEV_SET_ERROR(pDevIns, rc,
7566 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7567
7568 rc = CFGMR3QueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7569 if (RT_FAILURE(rc))
7570 return PDMDEV_SET_ERROR(pDevIns, rc,
7571 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7572
7573 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 5000); /* ms */
7574 if (RT_FAILURE(rc))
7575 return PDMDEV_SET_ERROR(pDevIns, rc,
7576 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7577 Assert(pThis->cMsLinkUpDelay <= 300000); /* less than 5 minutes */
7578 if (pThis->cMsLinkUpDelay > 5000)
7579 LogRel(("%s WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7580 else if (pThis->cMsLinkUpDelay == 0)
7581 LogRel(("%s WARNING! Link up delay is disabled!\n", pThis->szPrf));
7582
7583 LogRel(("%s Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s R0=%s GC=%s\n", pThis->szPrf,
7584 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7585 pThis->fEthernetCRC ? "on" : "off",
7586 pThis->fGSOEnabled ? "enabled" : "disabled",
7587 pThis->fItrEnabled ? "enabled" : "disabled",
7588 pThis->fItrRxEnabled ? "enabled" : "disabled",
7589 pThis->fR0Enabled ? "enabled" : "disabled",
7590 pThis->fRCEnabled ? "enabled" : "disabled"));
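    /*
     * Informal summary of the configuration values queried above (names as
     * accepted by CFGMR3AreValuesValid earlier in this function):
     *   MAC            bytes - MAC address, 6 bytes (mandatory)
     *   CableConnected bool  - initial cable state (mandatory)
     *   AdapterType    u32   - emulated chip, an E1KCHIP value (mandatory)
     *   GCEnabled      bool  - enable the RC code path (default true)
     *   R0Enabled      bool  - enable the R0 code path (default true)
     *   EthernetCRC    bool  - Ethernet CRC handling (default true)
     *   GSOEnabled     bool  - GSO support (default true)
     *   ItrEnabled     bool  - interrupt throttling (default true)
     *   ItrRxEnabled   bool  - RX interrupt throttling (default true)
     *   LinkUpDelay    u32   - link-up delay in milliseconds (default 5000)
     *   LineSpeed      u32   - accepted but currently unused
     */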
7591
7592 /* Initialize the EEPROM. */
7593 pThis->eeprom.init(pThis->macConfigured);
7594
7595 /* Initialize internal PHY. */
7596 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
7597 Phy::setLinkStatus(&pThis->phy, pThis->fCableConnected);
7598
7599 /* Initialize critical sections. We do our own locking. */
7600 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7601 AssertRCReturn(rc, rc);
7602
7603 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
7604 if (RT_FAILURE(rc))
7605 return rc;
7606 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
7607 if (RT_FAILURE(rc))
7608 return rc;
7609#ifdef E1K_WITH_TX_CS
7610 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
7611 if (RT_FAILURE(rc))
7612 return rc;
7613#endif /* E1K_WITH_TX_CS */
7614
7615 /* Saved state registration. */
7616 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7617 NULL, e1kLiveExec, NULL,
7618 e1kSavePrep, e1kSaveExec, NULL,
7619 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7620 if (RT_FAILURE(rc))
7621 return rc;
7622
7623 /* Set PCI config registers and register ourselves with the PCI bus. */
7624 e1kConfigurePciDev(&pThis->pciDevice, pThis->eChip);
7625 rc = PDMDevHlpPCIRegister(pDevIns, &pThis->pciDevice);
7626 if (RT_FAILURE(rc))
7627 return rc;
7628
7629#ifdef E1K_WITH_MSI
7630 PDMMSIREG MsiReg;
7631 RT_ZERO(MsiReg);
7632 MsiReg.cMsiVectors = 1;
7633 MsiReg.iMsiCapOffset = 0x80;
7634 MsiReg.iMsiNextOffset = 0x0;
7635 MsiReg.fMsi64bit = false;
7636 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
7637 AssertRCReturn(rc, rc);
7638#endif
7639
7640
7641 /* Map our registers to memory space (region 0, see e1kConfigurePciDev). */
7642 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, e1kMap);
7643 if (RT_FAILURE(rc))
7644 return rc;
7645 /* Map our registers to IO space (region 2, see e1kConfigurePciDev). */
7646 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE, PCI_ADDRESS_SPACE_IO, e1kMap);
7647 if (RT_FAILURE(rc))
7648 return rc;
7649
7650 /* Create transmit queue */
7651 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7652 e1kTxQueueConsumer, true, "E1000-Xmit", &pThis->pTxQueueR3);
7653 if (RT_FAILURE(rc))
7654 return rc;
7655 pThis->pTxQueueR0 = PDMQueueR0Ptr(pThis->pTxQueueR3);
7656 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7657
7658 /* Create the RX notifier signaller. */
7659 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7660 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pThis->pCanRxQueueR3);
7661 if (RT_FAILURE(rc))
7662 return rc;
7663 pThis->pCanRxQueueR0 = PDMQueueR0Ptr(pThis->pCanRxQueueR3);
7664 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7665
7666#ifdef E1K_TX_DELAY
7667 /* Create Transmit Delay Timer */
7668 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxDelayTimer, pThis,
7669 TMTIMER_FLAGS_NO_CRIT_SECT,
7670 "E1000 Transmit Delay Timer", &pThis->pTXDTimerR3);
7671 if (RT_FAILURE(rc))
7672 return rc;
7673 pThis->pTXDTimerR0 = TMTimerR0Ptr(pThis->pTXDTimerR3);
7674 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7675 TMR3TimerSetCritSect(pThis->pTXDTimerR3, &pThis->csTx);
7676#endif /* E1K_TX_DELAY */
7677
7678#ifdef E1K_USE_TX_TIMERS
7679 /* Create Transmit Interrupt Delay Timer */
7680 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pThis,
7681 TMTIMER_FLAGS_NO_CRIT_SECT,
7682 "E1000 Transmit Interrupt Delay Timer", &pThis->pTIDTimerR3);
7683 if (RT_FAILURE(rc))
7684 return rc;
7685 pThis->pTIDTimerR0 = TMTimerR0Ptr(pThis->pTIDTimerR3);
7686 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7687
7688# ifndef E1K_NO_TAD
7689 /* Create Transmit Absolute Delay Timer */
7690 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pThis,
7691 TMTIMER_FLAGS_NO_CRIT_SECT,
7692 "E1000 Transmit Absolute Delay Timer", &pThis->pTADTimerR3);
7693 if (RT_FAILURE(rc))
7694 return rc;
7695 pThis->pTADTimerR0 = TMTimerR0Ptr(pThis->pTADTimerR3);
7696 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7697# endif /* E1K_NO_TAD */
7698#endif /* E1K_USE_TX_TIMERS */
7699
7700#ifdef E1K_USE_RX_TIMERS
7701 /* Create Receive Interrupt Delay Timer */
7702 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pThis,
7703 TMTIMER_FLAGS_NO_CRIT_SECT,
7704 "E1000 Receive Interrupt Delay Timer", &pThis->pRIDTimerR3);
7705 if (RT_FAILURE(rc))
7706 return rc;
7707 pThis->pRIDTimerR0 = TMTimerR0Ptr(pThis->pRIDTimerR3);
7708 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7709
7710 /* Create Receive Absolute Delay Timer */
7711 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pThis,
7712 TMTIMER_FLAGS_NO_CRIT_SECT,
7713 "E1000 Receive Absolute Delay Timer", &pThis->pRADTimerR3);
7714 if (RT_FAILURE(rc))
7715 return rc;
7716 pThis->pRADTimerR0 = TMTimerR0Ptr(pThis->pRADTimerR3);
7717 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7718#endif /* E1K_USE_RX_TIMERS */
7719
7720 /* Create Late Interrupt Timer */
7721 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pThis,
7722 TMTIMER_FLAGS_NO_CRIT_SECT,
7723 "E1000 Late Interrupt Timer", &pThis->pIntTimerR3);
7724 if (RT_FAILURE(rc))
7725 return rc;
7726 pThis->pIntTimerR0 = TMTimerR0Ptr(pThis->pIntTimerR3);
7727 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7728
7729 /* Create Link Up Timer */
7730 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pThis,
7731 TMTIMER_FLAGS_NO_CRIT_SECT,
7732 "E1000 Link Up Timer", &pThis->pLUTimerR3);
7733 if (RT_FAILURE(rc))
7734 return rc;
7735 pThis->pLUTimerR0 = TMTimerR0Ptr(pThis->pLUTimerR3);
7736 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7737
7738 /* Register the info item */
7739 char szTmp[20];
7740 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7741 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
7742
7743 /* Status driver */
7744 PPDMIBASE pBase;
7745 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThis->IBase, &pBase, "Status Port");
7746 if (RT_FAILURE(rc))
7747 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7748 pThis->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7749
7750 /* Network driver */
7751 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7752 if (RT_SUCCESS(rc))
7753 {
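        /* VINF_NAT_DNS is an informational success status: the driver attached,
         * but no DNS server could be determined for NAT networking, so raise a
         * runtime warning for the user. */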
7754 if (rc == VINF_NAT_DNS)
7755 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7756 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7757 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7758 AssertMsgReturn(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
7759
7760 pThis->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7761 pThis->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7762 }
7763 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7764 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7765 {
7766 /* No error! */
7767 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
7768 }
7769 else
7770 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
7771
7772 rc = RTSemEventCreate(&pThis->hEventMoreRxDescAvail);
7773 if (RT_FAILURE(rc))
7774 return rc;
7775
7776 rc = e1kInitDebugHelpers();
7777 if (RT_FAILURE(rc))
7778 return rc;
7779
7780 e1kHardReset(pThis);
7781
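    /* The byte counters are registered twice so they appear both under the
     * public "/Public/Net/" tree and under this device's own "/Devices/E1k%d/"
     * subtree. */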
7782 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Public/Net/E1k%u/BytesReceived", iInstance);
7783 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Public/Net/E1k%u/BytesTransmitted", iInstance);
7784
7785 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
7786 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
7787
7788#if defined(VBOX_WITH_STATISTICS)
7789 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
7790 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
7791 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
7792 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
7793 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
7794 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
7795 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
7796 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
7797 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
7798 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
7799 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
7800 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
7801 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
7802 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
7803 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
7804 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
7805 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
7806 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
7807 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
7808 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
7809 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
7810 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
7811 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
7812 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
7813
7814 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContexNormal", iInstance);
7815 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
7816 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
7817 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
7818 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
7819 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
7820 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
7821 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
7822 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
7823 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
7824 {
7825 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7826 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Reads", iInstance, g_aE1kRegMap[iReg].abbrev);
7827 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7828 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Writes", iInstance, g_aE1kRegMap[iReg].abbrev);
7829 }
7830#endif /* VBOX_WITH_STATISTICS */
7831
7832#ifdef E1K_INT_STATS
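    /* Internal debugging counters, compiled in only when E1K_INT_STATS is
     * defined; they are exported through STAM as raw values. */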
7833 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
7834 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
7835 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
7836 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
7837 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
7838 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntDly", "/Devices/E1k%d/uStatIntDly", iInstance);
7839 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
7840 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
7841 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDisDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDisDly", "/Devices/E1k%d/uStatDisDly", iInstance);
7842 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
7843 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
7844 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
7845 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
7846 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
7847 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
7848 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
7849 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
7850 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
7851 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
7852 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
7853 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
7854 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
7855 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
7856 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
7857 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
7858 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
7859 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
7860 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
7861 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
7862 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
7863 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
7864 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
7865 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
7866 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
7867 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
7868 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
7869 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
7870 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
7871 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
7872 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
7873 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
7874#endif /* E1K_INT_STATS */
7875
7876 return VINF_SUCCESS;
7877}
7878
7879/**
7880 * The device registration structure.
7881 */
7882const PDMDEVREG g_DeviceE1000 =
7883{
7884 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
7885 PDM_DEVREG_VERSION,
7886 /* Device name. */
7887 "e1000",
7888 /* Name of guest context module (no path).
7889 * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
7890 "VBoxDDRC.rc",
7891 /* Name of ring-0 module (no path).
7892 * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
7893 "VBoxDDR0.r0",
7894 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
7895 * remain unchanged from registration till VM destruction. */
7896 "Intel PRO/1000 MT Desktop Ethernet.\n",
7897
7898 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
7899 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
7900 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
7901 PDM_DEVREG_CLASS_NETWORK,
7902 /* Maximum number of instances (per VM). */
7903 ~0U,
7904 /* Size of the instance data. */
7905 sizeof(E1KSTATE),
7906
7907 /* pfnConstruct */
7908 e1kR3Construct,
7909 /* pfnDestruct */
7910 e1kR3Destruct,
7911 /* pfnRelocate */
7912 e1kR3Relocate,
7913 /* pfnMemSetup */
7914 NULL,
7915 /* pfnPowerOn */
7916 NULL,
7917 /* pfnReset */
7918 e1kR3Reset,
7919 /* pfnSuspend */
7920 e1kR3Suspend,
7921 /* pfnResume */
7922 NULL,
7923 /* pfnAttach */
7924 e1kR3Attach,
7925 /* pfnDetach */
7926 e1kR3Detach,
7927 /* pfnQueryInterface */
7928 NULL,
7929 /* pfnInitComplete */
7930 NULL,
7931 /* pfnPowerOff */
7932 e1kR3PowerOff,
7933 /* pfnSoftReset */
7934 NULL,
7935
7936 /* u32VersionEnd */
7937 PDM_DEVREG_VERSION
7938};
7939
7940#endif /* IN_RING3 */
7941#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */