xref: /openbmc/qemu/hw/net/e1000.c (revision acb0ef58)
1 /*
2  * QEMU e1000 emulation
3  *
4  * Software developer's manual:
5  * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
6  *
7  * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
8  * Copyright (c) 2008 Qumranet
9  * Based on work done by:
10  * Copyright (c) 2007 Dan Aloni
11  * Copyright (c) 2004 Antony T Curtis
12  *
13  * This library is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2 of the License, or (at your option) any later version.
17  *
18  * This library is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
25  */
26 
27 
28 #include "hw/hw.h"
29 #include "hw/pci/pci.h"
30 #include "net/net.h"
31 #include "net/checksum.h"
32 #include "hw/loader.h"
33 #include "sysemu/sysemu.h"
34 #include "sysemu/dma.h"
35 #include "qemu/iov.h"
36 
37 #include "e1000_regs.h"
38 
#define E1000_DEBUG

#ifdef E1000_DEBUG
/* Debug-output categories; each gets one bit in debugflags below. */
enum {
    DEBUG_GENERAL,	DEBUG_IO,	DEBUG_MMIO,	DEBUG_INTERRUPT,
    DEBUG_RX,		DEBUG_TX,	DEBUG_MDIC,	DEBUG_EEPROM,
    DEBUG_UNKNOWN,	DEBUG_TXSUM,	DEBUG_TXERR,	DEBUG_RXERR,
    DEBUG_RXFILTER,     DEBUG_PHY,      DEBUG_NOTYET,
};
#define DBGBIT(x)	(1<<DEBUG_##x)
/* Categories enabled by default: TX errors and general messages. */
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

#define	DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define	DBGOUT(what, fmt, ...) do {} while (0)
#endif
58 
#define IOPORT_SIZE       0x40
#define PNPMMIO_SIZE      0x20000
#define MIN_BUF_SIZE      60 /* Min. octets in an ethernet frame sans FCS */

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* this is the size past which hardware will drop packets when setting LPE=1 */
#define MAXIMUM_ETHERNET_LPE_SIZE 16384

/* Ethernet header (14 octets) plus one 802.1Q VLAN tag (4 octets) */
#define MAXIMUM_ETHERNET_HDR_LEN (14+4)
69 
70 /*
71  * HW models:
72  *  E1000_DEV_ID_82540EM works with Windows, Linux, and OS X <= 10.8
73  *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
74  *  E1000_DEV_ID_82545EM_COPPER works with Linux and OS X >= 10.6
75  *  Others never tested
76  */
77 
typedef struct E1000State_st {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    NICState *nic;
    NICConf conf;
    MemoryRegion mmio;
    MemoryRegion io;

    uint32_t mac_reg[0x8000];   /* MAC registers, indexed by byte offset >> 2 */
    uint16_t phy_reg[0x20];     /* PHY (MII) registers */
    uint16_t eeprom_data[64];   /* EEPROM contents, in 16-bit words */

    uint32_t rxbuf_size;        /* RX buffer size decoded from RCTL */
    uint32_t rxbuf_min_shift;
    /* Transmit state accumulated across descriptors until EOP is seen. */
    struct e1000_tx {
        unsigned char header[256];  /* saved TSO packet header */
        unsigned char vlan_header[4];
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;              /* bytes accumulated in data[] */
        unsigned char sum_needed;
        unsigned char vlan_needed;
        uint8_t ipcss;              /* IP checksum start */
        uint8_t ipcso;              /* IP checksum offset */
        uint16_t ipcse;             /* IP checksum end */
        uint8_t tucss;              /* TCP/UDP checksum start */
        uint8_t tucso;              /* TCP/UDP checksum offset */
        uint16_t tucse;             /* TCP/UDP checksum end */
        uint8_t hdr_len;            /* TSO header length */
        uint16_t mss;               /* TSO maximum segment size */
        uint32_t paylen;
        uint16_t tso_frames;        /* segments emitted for the current TSO packet */
        char tse;                   /* TSE bit from the context descriptor */
        int8_t ip;
        int8_t tcp;
        char cptse;     // current packet tse bit
    } tx;

    /* Bit-bang state of the Microwire EEPROM interface behind EECD. */
    struct {
        uint32_t val_in;	// shifted in from guest driver
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;

    QEMUTimer *autoneg_timer;

    QEMUTimer *mit_timer;      /* Mitigation timer. */
    bool mit_timer_on;         /* Mitigation timer is running. */
    bool mit_irq_level;        /* Tracks interrupt pin level. */
    uint32_t mit_ide;          /* Tracks E1000_TXD_CMD_IDE bit. */

/* Compatibility flags for migration to/from qemu 1.3.0 and older */
#define E1000_FLAG_AUTONEG_BIT 0
#define E1000_FLAG_MIT_BIT 1
#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
    uint32_t compat_flags;
} E1000State;
141 
/* Per-model class data: phy_id2 is loaded into PHY_ID2 by e1000_reset(). */
typedef struct E1000BaseClass {
    PCIDeviceClass parent_class;
    uint16_t phy_id2;
} E1000BaseClass;

#define TYPE_E1000_BASE "e1000-base"

#define E1000(obj) \
    OBJECT_CHECK(E1000State, (obj), TYPE_E1000_BASE)

#define E1000_DEVICE_CLASS(klass) \
     OBJECT_CLASS_CHECK(E1000BaseClass, (klass), TYPE_E1000_BASE)
#define E1000_DEVICE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(E1000BaseClass, (obj), TYPE_E1000_BASE)
156 
/* defreg(x) maps register byte offset E1000_x to its mac_reg[] word index. */
#define	defreg(x)	x = (E1000_##x>>2)
enum {
    defreg(CTRL),	defreg(EECD),	defreg(EERD),	defreg(GPRC),
    defreg(GPTC),	defreg(ICR),	defreg(ICS),	defreg(IMC),
    defreg(IMS),	defreg(LEDCTL),	defreg(MANC),	defreg(MDIC),
    defreg(MPC),	defreg(PBA),	defreg(RCTL),	defreg(RDBAH),
    defreg(RDBAL),	defreg(RDH),	defreg(RDLEN),	defreg(RDT),
    defreg(STATUS),	defreg(SWSM),	defreg(TCTL),	defreg(TDBAH),
    defreg(TDBAL),	defreg(TDH),	defreg(TDLEN),	defreg(TDT),
    defreg(TORH),	defreg(TORL),	defreg(TOTH),	defreg(TOTL),
    defreg(TPR),	defreg(TPT),	defreg(TXDCTL),	defreg(WUFC),
    defreg(RA),		defreg(MTA),	defreg(CRCERRS),defreg(VFTA),
    defreg(VET),        defreg(RDTR),   defreg(RADV),   defreg(TADV),
    defreg(ITR),
};
172 
173 static void
174 e1000_link_down(E1000State *s)
175 {
176     s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
177     s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
178 }
179 
180 static void
181 e1000_link_up(E1000State *s)
182 {
183     s->mac_reg[STATUS] |= E1000_STATUS_LU;
184     s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
185 }
186 
187 static void
188 set_phy_ctrl(E1000State *s, int index, uint16_t val)
189 {
190     /*
191      * QEMU 1.3 does not support link auto-negotiation emulation, so if we
192      * migrate during auto negotiation, after migration the link will be
193      * down.
194      */
195     if (!(s->compat_flags & E1000_FLAG_AUTONEG)) {
196         return;
197     }
198     if ((val & MII_CR_AUTO_NEG_EN) && (val & MII_CR_RESTART_AUTO_NEG)) {
199         e1000_link_down(s);
200         s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
201         DBGOUT(PHY, "Start link auto negotiation\n");
202         timer_mod(s->autoneg_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
203     }
204 }
205 
206 static void
207 e1000_autoneg_timer(void *opaque)
208 {
209     E1000State *s = opaque;
210     if (!qemu_get_queue(s->nic)->link_down) {
211         e1000_link_up(s);
212     }
213     s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
214     DBGOUT(PHY, "Auto negotiation is completed\n");
215 }
216 
/* Side-effect handlers invoked when the guest writes a PHY register via
 * MDIC, indexed by PHY register number; unlisted registers have none. */
static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [PHY_CTRL] = set_phy_ctrl,
};

/* Table length: only registers below this index can have a write handler. */
enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };
222 
/* Guest access permissions per PHY register, enforced by set_mdic(). */
enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R,	[M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1] = PHY_R,		[M88E1000_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_CTRL] = PHY_RW,	[PHY_1000T_CTRL] = PHY_RW,
    [PHY_LP_ABILITY] = PHY_R,	[PHY_1000T_STATUS] = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW,	[M88E1000_RX_ERR_CNTR] = PHY_R,
    [PHY_ID2] = PHY_R,		[M88E1000_PHY_SPEC_STATUS] = PHY_R
};
232 
/* PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
/* Reset values of the emulated PHY registers (copied in by e1000_reset). */
static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140,
    [PHY_STATUS] = 0x794d, /* link initially up with not completed autoneg */
    [PHY_ID1] = 0x141, /* [PHY_ID2] configured per DevId, from e1000_reset() */
    [PHY_1000T_CTRL] = 0x0e00,			[M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,	[PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,			[PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};
243 
/* Reset values of the MAC registers; all others start at zero (see
 * e1000_reset, which memsets mac_reg before copying this table in). */
static const uint32_t mac_reg_init[] = {
    [PBA] =     0x00100030,
    [LEDCTL] =  0x602,
    [CTRL] =    E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] =  0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
                E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
                E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
                E1000_STATUS_LU,
    [MANC] =    E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
                E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
                E1000_MANC_RMCP_EN,
};
257 
/* Lower *curr to value, treating *curr == 0 as "not set yet"; a zero
 * value proposes nothing and leaves *curr untouched. */
static inline void
mit_update_delay(uint32_t *curr, uint32_t value)
{
    if (value == 0) {
        return;
    }
    if (*curr == 0 || value < *curr) {
        *curr = value;
    }
}
266 
/*
 * Set the interrupt cause registers to val and drive the PCI interrupt
 * pin to match IMS & ICR, possibly deferring a rising edge while the
 * interrupt-mitigation timer window is open.
 */
static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t pending_ints;
    uint32_t mit_delay;

    s->mac_reg[ICR] = val;

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only.  However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    s->mac_reg[ICS] = val;

    pending_ints = (s->mac_reg[IMS] & s->mac_reg[ICR]);
    if (!s->mit_irq_level && pending_ints) {
        /*
         * Here we detect a potential raising edge. We postpone raising the
         * interrupt line if we are inside the mitigation delay window
         * (s->mit_timer_on == 1).
         * We provide a partial implementation of interrupt mitigation,
         * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for
         * RADV and TADV, 256ns units for ITR). RDTR is only used to enable
         * RADV; relative timers based on TIDV and RDTR are not implemented.
         */
        if (s->mit_timer_on) {
            return;
        }
        if (s->compat_flags & E1000_FLAG_MIT) {
            /* Compute the next mitigation delay according to pending
             * interrupts and the current values of RADV (provided
             * RDTR!=0), TADV and ITR.
             * Then rearm the timer.
             */
            mit_delay = 0;
            if (s->mit_ide &&
                    (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
                /* TADV is in 1024ns units; *4 converts to 256ns units */
                mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
            }
            if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
                mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
            }
            mit_update_delay(&mit_delay, s->mac_reg[ITR]);

            /* mit_delay is in 256ns units at this point */
            if (mit_delay) {
                s->mit_timer_on = 1;
                timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                          mit_delay * 256);
            }
            s->mit_ide = 0;
        }
    }

    s->mit_irq_level = (pending_ints != 0);
    pci_set_irq(d, s->mit_irq_level);
}
328 
329 static void
330 e1000_mit_timer(void *opaque)
331 {
332     E1000State *s = opaque;
333 
334     s->mit_timer_on = 0;
335     /* Call set_interrupt_cause to update the irq level (if necessary). */
336     set_interrupt_cause(s, 0, s->mac_reg[ICR]);
337 }
338 
/* Merge new interrupt cause bits (val) into ICR and update the irq line. */
static void
set_ics(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
        s->mac_reg[IMS]);
    set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
}
346 
347 static int
348 rxbufsize(uint32_t v)
349 {
350     v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
351          E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
352          E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
353     switch (v) {
354     case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
355         return 16384;
356     case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
357         return 8192;
358     case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
359         return 4096;
360     case E1000_RCTL_SZ_1024:
361         return 1024;
362     case E1000_RCTL_SZ_512:
363         return 512;
364     case E1000_RCTL_SZ_256:
365         return 256;
366     }
367     return 2048;
368 }
369 
/* Full device reset: stop timers, restore PHY/MAC registers to their
 * documented reset values, and clear all accumulated transmit state. */
static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;
    E1000BaseClass *edc = E1000_DEVICE_GET_CLASS(d);
    uint8_t *macaddr = d->conf.macaddr.a;
    int i;

    timer_del(d->autoneg_timer);
    timer_del(d->mit_timer);
    d->mit_timer_on = 0;
    d->mit_irq_level = 0;
    d->mit_ide = 0;
    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    d->phy_reg[PHY_ID2] = edc->phy_id2;  /* per-model PHY identifier */
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    /* Keep the link state consistent with the backend's current state. */
    if (qemu_get_queue(d->nic)->link_down) {
        e1000_link_down(d);
    }

    /* Some guests expect pre-initialized RAH/RAL (AddrValid flag + MACaddr) */
    d->mac_reg[RA] = 0;
    d->mac_reg[RA + 1] = E1000_RAH_AV;
    for (i = 0; i < 4; i++) {
        d->mac_reg[RA] |= macaddr[i] << (8 * i);
        d->mac_reg[RA + 1] |= (i < 2) ? macaddr[i + 4] << (8 * i) : 0;
    }
    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);
}
403 
404 static void
405 set_ctrl(E1000State *s, int index, uint32_t val)
406 {
407     /* RST is self clearing */
408     s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
409 }
410 
/* RCTL write: cache the decoded buffer size and the minimum-free-buffer
 * threshold shift (RDMTS), then flush packets that may have been queued
 * while reception was disabled. */
static void
set_rx_control(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[RCTL] = val;
    s->rxbuf_size = rxbufsize(val);
    s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
    DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
           s->mac_reg[RCTL]);
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}
421 
/*
 * MDIC write: perform the requested MDIO read or write against the
 * internal PHY (address 1), honouring per-register access permissions
 * (phy_regcap) and write side effects (phyreg_writeops).  The result is
 * latched back into MDIC with READY set (and ERROR on failure), and the
 * MDAC interrupt is raised if the guest asked for it.
 */
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            /* clear the DATA field, then merge in the register value */
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            /* run the register's side-effect handler, if any */
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            }
            s->phy_reg[addr] = data;
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}
455 
/*
 * EECD read: present the EEPROM bit-bang interface to the guest.  While a
 * read command is in progress DO reflects the eeprom_data bit selected by
 * bitnum_out (MSB of each 16-bit word first); otherwise DO reads as 1.
 */
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}
469 
/*
 * EECD write: emulate the Microwire EEPROM serial protocol.  DI bits are
 * shifted in on SK rising edges while CS is asserted; after 9 bits
 * (3-bit opcode + 6-bit word address) a READ opcode arms bitnum_out so
 * that subsequent SK falling edges step through the addressed data word
 * (sampled by get_eecd above).
 */
static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val))			// CS inactive; nothing to do
	return;
    if (E1000_EECD_CS & (val ^ oldval)) {	// CS rise edge; reset state
	s->eecd_state.val_in = 0;
	s->eecd_state.bitnum_in = 0;
	s->eecd_state.bitnum_out = 0;
	s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval)))	// no clock edge
        return;
    if (!(E1000_EECD_SK & val)) {		// falling edge
        s->eecd_state.bitnum_out++;
        return;
    }
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        /* full command received: bits 0-5 = word address, 6-8 = opcode */
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}
503 
/*
 * EERD read: the register-based (non-bit-bang) EEPROM read interface.
 * Once START has been written, return the addressed eeprom_data word in
 * the DATA field with DONE set; out-of-range addresses return DONE with
 * no data.
 */
static uint32_t
flash_eerd_read(E1000State *s, int x)
{
    unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;

    if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
        return (s->mac_reg[EERD]);

    if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
        return (E1000_EEPROM_RW_REG_DONE | r);

    return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
           E1000_EEPROM_RW_REG_DONE | r);
}
518 
519 static void
520 putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
521 {
522     uint32_t sum;
523 
524     if (cse && cse < n)
525         n = cse + 1;
526     if (sloc < n-1) {
527         sum = net_checksum_add(n-css, data+css);
528         stw_be_p(data + sloc, net_checksum_finish(sum));
529     }
530 }
531 
532 static inline int
533 vlan_enabled(E1000State *s)
534 {
535     return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
536 }
537 
538 static inline int
539 vlan_rx_filter_enabled(E1000State *s)
540 {
541     return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
542 }
543 
/* True when the frame's EtherType (offset 12, network byte order) equals
 * the guest-programmed VLAN EtherType held little-endian in VET. */
static inline int
is_vlan_packet(E1000State *s, const uint8_t *buf)
{
    return (be16_to_cpup((uint16_t *)(buf + 12)) ==
                le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
}
550 
551 static inline int
552 is_vlan_txd(uint32_t txd_lower)
553 {
554     return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
555 }
556 
557 /* FCS aka Ethernet CRC-32. We don't get it from backends and can't
558  * fill it in, just pad descriptor length by 4 bytes unless guest
559  * told us to strip it off the packet. */
560 static inline int
561 fcs_len(E1000State *s)
562 {
563     return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
564 }
565 
/* Transmit a finished frame: when PHY loopback is enabled, feed it
 * straight back into our own receive handler; otherwise hand it to the
 * network backend. */
static void
e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
{
    NetClientState *nc = qemu_get_queue(s->nic);
    if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
        nc->info->receive(nc, buf, size);
    } else {
        qemu_send_packet(nc, buf, size);
    }
}
576 
/*
 * Emit one segment of the current transmit packet.  For a TSO segment,
 * first patch the IP total length / identification and the TCP sequence
 * number and flags for this segment; then fill in the requested
 * checksums, optionally re-insert the VLAN tag, send the frame, and
 * update the TX statistics (TPT/GPTC and the 64-bit TOTL/TOTH octet
 * counter).
 */
static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {		// IPv4
            /* total length, and per-segment IP identification */
            stw_be_p(tp->data+css+2, tp->size - css);
            stw_be_p(tp->data+css+4,
                          be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else			// IPv6
            stw_be_p(tp->data+css+4, tp->size - css);
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            /* advance the sequence number by the payload already sent */
            sofar = frames * tp->mss;
            stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;		// PSH, FIN
        } else	// UDP
            stw_be_p(tp->data+css+4, len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            phsum = be16_to_cpup(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            stw_be_p(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        /* shift the MAC addresses down and splice in the 4-byte VLAN tag */
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else
        e1000_send_packet(s, tp->data, tp->size);
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;     /* carry into the high octet counter */
}
632 
/*
 * Consume one transmit descriptor.  A context descriptor only updates the
 * offload state kept in s->tx.  A data or legacy descriptor DMAs its
 * buffer into tp->data — segmenting at hdr_len + mss boundaries when TSO
 * is active — and, once EOP is seen, transmits the accumulated frame and
 * resets the per-packet state.
 */
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
    if (dtype == E1000_TXD_CMD_DEXT) {	// context descriptor
        /* latch the checksum/TSO parameters for subsequent data descriptors */
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        tp->tso_frames = 0;
        if (tp->tucso == 0) {	// this is probably wrong
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        /* remember the tag (EtherType from VET + VID from the descriptor)
         * so xmit_seg can splice it into the outgoing frame */
        tp->vlan_needed = 1;
        stw_be_p(tp->vlan_header,
                      le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
        stw_be_p(tp->vlan_header + 2,
                      le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
        /* TSO: emit a segment every time header + one MSS is accumulated */
        msh = tp->hdr_len + tp->mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;

            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
            pci_dma_read(d, addr, tp->data + tp->size, bytes);
            sz = tp->size + bytes;
            if (sz >= tp->hdr_len && tp->size < tp->hdr_len) {
                /* header fully read for the first time: save a copy */
                memmove(tp->header, tp->data, tp->hdr_len);
            }
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                /* start the next segment with the saved header */
                memmove(tp->data, tp->header, tp->hdr_len);
                tp->size = tp->hdr_len;
            }
        } while (split_size -= bytes);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation error\n");
    } else {
        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
        pci_dma_read(d, addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    /* end of packet: flush anything not already sent by the TSO loop */
    if (!(tp->tse && tp->cptse && tp->size < tp->hdr_len)) {
        xmit_seg(s);
    }
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}
728 
/*
 * If the descriptor requested status reporting (RS or RPS), write the
 * Descriptor Done status back to the descriptor in guest memory.
 * Returns the TXDW interrupt cause bit to accumulate, or 0 when no
 * writeback was requested.
 */
static uint32_t
txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    /* only the status dword is written back, not the whole descriptor */
    pci_dma_write(d, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}
744 
745 static uint64_t tx_desc_base(E1000State *s)
746 {
747     uint64_t bah = s->mac_reg[TDBAH];
748     uint64_t bal = s->mac_reg[TDBAL] & ~0xf;
749 
750     return (bah << 32) + bal;
751 }
752 
/*
 * Walk the TX ring from TDH until it catches up with TDT, processing each
 * descriptor and writing back status where requested, then raise the
 * accumulated TX interrupt causes.  Bails out if TDH wraps back to its
 * starting value, which can only happen with bogus guest TDT/TDLEN
 * programming.
 */
static void
start_xmit(E1000State *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(d, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}
793 
/*
 * Decide whether an incoming frame is accepted: check the VLAN filter
 * table (VFTA) first, then unicast/multicast promiscuous modes,
 * broadcast, exact unicast matches in the RA array, and finally the
 * multicast hash table (MTA).  Returns nonzero to accept the frame.
 */
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        /* VID is at offset 14, right after the VLAN EtherType */
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)			// promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))	// promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    /* exact match against the 16 receive-address (RAL/RAH) register pairs */
    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    /* hash the address per RCTL.MO and look it up in the multicast table */
    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);

    return 0;
}
846 
847 static void
848 e1000_set_link_status(NetClientState *nc)
849 {
850     E1000State *s = qemu_get_nic_opaque(nc);
851     uint32_t old_status = s->mac_reg[STATUS];
852 
853     if (nc->link_down) {
854         e1000_link_down(s);
855     } else {
856         e1000_link_up(s);
857     }
858 
859     if (s->mac_reg[STATUS] != old_status)
860         set_ics(s, 0, E1000_ICR_LSC);
861 }
862 
/*
 * Return true when the RX ring has enough free descriptors to hold
 * total_size bytes.  RDH == RDT is treated as an empty ring (no buffers
 * available); packets no larger than one buffer only need the ring to be
 * non-empty.
 */
static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
{
    int bufs;
    /* Fast-path short packets */
    if (total_size <= s->rxbuf_size) {
        return s->mac_reg[RDH] != s->mac_reg[RDT];
    }
    if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
    } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
        /* head has wrapped past the tail */
        bufs = s->mac_reg[RDLEN] /  sizeof(struct e1000_rx_desc) +
            s->mac_reg[RDT] - s->mac_reg[RDH];
    } else {
        return false;
    }
    return total_size <= bufs * s->rxbuf_size;
}
880 
881 static int
882 e1000_can_receive(NetClientState *nc)
883 {
884     E1000State *s = qemu_get_nic_opaque(nc);
885 
886     return (s->mac_reg[STATUS] & E1000_STATUS_LU) &&
887         (s->mac_reg[RCTL] & E1000_RCTL_EN) && e1000_has_rxbufs(s, 1);
888 }
889 
890 static uint64_t rx_desc_base(E1000State *s)
891 {
892     uint64_t bah = s->mac_reg[RDBAH];
893     uint64_t bal = s->mac_reg[RDBAL] & ~0xf;
894 
895     return (bah << 32) + bal;
896 }
897 
/*
 * Deliver a packet (as a scatter/gather iovec) into the guest's RX
 * descriptor ring via DMA.
 *
 * Returns the packet size when the packet was consumed (including the
 * "silently dropped" cases: oversized or filtered out), or -1 when
 * delivery is currently impossible (link down, receiver disabled, or
 * no ring space) so the net layer keeps the packet queued.
 */
static ssize_t
e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    struct iovec min_iov;
    uint8_t *filter_buf = iov->iov_base; /* contiguous view of the header */
    size_t size = iov_size(iov, iovcnt);
    size_t iov_ofs = 0;
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;

    /* No reception while the link is down... */
    if (!(s->mac_reg[STATUS] & E1000_STATUS_LU)) {
        return -1;
    }

    /* ...or while the receiver is disabled. */
    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN)) {
        return -1;
    }

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        iov_to_buf(iov, iovcnt, 0, min_buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        min_iov.iov_base = filter_buf = min_buf;
        min_iov.iov_len = size = sizeof(min_buf);
        iovcnt = 1;
        iov = &min_iov;
    } else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
        /* This is very unlikely, but may happen. */
        iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
        filter_buf = min_buf;
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if ((size > MAXIMUM_ETHERNET_LPE_SIZE ||
        (size > MAXIMUM_ETHERNET_VLAN_SIZE
        && !(s->mac_reg[RCTL] & E1000_RCTL_LPE)))
        && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) {
        return size;
    }

    /* Unicast/multicast/broadcast filtering; a miss counts as consumed. */
    if (!receive_filter(s, filter_buf, size)) {
        return size;
    }

    if (vlan_enabled(s) && is_vlan_packet(s, filter_buf)) {
        /* Strip the 4-byte 802.1Q tag; the VID goes into desc.special. */
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(filter_buf
                                                                + 14)));
        iov_ofs = 4;
        if (filter_buf == iov->iov_base) {
            /* Slide the DA/SA (12 bytes) over the tag in place. */
            memmove(filter_buf + 4, filter_buf, 12);
        } else {
            /* Headers were copied out; patch them back and skip the tag. */
            iov_from_buf(iov, iovcnt, 4, filter_buf, 12);
            while (iov->iov_len <= iov_ofs) {
                iov_ofs -= iov->iov_len;
                iov++;
            }
        }
        vlan_status = E1000_RXD_STAT_VP;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    total_size = size + fcs_len(s);
    if (!e1000_has_rxbufs(s, total_size)) {
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
    }
    /* Walk descriptors, DMA-ing up to rxbuf_size bytes into each one. */
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(d, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t iov_copy;
                hwaddr ba = le64_to_cpu(desc.buffer_addr);
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                /* Copy from the (possibly multi-element) iovec into the
                 * guest buffer, advancing through iov elements as needed. */
                do {
                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
                    pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy);
                    copy_size -= iov_copy;
                    ba += iov_copy;
                    iov_ofs += iov_copy;
                    if (iov_ofs == iov->iov_len) {
                        iov++;
                        iov_ofs = 0;
                    }
                } while (copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        /* Write the updated descriptor (status/length) back to the guest. */
        pci_dma_write(d, base, &desc, sizeof(desc));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
    } while (desc_offset < total_size);

    /* Update receive statistics counters. */
    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    /* TOR - Total Octets Received:
     * This register includes bytes received in a packet from the <Destination
     * Address> field through the <CRC> field, inclusively.
     */
    n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
    if (n < s->mac_reg[TORL])
        s->mac_reg[TORH]++;    /* carry into the high half on overflow */
    s->mac_reg[TORL] = n;

    /* Raise RXT0; also RXDMT0 if free descriptors dropped below the
     * configured minimum threshold. */
    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}
1052 
1053 static ssize_t
1054 e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
1055 {
1056     const struct iovec iov = {
1057         .iov_base = (uint8_t *)buf,
1058         .iov_len = size
1059     };
1060 
1061     return e1000_receive_iov(nc, &iov, 1);
1062 }
1063 
/* Default MMIO read handler: return the register value unchanged. */
static uint32_t
mac_readreg(E1000State *s, int index)
{
    return s->mac_reg[index];
}
1069 
1070 static uint32_t
1071 mac_icr_read(E1000State *s, int index)
1072 {
1073     uint32_t ret = s->mac_reg[ICR];
1074 
1075     DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
1076     set_interrupt_cause(s, 0, 0);
1077     return ret;
1078 }
1079 
1080 static uint32_t
1081 mac_read_clr4(E1000State *s, int index)
1082 {
1083     uint32_t ret = s->mac_reg[index];
1084 
1085     s->mac_reg[index] = 0;
1086     return ret;
1087 }
1088 
1089 static uint32_t
1090 mac_read_clr8(E1000State *s, int index)
1091 {
1092     uint32_t ret = s->mac_reg[index];
1093 
1094     s->mac_reg[index] = 0;
1095     s->mac_reg[index-1] = 0;
1096     return ret;
1097 }
1098 
1099 static void
1100 mac_writereg(E1000State *s, int index, uint32_t val)
1101 {
1102     uint32_t macaddr[2];
1103 
1104     s->mac_reg[index] = val;
1105 
1106     if (index == RA + 1) {
1107         macaddr[0] = cpu_to_le32(s->mac_reg[RA]);
1108         macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]);
1109         qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr);
1110     }
1111 }
1112 
1113 static void
1114 set_rdt(E1000State *s, int index, uint32_t val)
1115 {
1116     s->mac_reg[index] = val & 0xffff;
1117     if (e1000_has_rxbufs(s, 1)) {
1118         qemu_flush_queued_packets(qemu_get_queue(s->nic));
1119     }
1120 }
1121 
/* Write handler for registers that are only 16 bits wide. */
static void
set_16bit(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
}
1127 
/* Write handler for TDLEN/RDLEN: descriptor ring lengths must be
 * 128-byte aligned, so the low 7 bits are forced to zero. */
static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}
1133 
/* Write handler for TCTL (and TDT, which shares it): store the value,
 * mask TDT to its 16-bit width, then kick the transmitter. */
static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}
1141 
1142 static void
1143 set_icr(E1000State *s, int index, uint32_t val)
1144 {
1145     DBGOUT(INTERRUPT, "set_icr %x\n", val);
1146     set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
1147 }
1148 
1149 static void
1150 set_imc(E1000State *s, int index, uint32_t val)
1151 {
1152     s->mac_reg[IMS] &= ~val;
1153     set_ics(s, 0, 0);
1154 }
1155 
1156 static void
1157 set_ims(E1000State *s, int index, uint32_t val)
1158 {
1159     s->mac_reg[IMS] |= val;
1160     set_ics(s, 0, 0);
1161 }
1162 
/* Dispatch table of MMIO read handlers, indexed by register offset / 4.
 * Unlisted (NULL) entries are treated as unknown registers. */
#define getreg(x)	[x] = mac_readreg
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA),	getreg(RCTL),	getreg(TDH),	getreg(TXDCTL),
    getreg(WUFC),	getreg(TDT),	getreg(CTRL),	getreg(LEDCTL),
    getreg(MANC),	getreg(MDIC),	getreg(SWSM),	getreg(STATUS),
    getreg(TORL),	getreg(TOTL),	getreg(IMS),	getreg(TCTL),
    getreg(RDH),	getreg(RDT),	getreg(VET),	getreg(ICS),
    getreg(TDBAL),	getreg(TDBAH),	getreg(RDBAH),	getreg(RDBAL),
    getreg(TDLEN),      getreg(RDLEN),  getreg(RDTR),   getreg(RADV),
    getreg(TADV),       getreg(ITR),

    /* Registers with read side effects or special read semantics. */
    [TOTH] = mac_read_clr8,	[TORH] = mac_read_clr8,	[GPRC] = mac_read_clr4,
    [GPTC] = mac_read_clr4,	[TPR] = mac_read_clr4,	[TPT] = mac_read_clr4,
    [ICR] = mac_icr_read,	[EECD] = get_eecd,	[EERD] = flash_eerd_read,
    [CRCERRS ... MPC] = &mac_readreg,
    [RA ... RA+31] = &mac_readreg,
    [MTA ... MTA+127] = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
1183 
/* Dispatch table of MMIO write handlers, indexed by register offset / 4.
 * Unlisted (NULL) entries are read-only or unknown registers. */
#define putreg(x)	[x] = mac_writereg
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA),	putreg(EERD),	putreg(SWSM),	putreg(WUFC),
    putreg(TDBAL),	putreg(TDBAH),	putreg(TXDCTL),	putreg(RDBAH),
    putreg(RDBAL),	putreg(LEDCTL), putreg(VET),
    /* Registers whose writes trigger side effects. */
    [TDLEN] = set_dlen,	[RDLEN] = set_dlen,	[TCTL] = set_tctl,
    [TDT] = set_tctl,	[MDIC] = set_mdic,	[ICS] = set_ics,
    [TDH] = set_16bit,	[RDH] = set_16bit,	[RDT] = set_rdt,
    [IMC] = set_imc,	[IMS] = set_ims,	[ICR] = set_icr,
    [EECD] = set_eecd,	[RCTL] = set_rx_control, [CTRL] = set_ctrl,
    [RDTR] = set_16bit, [RADV] = set_16bit,     [TADV] = set_16bit,
    [ITR] = set_16bit,
    [RA ... RA+31] = &mac_writereg,
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};

enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
1202 
1203 static void
1204 e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
1205                  unsigned size)
1206 {
1207     E1000State *s = opaque;
1208     unsigned int index = (addr & 0x1ffff) >> 2;
1209 
1210     if (index < NWRITEOPS && macreg_writeops[index]) {
1211         macreg_writeops[index](s, index, val);
1212     } else if (index < NREADOPS && macreg_readops[index]) {
1213         DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
1214     } else {
1215         DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
1216                index<<2, val);
1217     }
1218 }
1219 
1220 static uint64_t
1221 e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
1222 {
1223     E1000State *s = opaque;
1224     unsigned int index = (addr & 0x1ffff) >> 2;
1225 
1226     if (index < NREADOPS && macreg_readops[index])
1227     {
1228         return macreg_readops[index](s, index);
1229     }
1230     DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
1231     return 0;
1232 }
1233 
/* Memory region ops for the MMIO BAR; all accesses are performed as
 * 32-bit little-endian operations. */
static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
1243 
1244 static uint64_t e1000_io_read(void *opaque, hwaddr addr,
1245                               unsigned size)
1246 {
1247     E1000State *s = opaque;
1248 
1249     (void)s;
1250     return 0;
1251 }
1252 
1253 static void e1000_io_write(void *opaque, hwaddr addr,
1254                            uint64_t val, unsigned size)
1255 {
1256     E1000State *s = opaque;
1257 
1258     (void)s;
1259 }
1260 
/* Memory region ops for the (stub) legacy I/O BAR. */
static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
1266 
/* VMState field test: true only for version 1 of the stream (used to
 * skip a field that older versions carried). */
static bool is_version_1(void *opaque, int version_id)
{
    (void)opaque;

    return version_id == 1;
}
1271 
/* VMState pre-save hook: flush pending mitigation-timer work and make
 * the PHY auto-negotiation state self-describing so that post_load can
 * reconstruct the link status. */
static void e1000_pre_save(void *opaque)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    /* If the mitigation timer is active, emulate a timeout now. */
    if (s->mit_timer_on) {
        e1000_mit_timer(s);
    }

    /* Compat machines without autoneg emulation skip the PHY fixup. */
    if (!(s->compat_flags & E1000_FLAG_AUTONEG)) {
        return;
    }

    /*
     * If link is down and auto-negotiation is ongoing, complete
     * auto-negotiation immediately.  This allows us to look at
     * MII_SR_AUTONEG_COMPLETE to infer link status on load.
     */
    if (nc->link_down &&
        s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
        s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG) {
         s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
    }
}
1297 
/* VMState post-load hook: reset non-migrated runtime state and derive
 * the link status (and a possibly in-flight auto-negotiation) from the
 * migrated registers.  Always returns 0 (success). */
static int e1000_post_load(void *opaque, int version_id)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    /* Without the mitigation compat flag, force all mitigation state off. */
    if (!(s->compat_flags & E1000_FLAG_MIT)) {
        s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] =
            s->mac_reg[TADV] = 0;
        s->mit_irq_level = false;
    }
    s->mit_ide = 0;
    s->mit_timer_on = false;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in mac_reg[STATUS].
     * Alternatively, restart link negotiation if it was in progress. */
    nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;

    if (!(s->compat_flags & E1000_FLAG_AUTONEG)) {
        return 0;
    }

    /* Auto-negotiation was in progress at save time: bring the link up
     * and rearm the autoneg timer to complete it shortly. */
    if (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
        s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG &&
        !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
        nc->link_down = false;
        timer_mod(s->autoneg_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }

    return 0;
}
1329 
1330 static bool e1000_mit_state_needed(void *opaque)
1331 {
1332     E1000State *s = opaque;
1333 
1334     return s->compat_flags & E1000_FLAG_MIT;
1335 }
1336 
/* Optional migration subsection carrying interrupt-mitigation state
 * (sent only when e1000_mit_state_needed() returns true). */
static const VMStateDescription vmstate_e1000_mit_state = {
    .name = "e1000/mit_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mac_reg[RDTR], E1000State),
        VMSTATE_UINT32(mac_reg[RADV], E1000State),
        VMSTATE_UINT32(mac_reg[TADV], E1000State),
        VMSTATE_UINT32(mac_reg[ITR], E1000State),
        VMSTATE_BOOL(mit_irq_level, E1000State),
        VMSTATE_END_OF_LIST()
    }
};
1350 
/* Main migration description for the e1000 device: PCI config, EEPROM
 * bit-bang state, in-flight TX context, PHY/EEPROM contents and the
 * migrated subset of MAC registers. */
static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = e1000_pre_save,
    .post_load = e1000_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base.  */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        /* EEPROM bit-bang interface state. */
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        /* In-progress transmit/TSO context. */
        VMSTATE_UINT8(tx.ipcss, E1000State),
        VMSTATE_UINT8(tx.ipcso, E1000State),
        VMSTATE_UINT16(tx.ipcse, E1000State),
        VMSTATE_UINT8(tx.tucss, E1000State),
        VMSTATE_UINT8(tx.tucso, E1000State),
        VMSTATE_UINT16(tx.tucse, E1000State),
        VMSTATE_UINT32(tx.paylen, E1000State),
        VMSTATE_UINT8(tx.hdr_len, E1000State),
        VMSTATE_UINT16(tx.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(tx.ip, E1000State),
        VMSTATE_INT8(tx.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        /* Individual MAC registers, in alphabetical order. */
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        /* Register arrays: receive addresses and filter tables. */
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_e1000_mit_state,
            .needed = e1000_mit_state_needed,
        }, {
            /* empty */
        }
    }
};
1437 
1438 /*
1439  * EEPROM contents documented in Tables 5-2 and 5-3, pp. 98-102.
1440  * Note: A valid DevId will be inserted during pci_e1000_init().
1441  */
/* Words 0-2 are overwritten with the configured MAC address and words
 * 11/13 with the device ID in pci_e1000_init(); the checksum word is
 * recomputed there as well. */
static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000,      0xffff, 0x0000,      0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, 0 /*DevId*/, 0x8086, 0 /*DevId*/, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048,      0x1000, 0x00d8,      0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b,      0x0984, 0x0000,      0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff,      0x4d01, 0xffff,      0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0x0000,
};
1452 
1453 /* PCI interface */
1454 
/* Create the MMIO and I/O memory regions.  MMIO coalescing is enabled
 * for the whole BAR except the registers in excluded_regs, whose
 * accesses have side effects and must be handled immediately; the loop
 * adds one coalesced range between each pair of excluded registers. */
static void
e1000_mmio_setup(E1000State *d)
{
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE  /* sentinel */
    };

    memory_region_init_io(&d->mmio, OBJECT(d), &e1000_mmio_ops, d,
                          "e1000-mmio", PNPMMIO_SIZE);
    /* Coalesce from the start of the BAR up to the first excluded reg. */
    memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
                                     excluded_regs[i+1] - excluded_regs[i] - 4);
    memory_region_init_io(&d->io, OBJECT(d), &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
}
1472 
1473 static void
1474 e1000_cleanup(NetClientState *nc)
1475 {
1476     E1000State *s = qemu_get_nic_opaque(nc);
1477 
1478     s->nic = NULL;
1479 }
1480 
1481 static void
1482 pci_e1000_uninit(PCIDevice *dev)
1483 {
1484     E1000State *d = E1000(dev);
1485 
1486     timer_del(d->autoneg_timer);
1487     timer_free(d->autoneg_timer);
1488     timer_del(d->mit_timer);
1489     timer_free(d->mit_timer);
1490     memory_region_destroy(&d->mmio);
1491     memory_region_destroy(&d->io);
1492     qemu_del_nic(d->nic);
1493 }
1494 
/* Callbacks the net core uses to drive this NIC. */
static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .receive_iov = e1000_receive_iov,
    .cleanup = e1000_cleanup,
    .link_status_changed = e1000_set_link_status,
};
1504 
1505 static int pci_e1000_init(PCIDevice *pci_dev)
1506 {
1507     DeviceState *dev = DEVICE(pci_dev);
1508     E1000State *d = E1000(pci_dev);
1509     PCIDeviceClass *pdc = PCI_DEVICE_GET_CLASS(pci_dev);
1510     uint8_t *pci_conf;
1511     uint16_t checksum = 0;
1512     int i;
1513     uint8_t *macaddr;
1514 
1515     pci_conf = pci_dev->config;
1516 
1517     /* TODO: RST# value should be 0, PCI spec 6.2.4 */
1518     pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;
1519 
1520     pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
1521 
1522     e1000_mmio_setup(d);
1523 
1524     pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);
1525 
1526     pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);
1527 
1528     memmove(d->eeprom_data, e1000_eeprom_template,
1529         sizeof e1000_eeprom_template);
1530     qemu_macaddr_default_if_unset(&d->conf.macaddr);
1531     macaddr = d->conf.macaddr.a;
1532     for (i = 0; i < 3; i++)
1533         d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
1534     d->eeprom_data[11] = d->eeprom_data[13] = pdc->device_id;
1535     for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
1536         checksum += d->eeprom_data[i];
1537     checksum = (uint16_t) EEPROM_SUM - checksum;
1538     d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;
1539 
1540     d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
1541                           object_get_typename(OBJECT(d)), dev->id, d);
1542 
1543     qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);
1544 
1545     add_boot_device_path(d->conf.bootindex, dev, "/ethernet-phy@0");
1546 
1547     d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d);
1548     d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d);
1549 
1550     return 0;
1551 }
1552 
1553 static void qdev_e1000_reset(DeviceState *dev)
1554 {
1555     E1000State *d = E1000(dev);
1556     e1000_reset(d);
1557 }
1558 
/* User-configurable properties; the two flag bits exist so old machine
 * types can disable newer emulation behavior for migration compat. */
static Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_BIT("autonegotiation", E1000State,
                    compat_flags, E1000_FLAG_AUTONEG_BIT, true),
    DEFINE_PROP_BIT("mitigation", E1000State,
                    compat_flags, E1000_FLAG_MIT_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
1567 
/* Per-variant identification data used to instantiate the concrete
 * e1000 device models (see e1000_devices[]). */
typedef struct E1000Info {
    const char *name;       /* QOM type name */
    uint16_t   device_id;   /* PCI device ID */
    uint8_t    revision;    /* PCI revision ID */
    uint16_t   phy_id2;     /* PHY identifier register 2 value */
} E1000Info;
1574 
1575 static void e1000_class_init(ObjectClass *klass, void *data)
1576 {
1577     DeviceClass *dc = DEVICE_CLASS(klass);
1578     PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1579     E1000BaseClass *e = E1000_DEVICE_CLASS(klass);
1580     const E1000Info *info = data;
1581 
1582     k->init = pci_e1000_init;
1583     k->exit = pci_e1000_uninit;
1584     k->romfile = "efi-e1000.rom";
1585     k->vendor_id = PCI_VENDOR_ID_INTEL;
1586     k->device_id = info->device_id;
1587     k->revision = info->revision;
1588     e->phy_id2 = info->phy_id2;
1589     k->class_id = PCI_CLASS_NETWORK_ETHERNET;
1590     set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
1591     dc->desc = "Intel Gigabit Ethernet";
1592     dc->reset = qdev_e1000_reset;
1593     dc->vmsd = &vmstate_e1000;
1594     dc->props = e1000_properties;
1595 }
1596 
/* Abstract base type all concrete e1000 variants derive from. */
static const TypeInfo e1000_base_info = {
    .name          = TYPE_E1000_BASE,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000State),
    .class_size    = sizeof(E1000BaseClass),
    .abstract      = true,
};
1604 
/* The concrete 8254x variants this file emulates. */
static const E1000Info e1000_devices[] = {
    {
        .name      = "e1000-82540em",
        .device_id = E1000_DEV_ID_82540EM,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
    {
        .name      = "e1000-82544gc",
        .device_id = E1000_DEV_ID_82544GC_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_82544x,
    },
    {
        .name      = "e1000-82545em",
        .device_id = E1000_DEV_ID_82545EM_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
};
1625 
/* "e1000" is an alias for the default 82540EM variant. */
static const TypeInfo e1000_default_info = {
    .name          = "e1000",
    .parent        = "e1000-82540em",
};
1630 
1631 static void e1000_register_types(void)
1632 {
1633     int i;
1634 
1635     type_register_static(&e1000_base_info);
1636     for (i = 0; i < ARRAY_SIZE(e1000_devices); i++) {
1637         const E1000Info *info = &e1000_devices[i];
1638         TypeInfo type_info = {};
1639 
1640         type_info.name = info->name;
1641         type_info.parent = TYPE_E1000_BASE;
1642         type_info.class_data = (void *)info;
1643         type_info.class_init = e1000_class_init;
1644 
1645         type_register(&type_info);
1646     }
1647     type_register_static(&e1000_default_info);
1648 }
1649 
1650 type_init(e1000_register_types)
1651