/*
 * i.MX Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
 *
 * Based on Coldfire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/net/imx_fec.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"
#include "trace.h"

/* For crc32 */
#include <zlib.h>

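/*
 * Process at most this many buffer descriptors per transmit kick, so
 * that a mis-programmed (e.g. fully circular) TX ring cannot keep the
 * descriptor walk looping forever.
 */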
#define IMX_MAX_DESC 1024

static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
{
    static char tmp[20];
    snprintf(tmp, sizeof(tmp), "index %d", index);
    return tmp;
}

static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
        return "FRBR";
    case ENET_FRSR:
        return "FRSR";
    case ENET_MIIGSK_CFGR:
        return "MIIGSK_CFGR";
    case ENET_MIIGSK_ENR:
        return "MIIGSK_ENR";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
        return "RSFL";
    case ENET_RSEM:
        return "RSEM";
    case ENET_RAEM:
        return "RAEM";
    case ENET_RAFL:
        return "RAFL";
    case ENET_TSEM:
        return "TSEM";
    case ENET_TAEM:
        return "TAEM";
    case ENET_TAFL:
        return "TAFL";
    case ENET_TIPG:
        return "TIPG";
    case ENET_FTRL:
        return "FTRL";
    case ENET_TACC:
        return "TACC";
    case ENET_RACC:
        return "RACC";
    case ENET_ATCR:
        return "ATCR";
    case ENET_ATVR:
        return "ATVR";
    case ENET_ATOFF:
        return "ATOFF";
    case ENET_ATPER:
        return "ATPER";
    case ENET_ATCOR:
        return "ATCOR";
    case ENET_ATINC:
        return "ATINC";
    case ENET_ATSTMP:
        return "ATSTMP";
    case ENET_TGSR:
        return "TGSR";
    case ENET_TCSR0:
        return "TCSR0";
    case ENET_TCCR0:
        return "TCCR0";
    case ENET_TCSR1:
        return "TCSR1";
    case ENET_TCCR1:
        return "TCCR1";
    case ENET_TCSR2:
        return "TCSR2";
    case ENET_TCCR2:
        return "TCCR2";
    case ENET_TCSR3:
        return "TCSR3";
    case ENET_TCCR3:
        return "TCCR3";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_EIR:
        return "EIR";
    case ENET_EIMR:
        return "EIMR";
    case ENET_RDAR:
        return "RDAR";
    case ENET_TDAR:
        return "TDAR";
    case ENET_ECR:
        return "ECR";
    case ENET_MMFR:
        return "MMFR";
    case ENET_MSCR:
        return "MSCR";
    case ENET_MIBC:
        return "MIBC";
    case ENET_RCR:
        return "RCR";
    case ENET_TCR:
        return "TCR";
    case ENET_PALR:
        return "PALR";
    case ENET_PAUR:
        return "PAUR";
    case ENET_OPD:
        return "OPD";
    case ENET_IAUR:
        return "IAUR";
    case ENET_IALR:
        return "IALR";
    case ENET_GAUR:
        return "GAUR";
    case ENET_GALR:
        return "GALR";
    case ENET_TFWR:
        return "TFWR";
    case ENET_RDSR:
        return "RDSR";
    case ENET_TDSR:
        return "TDSR";
    case ENET_MRBR:
        return "MRBR";
    default:
        if (s->is_fec) {
            return imx_fec_reg_name(s, index);
        } else {
            return imx_enet_reg_name(s, index);
        }
    }
}

/*
 * Versions of this device with more than one TX descriptor save the
 * 2nd and 3rd descriptors in a subsection, to maintain migration
 * compatibility with previous versions of the device that only
 * supported a single descriptor.
 */
static bool imx_eth_is_multi_tx_ring(void *opaque)
{
    IMXFECState *s = IMX_FEC(opaque);

    return s->tx_ring_num > 1;
}

static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};

#define PHY_INT_ENERGYON (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE (1 << 6)
#define PHY_INT_FAULT (1 << 5)
#define PHY_INT_DOWN (1 << 4)
#define PHY_INT_AUTONEG_LP (1 << 3)
#define PHY_INT_PARFAULT (1 << 2)
#define PHY_INT_AUTONEG_PAGE (1 << 1)

static void imx_eth_update(IMXFECState *s);

/*
 * The MII PHY could raise a GPIO line to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
static void imx_phy_update_irq(IMXFECState *s)
{
    imx_eth_update(s);
}

static void imx_phy_update_link(IMXFECState *s)
{
    /* Autonegotiation status mirrors link status. */
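    /*
     * The 0x0024 mask below covers the standard MII BMSR link-status
     * (bit 2) and auto-negotiation-complete (bit 5) bits.
     */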
    if (qemu_get_queue(s->nic)->link_down) {
        trace_imx_phy_update_link("down");
        s->phy_status &= ~0x0024;
        s->phy_int |= PHY_INT_DOWN;
    } else {
        trace_imx_phy_update_link("up");
        s->phy_status |= 0x0024;
        s->phy_int |= PHY_INT_ENERGYON;
        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
    }
    imx_phy_update_irq(s);
}

static void imx_eth_set_link(NetClientState *nc)
{
    imx_phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
}

static void imx_phy_reset(IMXFECState *s)
{
    trace_imx_phy_reset();

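    /*
     * Standard MII register reset values: BMSR 0x7809 reports extended
     * capability, auto-negotiation ability and 10/100 half/full support
     * with the link down; BMCR 0x3000 enables auto-negotiation at
     * 100 Mb/s; ANAR 0x01e1 advertises 10/100 half/full with the 802.3
     * selector. imx_phy_update_link() then adjusts the link bits.
     */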
    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;
    imx_phy_update_link(s);
}

static uint32_t imx_phy_read(IMXFECState *s, int reg)
{
    uint32_t val;
    uint32_t phy = reg / 32;

    if (!s->phy_connected) {
        return 0xffff;
    }

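    /*
     * MDIO frames that address a PHY other than our own may belong to
     * the optional "phy-consumer" device; this models boards where a
     * second MAC's PHY is wired to this MAC's MDIO bus.
     */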
    if (phy != s->phy_num) {
        if (s->phy_consumer && phy == s->phy_consumer->phy_num) {
            s = s->phy_consumer;
        } else {
            trace_imx_phy_read_num(phy, s->phy_num);
            return 0xffff;
        }
    }

    reg %= 32;

    switch (reg) {
    case 0: /* Basic Control */
        val = s->phy_control;
        break;
    case 1: /* Basic Status */
        val = s->phy_status;
        break;
    case 2: /* ID1 */
        val = 0x0007;
        break;
    case 3: /* ID2 */
        val = 0xc0d1;
        break;
    case 4: /* Auto-neg advertisement */
        val = s->phy_advertise;
        break;
    case 5: /* Auto-neg Link Partner Ability */
        val = 0x0f71;
        break;
    case 6: /* Auto-neg Expansion */
        val = 1;
        break;
    case 29: /* Interrupt source. */
        val = s->phy_int;
        s->phy_int = 0;
        imx_phy_update_irq(s);
        break;
    case 30: /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    }

    trace_imx_phy_read(val, phy, reg);

    return val;
}

static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
{
    uint32_t phy = reg / 32;

    if (!s->phy_connected) {
        return;
    }

    if (phy != s->phy_num) {
        if (s->phy_consumer && phy == s->phy_consumer->phy_num) {
            s = s->phy_consumer;
        } else {
            trace_imx_phy_write_num(phy, s->phy_num);
            return;
        }
    }

    reg %= 32;

    trace_imx_phy_write(val, phy, reg);

    switch (reg) {
    case 0: /* Basic Control */
        if (val & 0x8000) {
            imx_phy_reset(s);
        } else {
            s->phy_control = val & 0x7980;
            /* Complete autonegotiation immediately. */
            if (val & 0x1000) {
                s->phy_status |= 0x0020;
            }
        }
        break;
    case 4: /* Auto-neg advertisement */
        s->phy_advertise = (val & 0x2d7f) | 0x80;
        break;
    case 30: /* Interrupt mask */
        s->phy_int_mask = val & 0xff;
        imx_phy_update_irq(s);
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    }
}

static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data);
}

static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}

static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data,
                           bd->option, bd->status);
}

static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}

static void imx_eth_update(IMXFECState *s)
{
    /*
     * Previous versions of QEMU had the ENET_INT_MAC and ENET_INT_TS_TIMER
     * interrupts swapped. This worked with older versions of Linux (4.14
     * and older) since Linux associated both interrupt lines with Ethernet
     * MAC interrupts. Specifically,
     * - Linux 4.15 and later have separate interrupt handlers for the MAC and
     *   timer interrupts. Those versions of Linux fail with versions of QEMU
     *   with swapped interrupt assignments.
     * - In Linux 4.14, both interrupt lines were registered with the Ethernet
     *   MAC interrupt handler. As a result, all versions of QEMU happen to
     *   work, though that is accidental.
     * - In Linux 4.9 and older, the timer interrupt was registered directly
     *   with the Ethernet MAC interrupt handler. The MAC interrupt was
     *   redirected to a GPIO interrupt to work around erratum ERR006687.
     *   This was implemented using the SOC's IOMUX block. In QEMU, this GPIO
     *   interrupt never fired since IOMUX is currently not supported in QEMU.
     *   Linux instead received MAC interrupts on the timer interrupt.
     *   As a result, QEMU versions with the swapped interrupt assignment work,
     *   albeit accidentally, but QEMU versions with the correct interrupt
     *   assignment fail.
     *
     * To ensure that all versions of Linux work, generate ENET_INT_MAC
     * interrupts on both interrupt lines. This should be changed if and when
     * QEMU supports IOMUX.
     */
    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
        (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
        qemu_set_irq(s->irq[1], 1);
    } else {
        qemu_set_irq(s->irq[1], 0);
    }

    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
        qemu_set_irq(s->irq[0], 1);
    } else {
        qemu_set_irq(s->irq[0], 0);
    }
}

static void imx_fec_do_tx(IMXFECState *s)
{
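    /*
     * Walk the (legacy FEC format) TX descriptor ring, gathering buffers
     * into s->frame until a descriptor with the L (last) bit is seen,
     * then hand the assembled frame to the network layer.
     */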
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {

            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}

static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;

    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    switch (index) {
    case ENET_TDAR:
        ring = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr = ENET_TDSR2;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */

            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            int csum = 0;

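            /*
             * Emulate the ENET transmit checksum-insertion offload: the
             * PINS option asks for the TCP/UDP checksum and IINS for the
             * IP header checksum, so compute them in software before the
             * frame is handed to the network layer.
             */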
            if (bd.option & ENET_BD_PINS) {
                csum |= (CSUM_TCP | CSUM_UDP);
            }
            if (bd.option & ENET_BD_IINS) {
                csum |= CSUM_IP;
            }
            if (csum) {
                net_checksum_calculate(s->frame, frame_size, csum);
            }

            /* Last buffer in frame. */

            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;

            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}

static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
{
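    /*
     * Enhanced (1588-capable) buffer descriptors are only in effect when
     * the guest has set ECR[EN1588] on an ENET instance; otherwise fall
     * back to the legacy FEC descriptor layout.
     */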
    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        imx_enet_do_tx(s, index);
    } else {
        imx_fec_do_tx(s);
    }
}

static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;

    imx_fec_read_bd(&bd, s->rx_descriptor);

    s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;

    if (!s->regs[ENET_RDAR]) {
        trace_imx_eth_rx_bd_full();
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    imx_phy_reset(s);
}

static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}

static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
    case ENET_FRSR:
    case ENET_MIIGSK_CFGR:
    case ENET_MIIGSK_ENR:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
    case ENET_TIPG:
    case ENET_FTRL:
    case ENET_TACC:
    case ENET_RACC:
    case ENET_ATCR:
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
    case ENET_ATCOR:
    case ENET_ATINC:
    case ENET_ATSTMP:
    case ENET_TGSR:
    case ENET_TCSR0:
    case ENET_TCCR0:
    case ENET_TCSR1:
    case ENET_TCCR1:
    case ENET_TCSR2:
    case ENET_TCCR2:
    case ENET_TCSR3:
    case ENET_TCCR3:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2;

    switch (index) {
    case ENET_EIR:
    case ENET_EIMR:
    case ENET_RDAR:
    case ENET_TDAR:
    case ENET_ECR:
    case ENET_MMFR:
    case ENET_MSCR:
    case ENET_MIBC:
    case ENET_RCR:
    case ENET_TCR:
    case ENET_PALR:
    case ENET_PAUR:
    case ENET_OPD:
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
    case ENET_TFWR:
    case ENET_RDSR:
    case ENET_TDSR:
    case ENET_MRBR:
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    trace_imx_eth_read(index, imx_eth_reg_name(s, index), value);

    return value;
}

static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return;
}

static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
        s->regs[index] &= ~0x0000007d; /* writable fields */
        s->regs[index] |= (value & 0x0000007d);
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    trace_imx_eth_write(index, imx_eth_reg_name(s, index), value);

    switch (index) {
    case ENET_EIR:
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1:
    case ENET_TDAR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        /* fall through */
    case ENET_TDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
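        /*
         * With ETHEREN clear the MAC is disabled: RDAR/TDAR are cleared
         * and the RX/TX ring pointers are reloaded from the descriptor
         * start registers.
         */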
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        s->regs[index] = value;
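        /*
         * MMFR fields: DATA[15:0], TA[17:16], RA[22:18], PA[27:23],
         * OP[29:28], ST[31:30]. Bit 29 distinguishes a read (1) from a
         * write (0), and bits [27:18] carry the combined PHY/register
         * address that imx_phy_read()/imx_phy_write() split again with
         * "/ 32" and "% 32".
         */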
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           imx_phy_read(s,
                                                        extract32(value,
                                                                  18, 10)));
        } else {
            /* This is a write operation */
            imx_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB. */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately. */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering. */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}

static bool imx_eth_can_receive(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    return !!s->regs[ENET_RDAR];
}

static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    trace_imx_fec_receive(size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

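    /*
     * QEMU's network layer hands us the frame without its Ethernet FCS,
     * so compute the CRC here; the loop below appends these four bytes
     * after the payload.
     */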
    crc = cpu_to_be32(crc32(~0, buf, size));
    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
    size += 4;
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_fec_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
                         MEMTXATTRS_UNSPECIFIED);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_fec_receive_last(bd.flags);

            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    trace_imx_enet_receive(size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

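    /*
     * As in imx_fec_receive(): compute the Ethernet FCS here since
     * QEMU's network layer delivers frames without it.
     */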
    crc = cpu_to_be32(crc32(~0, buf, size));
    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
    size += 4;
    crc_ptr = (uint8_t *) &crc;

    if (shift16) {
        size += 2;
    }

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_enet_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;

        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr, zeros,
                             sizeof(zeros), MEMTXATTRS_UNSPECIFIED);

            buf_addr += sizeof(zeros);
            buf_len -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }

        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
                         MEMTXATTRS_UNSPECIFIED);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_enet_receive_last(bd.flags);

            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        return imx_enet_receive(nc, buf, len);
    } else {
        return imx_fec_receive(nc, buf, len);
    }
}

static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void imx_eth_cleanup(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    s->nic = NULL;
}

static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};


static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, &dev->mem_reentrancy_guard, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

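/*
 * Properties: "tx-ring-num" selects how many TX descriptor rings are
 * modelled (up to three on ENET instances with AVB support), "phy-num"
 * is this MAC's address on the MDIO bus, and "phy-connected" /
 * "phy-consumer" let boards model a second MAC whose PHY is wired to
 * this MAC's MDIO pins.
 */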
static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0),
    DEFINE_PROP_BOOL("phy-connected", IMXFECState, phy_connected, true),
    DEFINE_PROP_LINK("phy-consumer", IMXFECState, phy_consumer, TYPE_IMX_FEC,
                     IMXFECState *),
    DEFINE_PROP_END_OF_LIST(),
};

static void imx_eth_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_imx_eth;
    dc->reset = imx_eth_reset;
    device_class_set_props(dc, imx_eth_properties);
    dc->realize = imx_eth_realize;
    dc->desc = "i.MX FEC/ENET Ethernet Controller";
}

static void imx_fec_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = true;
}

static void imx_enet_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = false;
}

static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};

static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};

static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}

type_init(imx_eth_register_types)