1 /*
2 * i.MX Fast Ethernet Controller emulation.
3 *
4 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
5 *
6 * Based on Coldfire Fast Ethernet Controller emulation.
7 *
8 * Copyright (c) 2007 CodeSourcery.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, see <http://www.gnu.org/licenses/>.
22 */
23
24 #include "qemu/osdep.h"
25 #include "hw/irq.h"
26 #include "hw/net/imx_fec.h"
27 #include "hw/qdev-properties.h"
28 #include "migration/vmstate.h"
29 #include "system/dma.h"
30 #include "qemu/log.h"
31 #include "qemu/module.h"
32 #include "net/checksum.h"
33 #include "net/eth.h"
34 #include "trace.h"
35
36 #include <zlib.h> /* for crc32 */
37
38 #define IMX_MAX_DESC 1024
39
/*
 * Fallback register name: format the raw register index as text.
 *
 * NOTE: returns a pointer to a static buffer, so the string is only
 * valid until the next call.  That is acceptable here because it is
 * consumed immediately by the tracing code.
 */
static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
{
    static char buffer[20];

    snprintf(buffer, sizeof(buffer), "index %d", index);

    return buffer;
}
46
/*
 * Name lookup for the FEC-only register bank; unknown indices fall
 * back to the generic "index N" string.
 */
static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
{
    static const struct {
        uint32_t index;
        const char *name;
    } fec_regs[] = {
        { ENET_FRBR,        "FRBR" },
        { ENET_FRSR,        "FRSR" },
        { ENET_MIIGSK_CFGR, "MIIGSK_CFGR" },
        { ENET_MIIGSK_ENR,  "MIIGSK_ENR" },
    };
    size_t i;

    for (i = 0; i < ARRAY_SIZE(fec_regs); i++) {
        if (fec_regs[i].index == index) {
            return fec_regs[i].name;
        }
    }

    return imx_default_reg_name(s, index);
}
62
/*
 * Name lookup for the ENET-only register bank; unknown indices fall
 * back to the generic "index N" string.
 */
static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
{
    static const struct {
        uint32_t index;
        const char *name;
    } enet_regs[] = {
        { ENET_RSFL,   "RSFL" },
        { ENET_RSEM,   "RSEM" },
        { ENET_RAEM,   "RAEM" },
        { ENET_RAFL,   "RAFL" },
        { ENET_TSEM,   "TSEM" },
        { ENET_TAEM,   "TAEM" },
        { ENET_TAFL,   "TAFL" },
        { ENET_TIPG,   "TIPG" },
        { ENET_FTRL,   "FTRL" },
        { ENET_TACC,   "TACC" },
        { ENET_RACC,   "RACC" },
        { ENET_ATCR,   "ATCR" },
        { ENET_ATVR,   "ATVR" },
        { ENET_ATOFF,  "ATOFF" },
        { ENET_ATPER,  "ATPER" },
        { ENET_ATCOR,  "ATCOR" },
        { ENET_ATINC,  "ATINC" },
        { ENET_ATSTMP, "ATSTMP" },
        { ENET_TGSR,   "TGSR" },
        { ENET_TCSR0,  "TCSR0" },
        { ENET_TCCR0,  "TCCR0" },
        { ENET_TCSR1,  "TCSR1" },
        { ENET_TCCR1,  "TCCR1" },
        { ENET_TCSR2,  "TCSR2" },
        { ENET_TCCR2,  "TCCR2" },
        { ENET_TCSR3,  "TCSR3" },
        { ENET_TCCR3,  "TCCR3" },
    };
    size_t i;

    for (i = 0; i < ARRAY_SIZE(enet_regs); i++) {
        if (enet_regs[i].index == index) {
            return enet_regs[i].name;
        }
    }

    return imx_default_reg_name(s, index);
}
124
/*
 * Name lookup for the registers shared by FEC and ENET.  Anything not
 * in the common set is delegated to the variant-specific bank based on
 * s->is_fec.
 */
static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
{
    static const struct {
        uint32_t index;
        const char *name;
    } common_regs[] = {
        { ENET_EIR,  "EIR" },
        { ENET_EIMR, "EIMR" },
        { ENET_RDAR, "RDAR" },
        { ENET_TDAR, "TDAR" },
        { ENET_ECR,  "ECR" },
        { ENET_MMFR, "MMFR" },
        { ENET_MSCR, "MSCR" },
        { ENET_MIBC, "MIBC" },
        { ENET_RCR,  "RCR" },
        { ENET_TCR,  "TCR" },
        { ENET_PALR, "PALR" },
        { ENET_PAUR, "PAUR" },
        { ENET_OPD,  "OPD" },
        { ENET_IAUR, "IAUR" },
        { ENET_IALR, "IALR" },
        { ENET_GAUR, "GAUR" },
        { ENET_GALR, "GALR" },
        { ENET_TFWR, "TFWR" },
        { ENET_RDSR, "RDSR" },
        { ENET_TDSR, "TDSR" },
        { ENET_MRBR, "MRBR" },
    };
    size_t i;

    for (i = 0; i < ARRAY_SIZE(common_regs); i++) {
        if (common_regs[i].index == index) {
            return common_regs[i].name;
        }
    }

    return s->is_fec ? imx_fec_reg_name(s, index)
                     : imx_enet_reg_name(s, index);
}
178
179 /*
180 * Versions of this device with more than one TX descriptor save the
181 * 2nd and 3rd descriptors in a subsection, to maintain migration
182 * compatibility with previous versions of the device that only
183 * supported a single descriptor.
184 */
imx_eth_is_multi_tx_ring(void * opaque)185 static bool imx_eth_is_multi_tx_ring(void *opaque)
186 {
187 IMXFECState *s = IMX_FEC(opaque);
188
189 return s->tx_ring_num > 1;
190 }
191
/*
 * Subsection holding the 2nd and 3rd TX descriptor pointers.  Only
 * migrated when more than one TX ring is configured (see
 * imx_eth_is_multi_tx_ring), which keeps the stream compatible with
 * older single-ring versions of this device.
 */
static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};
203
/*
 * Main migration state: the register file, the RX descriptor pointer
 * and the first TX descriptor pointer.  Extra TX rings travel in the
 * optional subsection above.
 */
static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 3,
    .minimum_version_id = 3,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};
219
220 static void imx_eth_update(IMXFECState *s);
221
/*
 * The MII phy could raise a GPIO to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
/*
 * PHY interrupt callback: the PHY status is folded into the MAC's
 * interrupt computation, so just re-evaluate the IRQ lines.
 * n and level are unused because no dedicated PHY IRQ line is modelled.
 */
static void imx_phy_update_irq(void *opaque, int n, int level)
{
    imx_eth_update(opaque);
}
232
/* Link-status callback: propagate the new link state to the PHY model. */
static void imx_eth_set_link(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    lan9118_phy_update_link(&s->mii, nc->link_down);
}
238
/*
 * Read one MII register.  The flat "reg" value encodes both the PHY
 * address (reg / 32) and the register number (reg % 32).  Reads that
 * target neither this device's PHY nor the optional consumer device's
 * PHY return 0xffff, matching an absent PHY on the MDIO bus.
 */
static uint32_t imx_phy_read(IMXFECState *s, int reg)
{
    uint32_t phy_addr = reg / 32;

    if (!s->phy_connected) {
        return 0xffff;
    }

    if (phy_addr != s->phy_num) {
        if (!s->phy_consumer || phy_addr != s->phy_consumer->phy_num) {
            trace_imx_phy_read_num(phy_addr, s->phy_num);
            return 0xffff;
        }
        /* Forward the access to the device sharing this MDIO bus. */
        s = s->phy_consumer;
    }

    return lan9118_phy_read(&s->mii, reg % 32);
}
260
/*
 * Write one MII register.  As with imx_phy_read, "reg" encodes the PHY
 * address (reg / 32) and register number (reg % 32); writes to an
 * unmatched PHY address are silently dropped after tracing.
 */
static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
{
    uint32_t phy_addr = reg / 32;

    if (!s->phy_connected) {
        return;
    }

    if (phy_addr != s->phy_num) {
        if (!s->phy_consumer || phy_addr != s->phy_consumer->phy_num) {
            trace_imx_phy_write_num(phy_addr, s->phy_num);
            return;
        }
        /* Forward the access to the device sharing this MDIO bus. */
        s = s->phy_consumer;
    }

    lan9118_phy_write(&s->mii, reg % 32, val);
}
282
/* Fetch a FEC (legacy-format) buffer descriptor from guest memory. */
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data);
}
290
/* Write a FEC (legacy-format) buffer descriptor back to guest memory. */
static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}
296
/* Fetch an ENET (extended-format) buffer descriptor from guest memory. */
static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data,
                           bd->option, bd->status);
}
305
/* Write an ENET (extended-format) buffer descriptor back to guest memory. */
static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}
311
/* Recompute both IRQ output lines from the pending/masked interrupt state. */
static void imx_eth_update(IMXFECState *s)
{
    uint32_t active = s->regs[ENET_EIR] & s->regs[ENET_EIMR];

    /*
     * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
     * interrupts swapped.  This worked with older versions of Linux (4.14
     * and older) since Linux associated both interrupt lines with Ethernet
     * MAC interrupts.  Specifically,
     * - Linux 4.15 and later have separate interrupt handlers for the MAC
     *   and timer interrupts.  Those versions of Linux fail with versions
     *   of QEMU with swapped interrupt assignments.
     * - In Linux 4.14, both interrupt lines were registered with the
     *   Ethernet MAC interrupt handler.  As a result, all versions of qemu
     *   happen to work, though that is accidental.
     * - In Linux 4.9 and older, the timer interrupt was registered directly
     *   with the Ethernet MAC interrupt handler.  The MAC interrupt was
     *   redirected to a GPIO interrupt to work around erratum ERR006687.
     *   This was implemented using the SOC's IOMUX block.  In qemu, this
     *   GPIO interrupt never fired since IOMUX is currently not supported
     *   in qemu.  Linux instead received MAC interrupts on the timer
     *   interrupt.  As a result, qemu versions with the swapped interrupt
     *   assignment work, albeit accidentally, but qemu versions with the
     *   correct interrupt assignment fail.
     *
     * To ensure that all versions of Linux work, generate ENET_INT_MAC
     * interrupts on both interrupt lines.  This should be changed if and
     * when qemu supports IOMUX.
     */
    qemu_set_irq(s->irq[1],
                 (active & (ENET_INT_MAC | ENET_INT_TS_TIMER)) != 0);
    qemu_set_irq(s->irq[0], (active & ENET_INT_MAC) != 0);
}
352
/*
 * Transmit frames queued on the single FEC TX descriptor ring.
 *
 * Starting at s->tx_descriptor[0], gather each ready descriptor's
 * buffer into s->frame; when a descriptor carries ENET_BD_L (last in
 * frame) the assembled frame is handed to the network backend.  The
 * walk stops when a descriptor is not ready (ENET_BD_R clear) or after
 * IMX_MAX_DESC descriptors, which guards against a guest-constructed
 * descriptor loop.
 */
static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {

            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            /* Clamp over-long frames and flag babbling-transmit error. */
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit set: restart from the base of the ring. */
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}
403
/*
 * Transmit frames queued on one of the up-to-three ENET TX rings.
 *
 * "index" is the TDARn register that was written and selects the ring
 * plus its matching interrupt bits and base-address register.  Extended
 * descriptors add per-frame checksum-offload options (ENET_BD_PINS /
 * ENET_BD_IINS) and per-descriptor interrupt enable (ENET_BD_TX_INT).
 * As with the FEC path, the walk is bounded by IMX_MAX_DESC to break
 * guest-constructed descriptor loops.
 */
static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;

    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    switch (index) {
    case ENET_TDAR:
        ring    = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr    = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring    = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr    = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring    = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr    = ENET_TDSR2;
        break;
    default:
        /* Callers only pass the three TDARn indices; anything else is
         * a programming error in this device model. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */

            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            /* Clamp over-long frames and flag babbling-transmit error. */
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Checksum offload requested by the descriptor options. */
            int csum = 0;

            if (bd.option & ENET_BD_PINS) {
                csum |= (CSUM_TCP | CSUM_UDP);
            }
            if (bd.option & ENET_BD_IINS) {
                csum |= CSUM_IP;
            }
            if (csum) {
                net_checksum_calculate(s->frame, frame_size, csum);
            }

            /* Last buffer in frame. */

            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;

            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit set: restart from the base of this ring. */
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}
505
/*
 * Dispatch a transmit request to the right descriptor format:
 * extended ENET descriptors when this is an ENET with 1588 support
 * enabled, legacy FEC descriptors otherwise.
 */
static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
{
    bool use_enet_bds = !s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588);

    if (use_enet_bds) {
        imx_enet_do_tx(s, index);
    } else {
        imx_fec_do_tx(s);
    }
}
514
/*
 * Refresh RDAR from the current RX descriptor's empty bit and, when a
 * buffer is available and "flush" is set, drain any packets the net
 * layer queued while reception was stalled.
 */
static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;
    bool rx_ready;

    imx_fec_read_bd(&bd, s->rx_descriptor);
    rx_ready = (bd.flags & ENET_BD_E) != 0;
    s->regs[ENET_RDAR] = rx_ready ? ENET_RDAR_RDAR : 0;

    if (!rx_ready) {
        trace_imx_eth_rx_bd_full();
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}
529
/*
 * Device reset: clear the register file, then load the documented
 * power-on defaults, rebuild the MAC-address registers from the NIC
 * configuration, and apply the FEC- or ENET-specific defaults.
 */
static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    /* PALR/PAUR mirror the configured MAC address (PAUR low half is
     * the fixed pause-frame type field 0x8808). */
    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    /* Descriptor pointers restart from the (now zeroed) base registers. */
    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));
}
566
/* Read handler for unimplemented register offsets: log and return 0. */
static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}
573
/* Read from the FEC-only register bank; unknown indices are logged. */
static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
    case ENET_FRSR:
    case ENET_MIIGSK_CFGR:
    case ENET_MIIGSK_ENR:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}
586
/* Read from the ENET-only register bank; unknown indices are logged. */
static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
    case ENET_TIPG:
    case ENET_FTRL:
    case ENET_TACC:
    case ENET_RACC:
    case ENET_ATCR:
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
    case ENET_ATCOR:
    case ENET_ATINC:
    case ENET_ATSTMP:
    case ENET_TGSR:
    case ENET_TCSR0:
    case ENET_TCCR0:
    case ENET_TCSR1:
    case ENET_TCCR1:
    case ENET_TCSR2:
    case ENET_TCCR2:
    case ENET_TCSR3:
    case ENET_TCCR3:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}
622
/*
 * MMIO read handler.  Registers common to FEC and ENET are served
 * directly from the register file; everything else is delegated to the
 * variant-specific bank.  Accesses are always 32-bit (enforced by
 * imx_eth_ops), so the word index is offset >> 2.
 */
static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2;

    switch (index) {
    case ENET_EIR:
    case ENET_EIMR:
    case ENET_RDAR:
    case ENET_TDAR:
    case ENET_ECR:
    case ENET_MMFR:
    case ENET_MSCR:
    case ENET_MIBC:
    case ENET_RCR:
    case ENET_TCR:
    case ENET_PALR:
    case ENET_PAUR:
    case ENET_OPD:
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
    case ENET_TFWR:
    case ENET_RDSR:
    case ENET_TDSR:
    case ENET_MRBR:
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    trace_imx_eth_read(index, imx_eth_reg_name(s, index), value);

    return value;
}
666
/*
 * Write handler for register offsets not implemented by either the
 * FEC- or ENET-specific banks: log a guest error and discard the value.
 */
static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    /* Redundant trailing "return;" removed; nothing else changed. */
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
}
673
/*
 * Write to the FEC-only register bank, applying each register's
 * writable-bit mask; unknown indices are logged and dropped.
 */
static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        /* Writable bits [9:2]; bit 10 reads back as set. */
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        /* Setting the enable bit immediately reports "enabled + ready". */
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
696
/*
 * Write to the ENET-only register bank.  Each register keeps only its
 * architecturally writable bits; the timer status registers (TGSR,
 * TCSRn) implement write-1-to-clear flag semantics.
 */
static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        /* FIFO threshold registers: 9 writable bits. */
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
        s->regs[index] &= ~0x0000007d; /* writable fields */
        s->regs[index] |= (value & 0x0000007d);
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
763
/*
 * MMIO write handler for the registers common to FEC and ENET; other
 * offsets are delegated to the variant-specific banks.  Writes with
 * side effects (TDAR kicks transmission, RDAR re-arms reception, ECR
 * can reset the whole device) are handled inline.  Except for the
 * delegated and early-return paths, every write ends by re-evaluating
 * the IRQ lines via imx_eth_update().
 */
static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    trace_imx_eth_write(index, imx_eth_reg_name(s, index), value);

    switch (index) {
    case ENET_EIR:
        /* Interrupt event register: write-1-to-clear. */
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        /* RX doorbell: any write while enabled re-arms reception. */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1:
    case ENET_TDAR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        /* fall through */
    case ENET_TDAR:
        /* TX doorbell: transmit synchronously, then clear the bit. */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            /* Soft reset: reinitialise the whole device and return. */
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            /* Disabling the MAC rewinds all descriptor rings. */
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        /* MDIO frame: bit 29 distinguishes read from write; bits
         * [27:18] carry PHY address + register, [15:0] the data. */
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           imx_phy_read(s,
                                                        extract32(value,
                                                                  18, 10)));
        } else {
            /* This is a write operation */
            imx_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB. */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately. */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        /* MAC address bytes 0-3; kept in sync with the NIC config. */
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        /* MAC address bytes 4-5; low half fixed to pause type 0x8808. */
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering. */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        /* Ring bases must be 8-byte (FEC: 4-byte) aligned; writing
         * also rewinds the corresponding descriptor pointer. */
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}
934
/* The device can accept a packet only while RDAR reports an empty RX BD. */
static bool imx_eth_can_receive(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    return s->regs[ENET_RDAR] != 0;
}
941
/*
 * Receive one frame into the FEC RX descriptor ring.
 *
 * The frame's CRC32 is computed and appended (hardware delivers the
 * FCS), then the frame is scattered across RX buffer descriptors of at
 * most MRBR bytes each.  The last descriptor gets ENET_BD_L plus any
 * error flags (truncation/length); interrupt status is accumulated in
 * EIR and the IRQ lines re-evaluated before returning.
 */
static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    trace_imx_fec_receive(size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    crc = cpu_to_be32(crc32(~0, buf, size));
    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
    size += 4;
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available.  Bail out. */
            /*
             * FIXME: This is wrong.  We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_fec_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            /* Shrink the payload copy so the CRC bytes come from crc_ptr. */
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
                         MEMTXATTRS_UNSPECIFIED);
        buf += buf_len;
        if (size < 4) {
            /* Append the (remaining) CRC bytes after the payload. */
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_fec_receive_last(bd.flags);

            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit set: restart from the base of the RX ring. */
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}
1036
/*
 * Receive one frame into the ENET RX descriptor ring (extended BDs).
 *
 * Like imx_fec_receive, but additionally honours RACC.SHIFT16: when
 * set, two zero padding bytes are inserted before the Ethernet header
 * of the first buffer so the IP payload ends up 4-byte aligned.  The
 * truncation limit is the programmable FTRL register rather than the
 * fixed maximum frame size, and per-descriptor RX interrupts are gated
 * on ENET_BD_RX_INT.
 */
static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    trace_imx_enet_receive(size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    crc = cpu_to_be32(crc32(~0, buf, size));
    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
    size += 4;
    crc_ptr = (uint8_t *) &crc;

    if (shift16) {
        /* Account for the two alignment bytes inserted below. */
        size += 2;
    }

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available.  Bail out. */
            /*
             * FIXME: This is wrong.  We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_enet_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            /* Shrink the payload copy so the CRC bytes come from crc_ptr. */
            buf_len += size - 4;
        }
        buf_addr = bd.data;

        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr, zeros,
                             sizeof(zeros), MEMTXATTRS_UNSPECIFIED);

            buf_addr += sizeof(zeros);
            buf_len -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }

        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
                         MEMTXATTRS_UNSPECIFIED);
        buf += buf_len;
        if (size < 4) {
            /* Append the (remaining) CRC bytes after the payload. */
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_enet_receive_last(bd.flags);

            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit set: restart from the base of the RX ring. */
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}
1160
imx_eth_receive(NetClientState * nc,const uint8_t * buf,size_t len)1161 static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
1162 size_t len)
1163 {
1164 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1165
1166 if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
1167 return imx_enet_receive(nc, buf, len);
1168 } else {
1169 return imx_fec_receive(nc, buf, len);
1170 }
1171 }
1172
/* MMIO access table: registers are strictly 32-bit wide. */
static const MemoryRegionOps imx_eth_ops = {
    .read                  = imx_eth_read,
    .write                 = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness            = DEVICE_NATIVE_ENDIAN,
};
1180
/* NIC teardown callback: drop our reference to the NIC state. */
static void imx_eth_cleanup(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    s->nic = NULL;
}
1187
/* Callbacks connecting this device model to QEMU's network layer. */
static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};
1196
1197
/*
 * Realize: set up the MMIO region and the two IRQ lines, create and
 * wire the embedded LAN9118 PHY model, then register the NIC with the
 * network layer.  On PHY realization failure the error is propagated
 * through errp and the NIC is not created.
 */
static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    /* Route the PHY's interrupt output into the MAC's IRQ computation. */
    qemu_init_irq(&s->mii_irq, imx_phy_update_irq, s, 0);
    object_initialize_child(OBJECT(s), "mii", &s->mii, TYPE_LAN9118_PHY);
    if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(&s->mii), errp)) {
        return;
    }
    qdev_connect_gpio_out(DEVICE(&s->mii), 0, &s->mii_irq);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, &dev->mem_reentrancy_guard, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
1224
/*
 * User-configurable properties: number of TX rings (1 or 3), this
 * device's PHY address, whether a PHY is attached at all, and an
 * optional link to a sibling device that shares this MDIO bus.
 */
static const Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0),
    DEFINE_PROP_BOOL("phy-connected", IMXFECState, phy_connected, true),
    DEFINE_PROP_LINK("phy-consumer", IMXFECState, phy_consumer, TYPE_IMX_FEC,
                     IMXFECState *),
};
1233
/* Class init shared by the FEC and ENET types. */
static void imx_eth_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_imx_eth;
    device_class_set_legacy_reset(dc, imx_eth_reset);
    device_class_set_props(dc, imx_eth_properties);
    dc->realize = imx_eth_realize;
    dc->desc = "i.MX FEC/ENET Ethernet Controller";
}
1244
/* Instance init for the FEC variant: select the legacy register set. */
static void imx_fec_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = true;
}
1251
/* Instance init for the ENET variant: select the extended register set. */
static void imx_enet_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = false;
}
1258
/* QOM type registration for the base FEC device. */
static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};
1266
/* QOM type registration for the ENET variant, derived from the FEC type. */
static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};
1272
/* Register both device types with the QOM type system. */
static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}
1278
1279 type_init(imx_eth_register_types)
1280