/*
 * QEMU model of Xilinx AXI-Ethernet.
 *
 * Copyright (c) 2011 Edgar E. Iglesias.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/sysbus.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/net.h"
#include "net/checksum.h"

#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/stream.h"
#include "qom/object.h"

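/*
 * Debug helpers: DPHY() below (and DENET() further down) wrap optional debug
 * logging for the PHY/MDIO model and the Ethernet core; both expand to
 * nothing by default.
 */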
#define DPHY(x)

#define TYPE_XILINX_AXI_ENET "xlnx.axi-ethernet"
#define TYPE_XILINX_AXI_ENET_DATA_STREAM "xilinx-axienet-data-stream"
#define TYPE_XILINX_AXI_ENET_CONTROL_STREAM "xilinx-axienet-control-stream"

OBJECT_DECLARE_SIMPLE_TYPE(XilinxAXIEnet, XILINX_AXI_ENET)

typedef struct XilinxAXIEnetStreamSink XilinxAXIEnetStreamSink;
DECLARE_INSTANCE_CHECKER(XilinxAXIEnetStreamSink, XILINX_AXI_ENET_DATA_STREAM,
                         TYPE_XILINX_AXI_ENET_DATA_STREAM)

DECLARE_INSTANCE_CHECKER(XilinxAXIEnetStreamSink, XILINX_AXI_ENET_CONTROL_STREAM,
                         TYPE_XILINX_AXI_ENET_CONTROL_STREAM)

/* Advertisement control register. */
#define ADVERTISE_10FULL        0x0040  /* Try for 10mbps full-duplex  */
#define ADVERTISE_100HALF       0x0080  /* Try for 100mbps half-duplex */
#define ADVERTISE_100FULL       0x0100  /* Try for 100mbps full-duplex */

#define CONTROL_PAYLOAD_WORDS 5
#define CONTROL_PAYLOAD_SIZE (CONTROL_PAYLOAD_WORDS * (sizeof(uint32_t)))

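/*
 * Simple model of an MDIO-attached PHY.  Register accesses are dispatched
 * through the read/write hooks; tdk_init() below wires up the only
 * implementation provided here.
 */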
struct PHY {
    uint32_t regs[32];

    int link;

    unsigned int (*read)(struct PHY *phy, unsigned int req);
    void (*write)(struct PHY *phy, unsigned int req,
                  unsigned int data);
};

static unsigned int tdk_read(struct PHY *phy, unsigned int req)
{
    int regnum;
    unsigned r = 0;

    regnum = req & 0x1f;

    switch (regnum) {
    case 1:
        if (!phy->link) {
            break;
        }
        /* MR1. */
        /* Speeds and modes. */
        r |= (1 << 13) | (1 << 14);
        r |= (1 << 11) | (1 << 12);
        r |= (1 << 5); /* Autoneg complete. */
        r |= (1 << 3); /* Autoneg able. */
        r |= (1 << 2); /* link. */
        r |= (1 << 1); /* link. */
        break;
    case 5:
        /* Link partner ability.
           We are kind; always agree with whatever best mode
           the guest advertises. */
        r = 1 << 14; /* Success. */
        /* Copy advertised modes. */
        r |= phy->regs[4] & (15 << 5);
        /* Autoneg support. */
        r |= 1;
        break;
    case 17:
        /* Marvell PHY on many xilinx boards. */
        r = 0x8000; /* 1000Mb */
        break;
    case 18:
        {
            /* Diagnostics reg. */
            int duplex = 0;
            int speed_100 = 0;

            if (!phy->link) {
                break;
            }

            /* Are we advertising 100 half or 100 full duplex? */
            speed_100 = !!(phy->regs[4] & ADVERTISE_100HALF);
            speed_100 |= !!(phy->regs[4] & ADVERTISE_100FULL);

            /* Are we advertising 10 full or 100 full duplex? */
            duplex = !!(phy->regs[4] & ADVERTISE_100FULL);
            duplex |= !!(phy->regs[4] & ADVERTISE_10FULL);
            r = (speed_100 << 10) | (duplex << 11);
        }
        break;

    default:
        r = phy->regs[regnum];
        break;
    }
    DPHY(qemu_log("\n%s %x = reg[%d]\n", __func__, r, regnum));
    return r;
}

static void
tdk_write(struct PHY *phy, unsigned int req, unsigned int data)
{
    int regnum;

    regnum = req & 0x1f;
    DPHY(qemu_log("%s reg[%d] = %x\n", __func__, regnum, data));
    switch (regnum) {
    default:
        phy->regs[regnum] = data;
        break;
    }

    /* Unconditionally clear regs[BMCR][BMCR_RESET] and auto-neg */
    phy->regs[0] &= ~0x8200;
}

static void
tdk_init(struct PHY *phy)
{
    phy->regs[0] = 0x3100;
    /* PHY Id. */
    phy->regs[2] = 0x0300;
    phy->regs[3] = 0xe400;
    /* Autonegotiation advertisement reg. */
    phy->regs[4] = 0x01E1;
    phy->link = 1;

    phy->read = tdk_read;
    phy->write = tdk_write;
}

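/*
 * An MDIO bus can address up to 32 PHYs; this model simply keeps a table of
 * attached PHY pointers indexed by address.
 */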
struct MDIOBus {
    struct PHY *devs[32];
};

static void
mdio_attach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr)
{
    bus->devs[addr & 0x1f] = phy;
}

#ifdef USE_THIS_DEAD_CODE
static void
mdio_detach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr)
{
    bus->devs[addr & 0x1f] = NULL;
}
#endif

static uint16_t mdio_read_req(struct MDIOBus *bus, unsigned int addr,
                              unsigned int reg)
{
    struct PHY *phy;
    uint16_t data;

    phy = bus->devs[addr];
    if (phy && phy->read) {
        data = phy->read(phy, reg);
    } else {
        data = 0xffff;
    }
    DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
    return data;
}

static void mdio_write_req(struct MDIOBus *bus, unsigned int addr,
                           unsigned int reg, uint16_t data)
{
    struct PHY *phy;

    DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
    phy = bus->devs[addr];
    if (phy && phy->write) {
        phy->write(phy, reg, data);
    }
}

#define DENET(x)

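/*
 * TEMAC register map.  Byte offsets in the MMIO window are divided by four
 * to index the 32-bit register array, e.g. RCW1 at byte offset 0x404 becomes
 * word index 0x404 / 4.
 */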
#define R_RAF (0x000 / 4)
enum {
    RAF_MCAST_REJ = (1 << 1),
    RAF_BCAST_REJ = (1 << 2),
    RAF_EMCF_EN = (1 << 12),
    RAF_NEWFUNC_EN = (1 << 11)
};

#define R_IS (0x00C / 4)
enum {
    IS_HARD_ACCESS_COMPLETE = 1,
    IS_AUTONEG = (1 << 1),
    IS_RX_COMPLETE = (1 << 2),
    IS_RX_REJECT = (1 << 3),
    IS_TX_COMPLETE = (1 << 5),
    IS_RX_DCM_LOCK = (1 << 6),
    IS_MGM_RDY = (1 << 7),
    IS_PHY_RST_DONE = (1 << 8),
};

#define R_IP (0x010 / 4)
#define R_IE (0x014 / 4)
#define R_UAWL (0x020 / 4)
#define R_UAWU (0x024 / 4)
#define R_PPST (0x030 / 4)
enum {
    PPST_LINKSTATUS = (1 << 0),
    PPST_PHY_LINKSTATUS = (1 << 7),
};

#define R_STATS_RX_BYTESL (0x200 / 4)
#define R_STATS_RX_BYTESH (0x204 / 4)
#define R_STATS_TX_BYTESL (0x208 / 4)
#define R_STATS_TX_BYTESH (0x20C / 4)
#define R_STATS_RXL (0x290 / 4)
#define R_STATS_RXH (0x294 / 4)
#define R_STATS_RX_BCASTL (0x2a0 / 4)
#define R_STATS_RX_BCASTH (0x2a4 / 4)
#define R_STATS_RX_MCASTL (0x2a8 / 4)
#define R_STATS_RX_MCASTH (0x2ac / 4)

#define R_RCW0 (0x400 / 4)
#define R_RCW1 (0x404 / 4)
enum {
    RCW1_VLAN = (1 << 27),
    RCW1_RX = (1 << 28),
    RCW1_FCS = (1 << 29),
    RCW1_JUM = (1 << 30),
    RCW1_RST = (1 << 31),
};

#define R_TC (0x408 / 4)
enum {
    TC_VLAN = (1 << 27),
    TC_TX = (1 << 28),
    TC_FCS = (1 << 29),
    TC_JUM = (1 << 30),
    TC_RST = (1 << 31),
};

#define R_EMMC (0x410 / 4)
enum {
    EMMC_LINKSPEED_10MB = (0 << 30),
    EMMC_LINKSPEED_100MB = (1 << 30),
    EMMC_LINKSPEED_1000MB = (2 << 30),
};

#define R_PHYC (0x414 / 4)

#define R_MC (0x500 / 4)
#define MC_EN (1 << 6)

#define R_MCR (0x504 / 4)
#define R_MWD (0x508 / 4)
#define R_MRD (0x50c / 4)
#define R_MIS (0x600 / 4)
#define R_MIP (0x620 / 4)
#define R_MIE (0x640 / 4)
#define R_MIC (0x640 / 4)

#define R_UAW0 (0x700 / 4)
#define R_UAW1 (0x704 / 4)
#define R_FMI (0x708 / 4)
#define R_AF0 (0x710 / 4)
#define R_AF1 (0x714 / 4)
#define R_MAX (0x34 / 4)

/* Indirect registers. */
struct TEMAC {
    struct MDIOBus mdio_bus;
    struct PHY phy;

    void *parent;
};

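/*
 * Stream sinks exposed by the Ethernet core.  The connected AXI DMA pushes
 * transmit data into the data sink and per-packet control words into the
 * control sink; frames received from the network are pushed back out through
 * the tx_data_dev and tx_control_dev links in the device state below.
 */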
struct XilinxAXIEnetStreamSink {
    Object parent;

    struct XilinxAXIEnet *enet;
};

struct XilinxAXIEnet {
    SysBusDevice busdev;
    MemoryRegion iomem;
    qemu_irq irq;
    StreamSink *tx_data_dev;
    StreamSink *tx_control_dev;
    XilinxAXIEnetStreamSink rx_data_dev;
    XilinxAXIEnetStreamSink rx_control_dev;
    NICState *nic;
    NICConf conf;


    uint32_t c_rxmem;
    uint32_t c_txmem;
    uint32_t c_phyaddr;

    struct TEMAC TEMAC;

    /* MII regs. */
    union {
        uint32_t regs[4];
        struct {
            uint32_t mc;
            uint32_t mcr;
            uint32_t mwd;
            uint32_t mrd;
        };
    } mii;

    struct {
        uint64_t rx_bytes;
        uint64_t tx_bytes;

        uint64_t rx;
        uint64_t rx_bcast;
        uint64_t rx_mcast;
    } stats;

    /* Receive configuration words. */
    uint32_t rcw[2];
    /* Transmit config. */
    uint32_t tc;
    uint32_t emmc;
    uint32_t phyc;

    /* Unicast Address Word. */
    uint32_t uaw[2];
    /* Unicast address filter used with extended mcast. */
    uint32_t ext_uaw[2];
    uint32_t fmi;

    uint32_t regs[R_MAX];

    /* Multicast filter addrs. */
    uint32_t maddr[4][2];
    /* 32K x 1 lookup filter. */
    uint32_t ext_mtable[1024];

    uint32_t hdr[CONTROL_PAYLOAD_WORDS];

    uint8_t *txmem;
    uint32_t txpos;

    uint8_t *rxmem;
    uint32_t rxsize;
    uint32_t rxpos;

    uint8_t rxapp[CONTROL_PAYLOAD_SIZE];
    uint32_t rxappsize;

    /* Whether axienet_eth_rx_notify should flush incoming queue. */
    bool need_flush;
};

static void axienet_rx_reset(XilinxAXIEnet *s)
{
    s->rcw[1] = RCW1_JUM | RCW1_FCS | RCW1_RX | RCW1_VLAN;
}

static void axienet_tx_reset(XilinxAXIEnet *s)
{
    s->tc = TC_JUM | TC_TX | TC_VLAN;
    s->txpos = 0;
}

static inline int axienet_rx_resetting(XilinxAXIEnet *s)
{
    return s->rcw[1] & RCW1_RST;
}

static inline int axienet_rx_enabled(XilinxAXIEnet *s)
{
    return s->rcw[1] & RCW1_RX;
}

static inline int axienet_extmcf_enabled(XilinxAXIEnet *s)
{
    return !!(s->regs[R_RAF] & RAF_EMCF_EN);
}

static inline int axienet_newfunc_enabled(XilinxAXIEnet *s)
{
    return !!(s->regs[R_RAF] & RAF_NEWFUNC_EN);
}

static void xilinx_axienet_reset(DeviceState *d)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(d);

    axienet_rx_reset(s);
    axienet_tx_reset(s);

    s->regs[R_PPST] = PPST_LINKSTATUS | PPST_PHY_LINKSTATUS;
    s->regs[R_IS] = IS_AUTONEG | IS_RX_DCM_LOCK | IS_MGM_RDY | IS_PHY_RST_DONE;

    s->emmc = EMMC_LINKSPEED_100MB;
}

static void enet_update_irq(XilinxAXIEnet *s)
{
    s->regs[R_IP] = s->regs[R_IS] & s->regs[R_IE];
    qemu_set_irq(s->irq, !!s->regs[R_IP]);
}

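/*
 * MMIO accessors.  The byte offset is converted to a 32-bit word index;
 * registers with side effects are handled explicitly and everything else
 * falls through to the generic register array.
 */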
static uint64_t enet_read(void *opaque, hwaddr addr, unsigned size)
{
    XilinxAXIEnet *s = opaque;
    uint32_t r = 0;
    addr >>= 2;

    switch (addr) {
    case R_RCW0:
    case R_RCW1:
        r = s->rcw[addr & 1];
        break;

    case R_TC:
        r = s->tc;
        break;

    case R_EMMC:
        r = s->emmc;
        break;

    case R_PHYC:
        r = s->phyc;
        break;

    case R_MCR:
        r = s->mii.regs[addr & 3] | (1 << 7); /* Always ready. */
        break;

    case R_STATS_RX_BYTESL:
    case R_STATS_RX_BYTESH:
        r = s->stats.rx_bytes >> (32 * (addr & 1));
        break;

    case R_STATS_TX_BYTESL:
    case R_STATS_TX_BYTESH:
        r = s->stats.tx_bytes >> (32 * (addr & 1));
        break;

    case R_STATS_RXL:
    case R_STATS_RXH:
        r = s->stats.rx >> (32 * (addr & 1));
        break;
    case R_STATS_RX_BCASTL:
    case R_STATS_RX_BCASTH:
        r = s->stats.rx_bcast >> (32 * (addr & 1));
        break;
    case R_STATS_RX_MCASTL:
    case R_STATS_RX_MCASTH:
        r = s->stats.rx_mcast >> (32 * (addr & 1));
        break;

    case R_MC:
    case R_MWD:
    case R_MRD:
        r = s->mii.regs[addr & 3];
        break;

    case R_UAW0:
    case R_UAW1:
        r = s->uaw[addr & 1];
        break;

    case R_UAWU:
    case R_UAWL:
        r = s->ext_uaw[addr & 1];
        break;

    case R_FMI:
        r = s->fmi;
        break;

    case R_AF0:
    case R_AF1:
        r = s->maddr[s->fmi & 3][addr & 1];
        break;

    case 0x8000 ... 0x83ff:
        r = s->ext_mtable[addr - 0x8000];
        break;

    default:
        if (addr < ARRAY_SIZE(s->regs)) {
            r = s->regs[addr];
        }
        DENET(qemu_log("%s addr=" HWADDR_FMT_plx " v=%x\n",
                       __func__, addr * 4, r));
        break;
    }
    return r;
}

static void enet_write(void *opaque, hwaddr addr,
                       uint64_t value, unsigned size)
{
    XilinxAXIEnet *s = opaque;
    struct TEMAC *t = &s->TEMAC;

    addr >>= 2;
    switch (addr) {
    case R_RCW0:
    case R_RCW1:
        s->rcw[addr & 1] = value;
        if ((addr & 1) && value & RCW1_RST) {
            axienet_rx_reset(s);
        } else {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;

    case R_TC:
        s->tc = value;
        if (value & TC_RST) {
            axienet_tx_reset(s);
        }
        break;

    case R_EMMC:
        s->emmc = value;
        break;

    case R_PHYC:
        s->phyc = value;
        break;

    case R_MC:
        value &= ((1 << 7) - 1);

        /* Enable the MII. */
        if (value & MC_EN) {
            unsigned int miiclkdiv = value & ((1 << 6) - 1);
            if (!miiclkdiv) {
                qemu_log("AXIENET: MDIO enabled but MDIOCLK is zero!\n");
            }
        }
        s->mii.mc = value;
        break;

    case R_MCR: {
        unsigned int phyaddr = (value >> 24) & 0x1f;
        unsigned int regaddr = (value >> 16) & 0x1f;
        unsigned int op = (value >> 14) & 3;
        unsigned int initiate = (value >> 11) & 1;

        if (initiate) {
            if (op == 1) {
                mdio_write_req(&t->mdio_bus, phyaddr, regaddr, s->mii.mwd);
            } else if (op == 2) {
                s->mii.mrd = mdio_read_req(&t->mdio_bus, phyaddr, regaddr);
            } else {
                qemu_log("AXIENET: invalid MDIOBus OP=%d\n", op);
            }
        }
        s->mii.mcr = value;
        break;
    }

    case R_MWD:
    case R_MRD:
        s->mii.regs[addr & 3] = value;
        break;


    case R_UAW0:
    case R_UAW1:
        s->uaw[addr & 1] = value;
        break;

    case R_UAWL:
    case R_UAWU:
        s->ext_uaw[addr & 1] = value;
        break;

    case R_FMI:
        s->fmi = value;
        break;

    case R_AF0:
    case R_AF1:
        s->maddr[s->fmi & 3][addr & 1] = value;
        break;

    case R_IS:
        s->regs[addr] &= ~value;
        break;

    case 0x8000 ... 0x83ff:
        s->ext_mtable[addr - 0x8000] = value;
        break;

    default:
        DENET(qemu_log("%s addr=" HWADDR_FMT_plx " v=%x\n",
                       __func__, addr * 4, (unsigned)value));
        if (addr < ARRAY_SIZE(s->regs)) {
            s->regs[addr] = value;
        }
        break;
    }
    enet_update_irq(s);
}

static const MemoryRegionOps enet_ops = {
    .read = enet_read,
    .write = enet_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static int eth_can_rx(XilinxAXIEnet *s)
{
    /* RX enabled? */
    return !s->rxsize && !axienet_rx_resetting(s) && axienet_rx_enabled(s);
}

static int enet_match_addr(const uint8_t *buf, uint32_t f0, uint32_t f1)
{
    int match = 1;

    if (memcmp(buf, &f0, 4)) {
        match = 0;
    }

    if (buf[4] != (f1 & 0xff) || buf[5] != ((f1 >> 8) & 0xff)) {
        match = 0;
    }

    return match;
}

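/*
 * Push pending receive control words and frame data into the connected
 * stream sinks (the AXI DMA).  If a sink cannot accept more data yet, it
 * calls back into this function via the notify callback registered through
 * stream_can_push().
 */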
static void axienet_eth_rx_notify(void *opaque)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(opaque);

    while (s->rxappsize && stream_can_push(s->tx_control_dev,
                                           axienet_eth_rx_notify, s)) {
        size_t ret = stream_push(s->tx_control_dev,
                                 (void *)s->rxapp + CONTROL_PAYLOAD_SIZE
                                 - s->rxappsize, s->rxappsize, true);
        s->rxappsize -= ret;
    }

    while (s->rxsize && stream_can_push(s->tx_data_dev,
                                        axienet_eth_rx_notify, s)) {
        size_t ret = stream_push(s->tx_data_dev, (void *)s->rxmem + s->rxpos,
                                 s->rxsize, true);
        s->rxsize -= ret;
        s->rxpos += ret;
        if (!s->rxsize) {
            s->regs[R_IS] |= IS_RX_COMPLETE;
            if (s->need_flush) {
                s->need_flush = false;
                qemu_flush_queued_packets(qemu_get_queue(s->nic));
            }
        }
    }
    enet_update_irq(s);
}

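/*
 * NIC receive handler.  Frames from the network backend are run through the
 * address filters, a 16-bit checksum over the bytes after the Ethernet
 * header is stored in the receive application words, and the frame is then
 * handed to the connected stream sinks via axienet_eth_rx_notify().
 */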
static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
{
    XilinxAXIEnet *s = qemu_get_nic_opaque(nc);
    static const unsigned char sa_bcast[6] = {0xff, 0xff, 0xff,
                                              0xff, 0xff, 0xff};
    static const unsigned char sa_ipmcast[3] = {0x01, 0x00, 0x52};
    uint32_t app[CONTROL_PAYLOAD_WORDS] = {0};
    int promisc = s->fmi & (1 << 31);
    int unicast, broadcast, multicast, ip_multicast = 0;
    uint32_t csum32;
    uint16_t csum16;
    int i;

    DENET(qemu_log("%s: %zd bytes\n", __func__, size));

    if (!eth_can_rx(s)) {
        s->need_flush = true;
        return 0;
    }

    unicast = ~buf[0] & 0x1;
    broadcast = memcmp(buf, sa_bcast, 6) == 0;
    multicast = !unicast && !broadcast;
    if (multicast && (memcmp(sa_ipmcast, buf, sizeof sa_ipmcast) == 0)) {
        ip_multicast = 1;
    }

    /* Jumbo or vlan sizes ? */
    if (!(s->rcw[1] & RCW1_JUM)) {
        if (size > 1518 && size <= 1522 && !(s->rcw[1] & RCW1_VLAN)) {
            return size;
        }
    }

    /* Basic Address filters. If you want to use the extended filters
       you'll generally have to place the ethernet mac into promiscuous mode
       to avoid the basic filtering from dropping most frames. */
    if (!promisc) {
        if (unicast) {
            if (!enet_match_addr(buf, s->uaw[0], s->uaw[1])) {
                return size;
            }
        } else {
            if (broadcast) {
                /* Broadcast. */
                if (s->regs[R_RAF] & RAF_BCAST_REJ) {
                    return size;
                }
            } else {
                int drop = 1;

                /* Multicast. */
                if (s->regs[R_RAF] & RAF_MCAST_REJ) {
                    return size;
                }

                for (i = 0; i < 4; i++) {
                    if (enet_match_addr(buf, s->maddr[i][0], s->maddr[i][1])) {
                        drop = 0;
                        break;
                    }
                }

                if (drop) {
                    return size;
                }
            }
        }
    }

    /* Extended mcast filtering enabled? */
    if (axienet_newfunc_enabled(s) && axienet_extmcf_enabled(s)) {
        if (unicast) {
            if (!enet_match_addr(buf, s->ext_uaw[0], s->ext_uaw[1])) {
                return size;
            }
        } else {
            if (broadcast) {
                /* Broadcast. ??? */
                if (s->regs[R_RAF] & RAF_BCAST_REJ) {
                    return size;
                }
            } else {
                int idx, bit;

                /* Multicast. */
                if (!memcmp(buf, sa_ipmcast, 3)) {
                    return size;
                }

                idx = (buf[4] & 0x7f) << 8;
                idx |= buf[5];

                bit = 1 << (idx & 0x1f);
                idx >>= 5;

                if (!(s->ext_mtable[idx] & bit)) {
                    return size;
                }
            }
        }
    }

    if (size < 12) {
        s->regs[R_IS] |= IS_RX_REJECT;
        enet_update_irq(s);
        return -1;
    }

    if (size > (s->c_rxmem - 4)) {
        size = s->c_rxmem - 4;
    }

    memcpy(s->rxmem, buf, size);
    memset(s->rxmem + size, 0, 4); /* Clear the FCS. */

    if (s->rcw[1] & RCW1_FCS) {
        size += 4; /* fcs is inband. */
    }

    app[0] = 5 << 28;
    csum32 = net_checksum_add(size - 14, (uint8_t *)s->rxmem + 14);
    /* Fold it once. */
    csum32 = (csum32 & 0xffff) + (csum32 >> 16);
    /* And twice to get rid of possible carries. */
    csum16 = (csum32 & 0xffff) + (csum32 >> 16);
    app[3] = csum16;
    app[4] = size & 0xffff;

    s->stats.rx_bytes += size;
    s->stats.rx++;
    if (multicast) {
        s->stats.rx_mcast++;
        app[2] |= 1 | (ip_multicast << 1);
    } else if (broadcast) {
        s->stats.rx_bcast++;
        app[2] |= 1 << 3;
    }

    /* Good frame. */
    app[2] |= 1 << 6;

    s->rxsize = size;
    s->rxpos = 0;
    for (i = 0; i < ARRAY_SIZE(app); ++i) {
        app[i] = cpu_to_le32(app[i]);
    }
    s->rxappsize = CONTROL_PAYLOAD_SIZE;
    memcpy(s->rxapp, app, s->rxappsize);
    axienet_eth_rx_notify(s);

    enet_update_irq(s);
    return s->rxpos;
}

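/*
 * Sink for the control stream from the DMA: a fixed-size block of five
 * application words per packet, stored in s->hdr and consumed by the data
 * stream push handler for checksum offload.
 */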
static size_t
xilinx_axienet_control_stream_push(StreamSink *obj, uint8_t *buf, size_t len,
                                   bool eop)
{
    int i;
    XilinxAXIEnetStreamSink *cs = XILINX_AXI_ENET_CONTROL_STREAM(obj);
    XilinxAXIEnet *s = cs->enet;

    assert(eop);
    if (len != CONTROL_PAYLOAD_SIZE) {
        hw_error("AXI Enet requires %d byte control stream payload\n",
                 (int)CONTROL_PAYLOAD_SIZE);
    }

    memcpy(s->hdr, buf, len);

    for (i = 0; i < ARRAY_SIZE(s->hdr); ++i) {
        s->hdr[i] = le32_to_cpu(s->hdr[i]);
    }
    return len;
}

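/*
 * Sink for the transmit data stream from the DMA.  Fragments are buffered in
 * txmem until the end-of-packet flag arrives; if the control words request
 * it, a partial checksum is computed and written back into the frame before
 * it is handed to the network backend.
 */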
static size_t
xilinx_axienet_data_stream_push(StreamSink *obj, uint8_t *buf, size_t size,
                                bool eop)
{
    XilinxAXIEnetStreamSink *ds = XILINX_AXI_ENET_DATA_STREAM(obj);
    XilinxAXIEnet *s = ds->enet;

    /* TX enable ? */
    if (!(s->tc & TC_TX)) {
        return size;
    }

    if (s->txpos + size > s->c_txmem) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Packet larger than txmem\n",
                      TYPE_XILINX_AXI_ENET);
        s->txpos = 0;
        return size;
    }

    if (s->txpos == 0 && eop) {
        /* Fast path single fragment. */
        s->txpos = size;
    } else {
        memcpy(s->txmem + s->txpos, buf, size);
        buf = s->txmem;
        s->txpos += size;

        if (!eop) {
            return size;
        }
    }

    /* Jumbo or vlan sizes ? */
    if (!(s->tc & TC_JUM)) {
        if (s->txpos > 1518 && s->txpos <= 1522 && !(s->tc & TC_VLAN)) {
            s->txpos = 0;
            return size;
        }
    }

    if (s->hdr[0] & 1) {
        unsigned int start_off = s->hdr[1] >> 16;
        unsigned int write_off = s->hdr[1] & 0xffff;
        uint32_t tmp_csum;
        uint16_t csum;

        tmp_csum = net_checksum_add(s->txpos - start_off,
                                    buf + start_off);
        /* Accumulate the seed. */
        tmp_csum += s->hdr[2] & 0xffff;

        /* Fold the 32bit partial checksum. */
        csum = net_checksum_finish(tmp_csum);

        /* Writeback. */
        buf[write_off] = csum >> 8;
        buf[write_off + 1] = csum & 0xff;
    }

    qemu_send_packet(qemu_get_queue(s->nic), buf, s->txpos);

    s->stats.tx_bytes += s->txpos;
    s->regs[R_IS] |= IS_TX_COMPLETE;
    enet_update_irq(s);

    s->txpos = 0;
    return size;
}

static NetClientInfo net_xilinx_enet_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = eth_rx,
};

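/*
 * Realize: expose the embedded stream sinks through "enet" back-links,
 * create the NIC backend, initialise the PHY and attach it to the MDIO bus
 * at the configured address, and allocate the RX/TX staging buffers.
 */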
static void xilinx_enet_realize(DeviceState *dev, Error **errp)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(dev);
    XilinxAXIEnetStreamSink *ds = XILINX_AXI_ENET_DATA_STREAM(&s->rx_data_dev);
    XilinxAXIEnetStreamSink *cs = XILINX_AXI_ENET_CONTROL_STREAM(
                                                            &s->rx_control_dev);

    object_property_add_link(OBJECT(ds), "enet", "xlnx.axi-ethernet",
                             (Object **) &ds->enet,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_STRONG);
    object_property_add_link(OBJECT(cs), "enet", "xlnx.axi-ethernet",
                             (Object **) &cs->enet,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_STRONG);
    object_property_set_link(OBJECT(ds), "enet", OBJECT(s), &error_abort);
    object_property_set_link(OBJECT(cs), "enet", OBJECT(s), &error_abort);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_xilinx_enet_info, &s->conf,
                          object_get_typename(OBJECT(dev)), dev->id,
                          &dev->mem_reentrancy_guard, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);

    tdk_init(&s->TEMAC.phy);
    mdio_attach(&s->TEMAC.mdio_bus, &s->TEMAC.phy, s->c_phyaddr);

    s->TEMAC.parent = s;

    s->rxmem = g_malloc(s->c_rxmem);
    s->txmem = g_malloc(s->c_txmem);
}

static void xilinx_enet_init(Object *obj)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    object_initialize_child(OBJECT(s), "axistream-connected-target",
                            &s->rx_data_dev, TYPE_XILINX_AXI_ENET_DATA_STREAM);
    object_initialize_child(OBJECT(s), "axistream-control-connected-target",
                            &s->rx_control_dev,
                            TYPE_XILINX_AXI_ENET_CONTROL_STREAM);
    sysbus_init_irq(sbd, &s->irq);

    memory_region_init_io(&s->iomem, OBJECT(s), &enet_ops, s, "enet", 0x40000);
    sysbus_init_mmio(sbd, &s->iomem);
}

static Property xilinx_enet_properties[] = {
    DEFINE_PROP_UINT32("phyaddr", XilinxAXIEnet, c_phyaddr, 7),
    DEFINE_PROP_UINT32("rxmem", XilinxAXIEnet, c_rxmem, 0x1000),
    DEFINE_PROP_UINT32("txmem", XilinxAXIEnet, c_txmem, 0x1000),
    DEFINE_NIC_PROPERTIES(XilinxAXIEnet, conf),
    DEFINE_PROP_LINK("axistream-connected", XilinxAXIEnet,
                     tx_data_dev, TYPE_STREAM_SINK, StreamSink *),
    DEFINE_PROP_LINK("axistream-control-connected", XilinxAXIEnet,
                     tx_control_dev, TYPE_STREAM_SINK, StreamSink *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xilinx_enet_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = xilinx_enet_realize;
    device_class_set_props(dc, xilinx_enet_properties);
    device_class_set_legacy_reset(dc, xilinx_axienet_reset);
}

static void xilinx_enet_control_stream_class_init(ObjectClass *klass,
                                                  void *data)
{
    StreamSinkClass *ssc = STREAM_SINK_CLASS(klass);

    ssc->push = xilinx_axienet_control_stream_push;
}

static void xilinx_enet_data_stream_class_init(ObjectClass *klass, void *data)
{
    StreamSinkClass *ssc = STREAM_SINK_CLASS(klass);

    ssc->push = xilinx_axienet_data_stream_push;
}

static const TypeInfo xilinx_enet_info = {
    .name = TYPE_XILINX_AXI_ENET,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XilinxAXIEnet),
    .class_init = xilinx_enet_class_init,
    .instance_init = xilinx_enet_init,
};

static const TypeInfo xilinx_enet_data_stream_info = {
    .name = TYPE_XILINX_AXI_ENET_DATA_STREAM,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(XilinxAXIEnetStreamSink),
    .class_init = xilinx_enet_data_stream_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SINK },
        { }
    }
};

static const TypeInfo xilinx_enet_control_stream_info = {
    .name = TYPE_XILINX_AXI_ENET_CONTROL_STREAM,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(XilinxAXIEnetStreamSink),
    .class_init = xilinx_enet_control_stream_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SINK },
        { }
    }
};

static void xilinx_enet_register_types(void)
{
    type_register_static(&xilinx_enet_info);
    type_register_static(&xilinx_enet_data_stream_info);
    type_register_static(&xilinx_enet_control_stream_info);
}

type_init(xilinx_enet_register_types)