/* xref: /openbmc/qemu/hw/net/sunhme.c (revision 1cf5ae51) */
/*
 * QEMU Sun Happy Meal Ethernet emulation
 *
 * Copyright (c) 2017 Mark Cave-Ayland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "hw/net/mii.h"
#include "net/net.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"
#include "sysemu/sysemu.h"
#include "trace.h"

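/*
 * Register layout of the 0x8000 byte PCI BAR as mapped in sunhme_realize():
 * SEB (global/interrupt) at 0x0, ETX at 0x2000, ERX at 0x4000, MAC at 0x6000
 * and MIF at 0x7000
 */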
#define HME_REG_SIZE                   0x8000

#define HME_SEB_REG_SIZE               0x2000

#define HME_SEBI_RESET                 0x0
#define HME_SEB_RESET_ETX              0x1
#define HME_SEB_RESET_ERX              0x2

#define HME_SEBI_STAT                  0x100
#define HME_SEBI_STAT_LINUXBUG         0x108
#define HME_SEB_STAT_RXTOHOST          0x10000
#define HME_SEB_STAT_NORXD             0x20000
#define HME_SEB_STAT_MIFIRQ            0x800000
#define HME_SEB_STAT_HOSTTOTX          0x1000000
#define HME_SEB_STAT_TXALL             0x2000000

#define HME_SEBI_IMASK                 0x104
#define HME_SEBI_IMASK_LINUXBUG        0x10c

#define HME_ETX_REG_SIZE               0x2000

#define HME_ETXI_PENDING               0x0

#define HME_ETXI_RING                  0x8
#define HME_ETXI_RING_ADDR             0xffffff00
#define HME_ETXI_RING_OFFSET           0xff

#define HME_ETXI_RSIZE                 0x2c

#define HME_ERX_REG_SIZE               0x2000

#define HME_ERXI_CFG                   0x0
#define HME_ERX_CFG_RINGSIZE           0x600
#define HME_ERX_CFG_RINGSIZE_SHIFT     9
#define HME_ERX_CFG_BYTEOFFSET         0x38
#define HME_ERX_CFG_BYTEOFFSET_SHIFT   3
#define HME_ERX_CFG_CSUMSTART          0x7f0000
#define HME_ERX_CFG_CSUMSHIFT          16

#define HME_ERXI_RING                  0x4
#define HME_ERXI_RING_ADDR             0xffffff00
#define HME_ERXI_RING_OFFSET           0xff

#define HME_MAC_REG_SIZE               0x1000

#define HME_MACI_TXCFG                 0x20c
#define HME_MAC_TXCFG_ENABLE           0x1

#define HME_MACI_RXCFG                 0x30c
#define HME_MAC_RXCFG_ENABLE           0x1
#define HME_MAC_RXCFG_PMISC            0x40
#define HME_MAC_RXCFG_HENABLE          0x800

#define HME_MACI_MACADDR2              0x318
#define HME_MACI_MACADDR1              0x31c
#define HME_MACI_MACADDR0              0x320

#define HME_MACI_HASHTAB3              0x340
#define HME_MACI_HASHTAB2              0x344
#define HME_MACI_HASHTAB1              0x348
#define HME_MACI_HASHTAB0              0x34c

#define HME_MIF_REG_SIZE               0x20

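/*
 * The MIF Frame/Output register carries an MDIO management frame: the fields
 * below are its ST (start), OPC (opcode), PHYAD (PHY address), REGAD
 * (register address), TA (turnaround) and DATA portions
 */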
#define HME_MIFI_FO                    0xc
#define HME_MIF_FO_ST                  0xc0000000
#define HME_MIF_FO_ST_SHIFT            30
#define HME_MIF_FO_OPC                 0x30000000
#define HME_MIF_FO_OPC_SHIFT           28
#define HME_MIF_FO_PHYAD               0x0f800000
#define HME_MIF_FO_PHYAD_SHIFT         23
#define HME_MIF_FO_REGAD               0x007c0000
#define HME_MIF_FO_REGAD_SHIFT         18
#define HME_MIF_FO_TAMSB               0x20000
#define HME_MIF_FO_TALSB               0x10000
#define HME_MIF_FO_DATA                0xffff

#define HME_MIFI_CFG                   0x10
#define HME_MIF_CFG_MDI0               0x100
#define HME_MIF_CFG_MDI1               0x200

#define HME_MIFI_IMASK                 0x14

#define HME_MIFI_STAT                  0x18


/* Wired HME PHY addresses */
#define HME_PHYAD_INTERNAL     1
#define HME_PHYAD_EXTERNAL     0

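/*
 * Values of the 2-bit ST and OPC fields of an MDIO frame as decoded in
 * sunhme_mif_write(): start = 0b01, read opcode = 0b10, write opcode = 0b01
 */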
#define MII_COMMAND_START      0x1
#define MII_COMMAND_READ       0x2
#define MII_COMMAND_WRITE      0x1

#define TYPE_SUNHME "sunhme"
#define SUNHME(obj) OBJECT_CHECK(SunHMEState, (obj), TYPE_SUNHME)

/* Maximum size of the TX/RX FIFO buffer */
#define HME_FIFO_SIZE          0x800

/* Size of TX/RX descriptor */
#define HME_DESC_SIZE          0x8

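/*
 * Each 8-byte descriptor is a status/flags word followed by a buffer address
 * word. HME_XD_OFL (RX overflow) and HME_XD_SOP (TX start of packet) share
 * the same bit value but apply to RX and TX descriptors respectively
 */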
#define HME_XD_OWN             0x80000000
#define HME_XD_OFL             0x40000000
#define HME_XD_SOP             0x40000000
#define HME_XD_EOP             0x20000000
#define HME_XD_RXLENMSK        0x3fff0000
#define HME_XD_RXLENSHIFT      16
#define HME_XD_RXCKSUM         0xffff
#define HME_XD_TXLENMSK        0x00001fff
#define HME_XD_TXCKSUM         0x10000000
#define HME_XD_TXCSSTUFF       0xff00000
#define HME_XD_TXCSSTUFFSHIFT  20
#define HME_XD_TXCSSTART       0xfc000
#define HME_XD_TXCSSTARTSHIFT  14

#define HME_MII_REGS_SIZE      0x20

typedef struct SunHMEState {
    /*< private >*/
    PCIDevice parent_obj;

    NICState *nic;
    NICConf conf;

    MemoryRegion hme;
    MemoryRegion sebreg;
    MemoryRegion etxreg;
    MemoryRegion erxreg;
    MemoryRegion macreg;
    MemoryRegion mifreg;

    uint32_t sebregs[HME_SEB_REG_SIZE >> 2];
    uint32_t etxregs[HME_ETX_REG_SIZE >> 2];
    uint32_t erxregs[HME_ERX_REG_SIZE >> 2];
    uint32_t macregs[HME_MAC_REG_SIZE >> 2];
    uint32_t mifregs[HME_MIF_REG_SIZE >> 2];

    uint16_t miiregs[HME_MII_REGS_SIZE];
} SunHMEState;

static Property sunhme_properties[] = {
    DEFINE_NIC_PROPERTIES(SunHMEState, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void sunhme_reset_tx(SunHMEState *s)
{
    /* Indicate TX reset complete */
    s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ETX;
}

static void sunhme_reset_rx(SunHMEState *s)
{
    /* Indicate RX reset complete */
    s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ERX;
}

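/*
 * Recompute the PCI IRQ level: any unmasked MIF status bit is folded into the
 * SEB status as HME_SEB_STAT_MIFIRQ, and the line is asserted while any
 * unmasked SEB status bit remains set
 */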
static void sunhme_update_irq(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int level;

    /* MIF interrupt mask (16-bit) */
    uint32_t mifmask = ~(s->mifregs[HME_MIFI_IMASK >> 2]) & 0xffff;
    uint32_t mif = s->mifregs[HME_MIFI_STAT >> 2] & mifmask;

    /* Main SEB interrupt mask (include MIF status from above) */
    uint32_t sebmask = ~(s->sebregs[HME_SEBI_IMASK >> 2]) &
                       ~HME_SEB_STAT_MIFIRQ;
    uint32_t seb = s->sebregs[HME_SEBI_STAT >> 2] & sebmask;
    if (mif) {
        seb |= HME_SEB_STAT_MIFIRQ;
    }

    level = (seb ? 1 : 0);
    trace_sunhme_update_irq(mifmask, mif, sebmask, seb, level);

    pci_set_irq(d, level);
}

static void sunhme_seb_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_seb_write(addr, val);

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    switch (addr) {
    case HME_SEBI_RESET:
        if (val & HME_SEB_RESET_ETX) {
            sunhme_reset_tx(s);
        }
        if (val & HME_SEB_RESET_ERX) {
            sunhme_reset_rx(s);
        }
        val = s->sebregs[HME_SEBI_RESET >> 2];
        break;
    }

    s->sebregs[addr >> 2] = val;
}

static uint64_t sunhme_seb_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    val = s->sebregs[addr >> 2];

    switch (addr) {
    case HME_SEBI_STAT:
        /* Autoclear status (except MIF) */
        s->sebregs[HME_SEBI_STAT >> 2] &= HME_SEB_STAT_MIFIRQ;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_seb_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_seb_ops = {
    .read = sunhme_seb_read,
    .write = sunhme_seb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_transmit(SunHMEState *s);

static void sunhme_etx_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_etx_write(addr, val);

    switch (addr) {
    case HME_ETXI_PENDING:
        if (val) {
            sunhme_transmit(s);
        }
        break;
    }

    s->etxregs[addr >> 2] = val;
}

static uint64_t sunhme_etx_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->etxregs[addr >> 2];

    trace_sunhme_etx_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_etx_ops = {
    .read = sunhme_etx_read,
    .write = sunhme_etx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_erx_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_erx_write(addr, val);

    s->erxregs[addr >> 2] = val;
}

static uint64_t sunhme_erx_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->erxregs[addr >> 2];

    trace_sunhme_erx_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_erx_ops = {
    .read = sunhme_erx_read,
    .write = sunhme_erx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_mac_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t oldval = s->macregs[addr >> 2];

    trace_sunhme_mac_write(addr, val);

    s->macregs[addr >> 2] = val;

    switch (addr) {
    case HME_MACI_RXCFG:
        if (!(oldval & HME_MAC_RXCFG_ENABLE) &&
             (val & HME_MAC_RXCFG_ENABLE)) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}

static uint64_t sunhme_mac_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->macregs[addr >> 2];

    trace_sunhme_mac_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_mac_ops = {
    .read = sunhme_mac_read,
    .write = sunhme_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_mii_write(SunHMEState *s, uint8_t reg, uint16_t data)
{
    trace_sunhme_mii_write(reg, data);

    switch (reg) {
    case MII_BMCR:
        if (data & MII_BMCR_RESET) {
            /* Autoclear reset bit, enable auto negotiation */
            data &= ~MII_BMCR_RESET;
            data |= MII_BMCR_AUTOEN;
        }
        if (data & MII_BMCR_ANRESTART) {
            /* Autoclear auto negotiation restart */
            data &= ~MII_BMCR_ANRESTART;

            /* Indicate negotiation complete */
            s->miiregs[MII_BMSR] |= MII_BMSR_AN_COMP;

            if (!qemu_get_queue(s->nic)->link_down) {
                s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
                s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
            }
        }
        break;
    }

    s->miiregs[reg] = data;
}

static uint16_t sunhme_mii_read(SunHMEState *s, uint8_t reg)
{
    uint16_t data = s->miiregs[reg];

    trace_sunhme_mii_read(reg, data);

    return data;
}

static void sunhme_mif_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint8_t cmd, reg;
    uint16_t data;

    trace_sunhme_mif_write(addr, val);

    switch (addr) {
    case HME_MIFI_CFG:
        /* MDI0 and MDI1 are read-only: preserve their current values */
        val &= ~(HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        val |= s->mifregs[HME_MIFI_CFG >> 2] &
               (HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        break;
    case HME_MIFI_FO:
        /* Detect start of MII command */
        if ((val & HME_MIF_FO_ST) >> HME_MIF_FO_ST_SHIFT
                != MII_COMMAND_START) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        /* Internal PHY only */
        if ((val & HME_MIF_FO_PHYAD) >> HME_MIF_FO_PHYAD_SHIFT
                != HME_PHYAD_INTERNAL) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        cmd = (val & HME_MIF_FO_OPC) >> HME_MIF_FO_OPC_SHIFT;
        reg = (val & HME_MIF_FO_REGAD) >> HME_MIF_FO_REGAD_SHIFT;
        data = (val & HME_MIF_FO_DATA);

        switch (cmd) {
        case MII_COMMAND_WRITE:
            sunhme_mii_write(s, reg, data);
            break;

        case MII_COMMAND_READ:
            val &= ~HME_MIF_FO_DATA;
            val |= sunhme_mii_read(s, reg);
            break;
        }

        val |= HME_MIF_FO_TALSB;
        break;
    }

    s->mifregs[addr >> 2] = val;
}

static uint64_t sunhme_mif_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->mifregs[addr >> 2];

    switch (addr) {
    case HME_MIFI_STAT:
        /* Autoclear MIF interrupt status */
        s->mifregs[HME_MIFI_STAT >> 2] = 0;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_mif_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_mif_ops = {
    .read = sunhme_mif_read,
    .write = sunhme_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_transmit_frame(SunHMEState *s, uint8_t *buf, int size)
{
    qemu_send_packet(qemu_get_queue(s->nic), buf, size);
}

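/* The TX descriptor ring holds (ETX ring size register + 1) * 16 entries */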
static inline int sunhme_get_tx_ring_count(SunHMEState *s)
{
    return (s->etxregs[HME_ETXI_RSIZE >> 2] + 1) << 4;
}

static inline int sunhme_get_tx_ring_nr(SunHMEState *s)
{
    return s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_OFFSET;
}

static inline void sunhme_set_tx_ring_nr(SunHMEState *s, int i)
{
    uint32_t ring = s->etxregs[HME_ETXI_RING >> 2] & ~HME_ETXI_RING_OFFSET;
    ring |= i & HME_ETXI_RING_OFFSET;

    s->etxregs[HME_ETXI_RING >> 2] = ring;
}

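/*
 * Walk the TX ring while descriptors are owned by the device: gather each
 * fragment into xmit_buffer, optionally accumulate and stuff the TX checksum,
 * hand the frame to the net layer on EOP, then return descriptor ownership to
 * the host and raise the TX interrupts
 */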
static void sunhme_transmit(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t tb, addr;
    uint32_t intstatus, status, buffer, sum = 0;
    int cr, nr, len, xmit_pos, csum_offset = 0, csum_stuff_offset = 0;
    uint16_t csum = 0;
    uint8_t xmit_buffer[HME_FIFO_SIZE];

    tb = s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_ADDR;
    nr = sunhme_get_tx_ring_count(s);
    cr = sunhme_get_tx_ring_nr(s);

    pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    xmit_pos = 0;
    while (status & HME_XD_OWN) {
        trace_sunhme_tx_desc(buffer, status, cr, nr);

        /* Copy data into transmit buffer */
        addr = buffer;
        len = status & HME_XD_TXLENMSK;

        if (xmit_pos + len > HME_FIFO_SIZE) {
            len = HME_FIFO_SIZE - xmit_pos;
        }

        pci_dma_read(d, addr, &xmit_buffer[xmit_pos], len);
        xmit_pos += len;

        /* Detect start of packet for TX checksum */
        if (status & HME_XD_SOP) {
            sum = 0;
            csum_offset = (status & HME_XD_TXCSSTART) >> HME_XD_TXCSSTARTSHIFT;
            csum_stuff_offset = (status & HME_XD_TXCSSTUFF) >>
                                HME_XD_TXCSSTUFFSHIFT;
        }

        if (status & HME_XD_TXCKSUM) {
            /* Only start calculation from csum_offset */
            if (xmit_pos - len <= csum_offset && xmit_pos > csum_offset) {
                sum += net_checksum_add(xmit_pos - csum_offset,
                                        xmit_buffer + csum_offset);
                trace_sunhme_tx_xsum_add(csum_offset, xmit_pos - csum_offset);
            } else {
                sum += net_checksum_add(len, xmit_buffer + xmit_pos - len);
                trace_sunhme_tx_xsum_add(xmit_pos - len, len);
            }
        }

        /* Detect end of packet for TX checksum */
        if (status & HME_XD_EOP) {
            /* Stuff the checksum if required */
            if (status & HME_XD_TXCKSUM) {
                csum = net_checksum_finish(sum);
                stw_be_p(xmit_buffer + csum_stuff_offset, csum);
                trace_sunhme_tx_xsum_stuff(csum, csum_stuff_offset);
            }

            if (s->macregs[HME_MACI_TXCFG >> 2] & HME_MAC_TXCFG_ENABLE) {
                sunhme_transmit_frame(s, xmit_buffer, xmit_pos);
                trace_sunhme_tx_done(xmit_pos);
            }
        }

        /* Update status */
        status &= ~HME_XD_OWN;
        pci_dma_write(d, tb + cr * HME_DESC_SIZE, &status, 4);

        /* Move on to the next descriptor */
        cr++;
        if (cr >= nr) {
            cr = 0;
        }
        sunhme_set_tx_ring_nr(s, cr);

        pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
        pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

        /* Indicate TX complete */
        intstatus = s->sebregs[HME_SEBI_STAT >> 2];
        intstatus |= HME_SEB_STAT_HOSTTOTX;
        s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

        /* Autoclear TX pending */
        s->etxregs[HME_ETXI_PENDING >> 2] = 0;

        sunhme_update_irq(s);
    }

    /* TX FIFO now clear */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_TXALL;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;
    sunhme_update_irq(s);
}

static bool sunhme_can_receive(NetClientState *nc)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);

    return !!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE);
}

static void sunhme_link_status_changed(NetClientState *nc)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);

    if (nc->link_down) {
        s->miiregs[MII_ANLPAR] &= ~MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] &= ~MII_BMSR_LINK_ST;
    } else {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Exact bits unknown */
    s->mifregs[HME_MIFI_STAT >> 2] = 0xffff;
    sunhme_update_irq(s);
}

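/* The 2-bit ERX ring size field selects 32, 64, 128 or 256 descriptors */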
static inline int sunhme_get_rx_ring_count(SunHMEState *s)
{
    uint32_t rings = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_RINGSIZE)
                      >> HME_ERX_CFG_RINGSIZE_SHIFT;

    switch (rings) {
    case 0:
        return 32;
    case 1:
        return 64;
    case 2:
        return 128;
    case 3:
        return 256;
    }

    return 0;
}

static inline int sunhme_get_rx_ring_nr(SunHMEState *s)
{
    return s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_OFFSET;
}

static inline void sunhme_set_rx_ring_nr(SunHMEState *s, int i)
{
    uint32_t ring = s->erxregs[HME_ERXI_RING >> 2] & ~HME_ERXI_RING_OFFSET;
    ring |= i & HME_ERXI_RING_OFFSET;

    s->erxregs[HME_ERXI_RING >> 2] = ring;
}

#define MIN_BUF_SIZE 60

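/*
 * Receive path: filter on destination MAC (local, broadcast, hash or
 * promiscuous), DMA the frame into the buffer of the next device-owned RX
 * descriptor, record length and checksum in its status word and return it
 * to the host before raising the RX interrupt
 */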
static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t rb, addr;
    uint32_t intstatus, status, buffer, buffersize, sum;
    uint16_t csum;
    uint8_t buf1[60];
    int nr, cr, len, rxoffset, csum_offset;

    trace_sunhme_rx_incoming(size);

    /* Do nothing if MAC RX disabled */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE)) {
        return 0;
    }

    trace_sunhme_rx_filter_destmac(buf[0], buf[1], buf[2],
                                   buf[3], buf[4], buf[5]);

    /* Check destination MAC address */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_PMISC)) {
        /* Try and match local MAC address */
        if (((s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff00) >> 8) == buf[0] &&
             (s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff) == buf[1] &&
            ((s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff00) >> 8) == buf[2] &&
             (s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff) == buf[3] &&
            ((s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff00) >> 8) == buf[4] &&
             (s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff) == buf[5]) {
            /* Matched local MAC address */
            trace_sunhme_rx_filter_local_match();
        } else if (buf[0] == 0xff && buf[1] == 0xff && buf[2] == 0xff &&
                   buf[3] == 0xff && buf[4] == 0xff && buf[5] == 0xff) {
            /* Matched broadcast address */
            trace_sunhme_rx_filter_bcast_match();
        } else if (s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_HENABLE) {
            /* Didn't match local address, check hash filter */
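            /* The top 6 bits of the little-endian CRC select one of 64 hash
               bits spread across the four 16-bit HASHTAB registers, with
               HASHTAB0 holding bits 0-15 */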
            int mcast_idx = net_crc32_le(buf, ETH_ALEN) >> 26;
            if (!(s->macregs[(HME_MACI_HASHTAB0 >> 2) - (mcast_idx >> 4)] &
                    (1 << (mcast_idx & 0xf)))) {
                /* Didn't match hash filter */
                trace_sunhme_rx_filter_hash_nomatch();
                trace_sunhme_rx_filter_reject();
                return -1;
            } else {
                trace_sunhme_rx_filter_hash_match();
            }
        } else {
            /* Not for us */
            trace_sunhme_rx_filter_reject();
            return -1;
        }
    } else {
        trace_sunhme_rx_filter_promisc_match();
    }

    trace_sunhme_rx_filter_accept();

    /* Pad undersized frames up to the minimum frame size */
    if (size < MIN_BUF_SIZE) {
        memcpy(buf1, buf, size);
        memset(buf1 + size, 0, MIN_BUF_SIZE - size);
        buf = buf1;
        size = MIN_BUF_SIZE;
    }

    rb = s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_ADDR;
    nr = sunhme_get_rx_ring_count(s);
    cr = sunhme_get_rx_ring_nr(s);

    pci_dma_read(d, rb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, rb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    /* If we don't own the current descriptor then flag that no RX
       descriptor is available */
    if (!(status & HME_XD_OWN)) {
        s->sebregs[HME_SEBI_STAT >> 2] |= HME_SEB_STAT_NORXD;
        sunhme_update_irq(s);
        trace_sunhme_rx_norxd();
        return -1;
    }

    rxoffset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_BYTEOFFSET) >>
                HME_ERX_CFG_BYTEOFFSET_SHIFT;

    addr = buffer + rxoffset;
    buffersize = (status & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT;

    /* Detect receive overflow */
    len = size;
    if (size > buffersize) {
        status |= HME_XD_OFL;
        len = buffersize;
    }

    pci_dma_write(d, addr, buf, len);

    trace_sunhme_rx_desc(buffer, rxoffset, status, len, cr, nr);

    /* Calculate the receive checksum */
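    /* The checksum start field in the ERX config is shifted left by one to
       convert it from 16-bit words into a byte offset into the frame */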
    csum_offset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_CSUMSTART) >>
                  HME_ERX_CFG_CSUMSHIFT << 1;
    sum = 0;
    sum += net_checksum_add(len - csum_offset, (uint8_t *)buf + csum_offset);
    csum = net_checksum_finish(sum);

    trace_sunhme_rx_xsum_calc(csum);

    /* Update status */
    status &= ~HME_XD_OWN;
    status &= ~HME_XD_RXLENMSK;
    status |= len << HME_XD_RXLENSHIFT;
    status &= ~HME_XD_RXCKSUM;
    status |= csum;

    pci_dma_write(d, rb + cr * HME_DESC_SIZE, &status, 4);

    cr++;
    if (cr >= nr) {
        cr = 0;
    }

    sunhme_set_rx_ring_nr(s, cr);

    /* Indicate RX complete */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_RXTOHOST;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

    sunhme_update_irq(s);

    return len;
}

static NetClientInfo net_sunhme_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sunhme_can_receive,
    .receive = sunhme_receive,
    .link_status_changed = sunhme_link_status_changed,
};

static void sunhme_realize(PCIDevice *pci_dev, Error **errp)
{
    SunHMEState *s = SUNHME(pci_dev);
    DeviceState *d = DEVICE(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;    /* interrupt pin A */

    memory_region_init(&s->hme, OBJECT(pci_dev), "sunhme", HME_REG_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->hme);

    memory_region_init_io(&s->sebreg, OBJECT(pci_dev), &sunhme_seb_ops, s,
                          "sunhme.seb", HME_SEB_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0, &s->sebreg);

    memory_region_init_io(&s->etxreg, OBJECT(pci_dev), &sunhme_etx_ops, s,
                          "sunhme.etx", HME_ETX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x2000, &s->etxreg);

    memory_region_init_io(&s->erxreg, OBJECT(pci_dev), &sunhme_erx_ops, s,
                          "sunhme.erx", HME_ERX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x4000, &s->erxreg);

    memory_region_init_io(&s->macreg, OBJECT(pci_dev), &sunhme_mac_ops, s,
                          "sunhme.mac", HME_MAC_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x6000, &s->macreg);

    memory_region_init_io(&s->mifreg, OBJECT(pci_dev), &sunhme_mif_ops, s,
                          "sunhme.mif", HME_MIF_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x7000, &s->mifreg);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_sunhme_info, &s->conf,
                          object_get_typename(OBJECT(d)), d->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static void sunhme_instance_init(Object *obj)
{
    SunHMEState *s = SUNHME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj), NULL);
}

static void sunhme_reset(DeviceState *ds)
{
    SunHMEState *s = SUNHME(ds);

    /* Configure internal transceiver */
    s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0;

    /* Advertise autonegotiation and 100Mbit/s full-duplex */
    s->miiregs[MII_ANAR] = MII_ANAR_TXFD;
    s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD |
                           MII_BMSR_AN_COMP;

    if (!qemu_get_queue(s->nic)->link_down) {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Identify as a DP83840 PHY */
    s->miiregs[MII_PHYID1] = DP83840_PHYID1;
    s->miiregs[MII_PHYID2] = DP83840_PHYID2;

    /* Configure default interrupt mask */
    s->mifregs[HME_MIFI_IMASK >> 2] = 0xffff;
    s->sebregs[HME_SEBI_IMASK >> 2] = 0xff7fffff;
}

static const VMStateDescription vmstate_hme = {
    .name = "sunhme",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, SunHMEState),
        VMSTATE_MACADDR(conf.macaddr, SunHMEState),
        VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(etxregs, SunHMEState, (HME_ETX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(erxregs, SunHMEState, (HME_ERX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunHMEState, (HME_MAC_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunHMEState, (HME_MIF_REG_SIZE >> 2)),
        VMSTATE_UINT16_ARRAY(miiregs, SunHMEState, HME_MII_REGS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static void sunhme_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = sunhme_realize;
    k->vendor_id = PCI_VENDOR_ID_SUN;
    k->device_id = PCI_DEVICE_ID_SUN_HME;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->vmsd = &vmstate_hme;
    dc->reset = sunhme_reset;
    device_class_set_props(dc, sunhme_properties);
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}

static const TypeInfo sunhme_info = {
    .name          = TYPE_SUNHME,
    .parent        = TYPE_PCI_DEVICE,
    .class_init    = sunhme_class_init,
    .instance_size = sizeof(SunHMEState),
    .instance_init = sunhme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};

static void sunhme_register_types(void)
{
    type_register_static(&sunhme_info);
}

type_init(sunhme_register_types)