xref: /openbmc/qemu/hw/net/sunhme.c (revision 7f6c3d1a)
/*
 * QEMU Sun Happy Meal Ethernet emulation
 *
 * Copyright (c) 2017 Mark Cave-Ayland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "hw/net/mii.h"
#include "net/net.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "qom/object.h"

#define HME_REG_SIZE                   0x8000

#define HME_SEB_REG_SIZE               0x2000

#define HME_SEBI_RESET                 0x0
#define HME_SEB_RESET_ETX              0x1
#define HME_SEB_RESET_ERX              0x2

#define HME_SEBI_STAT                  0x100
#define HME_SEBI_STAT_LINUXBUG         0x108
#define HME_SEB_STAT_RXTOHOST          0x10000
#define HME_SEB_STAT_NORXD             0x20000
#define HME_SEB_STAT_MIFIRQ            0x800000
#define HME_SEB_STAT_HOSTTOTX          0x1000000
#define HME_SEB_STAT_TXALL             0x2000000

#define HME_SEBI_IMASK                 0x104
#define HME_SEBI_IMASK_LINUXBUG        0x10c

#define HME_ETX_REG_SIZE               0x2000

#define HME_ETXI_PENDING               0x0

#define HME_ETXI_RING                  0x8
#define HME_ETXI_RING_ADDR             0xffffff00
#define HME_ETXI_RING_OFFSET           0xff

#define HME_ETXI_RSIZE                 0x2c

#define HME_ERX_REG_SIZE               0x2000

#define HME_ERXI_CFG                   0x0
#define HME_ERX_CFG_RINGSIZE           0x600
#define HME_ERX_CFG_RINGSIZE_SHIFT     9
#define HME_ERX_CFG_BYTEOFFSET         0x38
#define HME_ERX_CFG_BYTEOFFSET_SHIFT   3
#define HME_ERX_CFG_CSUMSTART          0x7f0000
#define HME_ERX_CFG_CSUMSHIFT          16

#define HME_ERXI_RING                  0x4
#define HME_ERXI_RING_ADDR             0xffffff00
#define HME_ERXI_RING_OFFSET           0xff

#define HME_MAC_REG_SIZE               0x1000

#define HME_MACI_TXCFG                 0x20c
#define HME_MAC_TXCFG_ENABLE           0x1

#define HME_MACI_RXCFG                 0x30c
#define HME_MAC_RXCFG_ENABLE           0x1
#define HME_MAC_RXCFG_PMISC            0x40
#define HME_MAC_RXCFG_HENABLE          0x800

#define HME_MACI_MACADDR2              0x318
#define HME_MACI_MACADDR1              0x31c
#define HME_MACI_MACADDR0              0x320

#define HME_MACI_HASHTAB3              0x340
#define HME_MACI_HASHTAB2              0x344
#define HME_MACI_HASHTAB1              0x348
#define HME_MACI_HASHTAB0              0x34c

#define HME_MIF_REG_SIZE               0x20

#define HME_MIFI_FO                    0xc
#define HME_MIF_FO_ST                  0xc0000000
#define HME_MIF_FO_ST_SHIFT            30
#define HME_MIF_FO_OPC                 0x30000000
#define HME_MIF_FO_OPC_SHIFT           28
#define HME_MIF_FO_PHYAD               0x0f800000
#define HME_MIF_FO_PHYAD_SHIFT         23
#define HME_MIF_FO_REGAD               0x007c0000
#define HME_MIF_FO_REGAD_SHIFT         18
#define HME_MIF_FO_TAMSB               0x20000
#define HME_MIF_FO_TALSB               0x10000
#define HME_MIF_FO_DATA                0xffff

#define HME_MIFI_CFG                   0x10
#define HME_MIF_CFG_MDI0               0x100
#define HME_MIF_CFG_MDI1               0x200

#define HME_MIFI_IMASK                 0x14

#define HME_MIFI_STAT                  0x18


/* Wired HME PHY addresses */
#define HME_PHYAD_INTERNAL     1
#define HME_PHYAD_EXTERNAL     0

#define MII_COMMAND_START      0x1
#define MII_COMMAND_READ       0x2
#define MII_COMMAND_WRITE      0x1

#define TYPE_SUNHME "sunhme"
OBJECT_DECLARE_SIMPLE_TYPE(SunHMEState, SUNHME)

/* Maximum size of buffer */
#define HME_FIFO_SIZE          0x800

/* Size of TX/RX descriptor */
#define HME_DESC_SIZE          0x8

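/*
 * Descriptor status word bits: OWN marks a descriptor that still belongs to
 * the device, SOP/EOP delimit a TX packet, OFL flags an RX overflow, and the
 * remaining fields pack buffer length and checksum information into the same
 * 32-bit word.
 */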
#define HME_XD_OWN             0x80000000
#define HME_XD_OFL             0x40000000
#define HME_XD_SOP             0x40000000
#define HME_XD_EOP             0x20000000
#define HME_XD_RXLENMSK        0x3fff0000
#define HME_XD_RXLENSHIFT      16
#define HME_XD_RXCKSUM         0xffff
#define HME_XD_TXLENMSK        0x00001fff
#define HME_XD_TXCKSUM         0x10000000
#define HME_XD_TXCSSTUFF       0xff00000
#define HME_XD_TXCSSTUFFSHIFT  20
#define HME_XD_TXCSSTART       0xfc000
#define HME_XD_TXCSSTARTSHIFT  14

#define HME_MII_REGS_SIZE      0x20

struct SunHMEState {
    /*< private >*/
    PCIDevice parent_obj;

    NICState *nic;
    NICConf conf;

    MemoryRegion hme;
    MemoryRegion sebreg;
    MemoryRegion etxreg;
    MemoryRegion erxreg;
    MemoryRegion macreg;
    MemoryRegion mifreg;

    uint32_t sebregs[HME_SEB_REG_SIZE >> 2];
    uint32_t etxregs[HME_ETX_REG_SIZE >> 2];
    uint32_t erxregs[HME_ERX_REG_SIZE >> 2];
    uint32_t macregs[HME_MAC_REG_SIZE >> 2];
    uint32_t mifregs[HME_MIF_REG_SIZE >> 2];

    uint16_t miiregs[HME_MII_REGS_SIZE];
};

static Property sunhme_properties[] = {
    DEFINE_NIC_PROPERTIES(SunHMEState, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void sunhme_reset_tx(SunHMEState *s)
{
    /* Indicate TX reset complete */
    s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ETX;
}

static void sunhme_reset_rx(SunHMEState *s)
{
    /* Indicate RX reset complete */
    s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ERX;
}

static void sunhme_update_irq(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int level;

    /* MIF interrupt mask (16-bit) */
    uint32_t mifmask = ~(s->mifregs[HME_MIFI_IMASK >> 2]) & 0xffff;
    uint32_t mif = s->mifregs[HME_MIFI_STAT >> 2] & mifmask;

    /* Main SEB interrupt mask (include MIF status from above) */
    uint32_t sebmask = ~(s->sebregs[HME_SEBI_IMASK >> 2]) &
                       ~HME_SEB_STAT_MIFIRQ;
    uint32_t seb = s->sebregs[HME_SEBI_STAT >> 2] & sebmask;
    if (mif) {
        seb |= HME_SEB_STAT_MIFIRQ;
    }

    level = (seb ? 1 : 0);
    trace_sunhme_update_irq(mifmask, mif, sebmask, seb, level);

    pci_set_irq(d, level);
}

static void sunhme_seb_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_seb_write(addr, val);

    /*
     * Handle buggy Linux drivers before 4.13 which have the wrong
     * offsets for HME_SEBI_STAT and HME_SEBI_IMASK
     */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    switch (addr) {
    case HME_SEBI_RESET:
        if (val & HME_SEB_RESET_ETX) {
            sunhme_reset_tx(s);
        }
        if (val & HME_SEB_RESET_ERX) {
            sunhme_reset_rx(s);
        }
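        /* Write back the value updated by the reset helpers above */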
        val = s->sebregs[HME_SEBI_RESET >> 2];
        break;
    }

    s->sebregs[addr >> 2] = val;
}

static uint64_t sunhme_seb_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    /*
     * Handle buggy Linux drivers before 4.13 which have the wrong
     * offsets for HME_SEBI_STAT and HME_SEBI_IMASK
     */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    val = s->sebregs[addr >> 2];

    switch (addr) {
    case HME_SEBI_STAT:
        /* Autoclear status (except MIF) */
        s->sebregs[HME_SEBI_STAT >> 2] &= HME_SEB_STAT_MIFIRQ;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_seb_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_seb_ops = {
    .read = sunhme_seb_read,
    .write = sunhme_seb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_transmit(SunHMEState *s);

static void sunhme_etx_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_etx_write(addr, val);

    switch (addr) {
    case HME_ETXI_PENDING:
        if (val) {
            sunhme_transmit(s);
        }
        break;
    }

    s->etxregs[addr >> 2] = val;
}

static uint64_t sunhme_etx_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->etxregs[addr >> 2];

    trace_sunhme_etx_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_etx_ops = {
    .read = sunhme_etx_read,
    .write = sunhme_etx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_erx_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_erx_write(addr, val);

    s->erxregs[addr >> 2] = val;
}

static uint64_t sunhme_erx_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->erxregs[addr >> 2];

    trace_sunhme_erx_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_erx_ops = {
    .read = sunhme_erx_read,
    .write = sunhme_erx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_mac_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t oldval = s->macregs[addr >> 2];

    trace_sunhme_mac_write(addr, val);

    s->macregs[addr >> 2] = val;

    switch (addr) {
    case HME_MACI_RXCFG:
        if (!(oldval & HME_MAC_RXCFG_ENABLE) &&
             (val & HME_MAC_RXCFG_ENABLE)) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}

static uint64_t sunhme_mac_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->macregs[addr >> 2];

    trace_sunhme_mac_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_mac_ops = {
    .read = sunhme_mac_read,
    .write = sunhme_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_mii_write(SunHMEState *s, uint8_t reg, uint16_t data)
{
    trace_sunhme_mii_write(reg, data);

    switch (reg) {
    case MII_BMCR:
        if (data & MII_BMCR_RESET) {
            /* Autoclear reset bit, enable auto negotiation */
            data &= ~MII_BMCR_RESET;
            data |= MII_BMCR_AUTOEN;
        }
        if (data & MII_BMCR_ANRESTART) {
            /* Autoclear auto negotiation restart */
            data &= ~MII_BMCR_ANRESTART;

            /* Indicate negotiation complete */
            s->miiregs[MII_BMSR] |= MII_BMSR_AN_COMP;

            if (!qemu_get_queue(s->nic)->link_down) {
                s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
                s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
            }
        }
        break;
    }

    s->miiregs[reg] = data;
}

static uint16_t sunhme_mii_read(SunHMEState *s, uint8_t reg)
{
    uint16_t data = s->miiregs[reg];

    trace_sunhme_mii_read(reg, data);

    return data;
}

static void sunhme_mif_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint8_t cmd, reg;
    uint16_t data;

    trace_sunhme_mif_write(addr, val);

    switch (addr) {
    case HME_MIFI_CFG:
        /* Mask the read-only bits */
        val &= ~(HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        val |= s->mifregs[HME_MIFI_CFG >> 2] &
               (HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        break;
    case HME_MIFI_FO:
        /* Detect start of MII command */
        if ((val & HME_MIF_FO_ST) >> HME_MIF_FO_ST_SHIFT
                != MII_COMMAND_START) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        /* Internal PHY only */
        if ((val & HME_MIF_FO_PHYAD) >> HME_MIF_FO_PHYAD_SHIFT
                != HME_PHYAD_INTERNAL) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        cmd = (val & HME_MIF_FO_OPC) >> HME_MIF_FO_OPC_SHIFT;
        reg = (val & HME_MIF_FO_REGAD) >> HME_MIF_FO_REGAD_SHIFT;
        data = (val & HME_MIF_FO_DATA);

        switch (cmd) {
        case MII_COMMAND_WRITE:
            sunhme_mii_write(s, reg, data);
            break;

        case MII_COMMAND_READ:
            val &= ~HME_MIF_FO_DATA;
            val |= sunhme_mii_read(s, reg);
            break;
        }

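        /* Turnaround LSB signals that the frame operation has completed */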
        val |= HME_MIF_FO_TALSB;
        break;
    }

    s->mifregs[addr >> 2] = val;
}

static uint64_t sunhme_mif_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->mifregs[addr >> 2];

    switch (addr) {
    case HME_MIFI_STAT:
        /* Autoclear MIF interrupt status */
        s->mifregs[HME_MIFI_STAT >> 2] = 0;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_mif_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_mif_ops = {
    .read = sunhme_mif_read,
    .write = sunhme_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_transmit_frame(SunHMEState *s, uint8_t *buf, int size)
{
    qemu_send_packet(qemu_get_queue(s->nic), buf, size);
}

static inline int sunhme_get_tx_ring_count(SunHMEState *s)
{
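    /* The TX ring size register holds (number of descriptors / 16) - 1 */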
    return (s->etxregs[HME_ETXI_RSIZE >> 2] + 1) << 4;
}

static inline int sunhme_get_tx_ring_nr(SunHMEState *s)
{
    return s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_OFFSET;
}

static inline void sunhme_set_tx_ring_nr(SunHMEState *s, int i)
{
    uint32_t ring = s->etxregs[HME_ETXI_RING >> 2] & ~HME_ETXI_RING_OFFSET;
    ring |= i & HME_ETXI_RING_OFFSET;

    s->etxregs[HME_ETXI_RING >> 2] = ring;
}

static void sunhme_transmit(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t tb, addr;
    uint32_t intstatus, status, buffer, sum = 0;
    int cr, nr, len, xmit_pos, csum_offset = 0, csum_stuff_offset = 0;
    uint16_t csum = 0;
    uint8_t xmit_buffer[HME_FIFO_SIZE];

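    /* TX descriptor ring base, descriptor count and current index */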
    tb = s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_ADDR;
    nr = sunhme_get_tx_ring_count(s);
    cr = sunhme_get_tx_ring_nr(s);

    pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    xmit_pos = 0;
    while (status & HME_XD_OWN) {
        trace_sunhme_tx_desc(buffer, status, cr, nr);

        /* Copy data into transmit buffer */
        addr = buffer;
        len = status & HME_XD_TXLENMSK;

        if (xmit_pos + len > HME_FIFO_SIZE) {
            len = HME_FIFO_SIZE - xmit_pos;
        }

        pci_dma_read(d, addr, &xmit_buffer[xmit_pos], len);
        xmit_pos += len;

        /* Detect start of packet for TX checksum */
        if (status & HME_XD_SOP) {
            sum = 0;
            csum_offset = (status & HME_XD_TXCSSTART) >> HME_XD_TXCSSTARTSHIFT;
            csum_stuff_offset = (status & HME_XD_TXCSSTUFF) >>
                                HME_XD_TXCSSTUFFSHIFT;
        }

        if (status & HME_XD_TXCKSUM) {
            /* Only start calculation from csum_offset */
            if (xmit_pos - len <= csum_offset && xmit_pos > csum_offset) {
                sum += net_checksum_add(xmit_pos - csum_offset,
                                        xmit_buffer + csum_offset);
                trace_sunhme_tx_xsum_add(csum_offset, xmit_pos - csum_offset);
            } else {
                sum += net_checksum_add(len, xmit_buffer + xmit_pos - len);
                trace_sunhme_tx_xsum_add(xmit_pos - len, len);
            }
        }

        /* Detect end of packet for TX checksum */
        if (status & HME_XD_EOP) {
            /* Stuff the checksum if required */
            if (status & HME_XD_TXCKSUM) {
                csum = net_checksum_finish(sum);
                stw_be_p(xmit_buffer + csum_stuff_offset, csum);
                trace_sunhme_tx_xsum_stuff(csum, csum_stuff_offset);
            }

            if (s->macregs[HME_MACI_TXCFG >> 2] & HME_MAC_TXCFG_ENABLE) {
                sunhme_transmit_frame(s, xmit_buffer, xmit_pos);
                trace_sunhme_tx_done(xmit_pos);
            }
        }

        /* Update status */
        status &= ~HME_XD_OWN;
        pci_dma_write(d, tb + cr * HME_DESC_SIZE, &status, 4);

        /* Move onto next descriptor */
        cr++;
        if (cr >= nr) {
            cr = 0;
        }
        sunhme_set_tx_ring_nr(s, cr);

        pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
        pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

        /* Indicate TX complete */
        intstatus = s->sebregs[HME_SEBI_STAT >> 2];
        intstatus |= HME_SEB_STAT_HOSTTOTX;
        s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

        /* Autoclear TX pending */
        s->etxregs[HME_ETXI_PENDING >> 2] = 0;

        sunhme_update_irq(s);
    }

    /* TX FIFO now clear */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_TXALL;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;
    sunhme_update_irq(s);
}

static bool sunhme_can_receive(NetClientState *nc)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);

    return !!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE);
}

static void sunhme_link_status_changed(NetClientState *nc)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);

    if (nc->link_down) {
        s->miiregs[MII_ANLPAR] &= ~MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] &= ~MII_BMSR_LINK_ST;
    } else {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Exact bits unknown */
    s->mifregs[HME_MIFI_STAT >> 2] = 0xffff;
    sunhme_update_irq(s);
}

static inline int sunhme_get_rx_ring_count(SunHMEState *s)
{
    uint32_t rings = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_RINGSIZE)
                      >> HME_ERX_CFG_RINGSIZE_SHIFT;

    switch (rings) {
    case 0:
        return 32;
    case 1:
        return 64;
    case 2:
        return 128;
    case 3:
        return 256;
    }

    return 0;
}

static inline int sunhme_get_rx_ring_nr(SunHMEState *s)
{
    return s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_OFFSET;
}

static inline void sunhme_set_rx_ring_nr(SunHMEState *s, int i)
{
    uint32_t ring = s->erxregs[HME_ERXI_RING >> 2] & ~HME_ERXI_RING_OFFSET;
    ring |= i & HME_ERXI_RING_OFFSET;

    s->erxregs[HME_ERXI_RING >> 2] = ring;
}

#define MIN_BUF_SIZE 60

static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t rb, addr;
    uint32_t intstatus, status, buffer, buffersize, sum;
    uint16_t csum;
    uint8_t buf1[60];
    int nr, cr, len, rxoffset, csum_offset;

    trace_sunhme_rx_incoming(size);

    /* Do nothing if MAC RX disabled */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE)) {
        return 0;
    }

    trace_sunhme_rx_filter_destmac(buf[0], buf[1], buf[2],
                                   buf[3], buf[4], buf[5]);

    /* Check destination MAC address */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_PMISC)) {
        /* Try to match the local MAC address */
        if (((s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff00) >> 8) == buf[0] &&
             (s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff) == buf[1] &&
            ((s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff00) >> 8) == buf[2] &&
             (s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff) == buf[3] &&
            ((s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff00) >> 8) == buf[4] &&
             (s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff) == buf[5]) {
            /* Matched local MAC address */
            trace_sunhme_rx_filter_local_match();
        } else if (buf[0] == 0xff && buf[1] == 0xff && buf[2] == 0xff &&
                   buf[3] == 0xff && buf[4] == 0xff && buf[5] == 0xff) {
            /* Matched broadcast address */
            trace_sunhme_rx_filter_bcast_match();
        } else if (s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_HENABLE) {
            /* Didn't match local address, check hash filter */
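            /*
             * The top 6 bits of the little-endian CRC32 of the destination
             * MAC address select one bit of the 64-bit hash table held in
             * the four 16-bit HASHTAB registers.
             */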
            int mcast_idx = net_crc32_le(buf, ETH_ALEN) >> 26;
            if (!(s->macregs[(HME_MACI_HASHTAB0 >> 2) - (mcast_idx >> 4)] &
                    (1 << (mcast_idx & 0xf)))) {
                /* Didn't match hash filter */
                trace_sunhme_rx_filter_hash_nomatch();
                trace_sunhme_rx_filter_reject();
                return -1;
            } else {
                trace_sunhme_rx_filter_hash_match();
            }
        } else {
            /* Not for us */
            trace_sunhme_rx_filter_reject();
            return -1;
        }
    } else {
        trace_sunhme_rx_filter_promisc_match();
    }

    trace_sunhme_rx_filter_accept();

    /* Pad undersized frames out to the minimum buffer size */
    if (size < MIN_BUF_SIZE) {
        memcpy(buf1, buf, size);
        memset(buf1 + size, 0, MIN_BUF_SIZE - size);
        buf = buf1;
        size = MIN_BUF_SIZE;
    }

    rb = s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_ADDR;
    nr = sunhme_get_rx_ring_count(s);
    cr = sunhme_get_rx_ring_nr(s);

    pci_dma_read(d, rb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, rb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    /* Descriptor not owned by us: report no RX descriptor available */
    if (!(status & HME_XD_OWN)) {
        s->sebregs[HME_SEBI_STAT >> 2] |= HME_SEB_STAT_NORXD;
        sunhme_update_irq(s);
        trace_sunhme_rx_norxd();
        return -1;
    }

    rxoffset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_BYTEOFFSET) >>
                HME_ERX_CFG_BYTEOFFSET_SHIFT;

    addr = buffer + rxoffset;
    buffersize = (status & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT;

    /* Detect receive overflow */
    len = size;
    if (size > buffersize) {
        status |= HME_XD_OFL;
        len = buffersize;
    }

    pci_dma_write(d, addr, buf, len);

    trace_sunhme_rx_desc(buffer, rxoffset, status, len, cr, nr);

    /* Calculate the receive checksum */
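    /* The CSUMSTART field counts half-words, so double it for a byte offset */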
    csum_offset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_CSUMSTART) >>
                  HME_ERX_CFG_CSUMSHIFT << 1;
    sum = 0;
    sum += net_checksum_add(len - csum_offset, (uint8_t *)buf + csum_offset);
    csum = net_checksum_finish(sum);

    trace_sunhme_rx_xsum_calc(csum);

    /* Update status */
    status &= ~HME_XD_OWN;
    status &= ~HME_XD_RXLENMSK;
    status |= len << HME_XD_RXLENSHIFT;
    status &= ~HME_XD_RXCKSUM;
    status |= csum;

    pci_dma_write(d, rb + cr * HME_DESC_SIZE, &status, 4);

    cr++;
    if (cr >= nr) {
        cr = 0;
    }

    sunhme_set_rx_ring_nr(s, cr);

    /* Indicate RX complete */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_RXTOHOST;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

    sunhme_update_irq(s);

    return len;
}

static NetClientInfo net_sunhme_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sunhme_can_receive,
    .receive = sunhme_receive,
    .link_status_changed = sunhme_link_status_changed,
};

static void sunhme_realize(PCIDevice *pci_dev, Error **errp)
{
    SunHMEState *s = SUNHME(pci_dev);
    DeviceState *d = DEVICE(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;    /* interrupt pin A */

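    /*
     * Single 32KiB register BAR: SEB at 0x0, ETX at 0x2000, ERX at 0x4000,
     * MAC at 0x6000 and MIF at 0x7000
     */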
    memory_region_init(&s->hme, OBJECT(pci_dev), "sunhme", HME_REG_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->hme);

    memory_region_init_io(&s->sebreg, OBJECT(pci_dev), &sunhme_seb_ops, s,
                          "sunhme.seb", HME_SEB_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0, &s->sebreg);

    memory_region_init_io(&s->etxreg, OBJECT(pci_dev), &sunhme_etx_ops, s,
                          "sunhme.etx", HME_ETX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x2000, &s->etxreg);

    memory_region_init_io(&s->erxreg, OBJECT(pci_dev), &sunhme_erx_ops, s,
                          "sunhme.erx", HME_ERX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x4000, &s->erxreg);

    memory_region_init_io(&s->macreg, OBJECT(pci_dev), &sunhme_mac_ops, s,
                          "sunhme.mac", HME_MAC_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x6000, &s->macreg);

    memory_region_init_io(&s->mifreg, OBJECT(pci_dev), &sunhme_mif_ops, s,
                          "sunhme.mif", HME_MIF_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x7000, &s->mifreg);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_sunhme_info, &s->conf,
                          object_get_typename(OBJECT(d)), d->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static void sunhme_instance_init(Object *obj)
{
    SunHMEState *s = SUNHME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj));
}

static void sunhme_reset(DeviceState *ds)
{
    SunHMEState *s = SUNHME(ds);

    /* Configure internal transceiver */
    s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0;

    /* Advertise autonegotiation and 100Mbps full duplex */
    s->miiregs[MII_ANAR] = MII_ANAR_TXFD;
    s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD |
                           MII_BMSR_AN_COMP;

    if (!qemu_get_queue(s->nic)->link_down) {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Set manufacturer */
    s->miiregs[MII_PHYID1] = DP83840_PHYID1;
    s->miiregs[MII_PHYID2] = DP83840_PHYID2;

    /* Configure default interrupt mask */
    s->mifregs[HME_MIFI_IMASK >> 2] = 0xffff;
    s->sebregs[HME_SEBI_IMASK >> 2] = 0xff7fffff;
}

static const VMStateDescription vmstate_hme = {
    .name = "sunhme",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, SunHMEState),
        VMSTATE_MACADDR(conf.macaddr, SunHMEState),
        VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(etxregs, SunHMEState, (HME_ETX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(erxregs, SunHMEState, (HME_ERX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunHMEState, (HME_MAC_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunHMEState, (HME_MIF_REG_SIZE >> 2)),
        VMSTATE_UINT16_ARRAY(miiregs, SunHMEState, HME_MII_REGS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static void sunhme_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = sunhme_realize;
    k->vendor_id = PCI_VENDOR_ID_SUN;
    k->device_id = PCI_DEVICE_ID_SUN_HME;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->vmsd = &vmstate_hme;
    dc->reset = sunhme_reset;
    device_class_set_props(dc, sunhme_properties);
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}

static const TypeInfo sunhme_info = {
    .name          = TYPE_SUNHME,
    .parent        = TYPE_PCI_DEVICE,
    .class_init    = sunhme_class_init,
    .instance_size = sizeof(SunHMEState),
    .instance_init = sunhme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};

static void sunhme_register_types(void)
{
    type_register_static(&sunhme_info);
}

type_init(sunhme_register_types)