xref: /openbmc/qemu/hw/net/npcm_gmac.c (revision 0828b374c6568eecdb5e5389986efd99898aa822)
1 /*
2  * Nuvoton NPCM7xx/8xx GMAC Module
3  *
4  * Copyright 2024 Google LLC
5  * Authors:
6  * Hao Wu <wuhaotsh@google.com>
7  * Nabih Estefan <nabihestefan@google.com>
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms of the GNU General Public License as published by the
11  * Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful, but WITHOUT
15  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17  * for more details.
18  *
19  * Unsupported/unimplemented features:
20  * - MII is not implemented, MII_ADDR.BUSY and MII_DATA always return zero
21  * - Precision timestamp (PTP) is not implemented.
22  */
23 
24 #include "qemu/osdep.h"
25 
26 #include "hw/registerfields.h"
27 #include "hw/net/mii.h"
28 #include "hw/net/npcm_gmac.h"
29 #include "migration/vmstate.h"
30 #include "net/checksum.h"
31 #include "net/eth.h"
32 #include "net/net.h"
33 #include "qemu/cutils.h"
34 #include "qemu/log.h"
35 #include "qemu/units.h"
36 #include "system/dma.h"
37 #include "trace.h"
38 
/* DMA engine register offsets (0x1000-based block). */
REG32(NPCM_DMA_BUS_MODE, 0x1000)
REG32(NPCM_DMA_XMT_POLL_DEMAND, 0x1004)
REG32(NPCM_DMA_RCV_POLL_DEMAND, 0x1008)
REG32(NPCM_DMA_RX_BASE_ADDR, 0x100c)
REG32(NPCM_DMA_TX_BASE_ADDR, 0x1010)
REG32(NPCM_DMA_STATUS, 0x1014)
REG32(NPCM_DMA_CONTROL, 0x1018)
REG32(NPCM_DMA_INTR_ENA, 0x101c)
REG32(NPCM_DMA_MISSED_FRAME_CTR, 0x1020)
REG32(NPCM_DMA_HOST_TX_DESC, 0x1048)
REG32(NPCM_DMA_HOST_RX_DESC, 0x104c)
REG32(NPCM_DMA_CUR_TX_BUF_ADDR, 0x1050)
REG32(NPCM_DMA_CUR_RX_BUF_ADDR, 0x1054)
REG32(NPCM_DMA_HW_FEATURE, 0x1058)

/* MAC-layer register offsets (0x0-based block). */
REG32(NPCM_GMAC_MAC_CONFIG, 0x0)
REG32(NPCM_GMAC_FRAME_FILTER, 0x4)
REG32(NPCM_GMAC_HASH_HIGH, 0x8)
REG32(NPCM_GMAC_HASH_LOW, 0xc)
REG32(NPCM_GMAC_MII_ADDR, 0x10)
REG32(NPCM_GMAC_MII_DATA, 0x14)
REG32(NPCM_GMAC_FLOW_CTRL, 0x18)
REG32(NPCM_GMAC_VLAN_FLAG, 0x1c)
REG32(NPCM_GMAC_VERSION, 0x20)
REG32(NPCM_GMAC_WAKEUP_FILTER, 0x28)
REG32(NPCM_GMAC_PMT, 0x2c)
REG32(NPCM_GMAC_LPI_CTRL, 0x30)
REG32(NPCM_GMAC_TIMER_CTRL, 0x34)
REG32(NPCM_GMAC_INT_STATUS, 0x38)
REG32(NPCM_GMAC_INT_MASK, 0x3c)
REG32(NPCM_GMAC_MAC0_ADDR_HI, 0x40)
REG32(NPCM_GMAC_MAC0_ADDR_LO, 0x44)
REG32(NPCM_GMAC_MAC1_ADDR_HI, 0x48)
REG32(NPCM_GMAC_MAC1_ADDR_LO, 0x4c)
REG32(NPCM_GMAC_MAC2_ADDR_HI, 0x50)
REG32(NPCM_GMAC_MAC2_ADDR_LO, 0x54)
REG32(NPCM_GMAC_MAC3_ADDR_HI, 0x58)
REG32(NPCM_GMAC_MAC3_ADDR_LO, 0x5c)
REG32(NPCM_GMAC_RGMII_STATUS, 0xd8)
REG32(NPCM_GMAC_WATCHDOG, 0xdc)
/* PTP registers; PTP itself is unimplemented (see file header). */
REG32(NPCM_GMAC_PTP_TCR, 0x700)
REG32(NPCM_GMAC_PTP_SSIR, 0x704)
REG32(NPCM_GMAC_PTP_STSR, 0x708)
REG32(NPCM_GMAC_PTP_STNSR, 0x70c)
REG32(NPCM_GMAC_PTP_STSUR, 0x710)
REG32(NPCM_GMAC_PTP_STNSUR, 0x714)
REG32(NPCM_GMAC_PTP_TAR, 0x718)
REG32(NPCM_GMAC_PTP_TTSR, 0x71c)

/* Register Fields */
/* MII_ADDR: BUSY/WRITE flags plus 5-bit register (GR) and PHY (PA) fields. */
#define NPCM_GMAC_MII_ADDR_BUSY             BIT(0)
#define NPCM_GMAC_MII_ADDR_WRITE            BIT(1)
#define NPCM_GMAC_MII_ADDR_GR(rv)           extract16((rv), 6, 5)
#define NPCM_GMAC_MII_ADDR_PA(rv)           extract16((rv), 11, 5)

#define NPCM_GMAC_INT_MASK_LPIIM            BIT(10)
#define NPCM_GMAC_INT_MASK_PMTM             BIT(3)
#define NPCM_GMAC_INT_MASK_RGIM             BIT(0)

/* DMA bus-mode software reset bit; writing 1 triggers a soft reset. */
#define NPCM_DMA_BUS_MODE_SWR               BIT(0)
99 
/*
 * Cold-reset values for the whole register file. Unlisted registers
 * reset to zero (designated-initializer default).
 */
static const uint32_t npcm_gmac_cold_reset_values[NPCM_GMAC_NR_REGS] = {
    /* Reduce version to 3.2 so that the kernel can enable interrupt. */
    [R_NPCM_GMAC_VERSION]         = 0x00001032,
    [R_NPCM_GMAC_TIMER_CTRL]      = 0x03e80000,
    /* MAC address registers reset to all-ones (no address programmed). */
    [R_NPCM_GMAC_MAC0_ADDR_HI]    = 0x8000ffff,
    [R_NPCM_GMAC_MAC0_ADDR_LO]    = 0xffffffff,
    [R_NPCM_GMAC_MAC1_ADDR_HI]    = 0x0000ffff,
    [R_NPCM_GMAC_MAC1_ADDR_LO]    = 0xffffffff,
    [R_NPCM_GMAC_MAC2_ADDR_HI]    = 0x0000ffff,
    [R_NPCM_GMAC_MAC2_ADDR_LO]    = 0xffffffff,
    [R_NPCM_GMAC_MAC3_ADDR_HI]    = 0x0000ffff,
    [R_NPCM_GMAC_MAC3_ADDR_LO]    = 0xffffffff,
    [R_NPCM_GMAC_PTP_TCR]         = 0x00002000,
    [R_NPCM_DMA_BUS_MODE]         = 0x00020101,
    [R_NPCM_DMA_HW_FEATURE]       = 0x100d4f37,
};
116 
/*
 * Initial values for the emulated PHY's MII register set: advertises
 * 10/100/1000 capabilities with autonegotiation enabled and link up.
 */
static const uint16_t phy_reg_init[] = {
    [MII_BMCR]      = MII_BMCR_AUTOEN | MII_BMCR_FD | MII_BMCR_SPEED1000,
    [MII_BMSR]      = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD |
                      MII_BMSR_10T_HD | MII_BMSR_EXTSTAT | MII_BMSR_AUTONEG |
                      MII_BMSR_LINK_ST | MII_BMSR_EXTCAP,
    [MII_PHYID1]    = 0x0362,
    [MII_PHYID2]    = 0x5e6a,
    [MII_ANAR]      = MII_ANAR_TXFD | MII_ANAR_TX | MII_ANAR_10FD |
                      MII_ANAR_10 | MII_ANAR_CSMACD,
    [MII_ANLPAR]    = MII_ANLPAR_ACK | MII_ANLPAR_PAUSE |
                      MII_ANLPAR_TXFD | MII_ANLPAR_TX | MII_ANLPAR_10FD |
                      MII_ANLPAR_10 | MII_ANLPAR_CSMACD,
    [MII_ANER]      = 0x64 | MII_ANER_NWAY,
    [MII_ANNP]      = 0x2001,
    [MII_CTRL1000]  = MII_CTRL1000_FULL,
    [MII_STAT1000]  = MII_STAT1000_FULL,
    [MII_EXTSTAT]   = 0x3000, /* 1000BASE-T full-duplex capable */
};
135 
/*
 * Reload every register with its cold-reset value, then clear the
 * software-reset bit so the guest sees the reset as already complete.
 */
static void npcm_gmac_soft_reset(NPCMGMACState *gmac)
{
    memcpy(gmac->regs, npcm_gmac_cold_reset_values,
           sizeof(npcm_gmac_cold_reset_values));
    /* The emulated reset is instantaneous: report SWR as done. */
    gmac->regs[R_NPCM_DMA_BUS_MODE] &= ~NPCM_DMA_BUS_MODE_SWR;
}
143 
/*
 * Reflect link state in PHY 0's BMSR: link-status and
 * autonegotiation-complete are set and cleared together.
 */
static void gmac_phy_set_link(NPCMGMACState *gmac, bool active)
{
    const uint16_t bits = MII_BMSR_LINK_ST | MII_BMSR_AN_COMP;

    if (active) {
        gmac->phy_regs[0][MII_BMSR] |= bits;
    } else {
        gmac->phy_regs[0][MII_BMSR] &= ~bits;
    }
}
153 
/*
 * A packet can be accepted only when both the MAC receiver is enabled
 * and the DMA receive engine has been started.
 */
static bool gmac_can_receive(NetClientState *nc)
{
    NPCMGMACState *gmac = NPCM_GMAC(qemu_get_nic_opaque(nc));
    bool mac_rx_on = gmac->regs[R_NPCM_GMAC_MAC_CONFIG] &
                     NPCM_GMAC_MAC_CONFIG_RX_EN;
    bool dma_rx_on = gmac->regs[R_NPCM_DMA_CONTROL] &
                     NPCM_DMA_CONTROL_START_STOP_RX;

    return mac_rx_on && dma_rx_on;
}
169 
170 /*
171  * Function that updates the GMAC IRQ
172  * It find the logical OR of the enabled bits for NIS (if enabled)
173  * It find the logical OR of the enabled bits for AIS (if enabled)
174  */
gmac_update_irq(NPCMGMACState * gmac)175 static void gmac_update_irq(NPCMGMACState *gmac)
176 {
177     /*
178      * Check if the normal interrupts summary is enabled
179      * if so, add the bits for the summary that are enabled
180      */
181     if (gmac->regs[R_NPCM_DMA_INTR_ENA] & gmac->regs[R_NPCM_DMA_STATUS] &
182         (NPCM_DMA_INTR_ENAB_NIE_BITS)) {
183         gmac->regs[R_NPCM_DMA_STATUS] |=  NPCM_DMA_STATUS_NIS;
184     }
185     /*
186      * Check if the abnormal interrupts summary is enabled
187      * if so, add the bits for the summary that are enabled
188      */
189     if (gmac->regs[R_NPCM_DMA_INTR_ENA] & gmac->regs[R_NPCM_DMA_STATUS] &
190         (NPCM_DMA_INTR_ENAB_AIE_BITS)) {
191         gmac->regs[R_NPCM_DMA_STATUS] |=  NPCM_DMA_STATUS_AIS;
192     }
193 
194     /* Get the logical OR of both normal and abnormal interrupts */
195     int level = !!((gmac->regs[R_NPCM_DMA_STATUS] &
196                     gmac->regs[R_NPCM_DMA_INTR_ENA] &
197                     NPCM_DMA_STATUS_NIS) |
198                    (gmac->regs[R_NPCM_DMA_STATUS] &
199                    gmac->regs[R_NPCM_DMA_INTR_ENA] &
200                    NPCM_DMA_STATUS_AIS));
201 
202     /* Set the IRQ */
203     trace_npcm_gmac_update_irq(DEVICE(gmac)->canonical_path,
204                                gmac->regs[R_NPCM_DMA_STATUS],
205                                gmac->regs[R_NPCM_DMA_INTR_ENA],
206                                level);
207     qemu_set_irq(gmac->irq, level);
208 }
209 
/*
 * Fetch one RX descriptor from guest memory and byte-swap it to host
 * order. Returns 0 on success, -1 on DMA failure.
 */
static int gmac_read_rx_desc(dma_addr_t addr, struct NPCMGMACRxDesc *desc)
{
    MemTxResult result = dma_memory_read(&address_space_memory, addr, desc,
                                         sizeof(*desc),
                                         MEMTXATTRS_UNSPECIFIED);

    if (result) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    /* Descriptors are little-endian in guest memory. */
    desc->rdes0 = le32_to_cpu(desc->rdes0);
    desc->rdes1 = le32_to_cpu(desc->rdes1);
    desc->rdes2 = le32_to_cpu(desc->rdes2);
    desc->rdes3 = le32_to_cpu(desc->rdes3);
    return 0;
}
224 
/*
 * Write one RX descriptor back to guest memory in little-endian order.
 * Returns 0 on success, -1 on DMA failure.
 */
static int gmac_write_rx_desc(dma_addr_t addr, struct NPCMGMACRxDesc *desc)
{
    struct NPCMGMACRxDesc le_desc = {
        .rdes0 = cpu_to_le32(desc->rdes0),
        .rdes1 = cpu_to_le32(desc->rdes1),
        .rdes2 = cpu_to_le32(desc->rdes2),
        .rdes3 = cpu_to_le32(desc->rdes3),
    };

    if (dma_memory_write(&address_space_memory, addr, &le_desc,
                         sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    return 0;
}
240 
/*
 * Fetch one TX descriptor from guest memory and byte-swap it to host
 * order. Returns 0 on success, -1 on DMA failure.
 */
static int gmac_read_tx_desc(dma_addr_t addr, struct NPCMGMACTxDesc *desc)
{
    MemTxResult result = dma_memory_read(&address_space_memory, addr, desc,
                                         sizeof(*desc),
                                         MEMTXATTRS_UNSPECIFIED);

    if (result) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    /* Descriptors are little-endian in guest memory. */
    desc->tdes0 = le32_to_cpu(desc->tdes0);
    desc->tdes1 = le32_to_cpu(desc->tdes1);
    desc->tdes2 = le32_to_cpu(desc->tdes2);
    desc->tdes3 = le32_to_cpu(desc->tdes3);
    return 0;
}
255 
/*
 * Write one TX descriptor back to guest memory in little-endian order.
 * Returns 0 on success, -1 on DMA failure.
 */
static int gmac_write_tx_desc(dma_addr_t addr, struct NPCMGMACTxDesc *desc)
{
    struct NPCMGMACTxDesc le_desc = {
        .tdes0 = cpu_to_le32(desc->tdes0),
        .tdes1 = cpu_to_le32(desc->tdes1),
        .tdes2 = cpu_to_le32(desc->tdes2),
        .tdes3 = cpu_to_le32(desc->tdes3),
    };

    if (dma_memory_write(&address_space_memory, addr, &le_desc,
                         sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    return 0;
}
271 
gmac_rx_transfer_frame_to_buffer(uint32_t rx_buf_len,uint32_t * left_frame,uint32_t rx_buf_addr,bool * eof_transferred,const uint8_t ** frame_ptr,uint16_t * transferred)272 static int gmac_rx_transfer_frame_to_buffer(uint32_t rx_buf_len,
273                                             uint32_t *left_frame,
274                                             uint32_t rx_buf_addr,
275                                             bool *eof_transferred,
276                                             const uint8_t **frame_ptr,
277                                             uint16_t *transferred)
278 {
279     uint32_t to_transfer;
280     /*
281      * Check that buffer is bigger than the frame being transfered
282      * If bigger then transfer only whats left of frame
283      * Else, fill frame with all the content possible
284      */
285     if (rx_buf_len >= *left_frame) {
286         to_transfer = *left_frame;
287         *eof_transferred = true;
288     } else {
289         to_transfer = rx_buf_len;
290     }
291 
292     /* write frame part to memory */
293     if (dma_memory_write(&address_space_memory, (uint64_t) rx_buf_addr,
294                          *frame_ptr, to_transfer, MEMTXATTRS_UNSPECIFIED)) {
295         return -1;
296     }
297 
298     /* update frame pointer and size of whats left of frame */
299     *frame_ptr += to_transfer;
300     *left_frame -= to_transfer;
301     *transferred += to_transfer;
302 
303     return 0;
304 }
305 
/* Store a 3-bit DMA process-state field into the DMA status register. */
static void gmac_dma_set_state(NPCMGMACState *gmac, int shift, uint32_t state)
{
    uint32_t status = gmac->regs[R_NPCM_DMA_STATUS];

    gmac->regs[R_NPCM_DMA_STATUS] = deposit32(status, shift, 3, state);
}
311 
/*
 * NetClientInfo receive hook: DMA an incoming frame into the guest's RX
 * descriptor ring, following the datasheet's receive procedure (p. 386).
 *
 * Returns len (frame consumed) even when the ring is full or a
 * descriptor read fails mid-frame, or -1 when reception is disabled or
 * the first descriptor cannot be read.
 */
static ssize_t gmac_receive(NetClientState *nc, const uint8_t *buf, size_t len)
{
    /*
     * Comments have steps that relate to the
     * receiving process steps in pg 386
     */
    NPCMGMACState *gmac = NPCM_GMAC(qemu_get_nic_opaque(nc));
    uint32_t left_frame = len;          /* bytes of frame still to copy */
    const uint8_t *frame_ptr = buf;     /* next source byte in the frame */
    uint32_t desc_addr;
    uint32_t rx_buf_len, rx_buf_addr;
    struct NPCMGMACRxDesc rx_desc;
    uint16_t transferred = 0;           /* bytes copied so far (for RDES0) */
    bool eof_transferred = false;

    trace_npcm_gmac_packet_receive(DEVICE(gmac)->canonical_path, len);
    if (!gmac_can_receive(nc)) {
        /* NOTE(review): log message lacks a trailing '\n'. */
        qemu_log_mask(LOG_GUEST_ERROR, "GMAC Currently is not able for Rx");
        return -1;
    }
    /* Lazily seed the current-descriptor pointer from the ring base. */
    if (!gmac->regs[R_NPCM_DMA_HOST_RX_DESC]) {
        gmac->regs[R_NPCM_DMA_HOST_RX_DESC] =
            NPCM_DMA_HOST_RX_DESC_MASK(gmac->regs[R_NPCM_DMA_RX_BASE_ADDR]);
    }
    desc_addr = NPCM_DMA_HOST_RX_DESC_MASK(gmac->regs[R_NPCM_DMA_HOST_RX_DESC]);

    /* step 1 */
    gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
        NPCM_DMA_STATUS_RX_RUNNING_FETCHING_STATE);
    trace_npcm_gmac_packet_desc_read(DEVICE(gmac)->canonical_path, desc_addr);
    if (gmac_read_rx_desc(desc_addr, &rx_desc)) {
        qemu_log_mask(LOG_GUEST_ERROR, "RX Descriptor @ 0x%x cant be read\n",
                      desc_addr);
        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
            NPCM_DMA_STATUS_RX_SUSPENDED_STATE);
        return -1;
    }

    /* step 2: software still owns the descriptor -> no buffer available. */
    if (!(rx_desc.rdes0 & RX_DESC_RDES0_OWN)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "RX Descriptor @ 0x%x is owned by software\n",
                      desc_addr);
        /* Raise "receive buffer unavailable" and suspend the RX engine. */
        gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RU;
        gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RI;
        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
            NPCM_DMA_STATUS_RX_SUSPENDED_STATE);
        gmac_update_irq(gmac);
        return len;
    }
    /* step 3 */
    /*
     * TODO --
     * Implement all frame filtering and processing (with its own interrupts)
     */
    trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &rx_desc,
                                    rx_desc.rdes0, rx_desc.rdes1, rx_desc.rdes2,
                                    rx_desc.rdes3);
    /* Clear rdes0 for the incoming descriptor and set FS in first descriptor.*/
    rx_desc.rdes0 = RX_DESC_RDES0_FIRST_DESC_MASK;

    gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
        NPCM_DMA_STATUS_RX_RUNNING_TRANSFERRING_STATE);

    /* Pad the frame with FCS as the kernel driver will strip it away. */
    left_frame += ETH_FCS_LEN;

    /* repeat while we still have frame to transfer to memory */
    while (!eof_transferred) {
        /* Return descriptor no matter what happens */
        rx_desc.rdes0 &= ~RX_DESC_RDES0_OWN;
        /* Set the frame to be an IPv4/IPv6 frame. */
        rx_desc.rdes0 |= RX_DESC_RDES0_FRM_TYPE_MASK;

        /* step 4: copy into buffer 1 of this descriptor. */
        rx_buf_len = RX_DESC_RDES1_BFFR1_SZ_MASK(rx_desc.rdes1);
        rx_buf_addr = rx_desc.rdes2;
        gmac->regs[R_NPCM_DMA_CUR_RX_BUF_ADDR] = rx_buf_addr;
        /* NOTE(review): DMA-write failure from this helper is ignored. */
        gmac_rx_transfer_frame_to_buffer(rx_buf_len, &left_frame, rx_buf_addr,
                                         &eof_transferred, &frame_ptr,
                                         &transferred);

        trace_npcm_gmac_packet_receiving_buffer(DEVICE(gmac)->canonical_path,
                                                rx_buf_len, rx_buf_addr);
        /* if we still have frame left and the second buffer is not chained */
         if (!(rx_desc.rdes1 & RX_DESC_RDES1_SEC_ADDR_CHND_MASK) && \
              !eof_transferred) {
            /* repeat process from above on buffer 2 */
            rx_buf_len = RX_DESC_RDES1_BFFR2_SZ_MASK(rx_desc.rdes1);
            rx_buf_addr = rx_desc.rdes3;
            gmac->regs[R_NPCM_DMA_CUR_RX_BUF_ADDR] = rx_buf_addr;
            gmac_rx_transfer_frame_to_buffer(rx_buf_len, &left_frame,
                                             rx_buf_addr, &eof_transferred,
                                             &frame_ptr, &transferred);
            trace_npcm_gmac_packet_receiving_buffer( \
                                                DEVICE(gmac)->canonical_path,
                                                rx_buf_len, rx_buf_addr);
        }
        /* update address for descriptor */
        /*
         * NOTE(review): this stores the last *buffer* address into
         * HOST_RX_DESC rather than a descriptor address; it is
         * overwritten with desc_addr at the end of the function, but
         * looks suspicious for the multi-descriptor path — confirm
         * against hardware behaviour.
         */
        gmac->regs[R_NPCM_DMA_HOST_RX_DESC] = rx_buf_addr;
        /* Return descriptor */
        rx_desc.rdes0 &= ~RX_DESC_RDES0_OWN;
        /* Update frame length transferred */
        rx_desc.rdes0 |= ((uint32_t)transferred)
            << RX_DESC_RDES0_FRAME_LEN_SHIFT;
        trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &rx_desc,
                                        rx_desc.rdes0, rx_desc.rdes1,
                                        rx_desc.rdes2, rx_desc.rdes3);

        /* step 5: hand the filled descriptor back to software. */
        gmac_write_rx_desc(desc_addr, &rx_desc);
        trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path,
                                        &rx_desc, rx_desc.rdes0,
                                        rx_desc.rdes1, rx_desc.rdes2,
                                        rx_desc.rdes3);
        /* read new descriptor into rx_desc if needed*/
        if (!eof_transferred) {
            /* Get next descriptor address (chained or sequential) */
            if (rx_desc.rdes1 & RX_DESC_RDES1_RC_END_RING_MASK) {
                desc_addr = gmac->regs[R_NPCM_DMA_RX_BASE_ADDR];
            } else if (rx_desc.rdes1 & RX_DESC_RDES1_SEC_ADDR_CHND_MASK) {
                desc_addr = rx_desc.rdes3;
            } else {
                desc_addr += sizeof(rx_desc);
            }
            trace_npcm_gmac_packet_desc_read(DEVICE(gmac)->canonical_path,
                                             desc_addr);
            if (gmac_read_rx_desc(desc_addr, &rx_desc)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "RX Descriptor @ 0x%x cant be read\n",
                              desc_addr);
                gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RU;
                gmac_update_irq(gmac);
                return len;
            }

            /* step 6: next descriptor still owned by software. */
            if (!(rx_desc.rdes0 & RX_DESC_RDES0_OWN)) {
                if (!(gmac->regs[R_NPCM_DMA_CONTROL] & \
                     NPCM_DMA_CONTROL_FLUSH_MASK)) {
                    rx_desc.rdes0 |= RX_DESC_RDES0_DESC_ERR_MASK;
                }
                /* Truncate the frame here: stop the copy loop. */
                eof_transferred = true;
            }
            /* Clear rdes0 for the incoming descriptor */
            rx_desc.rdes0 = 0;
        }
    }
    gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
        NPCM_DMA_STATUS_RX_RUNNING_CLOSING_STATE);

    /* Mark the final descriptor and raise RI unless interrupts disabled. */
    rx_desc.rdes0 |= RX_DESC_RDES0_LAST_DESC_MASK;
    if (!(rx_desc.rdes1 & RX_DESC_RDES1_DIS_INTR_COMP_MASK)) {
        gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RI;
        gmac_update_irq(gmac);
    }
    trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &rx_desc,
                                    rx_desc.rdes0, rx_desc.rdes1, rx_desc.rdes2,
                                    rx_desc.rdes3);

    /* step 8 */
    gmac->regs[R_NPCM_DMA_CONTROL] |= NPCM_DMA_CONTROL_FLUSH_MASK;

    /* step 9 */
    trace_npcm_gmac_packet_received(DEVICE(gmac)->canonical_path, left_frame);
    gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
        NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE);
    gmac_write_rx_desc(desc_addr, &rx_desc);

    /* Get next descriptor address (chained or sequential) */
    if (rx_desc.rdes1 & RX_DESC_RDES1_RC_END_RING_MASK) {
        desc_addr = gmac->regs[R_NPCM_DMA_RX_BASE_ADDR];
    } else if (rx_desc.rdes1 & RX_DESC_RDES1_SEC_ADDR_CHND_MASK) {
        desc_addr = rx_desc.rdes3;
    } else {
        desc_addr += sizeof(rx_desc);
    }
    gmac->regs[R_NPCM_DMA_HOST_RX_DESC] = desc_addr;
    return len;
}
492 
gmac_tx_get_csum(uint32_t tdes1)493 static int gmac_tx_get_csum(uint32_t tdes1)
494 {
495     uint32_t mask = TX_DESC_TDES1_CHKSM_INS_CTRL_MASK(tdes1);
496     int csum = 0;
497 
498     if (likely(mask > 0)) {
499         csum |= CSUM_IP;
500     }
501     if (likely(mask > 1)) {
502         csum |= CSUM_TCP | CSUM_UDP;
503     }
504 
505     return csum;
506 }
507 
/*
 * Walk the TX descriptor ring and transmit every DMA-owned frame,
 * following the datasheet's transmit procedure (p. 384).
 *
 * The loop only exits via return: either a descriptor read fails, or a
 * software-owned descriptor is reached (TX underflow -> suspend).
 * Buffer segments are accumulated in tx_send_buffer until a descriptor
 * with LAST_SEG is seen, then the whole frame is checksummed and sent.
 */
static void gmac_try_send_next_packet(NPCMGMACState *gmac)
{
    /*
     * Comments about steps refer to steps for
     * transmitting in page 384 of datasheet
     */
    uint16_t tx_buffer_size = 2048;                 /* grown on demand */
    g_autofree uint8_t *tx_send_buffer = g_malloc(tx_buffer_size);
    uint32_t desc_addr;
    struct NPCMGMACTxDesc tx_desc;
    uint32_t tx_buf_addr, tx_buf_len;
    uint32_t prev_buf_size = 0;     /* bytes of the frame gathered so far */
    int csum = 0;

    /* steps 1&2: lazily seed the current-descriptor pointer. */
    if (!gmac->regs[R_NPCM_DMA_HOST_TX_DESC]) {
        gmac->regs[R_NPCM_DMA_HOST_TX_DESC] =
            NPCM_DMA_HOST_TX_DESC_MASK(gmac->regs[R_NPCM_DMA_TX_BASE_ADDR]);
    }
    desc_addr = gmac->regs[R_NPCM_DMA_HOST_TX_DESC];

    while (true) {
        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
            NPCM_DMA_STATUS_TX_RUNNING_FETCHING_STATE);
        if (gmac_read_tx_desc(desc_addr, &tx_desc)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "TX Descriptor @ 0x%x can't be read\n",
                          desc_addr);
            return;
        }
        /* step 3 */

        trace_npcm_gmac_packet_desc_read(DEVICE(gmac)->canonical_path,
            desc_addr);
        trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &tx_desc,
            tx_desc.tdes0, tx_desc.tdes1, tx_desc.tdes2, tx_desc.tdes3);

        /* 1 = DMA Owned, 0 = Software Owned */
        if (!(tx_desc.tdes0 & TX_DESC_TDES0_OWN)) {
            trace_npcm_gmac_tx_desc_owner(DEVICE(gmac)->canonical_path,
                                          desc_addr);
            /* No more frames queued: flag underflow and suspend TX. */
            gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_TU;
            gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
                NPCM_DMA_STATUS_TX_SUSPENDED_STATE);
            gmac_update_irq(gmac);
            return;
        }

        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
            NPCM_DMA_STATUS_TX_RUNNING_READ_STATE);
        /* Give the descriptor back regardless of what happens. */
        tx_desc.tdes0 &= ~TX_DESC_TDES0_OWN;

        /* Checksum mode is latched from the frame's first segment. */
        if (tx_desc.tdes1 & TX_DESC_TDES1_FIRST_SEG_MASK) {
            csum = gmac_tx_get_csum(tx_desc.tdes1);
        }

        /* step 4: gather buffer 1. */
        tx_buf_addr = tx_desc.tdes2;
        gmac->regs[R_NPCM_DMA_CUR_TX_BUF_ADDR] = tx_buf_addr;
        tx_buf_len = TX_DESC_TDES1_BFFR1_SZ_MASK(tx_desc.tdes1);

        /* Grow the staging buffer if this segment would overflow it. */
        if ((prev_buf_size + tx_buf_len) > tx_buffer_size) {
            tx_buffer_size = prev_buf_size + tx_buf_len;
            tx_send_buffer = g_realloc(tx_send_buffer, tx_buffer_size);
        }

        /* step 5 */
        if (dma_memory_read(&address_space_memory, tx_buf_addr,
                            tx_send_buffer + prev_buf_size,
                            tx_buf_len, MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n",
                        __func__, tx_buf_addr);
            return;
        }
        prev_buf_size += tx_buf_len;

        /* If not chained we'll have a second buffer. */
        if (!(tx_desc.tdes1 & TX_DESC_TDES1_SEC_ADDR_CHND_MASK)) {
            tx_buf_addr = tx_desc.tdes3;
            gmac->regs[R_NPCM_DMA_CUR_TX_BUF_ADDR] = tx_buf_addr;
            tx_buf_len = TX_DESC_TDES1_BFFR2_SZ_MASK(tx_desc.tdes1);

            if ((prev_buf_size + tx_buf_len) > tx_buffer_size) {
                tx_buffer_size = prev_buf_size + tx_buf_len;
                tx_send_buffer = g_realloc(tx_send_buffer, tx_buffer_size);
            }

            if (dma_memory_read(&address_space_memory, tx_buf_addr,
                                tx_send_buffer + prev_buf_size,
                                tx_buf_len, MEMTXATTRS_UNSPECIFIED)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: Failed to read packet @ 0x%x\n",
                              __func__, tx_buf_addr);
                return;
            }
            prev_buf_size += tx_buf_len;
        }
        if (tx_desc.tdes1 & TX_DESC_TDES1_LAST_SEG_MASK) {
            /*
             * This will truncate the packet at 64K.
             * TODO: find out if this is the correct behaviour.
             */
            uint16_t length = prev_buf_size;
            net_checksum_calculate(tx_send_buffer, length, csum);
            qemu_send_packet(qemu_get_queue(gmac->nic), tx_send_buffer, length);
            trace_npcm_gmac_packet_sent(DEVICE(gmac)->canonical_path, length);
            prev_buf_size = 0;
        }

        /* step 6: close the descriptor and advance (ring/chain/linear). */
        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
            NPCM_DMA_STATUS_TX_RUNNING_CLOSING_STATE);
        gmac_write_tx_desc(desc_addr, &tx_desc);
        if (tx_desc.tdes1 & TX_DESC_TDES1_TX_END_RING_MASK) {
            desc_addr = gmac->regs[R_NPCM_DMA_TX_BASE_ADDR];
        } else if (tx_desc.tdes1 & TX_DESC_TDES1_SEC_ADDR_CHND_MASK) {
            desc_addr = tx_desc.tdes3;
        } else {
            desc_addr += sizeof(tx_desc);
        }
        gmac->regs[R_NPCM_DMA_HOST_TX_DESC] = desc_addr;

        /* step 7: raise TI if the descriptor requested completion IRQ. */
        if (tx_desc.tdes1 & TX_DESC_TDES1_INTERR_COMP_MASK) {
            gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_TI;
            gmac_update_irq(gmac);
        }
    }
}
638 
/* NetClientInfo cleanup hook; the model holds no per-queue state yet. */
static void gmac_cleanup(NetClientState *nc)
{
}
643 
/* NetClientInfo link-status hook: mirror the backend link into the PHY. */
static void gmac_set_link(NetClientState *nc)
{
    NPCMGMACState *gmac = qemu_get_nic_opaque(nc);
    bool link_up = !nc->link_down;

    trace_npcm_gmac_set_link(link_up);
    gmac_phy_set_link(gmac, link_up);
}
651 
/*
 * Handle a write to the MII_ADDR register: perform the requested MDIO
 * read or write on the emulated PHY register file and clear BUSY, since
 * the emulated access completes instantly.
 */
static void npcm_gmac_mdio_access(NPCMGMACState *gmac, uint16_t v)
{
    bool busy = v & NPCM_GMAC_MII_ADDR_BUSY;
    uint8_t is_write;
    uint8_t pa, gr;
    uint16_t data;

    if (busy) {
        /* is_write is only consumed by the trace call below. */
        is_write = v & NPCM_GMAC_MII_ADDR_WRITE;
        pa = NPCM_GMAC_MII_ADDR_PA(v);
        gr = NPCM_GMAC_MII_ADDR_GR(v);
        /* Both pa and gr are 5 bits, so they are less than 32. */
        g_assert(pa < NPCM_GMAC_MAX_PHYS);
        g_assert(gr < NPCM_GMAC_MAX_PHY_REGS);


        if (v & NPCM_GMAC_MII_ADDR_WRITE) {
            data = gmac->regs[R_NPCM_GMAC_MII_DATA];
            /* Clear reset bit for BMCR register */
            switch (gr) {
            case MII_BMCR:
                /* RESET is self-clearing: never latch it. */
                data &= ~MII_BMCR_RESET;
                /* Autonegotiation is a W1C bit*/
                if (data & MII_BMCR_ANRESTART) {
                    /* Tells autonegotiation to not restart again */
                    data &= ~MII_BMCR_ANRESTART;
                }
                if ((data & MII_BMCR_AUTOEN) &&
                    !(gmac->phy_regs[pa][MII_BMSR] & MII_BMSR_AN_COMP)) {
                    /* sets autonegotiation as complete */
                    gmac->phy_regs[pa][MII_BMSR] |= MII_BMSR_AN_COMP;
                    /* Resolve AN automatically->need to set this */
                    /*
                     * NOTE(review): clears ANLPAR on PHY 0 while BMSR
                     * above uses index pa — confirm whether [0] is
                     * intentional (single-PHY assumption) or should be
                     * [pa].
                     */
                    gmac->phy_regs[0][MII_ANLPAR] = 0x0000;
                }
            }
            gmac->phy_regs[pa][gr] = data;
        } else {
            /* MDIO read: latch the PHY register into MII_DATA. */
            data = gmac->phy_regs[pa][gr];
            gmac->regs[R_NPCM_GMAC_MII_DATA] = data;
        }
        trace_npcm_gmac_mdio_access(DEVICE(gmac)->canonical_path, is_write, pa,
                                        gr, data);
    }
    /* The access completes immediately, so BUSY is never seen as set. */
    gmac->regs[R_NPCM_GMAC_MII_ADDR] = v & ~NPCM_GMAC_MII_ADDR_BUSY;
}
697 
/*
 * MMIO read handler: write-only poll-demand registers read back as
 * zero (and are flagged); everything else reads the backing store.
 */
static uint64_t npcm_gmac_read(void *opaque, hwaddr offset, unsigned size)
{
    NPCMGMACState *gmac = opaque;
    uint32_t v;

    switch (offset) {
    case A_NPCM_DMA_XMT_POLL_DEMAND:
    case A_NPCM_DMA_RCV_POLL_DEMAND:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Read of write-only reg: offset: 0x%04" HWADDR_PRIx
                      "\n", DEVICE(gmac)->canonical_path, offset);
        v = 0;
        break;

    default:
        v = gmac->regs[offset / sizeof(uint32_t)];
        break;
    }

    trace_npcm_gmac_reg_read(DEVICE(gmac)->canonical_path, offset, v);
    return v;
}
719 
/*
 * MMIO write handler for the GMAC register bank.
 *
 * Handles read-only register protection, MDIO access, MAC address updates,
 * DMA soft reset, poll-demand kicks, DMA start/stop control and the
 * write-1-to-clear DMA status register. Interrupt state is re-evaluated
 * after every accepted write.
 */
static void npcm_gmac_write(void *opaque, hwaddr offset,
                              uint64_t v, unsigned size)
{
    NPCMGMACState *gmac = opaque;

    trace_npcm_gmac_reg_write(DEVICE(gmac)->canonical_path, offset, v);

    /*
     * The MMIO window (8 KiB, see npcm_gmac_realize) can be larger than the
     * regs[] array; bound-check the offset so the register stores below
     * cannot write past the end of gmac->regs[]. No register changes, so
     * there is no need to re-evaluate the IRQ on this path.
     */
    if (offset / sizeof(uint32_t) >= NPCM_GMAC_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Write of unimplemented reg: offset: 0x%04"
                      HWADDR_PRIx ", value: 0x%04" PRIx64 "\n",
                      DEVICE(gmac)->canonical_path, offset, v);
        return;
    }

    switch (offset) {
    /* Read only registers: log and discard the write. */
    case A_NPCM_GMAC_VERSION:
    case A_NPCM_GMAC_INT_STATUS:
    case A_NPCM_GMAC_RGMII_STATUS:
    case A_NPCM_GMAC_PTP_STSR:
    case A_NPCM_GMAC_PTP_STNSR:
    case A_NPCM_DMA_MISSED_FRAME_CTR:
    case A_NPCM_DMA_HOST_TX_DESC:
    case A_NPCM_DMA_HOST_RX_DESC:
    case A_NPCM_DMA_CUR_TX_BUF_ADDR:
    case A_NPCM_DMA_CUR_RX_BUF_ADDR:
    case A_NPCM_DMA_HW_FEATURE:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Write of read-only reg: offset: 0x%04" HWADDR_PRIx
                      ", value: 0x%04" PRIx64 "\n",
                      DEVICE(gmac)->canonical_path, offset, v);
        break;

    case A_NPCM_GMAC_MAC_CONFIG:
        gmac->regs[offset / sizeof(uint32_t)] = v;
        break;

    case A_NPCM_GMAC_MII_ADDR:
        /* Writing the MII address register triggers an MDIO transaction. */
        npcm_gmac_mdio_access(gmac, v);
        break;

    /* MAC0 address registers also update the NIC's configured MAC address:
     * HI holds bytes 0-1, LO holds bytes 2-5 (big-endian within each reg).
     */
    case A_NPCM_GMAC_MAC0_ADDR_HI:
        gmac->regs[offset / sizeof(uint32_t)] = v;
        gmac->conf.macaddr.a[0] = v >> 8;
        gmac->conf.macaddr.a[1] = v >> 0;
        break;

    case A_NPCM_GMAC_MAC0_ADDR_LO:
        gmac->regs[offset / sizeof(uint32_t)] = v;
        gmac->conf.macaddr.a[2] = v >> 24;
        gmac->conf.macaddr.a[3] = v >> 16;
        gmac->conf.macaddr.a[4] = v >> 8;
        gmac->conf.macaddr.a[5] = v >> 0;
        break;

    case A_NPCM_GMAC_MAC1_ADDR_HI:
    case A_NPCM_GMAC_MAC1_ADDR_LO:
    case A_NPCM_GMAC_MAC2_ADDR_HI:
    case A_NPCM_GMAC_MAC2_ADDR_LO:
    case A_NPCM_GMAC_MAC3_ADDR_HI:
    case A_NPCM_GMAC_MAC3_ADDR_LO:
        /* Stored for read-back, but only MAC address 0 affects the NIC. */
        gmac->regs[offset / sizeof(uint32_t)] = v;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Only MAC Address 0 is supported. This request "
                      "is ignored.\n", DEVICE(gmac)->canonical_path);
        break;

    case A_NPCM_DMA_BUS_MODE:
        gmac->regs[offset / sizeof(uint32_t)] = v;
        /* SWR (software reset) bit resets the whole register bank. */
        if (v & NPCM_DMA_BUS_MODE_SWR) {
            npcm_gmac_soft_reset(gmac);
        }
        break;

    case A_NPCM_DMA_RCV_POLL_DEMAND:
        /* We dont actually care about the value */
        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
            NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE);
        break;

    case A_NPCM_DMA_XMT_POLL_DEMAND:
        /* We dont actually care about the value */
        gmac_try_send_next_packet(gmac);
        break;

    case A_NPCM_DMA_CONTROL:
        gmac->regs[offset / sizeof(uint32_t)] = v;
        /* Start/stop TX: starting kicks the transmit path immediately. */
        if (v & NPCM_DMA_CONTROL_START_STOP_TX) {
            gmac_try_send_next_packet(gmac);
        } else {
            gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
                NPCM_DMA_STATUS_TX_STOPPED_STATE);
        }
        /* Start/stop RX: starting also drains any queued inbound packets. */
        if (v & NPCM_DMA_CONTROL_START_STOP_RX) {
            gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE);
            qemu_flush_queued_packets(qemu_get_queue(gmac->nic));
        } else {
            gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                NPCM_DMA_STATUS_RX_STOPPED_STATE);
        }
        break;

    case A_NPCM_DMA_STATUS:
        /* Check that RO bits are not written to */
        if (NPCM_DMA_STATUS_RO_MASK(v)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Write of read-only bits of reg: offset: 0x%04"
                           HWADDR_PRIx ", value: 0x%04" PRIx64 "\n",
                           DEVICE(gmac)->canonical_path, offset, v);
        }
        /* for W1C bits, implement W1C */
        gmac->regs[offset / sizeof(uint32_t)] &= ~NPCM_DMA_STATUS_W1C_MASK(v);
        if (v & NPCM_DMA_STATUS_RU) {
            /* Clearing RU bit indicates descriptor is owned by DMA again. */
            gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE);
            qemu_flush_queued_packets(qemu_get_queue(gmac->nic));
        }
        break;

    default:
        gmac->regs[offset / sizeof(uint32_t)] = v;
        break;
    }

    /* Any accepted write may change interrupt sources; recompute the IRQ. */
    gmac_update_irq(gmac);
}
841 
/*
 * Device-level reset: restore the MMIO register bank via a soft reset and
 * reload the PHY register file with its power-on defaults.
 */
static void npcm_gmac_reset(DeviceState *dev)
{
    NPCMGMACState *s = NPCM_GMAC(dev);

    npcm_gmac_soft_reset(s);
    memcpy(s->phy_regs[0], phy_reg_init, sizeof(phy_reg_init));

    trace_npcm_gmac_reset(DEVICE(s)->canonical_path, s->phy_regs[0][MII_BMSR]);
}
852 
/* NIC backend callbacks wiring the GMAC model into QEMU's net layer. */
static NetClientInfo net_npcm_gmac_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = gmac_can_receive,
    .receive = gmac_receive,
    .cleanup = gmac_cleanup,
    .link_status_changed = gmac_set_link,
};
861 
/* MMIO access ops: little-endian, aligned 32-bit accesses only. */
static const struct MemoryRegionOps npcm_gmac_ops = {
    .read = npcm_gmac_read,
    .write = npcm_gmac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
};
872 
npcm_gmac_realize(DeviceState * dev,Error ** errp)873 static void npcm_gmac_realize(DeviceState *dev, Error **errp)
874 {
875     NPCMGMACState *gmac = NPCM_GMAC(dev);
876     SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
877 
878     memory_region_init_io(&gmac->iomem, OBJECT(gmac), &npcm_gmac_ops, gmac,
879                           TYPE_NPCM_GMAC, 8 * KiB);
880     sysbus_init_mmio(sbd, &gmac->iomem);
881     sysbus_init_irq(sbd, &gmac->irq);
882 
883     qemu_macaddr_default_if_unset(&gmac->conf.macaddr);
884 
885     gmac->nic = qemu_new_nic(&net_npcm_gmac_info, &gmac->conf, TYPE_NPCM_GMAC,
886                              dev->id, &dev->mem_reentrancy_guard, gmac);
887     qemu_format_nic_info_str(qemu_get_queue(gmac->nic), gmac->conf.macaddr.a);
888     gmac->regs[R_NPCM_GMAC_MAC0_ADDR_HI] = (gmac->conf.macaddr.a[0] << 8) + \
889                                             gmac->conf.macaddr.a[1];
890     gmac->regs[R_NPCM_GMAC_MAC0_ADDR_LO] = (gmac->conf.macaddr.a[2] << 24) + \
891                                            (gmac->conf.macaddr.a[3] << 16) + \
892                                            (gmac->conf.macaddr.a[4] << 8) + \
893                                             gmac->conf.macaddr.a[5];
894 }
895 
/* Unrealize: tear down the NIC backend created in npcm_gmac_realize(). */
static void npcm_gmac_unrealize(DeviceState *dev)
{
    NPCMGMACState *gmac = NPCM_GMAC(dev);

    qemu_del_nic(gmac->nic);
}
902 
/*
 * Migration state: only the MMIO register bank is migrated.
 * NOTE(review): phy_regs[] is not in the field list — presumably
 * reconstructed on the destination; verify this is intentional.
 */
static const VMStateDescription vmstate_npcm_gmac = {
    .name = TYPE_NPCM_GMAC,
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, NPCMGMACState, NPCM_GMAC_NR_REGS),
        VMSTATE_END_OF_LIST(),
    },
};
912 
/* User-configurable properties: the standard NIC options (mac, netdev...). */
static const Property npcm_gmac_properties[] = {
    DEFINE_NIC_PROPERTIES(NPCMGMACState, conf),
};
916 
/* Class init: hook up lifecycle callbacks, migration state and properties. */
static void npcm_gmac_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->desc = "NPCM GMAC Controller";
    dc->vmsd = &vmstate_npcm_gmac;
    dc->realize = npcm_gmac_realize;
    dc->unrealize = npcm_gmac_unrealize;
    device_class_set_props(dc, npcm_gmac_properties);
    device_class_set_legacy_reset(dc, npcm_gmac_reset);
}
929 
/* QOM type registration: the GMAC is a sysbus device. */
static const TypeInfo npcm_gmac_types[] = {
    {
        .name = TYPE_NPCM_GMAC,
        .parent = TYPE_SYS_BUS_DEVICE,
        .instance_size = sizeof(NPCMGMACState),
        .class_init = npcm_gmac_class_init,
    },
};
DEFINE_TYPES(npcm_gmac_types)
939