/*
 * Nuvoton NPCM7xx EMC Module
 *
 * Copyright 2020 Google LLC
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * Unsupported/unimplemented features:
 * - MCMDR.FDUP (full duplex) is ignored, half duplex is not supported
 * - Only CAM0 is supported, CAM[1-15] are not
 * - writes to CAMEN.[1-15] are ignored, these bits always read as zeroes
 * - MII is not implemented, MIIDA.BUSY and MIID always return zero
 * - MCMDR.LBK is not implemented
 * - MCMDR.{OPMOD,ENSQE,AEP,ARP} are not supported
 * - H/W FIFOs are not supported, MCMDR.FFTCR is ignored
 * - MGSTA.SQE is not supported
 * - pause and control frames are not implemented
 * - MGSTA.CCNT is not supported
 * - MPCNT, DMARFS are not implemented
 */

#include "qemu/osdep.h"

/* For crc32 */
#include <zlib.h>

#include "hw/irq.h"
#include "hw/qdev-clock.h"
#include "hw/qdev-properties.h"
#include "hw/net/npcm7xx_emc.h"
#include "net/eth.h"
#include "migration/vmstate.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/units.h"
#include "sysemu/dma.h"
#include "trace.h"

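/* Length in bytes of the Ethernet frame check sequence (FCS/CRC32). */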
#define CRC_LENGTH 4

/*
 * The maximum size of a (layer 2) ethernet frame as defined by 802.3.
 * 1518 = 6(dest macaddr) + 6(src macaddr) + 2(proto) + 4(crc) + 1500(payload)
 * This does not include an additional 4 for the vlan field (802.1q).
 */
#define MAX_ETH_FRAME_SIZE 1518

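/* Map a register index to its name, for use in trace and log messages. */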
static const char *emc_reg_name(int regno)
{
#define REG(name) case REG_ ## name: return #name;
    switch (regno) {
    REG(CAMCMR)
    REG(CAMEN)
    REG(TXDLSA)
    REG(RXDLSA)
    REG(MCMDR)
    REG(MIID)
    REG(MIIDA)
    REG(FFTCR)
    REG(TSDR)
    REG(RSDR)
    REG(DMARFC)
    REG(MIEN)
    REG(MISTA)
    REG(MGSTA)
    REG(MPCNT)
    REG(MRPC)
    REG(MRPCC)
    REG(MREPC)
    REG(DMARFS)
    REG(CTXDSA)
    REG(CTXBSA)
    REG(CRXDSA)
    REG(CRXBSA)
    case REG_CAMM_BASE + 0: return "CAM0M";
    case REG_CAML_BASE + 0: return "CAM0L";
    case REG_CAMM_BASE + 2 ... REG_CAMML_LAST:
        /* Only CAM0 is supported, fold the others into something simple. */
        if (regno & 1) {
            return "CAM<n>L";
        } else {
            return "CAM<n>M";
        }
    default: return "UNKNOWN";
    }
#undef REG
}

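/*
 * Reset the device to its power-on state: clear the register file, restore
 * the documented reset values and reload the MAC address into CAM0.
 */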
static void emc_reset(NPCM7xxEMCState *emc)
{
    uint32_t value;

    trace_npcm7xx_emc_reset(emc->emc_num);

    memset(&emc->regs[0], 0, sizeof(emc->regs));

    /* These regs have non-zero reset values. */
    emc->regs[REG_TXDLSA] = 0xfffffffc;
    emc->regs[REG_RXDLSA] = 0xfffffffc;
    emc->regs[REG_MIIDA] = 0x00900000;
    emc->regs[REG_FFTCR] = 0x0101;
    emc->regs[REG_DMARFC] = 0x0800;
    emc->regs[REG_MPCNT] = 0x7fff;

    emc->tx_active = false;
    emc->rx_active = false;

    /* Set the MAC address in the register space. */
    value = (emc->conf.macaddr.a[0] << 24) |
        (emc->conf.macaddr.a[1] << 16) |
        (emc->conf.macaddr.a[2] << 8) |
        emc->conf.macaddr.a[3];
    emc->regs[REG_CAMM_BASE] = value;

    value = (emc->conf.macaddr.a[4] << 24) | (emc->conf.macaddr.a[5] << 16);
    emc->regs[REG_CAML_BASE] = value;
}

static void npcm7xx_emc_reset(DeviceState *dev)
{
    NPCM7xxEMCState *emc = NPCM7XX_EMC(dev);
    emc_reset(emc);
}

static void emc_soft_reset(NPCM7xxEMCState *emc)
{
    /*
     * The docs say at least the MCMDR.{LBK,OPMOD} bits are not changed
     * during a soft reset, but do not go into further detail. For now, KISS.
     */
    uint32_t mcmdr = emc->regs[REG_MCMDR];
    emc_reset(emc);
    emc->regs[REG_MCMDR] = mcmdr & (REG_MCMDR_LBK | REG_MCMDR_OPMOD);

    qemu_set_irq(emc->tx_irq, 0);
    qemu_set_irq(emc->rx_irq, 0);
}

static void emc_set_link(NetClientState *nc)
{
    /* Nothing to do yet. */
}

/* MISTA.TXINTR is the union of the individual bits with their enables. */
static void emc_update_mista_txintr(NPCM7xxEMCState *emc)
{
    /* Only look at the bits we support. */
    uint32_t mask = (REG_MISTA_TXBERR |
                     REG_MISTA_TDU |
                     REG_MISTA_TXCP);
    if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & mask) {
        emc->regs[REG_MISTA] |= REG_MISTA_TXINTR;
    } else {
        emc->regs[REG_MISTA] &= ~REG_MISTA_TXINTR;
    }
}

/* MISTA.RXINTR is the union of the individual bits with their enables. */
static void emc_update_mista_rxintr(NPCM7xxEMCState *emc)
{
    /* Only look at the bits we support. */
    uint32_t mask = (REG_MISTA_RXBERR |
                     REG_MISTA_RDU |
                     REG_MISTA_RXGD);
    if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & mask) {
        emc->regs[REG_MISTA] |= REG_MISTA_RXINTR;
    } else {
        emc->regs[REG_MISTA] &= ~REG_MISTA_RXINTR;
    }
}

/* N.B. emc_update_mista_txintr must have already been called. */
static void emc_update_tx_irq(NPCM7xxEMCState *emc)
{
    int level = !!(emc->regs[REG_MISTA] &
                   emc->regs[REG_MIEN] &
                   REG_MISTA_TXINTR);
    trace_npcm7xx_emc_update_tx_irq(level);
    qemu_set_irq(emc->tx_irq, level);
}

/* N.B. emc_update_mista_rxintr must have already been called. */
static void emc_update_rx_irq(NPCM7xxEMCState *emc)
{
    int level = !!(emc->regs[REG_MISTA] &
                   emc->regs[REG_MIEN] &
                   REG_MISTA_RXINTR);
    trace_npcm7xx_emc_update_rx_irq(level);
    qemu_set_irq(emc->rx_irq, level);
}

/* Update IRQ states due to changes in MIEN,MISTA. */
static void emc_update_irq_from_reg_change(NPCM7xxEMCState *emc)
{
    emc_update_mista_txintr(emc);
    emc_update_tx_irq(emc);

    emc_update_mista_rxintr(emc);
    emc_update_rx_irq(emc);
}

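/*
 * TX/RX descriptors are stored little-endian in guest memory; the helpers
 * below byte-swap them to and from host order when copying them over DMA.
 */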
static int emc_read_tx_desc(dma_addr_t addr, NPCM7xxEMCTxDesc *desc)
{
    if (dma_memory_read(&address_space_memory, addr, desc,
                        sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    desc->flags = le32_to_cpu(desc->flags);
    desc->txbsa = le32_to_cpu(desc->txbsa);
    desc->status_and_length = le32_to_cpu(desc->status_and_length);
    desc->ntxdsa = le32_to_cpu(desc->ntxdsa);
    return 0;
}

static int emc_write_tx_desc(const NPCM7xxEMCTxDesc *desc, dma_addr_t addr)
{
    NPCM7xxEMCTxDesc le_desc;

    le_desc.flags = cpu_to_le32(desc->flags);
    le_desc.txbsa = cpu_to_le32(desc->txbsa);
    le_desc.status_and_length = cpu_to_le32(desc->status_and_length);
    le_desc.ntxdsa = cpu_to_le32(desc->ntxdsa);
    if (dma_memory_write(&address_space_memory, addr, &le_desc,
                         sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    return 0;
}

static int emc_read_rx_desc(dma_addr_t addr, NPCM7xxEMCRxDesc *desc)
{
    if (dma_memory_read(&address_space_memory, addr, desc,
                        sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    desc->status_and_length = le32_to_cpu(desc->status_and_length);
    desc->rxbsa = le32_to_cpu(desc->rxbsa);
    desc->reserved = le32_to_cpu(desc->reserved);
    desc->nrxdsa = le32_to_cpu(desc->nrxdsa);
    return 0;
}

static int emc_write_rx_desc(const NPCM7xxEMCRxDesc *desc, dma_addr_t addr)
{
    NPCM7xxEMCRxDesc le_desc;

    le_desc.status_and_length = cpu_to_le32(desc->status_and_length);
    le_desc.rxbsa = cpu_to_le32(desc->rxbsa);
    le_desc.reserved = cpu_to_le32(desc->reserved);
    le_desc.nrxdsa = cpu_to_le32(desc->nrxdsa);
    if (dma_memory_write(&address_space_memory, addr, &le_desc,
                         sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
                      HWADDR_PRIx "\n", __func__, addr);
        return -1;
    }
    return 0;
}

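/*
 * Set status bits in MISTA and refresh the TXINTR/RXINTR summary bits for
 * whichever half was touched (TX flags live in bits 31:16, RX in bits 15:0).
 */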
static void emc_set_mista(NPCM7xxEMCState *emc, uint32_t flags)
{
    trace_npcm7xx_emc_set_mista(flags);
    emc->regs[REG_MISTA] |= flags;
    if (extract32(flags, 16, 16)) {
        emc_update_mista_txintr(emc);
    }
    if (extract32(flags, 0, 16)) {
        emc_update_mista_rxintr(emc);
    }
}

static void emc_halt_tx(NPCM7xxEMCState *emc, uint32_t mista_flag)
{
    emc->tx_active = false;
    emc_set_mista(emc, mista_flag);
}

static void emc_halt_rx(NPCM7xxEMCState *emc, uint32_t mista_flag)
{
    emc->rx_active = false;
    emc_set_mista(emc, mista_flag);
}

static void emc_enable_rx_and_flush(NPCM7xxEMCState *emc)
{
    emc->rx_active = true;
    qemu_flush_queued_packets(qemu_get_queue(emc->nic));
}

static void emc_set_next_tx_descriptor(NPCM7xxEMCState *emc,
                                       const NPCM7xxEMCTxDesc *tx_desc,
                                       uint32_t desc_addr)
{
    /* Update the current descriptor, if only to reset the owner flag. */
    if (emc_write_tx_desc(tx_desc, desc_addr)) {
        /*
         * We just read it so this shouldn't generally happen.
         * Error already reported.
         */
        emc_set_mista(emc, REG_MISTA_TXBERR);
    }
    emc->regs[REG_CTXDSA] = TX_DESC_NTXDSA(tx_desc->ntxdsa);
}

static void emc_set_next_rx_descriptor(NPCM7xxEMCState *emc,
                                       const NPCM7xxEMCRxDesc *rx_desc,
                                       uint32_t desc_addr)
{
    /* Update the current descriptor, if only to reset the owner flag. */
    if (emc_write_rx_desc(rx_desc, desc_addr)) {
        /*
         * We just read it so this shouldn't generally happen.
         * Error already reported.
         */
        emc_set_mista(emc, REG_MISTA_RXBERR);
    }
    emc->regs[REG_CRXDSA] = RX_DESC_NRXDSA(rx_desc->nrxdsa);
}

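/*
 * Transmit the frame described by the current TX descriptor (CTXDSA), then
 * hand the descriptor back to the CPU and advance CTXDSA to the next one.
 */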
static void emc_try_send_next_packet(NPCM7xxEMCState *emc)
{
    /* Working buffer for sending out packets. Most packets fit in this. */
#define TX_BUFFER_SIZE 2048
    uint8_t tx_send_buffer[TX_BUFFER_SIZE];
    uint32_t desc_addr = TX_DESC_NTXDSA(emc->regs[REG_CTXDSA]);
    NPCM7xxEMCTxDesc tx_desc;
    uint32_t next_buf_addr, length;
    uint8_t *buf;
    g_autofree uint8_t *malloced_buf = NULL;

    if (emc_read_tx_desc(desc_addr, &tx_desc)) {
        /* Error reading descriptor, already reported. */
        emc_halt_tx(emc, REG_MISTA_TXBERR);
        emc_update_tx_irq(emc);
        return;
    }

    /* Nothing we can do if we don't own the descriptor. */
    if (!(tx_desc.flags & TX_DESC_FLAG_OWNER_MASK)) {
        trace_npcm7xx_emc_cpu_owned_desc(desc_addr);
        emc_halt_tx(emc, REG_MISTA_TDU);
        emc_update_tx_irq(emc);
        return;
    }

    /* Give the descriptor back regardless of what happens. */
    tx_desc.flags &= ~TX_DESC_FLAG_OWNER_MASK;
    tx_desc.status_and_length &= 0xffff;

    /*
     * Despite the h/w documentation saying the tx buffer is word aligned,
     * the Linux driver does not word align the buffer. There is value in not
     * aligning the buffer: see the description of NET_IP_ALIGN in the Linux
     * kernel sources.
     */
    next_buf_addr = tx_desc.txbsa;
    emc->regs[REG_CTXBSA] = next_buf_addr;
    length = TX_DESC_PKT_LEN(tx_desc.status_and_length);
    buf = &tx_send_buffer[0];

    if (length > sizeof(tx_send_buffer)) {
        malloced_buf = g_malloc(length);
        buf = malloced_buf;
    }

    if (dma_memory_read(&address_space_memory, next_buf_addr, buf,
                        length, MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n",
                      __func__, next_buf_addr);
        emc_set_mista(emc, REG_MISTA_TXBERR);
        emc_set_next_tx_descriptor(emc, &tx_desc, desc_addr);
        emc_update_tx_irq(emc);
        trace_npcm7xx_emc_tx_done(emc->regs[REG_CTXDSA]);
        return;
    }

    if ((tx_desc.flags & TX_DESC_FLAG_PADEN) && (length < MIN_PACKET_LENGTH)) {
        memset(buf + length, 0, MIN_PACKET_LENGTH - length);
        length = MIN_PACKET_LENGTH;
    }

    /* N.B. emc_receive can get called here. */
    qemu_send_packet(qemu_get_queue(emc->nic), buf, length);
    trace_npcm7xx_emc_sent_packet(length);

    tx_desc.status_and_length |= TX_DESC_STATUS_TXCP;
    if (tx_desc.flags & TX_DESC_FLAG_INTEN) {
        emc_set_mista(emc, REG_MISTA_TXCP);
    }
    if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & REG_MISTA_TXINTR) {
        tx_desc.status_and_length |= TX_DESC_STATUS_TXINTR;
    }

    emc_set_next_tx_descriptor(emc, &tx_desc, desc_addr);
    emc_update_tx_irq(emc);
    trace_npcm7xx_emc_tx_done(emc->regs[REG_CTXDSA]);
}

static bool emc_can_receive(NetClientState *nc)
{
    NPCM7xxEMCState *emc = NPCM7XX_EMC(qemu_get_nic_opaque(nc));

    bool can_receive = emc->rx_active;
    trace_npcm7xx_emc_can_receive(can_receive);
    return can_receive;
}

/* If result is false then *fail_reason contains the reason. */
static bool emc_receive_filter1(NPCM7xxEMCState *emc, const uint8_t *buf,
                                size_t len, const char **fail_reason)
{
    eth_pkt_types_e pkt_type = get_eth_packet_type(PKT_GET_ETH_HDR(buf));

    switch (pkt_type) {
    case ETH_PKT_BCAST:
        if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) {
            return true;
        } else {
            *fail_reason = "Broadcast packet disabled";
            return !!(emc->regs[REG_CAMCMR] & REG_CAMCMR_ABP);
        }
    case ETH_PKT_MCAST:
        if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) {
            return true;
        } else {
            *fail_reason = "Multicast packet disabled";
            return !!(emc->regs[REG_CAMCMR] & REG_CAMCMR_AMP);
        }
    case ETH_PKT_UCAST: {
        bool matches;
        uint32_t value;
        struct MACAddr mac;
        if (emc->regs[REG_CAMCMR] & REG_CAMCMR_AUP) {
            return true;
        }

        value = emc->regs[REG_CAMM_BASE];
        mac.a[0] = value >> 24;
        mac.a[1] = value >> 16;
        mac.a[2] = value >> 8;
        mac.a[3] = value >> 0;
        value = emc->regs[REG_CAML_BASE];
        mac.a[4] = value >> 24;
        mac.a[5] = value >> 16;

        matches = ((emc->regs[REG_CAMCMR] & REG_CAMCMR_ECMP) &&
                   /* We only support one CAM register, CAM0. */
                   (emc->regs[REG_CAMEN] & (1 << 0)) &&
                   memcmp(buf, mac.a, ETH_ALEN) == 0);
        if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) {
            *fail_reason = "MACADDR matched, comparison complemented";
            return !matches;
        } else {
            *fail_reason = "MACADDR didn't match";
            return matches;
        }
    }
    default:
        g_assert_not_reached();
    }
}

static bool emc_receive_filter(NPCM7xxEMCState *emc, const uint8_t *buf,
                               size_t len)
{
    const char *fail_reason = NULL;
    bool ok = emc_receive_filter1(emc, buf, len, &fail_reason);
    if (!ok) {
        trace_npcm7xx_emc_packet_filtered_out(fail_reason);
    }
    return ok;
}

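/*
 * NetClientInfo.receive handler: apply the CAM/CAMCMR filter, copy the frame
 * (plus the CRC unless MCMDR.SPCRC is set) into the current RX descriptor's
 * buffer, then update the descriptor status, MISTA and the RX IRQ.
 */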
static ssize_t emc_receive(NetClientState *nc, const uint8_t *buf, size_t len1)
{
    NPCM7xxEMCState *emc = NPCM7XX_EMC(qemu_get_nic_opaque(nc));
    const uint32_t len = len1;
    size_t max_frame_len;
    bool long_frame;
    uint32_t desc_addr;
    NPCM7xxEMCRxDesc rx_desc;
    uint32_t crc;
    uint8_t *crc_ptr;
    uint32_t buf_addr;

    trace_npcm7xx_emc_receiving_packet(len);

    if (!emc_can_receive(nc)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Unexpected packet\n", __func__);
        return -1;
    }

    if (len < ETH_HLEN ||
        /* Defensive programming: drop unsupportable large packets. */
        len > 0xffff - CRC_LENGTH) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Dropped frame of %u bytes\n",
                      __func__, len);
        return len;
    }

    /*
     * DENI is set if EMC received the Length/Type field of the incoming
     * packet, so it will be set regardless of what happens next.
     */
    emc_set_mista(emc, REG_MISTA_DENI);

    if (!emc_receive_filter(emc, buf, len)) {
        emc_update_rx_irq(emc);
        return len;
    }

    /* Huge frames (> DMARFC) are dropped. */
    max_frame_len = REG_DMARFC_RXMS(emc->regs[REG_DMARFC]);
    if (len + CRC_LENGTH > max_frame_len) {
        trace_npcm7xx_emc_packet_dropped(len);
        emc_set_mista(emc, REG_MISTA_DFOI);
        emc_update_rx_irq(emc);
        return len;
    }

    /*
     * Long Frames (> MAX_ETH_FRAME_SIZE) are also dropped, unless MCMDR.ALP
     * is set.
     */
    long_frame = false;
    if (len + CRC_LENGTH > MAX_ETH_FRAME_SIZE) {
        if (emc->regs[REG_MCMDR] & REG_MCMDR_ALP) {
            long_frame = true;
        } else {
            trace_npcm7xx_emc_packet_dropped(len);
            emc_set_mista(emc, REG_MISTA_PTLE);
            emc_update_rx_irq(emc);
            return len;
        }
    }

    desc_addr = RX_DESC_NRXDSA(emc->regs[REG_CRXDSA]);
    if (emc_read_rx_desc(desc_addr, &rx_desc)) {
        /* Error reading descriptor, already reported. */
        emc_halt_rx(emc, REG_MISTA_RXBERR);
        emc_update_rx_irq(emc);
        return len;
    }

    /* Nothing we can do if we don't own the descriptor. */
    if (!(rx_desc.status_and_length & RX_DESC_STATUS_OWNER_MASK)) {
        trace_npcm7xx_emc_cpu_owned_desc(desc_addr);
        emc_halt_rx(emc, REG_MISTA_RDU);
        emc_update_rx_irq(emc);
        return len;
    }

    crc = 0;
    crc_ptr = (uint8_t *) &crc;
    if (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC)) {
        crc = cpu_to_be32(crc32(~0, buf, len));
    }

    /* Give the descriptor back regardless of what happens. */
    rx_desc.status_and_length &= ~RX_DESC_STATUS_OWNER_MASK;

    buf_addr = rx_desc.rxbsa;
    emc->regs[REG_CRXBSA] = buf_addr;
    if (dma_memory_write(&address_space_memory, buf_addr, buf,
                         len, MEMTXATTRS_UNSPECIFIED) ||
        (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC) &&
         dma_memory_write(&address_space_memory, buf_addr + len,
                          crc_ptr, 4, MEMTXATTRS_UNSPECIFIED))) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bus error writing packet\n",
                      __func__);
        emc_set_mista(emc, REG_MISTA_RXBERR);
        emc_set_next_rx_descriptor(emc, &rx_desc, desc_addr);
        emc_update_rx_irq(emc);
        trace_npcm7xx_emc_rx_done(emc->regs[REG_CRXDSA]);
        return len;
    }

    trace_npcm7xx_emc_received_packet(len);

    /* Note: We've already verified len+4 <= 0xffff. */
    rx_desc.status_and_length = len;
    if (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC)) {
        rx_desc.status_and_length += 4;
    }
    rx_desc.status_and_length |= RX_DESC_STATUS_RXGD;
    emc_set_mista(emc, REG_MISTA_RXGD);

    if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & REG_MISTA_RXINTR) {
        rx_desc.status_and_length |= RX_DESC_STATUS_RXINTR;
    }
    if (long_frame) {
        rx_desc.status_and_length |= RX_DESC_STATUS_PTLE;
    }

    emc_set_next_rx_descriptor(emc, &rx_desc, desc_addr);
    emc_update_rx_irq(emc);
    trace_npcm7xx_emc_rx_done(emc->regs[REG_CRXDSA]);
    return len;
}

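/* MMIO read: registers are 32 bits wide, one per word offset. */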
static uint64_t npcm7xx_emc_read(void *opaque, hwaddr offset, unsigned size)
{
    NPCM7xxEMCState *emc = opaque;
    uint32_t reg = offset / sizeof(uint32_t);
    uint32_t result;

    if (reg >= NPCM7XX_NUM_EMC_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Invalid offset 0x%04" HWADDR_PRIx "\n",
                      __func__, offset);
        return 0;
    }

    switch (reg) {
    case REG_MIID:
        /*
         * We don't implement MII. Although writes record the last value
         * written (for debugging purposes), reads always return zero so the
         * guest sees deterministic behaviour.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Read of MIID, returning 0\n", __func__);
        result = 0;
        break;
    case REG_TSDR:
    case REG_RSDR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Read of write-only reg, %s/%d\n",
                      __func__, emc_reg_name(reg), reg);
        return 0;
    default:
        result = emc->regs[reg];
        break;
    }

    trace_npcm7xx_emc_reg_read(emc->emc_num, result, emc_reg_name(reg), reg);
    return result;
}

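/*
 * MMIO write: most registers simply latch the value; MCMDR, MIEN, MISTA,
 * TSDR and RSDR additionally trigger reset, IRQ or TX/RX side effects.
 */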
static void npcm7xx_emc_write(void *opaque, hwaddr offset,
                              uint64_t v, unsigned size)
{
    NPCM7xxEMCState *emc = opaque;
    uint32_t reg = offset / sizeof(uint32_t);
    uint32_t value = v;

    g_assert(size == sizeof(uint32_t));

    if (reg >= NPCM7XX_NUM_EMC_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Invalid offset 0x%04" HWADDR_PRIx "\n",
                      __func__, offset);
        return;
    }

    trace_npcm7xx_emc_reg_write(emc->emc_num, emc_reg_name(reg), reg, value);

    switch (reg) {
    case REG_CAMCMR:
        emc->regs[reg] = value;
        break;
    case REG_CAMEN:
        /* Only CAM0 is supported, don't pretend otherwise. */
        if (value & ~1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Only CAM0 is supported, cannot enable others"
                          ": 0x%x\n",
                          __func__, value);
        }
        emc->regs[reg] = value & 1;
        break;
    case REG_CAMM_BASE + 0:
        emc->regs[reg] = value;
        break;
    case REG_CAML_BASE + 0:
        emc->regs[reg] = value;
        break;
    case REG_MCMDR: {
        uint32_t prev;
        if (value & REG_MCMDR_SWR) {
            emc_soft_reset(emc);
            /* On h/w the reset happens over multiple cycles. For now KISS. */
            break;
        }
        prev = emc->regs[reg];
        emc->regs[reg] = value;
        /* Update tx state. */
        if (!(prev & REG_MCMDR_TXON) &&
            (value & REG_MCMDR_TXON)) {
            emc->regs[REG_CTXDSA] = emc->regs[REG_TXDLSA];
            /*
             * The Linux kernel turns TX on with the CPU still holding the
             * descriptor, which suggests we should wait for a write to TSDR
             * before trying to send a packet, so we don't send one here.
             */
        } else if ((prev & REG_MCMDR_TXON) &&
                   !(value & REG_MCMDR_TXON)) {
            emc->regs[REG_MGSTA] |= REG_MGSTA_TXHA;
        }
        if (!(value & REG_MCMDR_TXON)) {
            emc_halt_tx(emc, 0);
        }
        /* Update rx state. */
        if (!(prev & REG_MCMDR_RXON) &&
            (value & REG_MCMDR_RXON)) {
            emc->regs[REG_CRXDSA] = emc->regs[REG_RXDLSA];
        } else if ((prev & REG_MCMDR_RXON) &&
                   !(value & REG_MCMDR_RXON)) {
            emc->regs[REG_MGSTA] |= REG_MGSTA_RXHA;
        }
        if (value & REG_MCMDR_RXON) {
            emc_enable_rx_and_flush(emc);
        } else {
            emc_halt_rx(emc, 0);
        }
        break;
    }
    case REG_TXDLSA:
    case REG_RXDLSA:
    case REG_DMARFC:
    case REG_MIID:
        emc->regs[reg] = value;
        break;
    case REG_MIEN:
        emc->regs[reg] = value;
        emc_update_irq_from_reg_change(emc);
        break;
    case REG_MISTA:
        /* Clear the bits that have 1 in "value". */
        emc->regs[reg] &= ~value;
        emc_update_irq_from_reg_change(emc);
        break;
    case REG_MGSTA:
        /* Clear the bits that have 1 in "value". */
        emc->regs[reg] &= ~value;
        break;
    case REG_TSDR:
        if (emc->regs[REG_MCMDR] & REG_MCMDR_TXON) {
            emc->tx_active = true;
            /* Keep trying to send packets until we run out. */
            while (emc->tx_active) {
                emc_try_send_next_packet(emc);
            }
        }
        break;
    case REG_RSDR:
        if (emc->regs[REG_MCMDR] & REG_MCMDR_RXON) {
            emc_enable_rx_and_flush(emc);
        }
        break;
    case REG_MIIDA:
        emc->regs[reg] = value & ~REG_MIIDA_BUSY;
        break;
    case REG_MRPC:
    case REG_MRPCC:
    case REG_MREPC:
    case REG_CTXDSA:
    case REG_CTXBSA:
    case REG_CRXDSA:
    case REG_CRXBSA:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Write to read-only reg %s/%d\n",
                      __func__, emc_reg_name(reg), reg);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: Write to unimplemented reg %s/%d\n",
                      __func__, emc_reg_name(reg), reg);
        break;
    }
}

static const struct MemoryRegionOps npcm7xx_emc_ops = {
    .read = npcm7xx_emc_read,
    .write = npcm7xx_emc_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
    },
};

static void emc_cleanup(NetClientState *nc)
{
    /* Nothing to do yet. */
}

static NetClientInfo net_npcm7xx_emc_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = emc_can_receive,
    .receive = emc_receive,
    .cleanup = emc_cleanup,
    .link_status_changed = emc_set_link,
};

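/*
 * Realize: map the 4 KiB register block, export the TX and RX IRQs and
 * create the NIC backend.
 */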
static void npcm7xx_emc_realize(DeviceState *dev, Error **errp)
{
    NPCM7xxEMCState *emc = NPCM7XX_EMC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(emc);

    memory_region_init_io(&emc->iomem, OBJECT(emc), &npcm7xx_emc_ops, emc,
                          TYPE_NPCM7XX_EMC, 4 * KiB);
    sysbus_init_mmio(sbd, &emc->iomem);
    sysbus_init_irq(sbd, &emc->tx_irq);
    sysbus_init_irq(sbd, &emc->rx_irq);

    qemu_macaddr_default_if_unset(&emc->conf.macaddr);
    emc->nic = qemu_new_nic(&net_npcm7xx_emc_info, &emc->conf,
                            object_get_typename(OBJECT(dev)), dev->id,
                            &dev->mem_reentrancy_guard, emc);
    qemu_format_nic_info_str(qemu_get_queue(emc->nic), emc->conf.macaddr.a);
}

static void npcm7xx_emc_unrealize(DeviceState *dev)
{
    NPCM7xxEMCState *emc = NPCM7XX_EMC(dev);

    qemu_del_nic(emc->nic);
}

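/* Migration state: the register file plus the TX/RX activity flags. */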
static const VMStateDescription vmstate_npcm7xx_emc = {
    .name = TYPE_NPCM7XX_EMC,
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(emc_num, NPCM7xxEMCState),
        VMSTATE_UINT32_ARRAY(regs, NPCM7xxEMCState, NPCM7XX_NUM_EMC_REGS),
        VMSTATE_BOOL(tx_active, NPCM7xxEMCState),
        VMSTATE_BOOL(rx_active, NPCM7xxEMCState),
        VMSTATE_END_OF_LIST(),
    },
};

static Property npcm7xx_emc_properties[] = {
    DEFINE_NIC_PROPERTIES(NPCM7xxEMCState, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void npcm7xx_emc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->desc = "NPCM7xx EMC Controller";
    dc->realize = npcm7xx_emc_realize;
    dc->unrealize = npcm7xx_emc_unrealize;
    dc->reset = npcm7xx_emc_reset;
    dc->vmsd = &vmstate_npcm7xx_emc;
    device_class_set_props(dc, npcm7xx_emc_properties);
}

static const TypeInfo npcm7xx_emc_info = {
    .name = TYPE_NPCM7XX_EMC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(NPCM7xxEMCState),
    .class_init = npcm7xx_emc_class_init,
};

static void npcm7xx_emc_register_type(void)
{
    type_register_static(&npcm7xx_emc_info);
}

type_init(npcm7xx_emc_register_type)