// SPDX-License-Identifier: GPL-2.0-only
/*
 * sonic.c
 *
 * (C) 2005 Finn Thain
 *
 * Converted to DMA API, added zero-copy buffer handling, and
 * (from the mac68k project) introduced dhd's support for 16-bit cards.
 *
 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * This driver is based on work from Andreas Busse, but most of
 * the code is rewritten.
 *
 * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
 *
 *    Core code included by system sonic drivers
 *
 * And... partially rewritten again by David Huggins-Daines in order
 * to cope with screwed up Macintosh NICs that may or may not use
 * 16-bit DMA.
 *
 * (C) 1999 David Huggins-Daines <dhd@debian.org>
 *
 */

/*
 * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook,
 * National Semiconductors data sheet for the DP83932B Sonic Ethernet
 * controller, and the files "8390.c" and "skeleton.c" in this directory.
 *
 * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi
 * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also
 * the NetBSD file "sys/arch/mac68k/dev/if_sn.c".
 */

static unsigned int version_printed;

static int sonic_debug = -1;
module_param(sonic_debug, int, 0);
MODULE_PARM_DESC(sonic_debug, "debug message level");

static void sonic_msg_init(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

	lp->msg_enable = netif_msg_init(sonic_debug, 0);

	if (version_printed++ == 0)
		netif_dbg(lp, drv, dev, "%s", version);
}

/*
 * Open/initialize the SONIC controller.
 *
 * This routine should set everything up anew at each open, even
 *  registers that "should" only need to be set once at boot, so that
 *  there is a non-reboot way to recover if something goes wrong.
 */
static int sonic_open(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);

	spin_lock_init(&lp->lock);

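	/* First pass: allocate all of the receive skbs before any DMA mapping. */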
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
		if (skb == NULL) {
			while (i > 0) { /* free any that were allocated successfully */
				i--;
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't allocate receive buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		/* align IP header unless DMA requires otherwise */
		if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
			skb_reserve(skb, 2);
		lp->rx_skb[i] = skb;
	}

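	/* Second pass: DMA-map each receive buffer; unwind fully on failure. */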
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
		                                  SONIC_RBSIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->device, laddr)) {
			while (i > 0) { /* free any that were mapped successfully */
				i--;
				dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
				lp->rx_laddr[i] = (dma_addr_t)0;
			}
			for (i = 0; i < SONIC_NUM_RRS; i++) {
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		lp->rx_laddr[i] = laddr;
	}

	/*
	 * Initialize the SONIC
	 */
	sonic_init(dev);

	netif_start_queue(dev);

	netif_dbg(lp, ifup, dev, "%s: Initialization done\n", __func__);

	return 0;
}

/* Wait for the SONIC to become idle. */
static void sonic_quiesce(struct net_device *dev, u16 mask)
{
	struct sonic_local * __maybe_unused lp = netdev_priv(dev);
	int i;
	u16 bits;

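	/*
	 * Poll the command register until the requested command bits clear.
	 * Busy-wait when called from atomic context, otherwise sleep between
	 * polls so as not to burn CPU time.
	 */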
	for (i = 0; i < 1000; ++i) {
		bits = SONIC_READ(SONIC_CMD) & mask;
		if (!bits)
			return;
		if (irqs_disabled() || in_interrupt())
			udelay(20);
		else
			usleep_range(100, 200);
	}
	WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
}

/*
 * Close the SONIC device
 */
static int sonic_close(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	netif_dbg(lp, ifdown, dev, "%s\n", __func__);

	netif_stop_queue(dev);

	/*
	 * stop the SONIC, disable interrupts
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
	sonic_quiesce(dev, SONIC_CR_ALL);

	SONIC_WRITE(SONIC_IMR, 0);
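	/* Acknowledge any pending interrupts, then hold the chip in software reset. */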
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/* unmap and free skbs that haven't been transmitted */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if (lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}

	/* unmap and free the receive buffers */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		if (lp->rx_laddr[i]) {
			dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
			lp->rx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->rx_skb[i]) {
			dev_kfree_skb(lp->rx_skb[i]);
			lp->rx_skb[i] = NULL;
		}
	}

	return 0;
}

static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;
	/*
	 * put the Sonic into software-reset mode and
	 * disable all interrupts before releasing DMA buffers
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
	sonic_quiesce(dev, SONIC_CR_ALL);

	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
	/* We could resend the original skbs. Easier to re-initialise. */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if (lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}
	/* Try to restart the adaptor. */
	sonic_init(dev);
	lp->stats.tx_errors++;
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}

/*
 * transmit packet
 *
 * Appends a new TD during transmission, thus avoiding any TX interrupts
 * until we run out of TDs.
 * This routine interacts closely with the ISR in that it may,
 *   set tx_skb[i]
 *   reset the status flags of the new TD
 *   set and reset EOL flags
 *   stop the tx queue
 * The ISR interacts with this routine in various ways. It may,
 *   reset tx_skb[i]
 *   test the EOL and status flags of the TDs
 *   wake the tx queue
 * Concurrently with all of this, the SONIC is potentially writing to
 * the status flags of the TDs.
 */

static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	dma_addr_t laddr;
	int length;
	int entry;
	unsigned long flags;

	netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);

	length = skb->len;
	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	/*
	 * Map the packet data into the logical DMA address space
	 */

	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
	if (!laddr) {
		pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&lp->lock, flags);

	entry = lp->next_tx;

	sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);   /* single fragment */
	sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
	sonic_tda_put(dev, entry, SONIC_TD_LINK,
		sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);

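	/*
	 * Ensure the descriptor writes above are committed before the
	 * driver's bookkeeping below marks this TD as handed to the SONIC.
	 */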
	wmb();
	lp->tx_len[entry] = length;
	lp->tx_laddr[entry] = laddr;
	lp->tx_skb[entry] = skb;

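	/*
	 * Ensure the new TD and bookkeeping are visible before clearing
	 * EOL on the previous TD, which exposes the new TD to the SONIC.
	 */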
	wmb();
	sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
		      sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
	lp->eol_tx = entry;

	lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
	if (lp->tx_skb[lp->next_tx] != NULL) {
		/* The ring is full, the ISR has yet to process the next TD. */
		netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__);
		netif_stop_queue(dev);
		/* after this packet, wait for ISR to free up some TDAs */
	} else {
		netif_start_queue(dev);
	}

	netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);

	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}

/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */
static irqreturn_t sonic_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sonic_local *lp = netdev_priv(dev);
	int status;
	unsigned long flags;

	/* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
	 * with sonic_send_packet() so that the two functions can share state.
	 * Secondly, it makes sonic_interrupt() re-entrant, as that is required
	 * by macsonic which must use two IRQs with different priority levels.
	 */
	spin_lock_irqsave(&lp->lock, flags);

	status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
	if (!status) {
		spin_unlock_irqrestore(&lp->lock, flags);

		return IRQ_NONE;
	}

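	/* Acknowledge and service events until the masked ISR reads back zero. */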
	do {
		SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */

		if (status & SONIC_INT_PKTRX) {
			netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
			sonic_rx(dev);	/* got packet(s) */
		}

		if (status & SONIC_INT_TXDN) {
			int entry = lp->cur_tx;
			int td_status;
			int freed_some = 0;

			/* The state of a Transmit Descriptor may be inferred
			 * from { tx_skb[entry], td_status } as follows.
			 * { clear, clear } => the TD has never been used
			 * { set,   clear } => the TD was handed to SONIC
			 * { set,   set   } => the TD was handed back
			 * { clear, set   } => the TD is available for re-use
			 */

			netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);

			while (lp->tx_skb[entry] != NULL) {
				td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS);
				if (td_status == 0)
					break;

				if (td_status & SONIC_TCR_PTX) {
					lp->stats.tx_packets++;
					lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
				} else {
					if (td_status & (SONIC_TCR_EXD |
					    SONIC_TCR_EXC | SONIC_TCR_BCM))
						lp->stats.tx_aborted_errors++;
					if (td_status &
					    (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
						lp->stats.tx_carrier_errors++;
					if (td_status & SONIC_TCR_OWC)
						lp->stats.tx_window_errors++;
					if (td_status & SONIC_TCR_FU)
						lp->stats.tx_fifo_errors++;
				}

				/* We must free the original skb */
				dev_consume_skb_irq(lp->tx_skb[entry]);
				lp->tx_skb[entry] = NULL;
				/* and unmap DMA buffer */
				dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
				lp->tx_laddr[entry] = (dma_addr_t)0;
				freed_some = 1;

				if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
					entry = (entry + 1) & SONIC_TDS_MASK;
					break;
				}
				entry = (entry + 1) & SONIC_TDS_MASK;
			}

			if (freed_some || lp->tx_skb[entry] == NULL)
				netif_wake_queue(dev);  /* The ring is no longer full */
			lp->cur_tx = entry;
		}

		/*
		 * check error conditions
		 */
		if (status & SONIC_INT_RFO) {
			netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
				  __func__);
		}
		if (status & SONIC_INT_RDE) {
			netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
				  __func__);
		}
		if (status & SONIC_INT_RBAE) {
			netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
				  __func__);
		}

		/* counter overruns; all counters are 16-bit wide */
		if (status & SONIC_INT_FAE)
			lp->stats.rx_frame_errors += 65536;
		if (status & SONIC_INT_CRC)
			lp->stats.rx_crc_errors += 65536;
		if (status & SONIC_INT_MP)
			lp->stats.rx_missed_errors += 65536;

		/* transmit error */
		if (status & SONIC_INT_TXER) {
			u16 tcr = SONIC_READ(SONIC_TCR);

			netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
				  __func__, tcr);

			if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
				   SONIC_TCR_FU | SONIC_TCR_BCM)) {
				/* Aborted transmission. Try again. */
				netif_stop_queue(dev);
				SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
			}
		}

		/* bus retry */
		if (status & SONIC_INT_BR) {
			printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
				dev->name);
			/* ... to help debug DMA problems causing endless interrupts. */
			/* Bounce the eth interface to turn on the interrupt again. */
			SONIC_WRITE(SONIC_IMR, 0);
		}

		status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
	} while (status);

	spin_unlock_irqrestore(&lp->lock, flags);

	return IRQ_HANDLED;
}

/* Return the array index corresponding to a given Receive Buffer pointer. */
static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
			   unsigned int last)
{
	unsigned int i = last;

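	/* Start just after the previous match and scan the whole ring once. */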
	do {
		i = (i + 1) & SONIC_RRS_MASK;
		if (addr == lp->rx_laddr[i])
			return i;
	} while (i != last);

	return -ENOENT;
}

/* Allocate and map a new skb to be used as a receive buffer. */
static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
			   struct sk_buff **new_skb, dma_addr_t *new_addr)
{
	*new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
	if (!*new_skb)
		return false;

	if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
		skb_reserve(*new_skb, 2);

	*new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
				   SONIC_RBSIZE, DMA_FROM_DEVICE);
	if (!*new_addr) {
		dev_kfree_skb(*new_skb);
		*new_skb = NULL;
		return false;
	}

	return true;
}

/* Place a new receive resource in the Receive Resource Area and update RWP. */
static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
			     dma_addr_t old_addr, dma_addr_t new_addr)
{
	unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
	unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
	u32 buf;

	/* The resources in the range [RRP, RWP) belong to the SONIC. This loop
	 * scans the other resources in the RRA, those in the range [RWP, RRP).
	 */
	do {
		buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
		      sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);

		if (buf == old_addr)
			break;

		entry = (entry + 1) & SONIC_RRS_MASK;
	} while (entry != end);

	WARN_ONCE(buf != old_addr, "failed to find resource!\n");

	sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
	sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);

	entry = (entry + 1) & SONIC_RRS_MASK;

	SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
}

/*
 * We have a good packet(s), pass it/them up the network stack.
 */
static void sonic_rx(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int entry = lp->cur_rx;
	int prev_entry = lp->eol_rx;
	bool rbe = false;

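	/* Walk the descriptors that the SONIC has handed back (IN_USE cleared). */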
	while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
		u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);

		/* If the RD has LPKT set, the chip has finished with the RB */
		if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
			struct sk_buff *new_skb;
			dma_addr_t new_laddr;
			u32 addr = (sonic_rda_get(dev, entry,
						  SONIC_RD_PKTPTR_H) << 16) |
				   sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
			int i = index_from_addr(lp, addr, entry);

			if (i < 0) {
				WARN_ONCE(1, "failed to find buffer!\n");
				break;
			}

			if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
				struct sk_buff *used_skb = lp->rx_skb[i];
				int pkt_len;

				/* Pass the used buffer up the stack */
				dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
						 DMA_FROM_DEVICE);

				pkt_len = sonic_rda_get(dev, entry,
							SONIC_RD_PKTLEN);
				skb_trim(used_skb, pkt_len);
				used_skb->protocol = eth_type_trans(used_skb,
								    dev);
				netif_rx(used_skb);
				lp->stats.rx_packets++;
				lp->stats.rx_bytes += pkt_len;

				lp->rx_skb[i] = new_skb;
				lp->rx_laddr[i] = new_laddr;
			} else {
				/* Failed to obtain a new buffer so re-use it */
				new_laddr = addr;
				lp->stats.rx_dropped++;
			}
			/* If RBE is already asserted when RWP advances then
			 * it's safe to clear RBE after processing this packet.
			 */
			rbe = rbe || (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE);
			sonic_update_rra(dev, lp, addr, new_laddr);
		}
		/*
		 * give back the descriptor
		 */
		sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
		sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);

		prev_entry = entry;
		entry = (entry + 1) & SONIC_RDS_MASK;
	}

	lp->cur_rx = entry;

	if (prev_entry != lp->eol_rx) {
		/* Advance the EOL flag to put descriptors back into service */
		sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
			      sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
		sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
			      sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
		lp->eol_rx = prev_entry;
	}

	if (rbe)
		SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
	/*
	 * If any worthwhile packets have been received, netif_rx() has
	 * queued them for the networking softirq, which will process
	 * them after this interrupt handler returns.
	 */
}


/*
 * Get the current statistics.
 * This may be called with the device open or closed.
 */
static struct net_device_stats *sonic_get_stats(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

	/* read the tally counters from the SONIC and reset them */
	lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
	SONIC_WRITE(SONIC_CRCT, 0xffff);
	lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
	SONIC_WRITE(SONIC_FAET, 0xffff);
	lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
	SONIC_WRITE(SONIC_MPT, 0xffff);

	return &lp->stats;
}


/*
 * Set or clear the multicast filter for this adaptor.
 */
static void sonic_multicast_list(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	unsigned int rcr;
	struct netdev_hw_addr *ha;
	unsigned char *addr;
	int i;

	rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
	rcr |= SONIC_RCR_BRD;	/* accept broadcast packets */

	if (dev->flags & IFF_PROMISC) {	/* set promiscuous mode */
		rcr |= SONIC_RCR_PRO;
	} else {
		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > 15)) {
			rcr |= SONIC_RCR_AMC;
		} else {
			unsigned long flags;

			netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
				  netdev_mc_count(dev));
			sonic_set_cam_enable(dev, 1);  /* always enable our own address */
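			/* CAM entry 0 holds the interface's own address (see
			 * sonic_init()); multicast addresses fill entries 1 to 15.
			 */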
			i = 1;
			netdev_for_each_mc_addr(ha, dev) {
				addr = ha->addr;
				sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
				sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
				sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
				sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
				i++;
			}
			SONIC_WRITE(SONIC_CDC, 16);
			SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);

			/* LCAM and TXP commands can't be used simultaneously */
			spin_lock_irqsave(&lp->lock, flags);
			sonic_quiesce(dev, SONIC_CR_TXP);
			SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
			sonic_quiesce(dev, SONIC_CR_LCAM);
			spin_unlock_irqrestore(&lp->lock, flags);
		}
	}

	netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr);

	SONIC_WRITE(SONIC_RCR, rcr);
}


/*
 * Initialize the SONIC ethernet controller.
 */
static int sonic_init(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	/*
	 * put the Sonic into software-reset mode and
	 * disable all interrupts
	 */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/* While in reset mode, clear CAM Enable register */
	SONIC_WRITE(SONIC_CE, 0);

	/*
	 * clear software reset flag, disable receiver, clear and
	 * enable interrupts, then completely initialize the SONIC
	 */
	SONIC_WRITE(SONIC_CMD, 0);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
	sonic_quiesce(dev, SONIC_CR_ALL);

	/*
	 * initialize the receive resource area
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize receive resource area\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
		u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);
	}

	/* initialize all RRA registers */
	SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
	SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
	SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
	SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
	SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
	SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));

	/* load the resource pointers */
	netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
	sonic_quiesce(dev, SONIC_CR_RRRA);

	/*
	 * Initialize the receive descriptors so that they
	 * become a circular linked list, ie. let the last
	 * descriptor point to the first again.
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize receive descriptors\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_RDS; i++) {
		sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0);
		sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
		sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
		sonic_rda_put(dev, i, SONIC_RD_LINK,
			lp->rda_laddr +
			((i + 1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
	}
	/* fix last descriptor */
	sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
		(lp->rda_laddr & 0xffff) | SONIC_EOL);
	lp->eol_rx = SONIC_NUM_RDS - 1;
	lp->cur_rx = 0;
	SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
	SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);

	/*
	 * initialize transmit descriptors
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize transmit descriptors\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_TDS; i++) {
		sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
		sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
		sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
		sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
		sonic_tda_put(dev, i, SONIC_TD_LINK,
			(lp->tda_laddr & 0xffff) +
			(i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
		lp->tx_skb[i] = NULL;
	}
	/* fix last descriptor */
	sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
		(lp->tda_laddr & 0xffff));

	SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
	SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
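	/*
	 * cur_tx is the next TD the ISR will reclaim, next_tx is the next
	 * free TD for sonic_send_packet(), and eol_tx is the TD currently
	 * marked with EOL.
	 */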
	lp->cur_tx = lp->next_tx = 0;
	lp->eol_tx = SONIC_NUM_TDS - 1;

	/*
	 * put our own address to CAM desc[0]
	 */
	sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]);
	sonic_set_cam_enable(dev, 1);

	for (i = 0; i < 16; i++)
		sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);

	/*
	 * initialize CAM registers
	 */
	SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
	SONIC_WRITE(SONIC_CDC, 16);

	/*
	 * load the CAM
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
	sonic_quiesce(dev, SONIC_CR_LCAM);

	/*
	 * enable receiver, disable loopback
	 * and enable all interrupts
	 */
	SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
	SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);

	netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
		  SONIC_READ(SONIC_CMD));

	return 0;
}

MODULE_LICENSE("GPL");