/*
 * 7990.c -- LANCE ethernet IC generic routines.
 * This is an attempt to separate out the bits of various ethernet
 * drivers that are common because they all use the AMD 7990 LANCE
 * (Local Area Network Controller for Ethernet) chip.
 *
 * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
 *
 * Most of this stuff was obtained by looking at other LANCE drivers,
 * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
 * NB: this was made easy by the fact that Jes Sorensen had cleaned up
 * most of a2065 and sunlance with the aim of merging them, so the
 * common code was pretty obvious.
 */
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/route.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <asm/irq.h>
/* Used for the temporary inet entries and routing */
#include <linux/socket.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/pgtable.h>
#ifdef CONFIG_HP300
#include <asm/blinken.h>
#endif

#include "7990.h"

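/*
 * Register access: the LANCE is programmed through two ports. RAP (the
 * Register Address Port) selects one of CSR0-CSR3, and RDP (the Register
 * Data Port) then reads or writes the selected CSR, e.g.
 *
 *	WRITERAP(lp, LE_CSR0);
 *	csr0 = READRDP(lp);
 *
 * The default accessors below assume big-endian MMIO at lp->base; the
 * hplance/MVME147 variants further down replace them when those boards
 * are configured in.
 */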
#define WRITERAP(lp, x)	out_be16(lp->base + LANCE_RAP, (x))
#define WRITERDP(lp, x)	out_be16(lp->base + LANCE_RDP, (x))
#define READRDP(lp)	in_be16(lp->base + LANCE_RDP)

#if IS_ENABLED(CONFIG_HPLANCE)
#include "hplance.h"

#undef WRITERAP
#undef WRITERDP
#undef READRDP

#if IS_ENABLED(CONFIG_MVME147_NET)

/* Lossage Factor Nine, Mr Sulu. */
#define WRITERAP(lp, x)	(lp->writerap(lp, x))
#define WRITERDP(lp, x)	(lp->writerdp(lp, x))
#define READRDP(lp)	(lp->readrdp(lp))

#else

/* These inlines can be used if only CONFIG_HPLANCE is defined */
static inline void WRITERAP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}

static inline void WRITERDP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}

static inline __u16 READRDP(struct lance_private *lp)
{
	__u16 value;
	do {
		value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
	return value;
}

#endif
#endif /* IS_ENABLED(CONFIG_HPLANCE) */

/* debugging output macros, various flavours */
/* #define TEST_HITS */
#ifdef UNDEF
#define PRINT_RINGS() \
do { \
	int t; \
	for (t = 0; t < RX_RING_SIZE; t++) { \
		printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n", \
		       t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
		       ib->brx_ring[t].length, \
		       ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
	} \
	for (t = 0; t < TX_RING_SIZE; t++) { \
		printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n", \
		       t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
		       ib->btx_ring[t].length, \
		       ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \
	} \
} while (0)
#else
#define PRINT_RINGS()
#endif

/* Load the CSR registers. The LANCE has to be STOPped when we do this! */
static void load_csrs(struct lance_private *lp)
{
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr;

	leptr = LANCE_ADDR(aib);

	WRITERAP(lp, LE_CSR1);                    /* load address of init block */
	WRITERDP(lp, leptr & 0xFFFF);
	WRITERAP(lp, LE_CSR2);
	WRITERDP(lp, leptr >> 16);
	WRITERAP(lp, LE_CSR3);
	WRITERDP(lp, lp->busmaster_regval);       /* set byteswap/ALEctrl/byte ctrl */

	/* Point back to csr0 */
	WRITERAP(lp, LE_CSR0);
}

/* #define to 0 or 1 appropriately */
#define DEBUG_IRING 0
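/* Descriptor buffer lengths are stored as a two's complement (negative)
 * byte count with the top four bits forced to one; hence the
 * "-RX_BUFF_SIZE | 0xf000" below and "(-len) | 0xf000" in lance_start_xmit().
 */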
/* Set up the Lance Rx and Tx rings and the init block */
static void lance_init_ring(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
	int leptr;
	int i;

	aib = lp->lance_init_block;

	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;

	ib->mode = LE_MO_PROM;            /* Tx & Rx enabled; promiscuous for now, lance_set_multicast() sets the real mode */

	/* Copy the ethernet address to the lance init block
	 * Notice that we do a byteswap if we're big endian.
	 * [I think this is the right criterion; at least, sunlance,
	 * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
	 * However, the datasheet says that the BSWAP bit doesn't affect
	 * the init block, so surely it should be low byte first for
	 * everybody? Um.]
	 * We could define the ib->physaddr as three 16bit values and
	 * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
	 */
#ifdef __BIG_ENDIAN
	ib->phys_addr[0] = dev->dev_addr[1];
	ib->phys_addr[1] = dev->dev_addr[0];
	ib->phys_addr[2] = dev->dev_addr[3];
	ib->phys_addr[3] = dev->dev_addr[2];
	ib->phys_addr[4] = dev->dev_addr[5];
	ib->phys_addr[5] = dev->dev_addr[4];
#else
	for (i = 0; i < 6; i++)
		ib->phys_addr[i] = dev->dev_addr[i];
#endif

	if (DEBUG_IRING)
		printk("TX rings:\n");

	lp->tx_full = 0;
	/* Setup the Tx ring entries */
	for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
		ib->btx_ring[i].tmd0      = leptr;
		ib->btx_ring[i].tmd1_hadr = leptr >> 16;
		ib->btx_ring[i].tmd1_bits = 0;
		ib->btx_ring[i].length    = 0xf000; /* The ones required by tmd2 */
		ib->btx_ring[i].misc      = 0;
		if (DEBUG_IRING)
			printk("%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the Rx ring entries */
	if (DEBUG_IRING)
		printk("RX rings:\n");
	for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);

		ib->brx_ring[i].rmd0      = leptr;
		ib->brx_ring[i].rmd1_hadr = leptr >> 16;
		ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
		/* 0xf000 == bits that must be one (reserved, presumably) */
		ib->brx_ring[i].length    = -RX_BUFF_SIZE | 0xf000;
		ib->brx_ring[i].mblength  = 0;
		if (DEBUG_IRING)
			printk("%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the initialization block */

	/* Setup rx descriptor pointer */
	leptr = LANCE_ADDR(&aib->brx_ring);
	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
	ib->rx_ptr = leptr;
	if (DEBUG_IRING)
		printk("RX ptr: %8.8x\n", leptr);

	/* Setup tx descriptor pointer */
	leptr = LANCE_ADDR(&aib->btx_ring);
	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
	ib->tx_ptr = leptr;
	if (DEBUG_IRING)
		printk("TX ptr: %8.8x\n", leptr);

	/* Clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;
	PRINT_RINGS();
}

/* LANCE must be STOPped before we do this, too... */
static int init_restart_lance(struct lance_private *lp)
{
	int i;

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_INIT);

	/* Need a hook here for sunlance ledma stuff */

	/* Wait for the lance to complete initialization */
	for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();
	if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
		return -1;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	WRITERDP(lp, LE_C0_IDON);
	WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);

	return 0;
}

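/* Full reinitialisation: STOP the chip, reload the CSRs and rings, then
 * INIT/START it again via init_restart_lance().
 */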
static int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	int status;

	/* Stop the lance */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	load_csrs(lp);
	lance_init_ring(dev);
	netif_trans_update(dev); /* prevent tx timeout */
	status = init_restart_lance(lp);
#ifdef DEBUG_DRIVER
	printk("Lance restart=%d\n", status);
#endif
	return status;
}

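/* Receive handler: walk the Rx ring from rx_new, pass each completed
 * frame to the stack with netif_rx(), account errors, and hand the
 * descriptor back to the chip by setting LE_R1_OWN again.
 */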
static int lance_rx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_rx_desc *rd;
	unsigned char bits;
#ifdef TEST_HITS
	int i;
#endif

#ifdef TEST_HITS
	printk("[");
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (i == lp->rx_new)
			printk("%s",
			       ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "_" : "X");
		else
			printk("%s",
			       ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "." : "1");
	}
	printk("]");
#endif
#ifdef CONFIG_HP300
	blinken_leds(0x40, 0);
#endif
	WRITERDP(lp, LE_C0_RINT | LE_C0_INEA);     /* ack Rx int, reenable ints */
	for (rd = &ib->brx_ring[lp->rx_new];     /* For each Rx ring we own... */
	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
	     rd = &ib->brx_ring[lp->rx_new]) {

		/* We got an incomplete frame? */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			dev->stats.rx_over_errors++;
			dev->stats.rx_errors++;
			continue;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF)
				dev->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC)
				dev->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL)
				dev->stats.rx_over_errors++;
			if (bits & LE_R1_FRA)
				dev->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP)
				dev->stats.rx_errors++;
		} else {
			int len = (rd->mblength & 0xfff) - 4;
			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);

			if (!skb) {
				dev->stats.rx_dropped++;
				rd->mblength = 0;
				rd->rmd1_bits = LE_R1_OWN;
				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
				return 0;
			}

			skb_reserve(skb, 2);           /* 16 byte align */
			skb_put(skb, len);             /* make room */
			skb_copy_to_linear_data(skb,
					 (unsigned char *)&(ib->rx_buf[lp->rx_new][0]),
					 len);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		}

		/* Return the packet to the pool */
		rd->mblength = 0;
		rd->rmd1_bits = LE_R1_OWN;
		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
	}
	return 0;
}

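/* Transmit completion handler: reclaim descriptors between tx_old and
 * tx_new, update the error and collision statistics, and restart the
 * chip if a buffer error or FIFO underflow shut the transmitter down.
 */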
static int lance_tx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

#ifdef CONFIG_HP300
	blinken_leds(0x80, 0);
#endif
	/* csr0 is 2f3 */
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
	/* csr0 is 73 */

	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring[i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					lp->tpe = 1 - lp->tpe;
					printk("%s: Carrier Lost, trying %s\n",
					       dev->name,
					       lp->tpe ? "TPE" : "AUI");
					/* Stop the lance */
					WRITERAP(lp, LE_CSR0);
					WRITERDP(lp, LE_C0_STOP);
					lance_init_ring(dev);
					load_csrs(lp);
					init_restart_lance(lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off the transmitter */
			/* Restart the adapter */
			if (status & (LE_T3_BUF|LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
				       dev->name);
				/* Stop the lance */
				WRITERAP(lp, LE_CSR0);
				WRITERDP(lp, LE_C0_STOP);
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/*
			 * So we don't count the packet more than once.
			 */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
	return 0;
}

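/* Interrupt handler: read CSR0, acknowledge the pending sources, dispatch
 * to lance_rx()/lance_tx(), log babble/missed-frame/memory errors, and
 * wake the transmit queue if ring space has freed up.
 */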
static irqreturn_t
lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct lance_private *lp = netdev_priv(dev);
	int csr0;

	spin_lock(&lp->devlock);

	WRITERAP(lp, LE_CSR0);              /* LANCE Controller Status */
	csr0 = READRDP(lp);

	PRINT_RINGS();

	if (!(csr0 & LE_C0_INTR)) {     /* Check if any interrupt has */
		spin_unlock(&lp->devlock);
		return IRQ_NONE;        /* been generated by the Lance. */
	}

	/* Acknowledge all the interrupt sources ASAP */
	WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));

	if ((csr0 & LE_C0_ERR)) {
		/* Clear the error condition */
		WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
	}

	if (csr0 & LE_C0_RINT)
		lance_rx(dev);

	if (csr0 & LE_C0_TINT)
		lance_tx(dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;       /* Tx babble. */
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;       /* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		printk("%s: Bus master arbitration failure, status %4.4x.\n",
		       dev->name, csr0);
		/* Restart the chip. */
		WRITERDP(lp, LE_C0_STRT);
	}

	if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
		lp->tx_full = 0;
		netif_wake_queue(dev);
	}

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);

	spin_unlock(&lp->devlock);
	return IRQ_HANDLED;
}

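/* Bring the interface up: install the shared interrupt handler, reset and
 * reinitialise the chip, and start the transmit queue.
 */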
int lance_open(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	int res;

	/* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
	if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
		return -EAGAIN;

	res = lance_reset(dev);
	spin_lock_init(&lp->devlock);
	netif_start_queue(dev);

	return res;
}
EXPORT_SYMBOL_GPL(lance_open);

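/* Take the interface down: stop the queue, STOP the chip and free the IRQ. */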
int lance_close(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);

	/* Stop the LANCE */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	free_irq(lp->irq, dev);

	return 0;
}
EXPORT_SYMBOL_GPL(lance_close);

void lance_tx_timeout(struct net_device *dev)
{
	printk("lance_tx_timeout\n");
	lance_reset(dev);
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);

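/* Queue one skb: copy it into the next Tx buffer in the init block
 * (padding short frames to ETH_ZLEN), hand the descriptor to the chip
 * with LE_T1_OWN and kick it with TDMD. The skb is consumed here; the
 * ring uses copied buffers, not DMA-mapped ones.
 */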
int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen, len;
	static int outs;
	unsigned long flags;

	netif_stop_queue(dev);

	if (!TX_BUFFS_AVAIL) {
		dev_consume_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skblen = skb->len;

#ifdef DEBUG_DRIVER
	/* dump the packet */
	{
		int i;

		for (i = 0; i < 64; i++) {
			if ((i % 16) == 0)
				printk("\n");
			printk("%2.2x ", skb->data[i]);
		}
	}
#endif
	len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
	entry = lp->tx_new & lp->tx_ring_mod_mask;
	ib->btx_ring[entry].length = (-len) | 0xf000;
	ib->btx_ring[entry].misc = 0;

	if (skb->len < ETH_ZLEN)
		memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
	lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;

	outs++;
	/* Kick the lance: transmit now */
	WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
	dev_consume_skb_any(skb);

	spin_lock_irqsave(&lp->devlock, flags);
	if (TX_BUFFS_AVAIL)
		netif_start_queue(dev);
	else
		lp->tx_full = 1;
	spin_unlock_irqrestore(&lp->devlock, flags);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(lance_start_xmit);

/* taken from the depca driver via a2065.c */
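/* The logical address filter is a 64-bit hash: the top six bits of the
 * little-endian CRC of each multicast address select one bit in ib->filter.
 */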
static void lance_load_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct netdev_hw_addr *ha;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI) {
		ib->filter[0] = 0xffffffff;
		ib->filter[1] = 0xffffffff;
		return;
	}
	/* clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;

	/* Add addresses */
	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(6, ha->addr);
		crc = crc >> 26;
		mcast_table[crc >> 4] |= 1 << (crc & 0xf);
	}
}

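/* Update the Rx mode: wait for the Tx ring to drain, STOP and reinitialise
 * the chip with promiscuous mode or the new multicast filter, then restart it.
 */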
void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int stopped;

	stopped = netif_queue_stopped(dev);
	if (!stopped)
		netif_stop_queue(dev);

	while (lp->tx_old != lp->tx_new)
		schedule();

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);
	lance_init_ring(dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);

	if (!stopped)
		netif_start_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_set_multicast);

#ifdef CONFIG_NET_POLL_CONTROLLER
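/* netconsole/netpoll hook: kick the chip and run the interrupt handler directly. */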
void lance_poll(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	spin_lock(&lp->devlock);
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STRT);
	spin_unlock(&lp->devlock);
	lance_interrupt(dev->irq, dev);
}
EXPORT_SYMBOL_GPL(lance_poll);
#endif

MODULE_LICENSE("GPL");