1 /* 8390.c: A general NS8390 ethernet driver core for linux. */
2 /*
3 	Written 1992-94 by Donald Becker.
4 
5 	Copyright 1993 United States Government as represented by the
6 	Director, National Security Agency.
7 
8 	This software may be used and distributed according to the terms
9 	of the GNU General Public License, incorporated herein by reference.
10 
11 	The author may be reached as becker@scyld.com, or C/O
12 	Scyld Computing Corporation
13 	410 Severn Ave., Suite 210
14 	Annapolis MD 21403
15 
16 
17   This is the chip-specific code for many 8390-based ethernet adaptors.
  This is not a complete driver; it must be combined with board-specific
19   code such as ne.c, wd.c, 3c503.c, etc.
20 
  Seeing how at least eight drivers use this code (not counting the
  PCMCIA ones), it is easy to break some card with what seems like
  a simple, innocent change. Please contact me or Donald if you think
24   you have found something that needs changing. -- PG
25 
26 
27   Changelog:
28 
29   Paul Gortmaker	: remove set_bit lock, other cleanups.
30   Paul Gortmaker	: add ei_get_8390_hdr() so we can pass skb's to
31 			  ei_block_input() for eth_io_copy_and_sum().
32   Paul Gortmaker	: exchange static int ei_pingpong for a #define,
33 			  also add better Tx error handling.
34   Paul Gortmaker	: rewrite Rx overrun handling as per NS specs.
35   Alexey Kuznetsov	: use the 8390's six bit hash multicast filter.
36   Paul Gortmaker	: tweak ANK's above multicast changes a bit.
37   Paul Gortmaker	: update packet statistics for v2.1.x
38   Alan Cox		: support arbitrary stupid port mappings on the
39 			  68K Macintosh. Support >16bit I/O spaces
40   Paul Gortmaker	: add kmod support for auto-loading of the 8390
41 			  module by all drivers that require it.
42   Alan Cox		: Spinlocking work, added 'BUG_83C690'
43   Paul Gortmaker	: Separate out Tx timeout code from Tx path.
44   Paul Gortmaker	: Remove old unused single Tx buffer code.
45   Hayato Fujiwara	: Add m32r support.
46   Paul Gortmaker	: use skb_padto() instead of stack scratch area
47 
48   Sources:
49   The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
50 
51   */
52 
53 #include <linux/module.h>
54 #include <linux/kernel.h>
55 #include <linux/jiffies.h>
56 #include <linux/fs.h>
57 #include <linux/types.h>
58 #include <linux/string.h>
59 #include <linux/bitops.h>
60 #include <linux/uaccess.h>
61 #include <linux/io.h>
62 #include <asm/irq.h>
63 #include <linux/delay.h>
64 #include <linux/errno.h>
65 #include <linux/fcntl.h>
66 #include <linux/in.h>
67 #include <linux/interrupt.h>
68 #include <linux/init.h>
69 #include <linux/crc32.h>
70 
71 #include <linux/netdevice.h>
72 #include <linux/etherdevice.h>
73 
74 #define NS8390_CORE
75 #include "8390.h"
76 
77 #define BUG_83C690
78 
79 /* These are the operational function interfaces to board-specific
80    routines.
81 	void reset_8390(struct net_device *dev)
82 		Resets the board associated with DEV, including a hardware reset of
83 		the 8390.  This is only called when there is a transmit timeout, and
84 		it is always followed by 8390_init().
85 	void block_output(struct net_device *dev, int count, const unsigned char *buf,
86 					  int start_page)
87 		Write the COUNT bytes of BUF to the packet buffer at START_PAGE.  The
88 		"page" value uses the 8390's 256-byte pages.
	void get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
		Read the 4-byte, page-aligned 8390 header. *If* there is a
91 		subsequent read, it will be of the rest of the packet.
92 	void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
93 		Read COUNT bytes from the packet buffer into the skb data area. Start
94 		reading from RING_OFFSET, the address as the 8390 sees it.  This will always
95 		follow the read of the 8390 header.
96 */
97 #define ei_reset_8390 (ei_local->reset_8390)
98 #define ei_block_output (ei_local->block_output)
99 #define ei_block_input (ei_local->block_input)
100 #define ei_get_8390_hdr (ei_local->get_8390_hdr)
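
/* A minimal sketch (illustrative only, not built here) of how a hypothetical
   board driver might fill in these hooks before the generic code is used.
   The ei_device fields are the ones the macros above dereference; the my_*
   functions and MY_*_PG page numbers are made-up placeholders:

	struct ei_device *ei_local = netdev_priv(dev);

	ei_local->reset_8390    = my_reset_8390;
	ei_local->block_output  = my_block_output;
	ei_local->block_input   = my_block_input;
	ei_local->get_8390_hdr  = my_get_8390_hdr;
	ei_local->tx_start_page = MY_TX_START_PG;
	ei_local->rx_start_page = MY_RX_START_PG;
	ei_local->stop_page     = MY_STOP_PG;

   After that, the generic __ei_open()/__NS8390_init() below drive the chip
   entirely through these hooks. */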
101 
102 /* Index to functions. */
103 static void ei_tx_intr(struct net_device *dev);
104 static void ei_tx_err(struct net_device *dev);
105 static void ei_receive(struct net_device *dev);
106 static void ei_rx_overrun(struct net_device *dev);
107 
108 /* Routines generic to NS8390-based boards. */
109 static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
110 								int start_page);
111 static void do_set_multicast_list(struct net_device *dev);
112 static void __NS8390_init(struct net_device *dev, int startp);
113 
114 static unsigned version_printed;
115 static u32 msg_enable;
116 module_param(msg_enable, uint, 0444);
117 MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
118 
119 /*
120  *	SMP and the 8390 setup.
121  *
122  *	The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
123  *	a page register that controls bank and packet buffer access. We guard
124  *	this with ei_local->page_lock. Nobody should assume or set the page other
125  *	than zero when the lock is not held. Lock holders must restore page 0
 *	before unlocking. Even pure readers must take the lock, since they
 *	rely on the chip being left in page 0.
128  *
 *	To make life difficult the chip can also be very slow. We therefore can't
 *	just use spinlocks. For the longer critical sections we disable the irq
 *	the device sits on and hold the lock. We must hold the lock because there
 *	is a dual-processor case other than interrupts (get stats/set multicast
 *	list in parallel with each other and with transmit).
 *
 *	Note: in theory we could just disable the irq on the card, _but_ there is
 *	a latency on SMP irq delivery. So we can easily do "disable irq", "sync
 *	irqs", enter the lock, and still take a queued irq. So we waddle instead
 *	of flying.
138  *
 *	Finally, by special arrangement for the purpose of being generally
 *	annoying, the transmit function is called BH-atomic. That places
 *	restrictions on user-context callers, as disable_irq won't save
 *	them.
143  *
144  *	Additional explanation of problems with locking by Alan Cox:
145  *
146  *	"The author (me) didn't use spin_lock_irqsave because the slowness of the
147  *	card means that approach caused horrible problems like losing serial data
148  *	at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
149  *	chips with FPGA front ends.
150  *
151  *	Ok the logic behind the 8390 is very simple:
152  *
153  *	Things to know
154  *		- IRQ delivery is asynchronous to the PCI bus
155  *		- Blocking the local CPU IRQ via spin locks was too slow
156  *		- The chip has register windows needing locking work
157  *
158  *	So the path was once (I say once as people appear to have changed it
159  *	in the mean time and it now looks rather bogus if the changes to use
160  *	disable_irq_nosync_irqsave are disabling the local IRQ)
161  *
162  *
163  *		Take the page lock
164  *		Mask the IRQ on chip
165  *		Disable the IRQ (but not mask locally- someone seems to have
166  *			broken this with the lock validator stuff)
167  *			[This must be _nosync as the page lock may otherwise
168  *				deadlock us]
169  *		Drop the page lock and turn IRQs back on
170  *
171  *		At this point an existing IRQ may still be running but we can't
172  *		get a new one
173  *
174  *		Take the lock (so we know the IRQ has terminated) but don't mask
175  *	the IRQs on the processor
176  *		Set irqlock [for debug]
177  *
178  *		Transmit (slow as ****)
179  *
180  *		re-enable the IRQ
181  *
182  *
183  *	We have to use disable_irq because otherwise you will get delayed
184  *	interrupts on the APIC bus deadlocking the transmit path.
185  *
186  *	Quite hairy but the chip simply wasn't designed for SMP and you can't
187  *	even ACK an interrupt without risking corrupting other parallel
188  *	activities on the chip." [lkml, 25 Jul 2007]
189  */
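
/*
 *	For reference, the transmit-path locking described above, as
 *	implemented in __ei_start_xmit() below, condenses to roughly this
 *	(a sketch only -- see the real function for the error paths):
 *
 *		spin_lock_irqsave(&ei_local->page_lock, flags);
 *		ei_outb_p(0x00, e8390_base + EN0_IMR);	 (mask chip IRQs)
 *		spin_unlock_irqrestore(&ei_local->page_lock, flags);
 *
 *		disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
 *		spin_lock(&ei_local->page_lock);	 (in-flight IRQ has finished)
 *		ei_local->irqlock = 1;
 *		... upload the packet and trigger the send (slow) ...
 *		ei_local->irqlock = 0;
 *		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);	 (unmask chip IRQs)
 *		spin_unlock(&ei_local->page_lock);
 *		enable_irq_lockdep_irqrestore(dev->irq, &flags);
 */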
190 
191 
192 
193 /**
194  * ei_open - Open/initialize the board.
195  * @dev: network device to initialize
196  *
197  * This routine goes all-out, setting everything
198  * up anew at each open, even though many of these registers should only
199  * need to be set once at boot.
200  */
201 static int __ei_open(struct net_device *dev)
202 {
203 	unsigned long flags;
204 	struct ei_device *ei_local = netdev_priv(dev);
205 
206 	if (dev->watchdog_timeo <= 0)
207 		dev->watchdog_timeo = TX_TIMEOUT;
208 
209 	/*
210 	 *	Grab the page lock so we own the register set, then call
211 	 *	the init function.
212 	 */
213 
214 	spin_lock_irqsave(&ei_local->page_lock, flags);
215 	__NS8390_init(dev, 1);
	/* Set the flag before we drop the lock. That way the IRQ arrives
	   after it's set and we get no silly warnings */
218 	netif_start_queue(dev);
219 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
220 	ei_local->irqlock = 0;
221 	return 0;
222 }
223 
224 /**
225  * ei_close - shut down network device
226  * @dev: network device to close
227  *
228  * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
229  */
230 static int __ei_close(struct net_device *dev)
231 {
232 	struct ei_device *ei_local = netdev_priv(dev);
233 	unsigned long flags;
234 
235 	/*
236 	 *	Hold the page lock during close
237 	 */
238 
239 	spin_lock_irqsave(&ei_local->page_lock, flags);
240 	__NS8390_init(dev, 0);
241 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
242 	netif_stop_queue(dev);
243 	return 0;
244 }
245 
246 /**
247  * ei_tx_timeout - handle transmit time out condition
248  * @dev: network device which has apparently fallen asleep
249  *
 * Called by the kernel when the device never acknowledges that a transmit
 * has completed (or failed) - i.e. it never posted a Tx-related interrupt.
252  */
253 
254 static void __ei_tx_timeout(struct net_device *dev, unsigned int txqueue)
255 {
256 	unsigned long e8390_base = dev->base_addr;
257 	struct ei_device *ei_local = netdev_priv(dev);
258 	int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
259 	unsigned long flags;
260 
261 	dev->stats.tx_errors++;
262 
263 	spin_lock_irqsave(&ei_local->page_lock, flags);
264 	txsr = ei_inb(e8390_base+EN0_TSR);
265 	isr = ei_inb(e8390_base+EN0_ISR);
266 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
267 
268 	netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
269 		   (txsr & ENTSR_ABT) ? "excess collisions." :
270 		   (isr) ? "lost interrupt?" : "cable problem?",
271 		   txsr, isr, tickssofar);
272 
273 	if (!isr && !dev->stats.tx_packets) {
274 		/* The 8390 probably hasn't gotten on the cable yet. */
275 		ei_local->interface_num ^= 1;   /* Try a different xcvr.  */
276 	}
277 
278 	/* Ugly but a reset can be slow, yet must be protected */
279 
280 	disable_irq_nosync_lockdep(dev->irq);
281 	spin_lock(&ei_local->page_lock);
282 
283 	/* Try to restart the card.  Perhaps the user has fixed something. */
284 	ei_reset_8390(dev);
285 	__NS8390_init(dev, 1);
286 
287 	spin_unlock(&ei_local->page_lock);
288 	enable_irq_lockdep(dev->irq);
289 	netif_wake_queue(dev);
290 }
291 
292 /**
293  * ei_start_xmit - begin packet transmission
294  * @skb: packet to be sent
295  * @dev: network device to which packet is sent
296  *
297  * Sends a packet to an 8390 network device.
298  */
299 
300 static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
301 				   struct net_device *dev)
302 {
303 	unsigned long e8390_base = dev->base_addr;
304 	struct ei_device *ei_local = netdev_priv(dev);
305 	int send_length = skb->len, output_page;
306 	unsigned long flags;
307 	char buf[ETH_ZLEN];
308 	char *data = skb->data;
309 
310 	if (skb->len < ETH_ZLEN) {
311 		memset(buf, 0, ETH_ZLEN);	/* more efficient than doing just the needed bits */
312 		memcpy(buf, data, skb->len);
313 		send_length = ETH_ZLEN;
314 		data = buf;
315 	}
316 
	/* Mask interrupts from the ethercard.
	   SMP: We have to grab the lock here, otherwise the IRQ handler
	   on another CPU can flip the register window and race the IRQ mask
	   write. Without the lock we end up trashing the mcast filter instead
	   of disabling irqs. */
321 
322 	spin_lock_irqsave(&ei_local->page_lock, flags);
323 	ei_outb_p(0x00, e8390_base + EN0_IMR);
324 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
325 
326 
327 	/*
328 	 *	Slow phase with lock held.
329 	 */
330 
331 	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
332 
333 	spin_lock(&ei_local->page_lock);
334 
335 	ei_local->irqlock = 1;
336 
337 	/*
338 	 * We have two Tx slots available for use. Find the first free
339 	 * slot, and then perform some sanity checks. With two Tx bufs,
340 	 * you get very close to transmitting back-to-back packets. With
341 	 * only one Tx buf, the transmitter sits idle while you reload the
342 	 * card, leaving a substantial gap between each transmitted packet.
343 	 */
344 
345 	if (ei_local->tx1 == 0) {
346 		output_page = ei_local->tx_start_page;
347 		ei_local->tx1 = send_length;
348 		if ((netif_msg_tx_queued(ei_local)) &&
349 		    ei_local->tx2 > 0)
350 			netdev_dbg(dev,
351 				   "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
352 				   ei_local->tx2, ei_local->lasttx, ei_local->txing);
353 	} else if (ei_local->tx2 == 0) {
354 		output_page = ei_local->tx_start_page + TX_PAGES/2;
355 		ei_local->tx2 = send_length;
356 		if ((netif_msg_tx_queued(ei_local)) &&
357 		    ei_local->tx1 > 0)
358 			netdev_dbg(dev,
359 				   "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
360 				   ei_local->tx1, ei_local->lasttx, ei_local->txing);
361 	} else {			/* We should never get here. */
362 		netif_dbg(ei_local, tx_err, dev,
363 			  "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
364 			  ei_local->tx1, ei_local->tx2, ei_local->lasttx);
365 		ei_local->irqlock = 0;
366 		netif_stop_queue(dev);
367 		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
368 		spin_unlock(&ei_local->page_lock);
369 		enable_irq_lockdep_irqrestore(dev->irq, &flags);
370 		dev->stats.tx_errors++;
371 		return NETDEV_TX_BUSY;
372 	}
373 
374 	/*
375 	 * Okay, now upload the packet and trigger a send if the transmitter
376 	 * isn't already sending. If it is busy, the interrupt handler will
377 	 * trigger the send later, upon receiving a Tx done interrupt.
378 	 */
379 
380 	ei_block_output(dev, send_length, data, output_page);
381 
382 	if (!ei_local->txing) {
383 		ei_local->txing = 1;
384 		NS8390_trigger_send(dev, send_length, output_page);
385 		if (output_page == ei_local->tx_start_page) {
386 			ei_local->tx1 = -1;
387 			ei_local->lasttx = -1;
388 		} else {
389 			ei_local->tx2 = -1;
390 			ei_local->lasttx = -2;
391 		}
392 	} else
393 		ei_local->txqueue++;
394 
395 	if (ei_local->tx1 && ei_local->tx2)
396 		netif_stop_queue(dev);
397 	else
398 		netif_start_queue(dev);
399 
400 	/* Turn 8390 interrupts back on. */
401 	ei_local->irqlock = 0;
402 	ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
403 
404 	spin_unlock(&ei_local->page_lock);
405 	enable_irq_lockdep_irqrestore(dev->irq, &flags);
406 	skb_tx_timestamp(skb);
407 	dev_consume_skb_any(skb);
408 	dev->stats.tx_bytes += send_length;
409 
410 	return NETDEV_TX_OK;
411 }
412 
413 /**
414  * ei_interrupt - handle the interrupts from an 8390
415  * @irq: interrupt number
416  * @dev_id: a pointer to the net_device
417  *
418  * Handle the ether interface interrupts. We pull packets from
 * the 8390 via the card-specific functions and fire them at the networking
 * stack. We also handle transmit completions and wake the transmit path if
 * necessary, update the counters, and do other housekeeping as needed.
423  */
424 
425 static irqreturn_t __ei_interrupt(int irq, void *dev_id)
426 {
427 	struct net_device *dev = dev_id;
428 	unsigned long e8390_base = dev->base_addr;
429 	int interrupts, nr_serviced = 0;
430 	struct ei_device *ei_local = netdev_priv(dev);
431 
432 	/*
433 	 *	Protect the irq test too.
434 	 */
435 
436 	spin_lock(&ei_local->page_lock);
437 
438 	if (ei_local->irqlock) {
439 		/*
440 		 * This might just be an interrupt for a PCI device sharing
441 		 * this line
442 		 */
443 		netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
444 			   ei_inb_p(e8390_base + EN0_ISR),
445 			   ei_inb_p(e8390_base + EN0_IMR));
446 		spin_unlock(&ei_local->page_lock);
447 		return IRQ_NONE;
448 	}
449 
450 	/* Change to page 0 and read the intr status reg. */
451 	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
452 	netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
453 		  ei_inb_p(e8390_base + EN0_ISR));
454 
455 	/* !!Assumption!! -- we stay in page 0.	 Don't break this. */
456 	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
457 	       ++nr_serviced < MAX_SERVICE) {
458 		if (!netif_running(dev)) {
459 			netdev_warn(dev, "interrupt from stopped card\n");
460 			/* rmk - acknowledge the interrupts */
461 			ei_outb_p(interrupts, e8390_base + EN0_ISR);
462 			interrupts = 0;
463 			break;
464 		}
465 		if (interrupts & ENISR_OVER)
466 			ei_rx_overrun(dev);
467 		else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
468 			/* Got a good (?) packet. */
469 			ei_receive(dev);
470 		}
471 		/* Push the next to-transmit packet through. */
472 		if (interrupts & ENISR_TX)
473 			ei_tx_intr(dev);
474 		else if (interrupts & ENISR_TX_ERR)
475 			ei_tx_err(dev);
476 
477 		if (interrupts & ENISR_COUNTERS) {
478 			dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
479 			dev->stats.rx_crc_errors   += ei_inb_p(e8390_base + EN0_COUNTER1);
480 			dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2);
481 			ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
482 		}
483 
484 		/* Ignore any RDC interrupts that make it back to here. */
485 		if (interrupts & ENISR_RDC)
486 			ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);
487 
488 		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
489 	}
490 
491 	if (interrupts && (netif_msg_intr(ei_local))) {
492 		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
493 		if (nr_serviced >= MAX_SERVICE) {
494 			/* 0xFF is valid for a card removal */
495 			if (interrupts != 0xFF)
496 				netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
497 					    interrupts);
498 			ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
499 		} else {
500 			netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
501 			ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
502 		}
503 	}
504 	spin_unlock(&ei_local->page_lock);
505 	return IRQ_RETVAL(nr_serviced > 0);
506 }
507 
508 #ifdef CONFIG_NET_POLL_CONTROLLER
509 static void __ei_poll(struct net_device *dev)
510 {
511 	disable_irq(dev->irq);
512 	__ei_interrupt(dev->irq, dev);
513 	enable_irq(dev->irq);
514 }
515 #endif
516 
517 /**
518  * ei_tx_err - handle transmitter error
519  * @dev: network device which threw the exception
520  *
521  * A transmitter error has happened. Most likely excess collisions (which
522  * is a fairly normal condition). If the error is one where the Tx will
 * have been aborted, we try to send another one right away, instead of
 * letting the failed packet sit and collect dust in the Tx buffer. This
 * is a much better solution as it avoids kernel-based Tx timeouts and
526  * an unnecessary card reset.
527  *
528  * Called with lock held.
529  */
530 
531 static void ei_tx_err(struct net_device *dev)
532 {
533 	unsigned long e8390_base = dev->base_addr;
534 	/* ei_local is used on some platforms via the EI_SHIFT macro */
535 	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
536 	unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
537 	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
538 
539 #ifdef VERBOSE_ERROR_DUMP
540 	netdev_dbg(dev, "transmitter error (%#2x):", txsr);
541 	if (txsr & ENTSR_ABT)
542 		pr_cont(" excess-collisions ");
543 	if (txsr & ENTSR_ND)
544 		pr_cont(" non-deferral ");
545 	if (txsr & ENTSR_CRS)
546 		pr_cont(" lost-carrier ");
547 	if (txsr & ENTSR_FU)
548 		pr_cont(" FIFO-underrun ");
549 	if (txsr & ENTSR_CDH)
550 		pr_cont(" lost-heartbeat ");
551 	pr_cont("\n");
552 #endif
553 
554 	ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
555 
556 	if (tx_was_aborted)
557 		ei_tx_intr(dev);
558 	else {
559 		dev->stats.tx_errors++;
560 		if (txsr & ENTSR_CRS)
561 			dev->stats.tx_carrier_errors++;
562 		if (txsr & ENTSR_CDH)
563 			dev->stats.tx_heartbeat_errors++;
564 		if (txsr & ENTSR_OWC)
565 			dev->stats.tx_window_errors++;
566 	}
567 }
568 
569 /**
570  * ei_tx_intr - transmit interrupt handler
571  * @dev: network device for which tx intr is handled
572  *
573  * We have finished a transmit: check for errors and then trigger the next
574  * packet to be sent. Called with lock held.
575  */
576 
577 static void ei_tx_intr(struct net_device *dev)
578 {
579 	unsigned long e8390_base = dev->base_addr;
580 	struct ei_device *ei_local = netdev_priv(dev);
581 	int status = ei_inb(e8390_base + EN0_TSR);
582 
583 	ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
584 
585 	/*
586 	 * There are two Tx buffers, see which one finished, and trigger
587 	 * the send of another one if it exists.
588 	 */
589 	ei_local->txqueue--;
590 
591 	if (ei_local->tx1 < 0) {
592 		if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
593 			pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
594 			       ei_local->name, ei_local->lasttx, ei_local->tx1);
595 		ei_local->tx1 = 0;
596 		if (ei_local->tx2 > 0) {
597 			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + TX_PAGES/2);
599 			netif_trans_update(dev);
600 			ei_local->tx2 = -1;
601 			ei_local->lasttx = 2;
602 		} else {
603 			ei_local->lasttx = 20;
604 			ei_local->txing = 0;
605 		}
606 	} else if (ei_local->tx2 < 0) {
607 		if (ei_local->lasttx != 2  &&  ei_local->lasttx != -2)
608 			pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
609 			       ei_local->name, ei_local->lasttx, ei_local->tx2);
610 		ei_local->tx2 = 0;
611 		if (ei_local->tx1 > 0) {
612 			ei_local->txing = 1;
613 			NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
614 			netif_trans_update(dev);
615 			ei_local->tx1 = -1;
616 			ei_local->lasttx = 1;
617 		} else {
618 			ei_local->lasttx = 10;
619 			ei_local->txing = 0;
620 		}
621 	} /* else
622 		netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
623 			    ei_local->lasttx);
624 */
625 
626 	/* Minimize Tx latency: update the statistics after we restart TXing. */
627 	if (status & ENTSR_COL)
628 		dev->stats.collisions++;
629 	if (status & ENTSR_PTX)
630 		dev->stats.tx_packets++;
631 	else {
632 		dev->stats.tx_errors++;
633 		if (status & ENTSR_ABT) {
634 			dev->stats.tx_aborted_errors++;
635 			dev->stats.collisions += 16;
636 		}
637 		if (status & ENTSR_CRS)
638 			dev->stats.tx_carrier_errors++;
639 		if (status & ENTSR_FU)
640 			dev->stats.tx_fifo_errors++;
641 		if (status & ENTSR_CDH)
642 			dev->stats.tx_heartbeat_errors++;
643 		if (status & ENTSR_OWC)
644 			dev->stats.tx_window_errors++;
645 	}
646 	netif_wake_queue(dev);
647 }
648 
649 /**
650  * ei_receive - receive some packets
651  * @dev: network device with which receive will be run
652  *
653  * We have a good packet(s), get it/them out of the buffers.
654  * Called with lock held.
655  */
656 
657 static void ei_receive(struct net_device *dev)
658 {
659 	unsigned long e8390_base = dev->base_addr;
660 	struct ei_device *ei_local = netdev_priv(dev);
661 	unsigned char rxing_page, this_frame, next_frame;
662 	unsigned short current_offset;
663 	int rx_pkt_count = 0;
664 	struct e8390_pkt_hdr rx_frame;
665 	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
666 
667 	while (++rx_pkt_count < 10) {
668 		int pkt_len, pkt_stat;
669 
670 		/* Get the rx page (incoming packet pointer). */
671 		ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
672 		rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
673 		ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
674 
675 		/* Remove one frame from the ring.  Boundary is always a page behind. */
676 		this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
677 		if (this_frame >= ei_local->stop_page)
678 			this_frame = ei_local->rx_start_page;
679 
		/* Someday we'll omit the previous, iff we never get this message.
		   (There is at least one clone that is claimed to have a problem.)
682 
683 		   Keep quiet if it looks like a card removal. One problem here
684 		   is that some clones crash in roughly the same way.
685 		 */
686 		if ((netif_msg_rx_status(ei_local)) &&
687 		    this_frame != ei_local->current_page &&
688 		    (this_frame != 0x0 || rxing_page != 0xFF))
689 			netdev_err(dev,
690 				   "mismatched read page pointers %2x vs %2x\n",
691 				   this_frame, ei_local->current_page);
692 
693 		if (this_frame == rxing_page)	/* Read all the frames? */
694 			break;				/* Done for now */
695 
696 		current_offset = this_frame << 8;
697 		ei_get_8390_hdr(dev, &rx_frame, this_frame);
698 
699 		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
700 		pkt_stat = rx_frame.status;
701 
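		/* The frame occupies its header page plus however many extra
		   256-byte pages the total byte count spills into.  For example,
		   a 1514-byte frame has rx_frame.count == 1518, so
		   (pkt_len+4)>>8 == 5 and the frame spans pages
		   this_frame .. this_frame+5, giving next_frame = this_frame+6. */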
702 		next_frame = this_frame + 1 + ((pkt_len+4)>>8);
703 
		/* Check for the bogosity warned of in the 3c503 book: the status
		   byte is never written.  This happened a lot during testing!
		   This code should be cleaned up someday. */
707 		if (rx_frame.next != next_frame &&
708 		    rx_frame.next != next_frame + 1 &&
709 		    rx_frame.next != next_frame - num_rx_pages &&
710 		    rx_frame.next != next_frame + 1 - num_rx_pages) {
711 			ei_local->current_page = rxing_page;
712 			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
713 			dev->stats.rx_errors++;
714 			continue;
715 		}
716 
717 		if (pkt_len < 60  ||  pkt_len > 1518) {
718 			netif_dbg(ei_local, rx_status, dev,
719 				  "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
720 				  rx_frame.count, rx_frame.status,
721 				  rx_frame.next);
722 			dev->stats.rx_errors++;
723 			dev->stats.rx_length_errors++;
724 		} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
725 			struct sk_buff *skb;
726 
727 			skb = netdev_alloc_skb(dev, pkt_len + 2);
728 			if (skb == NULL) {
729 				netif_err(ei_local, rx_err, dev,
730 					  "Couldn't allocate a sk_buff of size %d\n",
731 					  pkt_len);
732 				dev->stats.rx_dropped++;
733 				break;
734 			} else {
735 				skb_reserve(skb, 2);	/* IP headers on 16 byte boundaries */
736 				skb_put(skb, pkt_len);	/* Make room */
737 				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
738 				skb->protocol = eth_type_trans(skb, dev);
739 				if (!skb_defer_rx_timestamp(skb))
740 					netif_rx(skb);
741 				dev->stats.rx_packets++;
742 				dev->stats.rx_bytes += pkt_len;
743 				if (pkt_stat & ENRSR_PHY)
744 					dev->stats.multicast++;
745 			}
746 		} else {
747 			netif_err(ei_local, rx_err, dev,
748 				  "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
749 				  rx_frame.status, rx_frame.next,
750 				  rx_frame.count);
751 			dev->stats.rx_errors++;
752 			/* NB: The NIC counts CRC, frame and missed errors. */
753 			if (pkt_stat & ENRSR_FO)
754 				dev->stats.rx_fifo_errors++;
755 		}
756 		next_frame = rx_frame.next;
757 
758 		/* This _should_ never happen: it's here for avoiding bad clones. */
759 		if (next_frame >= ei_local->stop_page) {
760 			netdev_notice(dev, "next frame inconsistency, %#2x\n",
761 				      next_frame);
762 			next_frame = ei_local->rx_start_page;
763 		}
764 		ei_local->current_page = next_frame;
765 		ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
766 	}
767 
	/* We used to also ack ENISR_OVER here, but that would sometimes mask
	   a real overrun, leaving the 8390 in a stopped state with the receiver off. */
770 	ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
771 }
772 
773 /**
774  * ei_rx_overrun - handle receiver overrun
775  * @dev: network device which threw exception
776  *
777  * We have a receiver overrun: we have to kick the 8390 to get it started
778  * again. Problem is that you have to kick it exactly as NS prescribes in
779  * the updated datasheets, or "the NIC may act in an unpredictable manner."
780  * This includes causing "the NIC to defer indefinitely when it is stopped
781  * on a busy network."  Ugh.
782  * Called with lock held. Don't call this with the interrupts off or your
783  * computer will hate you - it takes 10ms or so.
784  */
785 
786 static void ei_rx_overrun(struct net_device *dev)
787 {
788 	unsigned long e8390_base = dev->base_addr;
789 	unsigned char was_txing, must_resend = 0;
790 	/* ei_local is used on some platforms via the EI_SHIFT macro */
791 	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
792 
793 	/*
794 	 * Record whether a Tx was in progress and then issue the
795 	 * stop command.
796 	 */
797 	was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
798 	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
799 
800 	netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
801 	dev->stats.rx_over_errors++;
802 
803 	/*
804 	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
805 	 * Early datasheets said to poll the reset bit, but now they say that
806 	 * it "is not a reliable indicator and subsequently should be ignored."
807 	 * We wait at least 10ms.
808 	 */
809 
810 	mdelay(10);
811 
812 	/*
813 	 * Reset RBCR[01] back to zero as per magic incantation.
814 	 */
815 	ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
816 	ei_outb_p(0x00, e8390_base+EN0_RCNTHI);
817 
818 	/*
819 	 * See if any Tx was interrupted or not. According to NS, this
820 	 * step is vital, and skipping it will cause no end of havoc.
821 	 */
822 
823 	if (was_txing) {
824 		unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
825 		if (!tx_completed)
826 			must_resend = 1;
827 	}
828 
829 	/*
830 	 * Have to enter loopback mode and then restart the NIC before
831 	 * you are allowed to slurp packets up off the ring.
832 	 */
833 	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
834 	ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
835 
836 	/*
837 	 * Clear the Rx ring of all the debris, and ack the interrupt.
838 	 */
839 	ei_receive(dev);
840 	ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);
841 
842 	/*
843 	 * Leave loopback mode, and resend any packet that got stopped.
844 	 */
845 	ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
846 	if (must_resend)
847 		ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
848 }
849 
850 /*
851  *	Collect the stats. This is called unlocked and from several contexts.
852  */
853 
854 static struct net_device_stats *__ei_get_stats(struct net_device *dev)
855 {
856 	unsigned long ioaddr = dev->base_addr;
857 	struct ei_device *ei_local = netdev_priv(dev);
858 	unsigned long flags;
859 
860 	/* If the card is stopped, just return the present stats. */
861 	if (!netif_running(dev))
862 		return &dev->stats;
863 
864 	spin_lock_irqsave(&ei_local->page_lock, flags);
865 	/* Read the counter registers, assuming we are in page 0. */
866 	dev->stats.rx_frame_errors  += ei_inb_p(ioaddr + EN0_COUNTER0);
867 	dev->stats.rx_crc_errors    += ei_inb_p(ioaddr + EN0_COUNTER1);
868 	dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2);
869 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
870 
871 	return &dev->stats;
872 }
873 
874 /*
875  * Form the 64 bit 8390 multicast table from the linked list of addresses
876  * associated with this dev structure.
877  */
878 
879 static inline void make_mc_bits(u8 *bits, struct net_device *dev)
880 {
881 	struct netdev_hw_addr *ha;
882 
883 	netdev_for_each_mc_addr(ha, dev) {
884 		u32 crc = ether_crc(ETH_ALEN, ha->addr);
885 		/*
886 		 * The 8390 uses the 6 most significant bits of the
887 		 * CRC to index the multicast table.
888 		 */
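		/*
		 * Worked example: if the top six CRC bits are 101110b, then
		 * crc>>29 == 101b == 5 and (crc>>26)&7 == 110b == 6, so bit 6
		 * of bits[5] is set below.
		 */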
889 		bits[crc>>29] |= (1<<((crc>>26)&7));
890 	}
891 }
892 
893 /**
894  * do_set_multicast_list - set/clear multicast filter
895  * @dev: net device for which multicast filter is adjusted
896  *
897  *	Set or clear the multicast filter for this adaptor. May be called
898  *	from a BH in 2.1.x. Must be called with lock held.
899  */
900 
901 static void do_set_multicast_list(struct net_device *dev)
902 {
903 	unsigned long e8390_base = dev->base_addr;
904 	int i;
905 	struct ei_device *ei_local = netdev_priv(dev);
906 
907 	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
908 		memset(ei_local->mcfilter, 0, 8);
909 		if (!netdev_mc_empty(dev))
910 			make_mc_bits(ei_local->mcfilter, dev);
911 	} else
912 		memset(ei_local->mcfilter, 0xFF, 8);	/* mcast set to accept-all */
913 
914 	/*
915 	 * DP8390 manuals don't specify any magic sequence for altering
916 	 * the multicast regs on an already running card. To be safe, we
917 	 * ensure multicast mode is off prior to loading up the new hash
918 	 * table. If this proves to be not enough, we can always resort
919 	 * to stopping the NIC, loading the table and then restarting.
920 	 *
921 	 * Bug Alert!  The MC regs on the SMC 83C690 (SMC Elite and SMC
922 	 * Elite16) appear to be write-only. The NS 8390 data sheet lists
923 	 * them as r/w so this is a bug.  The SMC 83C790 (SMC Ultra and
924 	 * Ultra32 EISA) appears to have this bug fixed.
925 	 */
926 
927 	if (netif_running(dev))
928 		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
929 	ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
930 	for (i = 0; i < 8; i++) {
931 		ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
932 #ifndef BUG_83C690
933 		if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i])
			netdev_err(dev, "Multicast filter read/write mismatch %d\n",
935 				   i);
936 #endif
937 	}
938 	ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
939 
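	/*
	 * RXCR bit decode for reference (per the DP8390 RCR definitions):
	 * 0x10 = PRO (accept all physical addresses), 0x08 = AM (accept
	 * multicast frames matching the hash filter).  So 0x18 below means
	 * promiscuous plus multicast, and 0x08 means multicast only.
	 */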
940 	if (dev->flags&IFF_PROMISC)
941 		ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
942 	else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
943 		ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
944 	else
945 		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
946 }
947 
948 /*
 *	Called without the lock held. This is invoked from user context and may
 *	run in parallel with just about everything else. It's also fairly quick
 *	and not called too often. Must protect against both bh and irq users.
952  */
953 
954 static void __ei_set_multicast_list(struct net_device *dev)
955 {
956 	unsigned long flags;
957 	struct ei_device *ei_local = netdev_priv(dev);
958 
959 	spin_lock_irqsave(&ei_local->page_lock, flags);
960 	do_set_multicast_list(dev);
961 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
962 }
963 
964 /**
965  * ethdev_setup - init rest of 8390 device struct
966  * @dev: network device structure to init
967  *
968  * Initialize the rest of the 8390 device structure.  Do NOT __init
969  * this, as it is used by 8390 based modular drivers too.
970  */
971 
972 static void ethdev_setup(struct net_device *dev)
973 {
974 	struct ei_device *ei_local = netdev_priv(dev);
975 
976 	if ((msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
977 		pr_info("%s", version);
978 
979 	ether_setup(dev);
980 
981 	spin_lock_init(&ei_local->page_lock);
982 
983 	ei_local->msg_enable = msg_enable;
984 }
985 
986 /**
987  * alloc_ei_netdev - alloc_etherdev counterpart for 8390
988  * @size: extra bytes to allocate
989  *
990  * Allocate 8390-specific net_device.
991  */
992 static struct net_device *____alloc_ei_netdev(int size)
993 {
994 	return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
995 			    NET_NAME_UNKNOWN, ethdev_setup);
996 }
997 
998 
999 
1000 
1001 /* This page of functions should be 8390 generic */
1002 /* Follow National Semi's recommendations for initializing the "NIC". */
1003 
1004 /**
1005  * NS8390_init - initialize 8390 hardware
1006  * @dev: network device to initialize
 * @startp: boolean; non-zero value to initiate chip processing
1008  *
1009  *	Must be called with lock held.
1010  */
1011 
1012 static void __NS8390_init(struct net_device *dev, int startp)
1013 {
1014 	unsigned long e8390_base = dev->base_addr;
1015 	struct ei_device *ei_local = netdev_priv(dev);
1016 	int i;
1017 	int endcfg = ei_local->word16
1018 	    ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
1019 	    : 0x48;
1020 
1021 	if (sizeof(struct e8390_pkt_hdr) != 4)
1022 		panic("8390.c: header struct mispacked\n");
1023 	/* Follow National Semi's recommendations for initing the DP83902. */
1024 	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
1025 	ei_outb_p(endcfg, e8390_base + EN0_DCFG);	/* 0x48 or 0x49 */
1026 	/* Clear the remote byte count registers. */
1027 	ei_outb_p(0x00,  e8390_base + EN0_RCNTLO);
1028 	ei_outb_p(0x00,  e8390_base + EN0_RCNTHI);
	/* Set to monitor and loopback mode -- this is vital! */
1030 	ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
1031 	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
1032 	/* Set the transmit page and receive ring. */
1033 	ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
1034 	ei_local->tx1 = ei_local->tx2 = 0;
1035 	ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
	ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY);	/* 3c503 says 0x3f, NS says 0x26 */
1037 	ei_local->current_page = ei_local->rx_start_page;		/* assert boundary+1 */
1038 	ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
1039 	/* Clear the pending interrupts and mask. */
1040 	ei_outb_p(0xFF, e8390_base + EN0_ISR);
1041 	ei_outb_p(0x00,  e8390_base + EN0_IMR);
1042 
1043 	/* Copy the station address into the DS8390 registers. */
1044 
1045 	ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
1046 	for (i = 0; i < 6; i++) {
1047 		ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
1048 		if ((netif_msg_probe(ei_local)) &&
1049 		    ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
1050 			netdev_err(dev,
				   "Hw. address read/write mismatch %d\n", i);
1052 	}
1053 
1054 	ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
1055 	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
1056 
1057 	ei_local->tx1 = ei_local->tx2 = 0;
1058 	ei_local->txing = 0;
1059 
1060 	if (startp) {
1061 		ei_outb_p(0xff,  e8390_base + EN0_ISR);
1062 		ei_outb_p(ENISR_ALL,  e8390_base + EN0_IMR);
1063 		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
1064 		ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
1065 		/* 3c503 TechMan says rxconfig only after the NIC is started. */
1066 		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on,  */
1067 		do_set_multicast_list(dev);	/* (re)load the mcast table */
1068 	}
1069 }
1070 
1071 /* Trigger a transmit start, assuming the length is valid.
1072    Always called with the page lock held */
1073 
1074 static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1075 								int start_page)
1076 {
1077 	unsigned long e8390_base = dev->base_addr;
	/* ei_local is used on some platforms via the EI_SHIFT macro */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
1079 
1080 	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
1081 
1082 	if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) {
1083 		netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
1084 		return;
1085 	}
1086 	ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
1087 	ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
1088 	ei_outb_p(start_page, e8390_base + EN0_TPSR);
1089 	ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
1090 }
1091