/* 8390.c: A general NS8390 ethernet driver core for linux. */
/*
	Written 1992-94 by Donald Becker.

	Copyright 1993 United States Government as represented by the
	Director, National Security Agency.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


  This is the chip-specific code for many 8390-based ethernet adaptors.
  This is not a complete driver; it must be combined with board-specific
  code such as ne.c, wd.c, 3c503.c, etc.

  Seeing that at least eight drivers use this code (not counting the
  PCMCIA ones), it is easy to break some card with what seems like a
  simple, innocent change. Please contact me or Donald if you think
  you have found something that needs changing. -- PG


  Changelog:

  Paul Gortmaker	: remove set_bit lock, other cleanups.
  Paul Gortmaker	: add ei_get_8390_hdr() so we can pass skb's to
			  ei_block_input() for eth_io_copy_and_sum().
  Paul Gortmaker	: exchange static int ei_pingpong for a #define,
			  also add better Tx error handling.
  Paul Gortmaker	: rewrite Rx overrun handling as per NS specs.
  Alexey Kuznetsov	: use the 8390's six bit hash multicast filter.
  Paul Gortmaker	: tweak ANK's above multicast changes a bit.
  Paul Gortmaker	: update packet statistics for v2.1.x
  Alan Cox		: support arbitrary stupid port mappings on the
			  68K Macintosh. Support >16bit I/O spaces
  Paul Gortmaker	: add kmod support for auto-loading of the 8390
			  module by all drivers that require it.
  Alan Cox		: Spinlocking work, added 'BUG_83C690'
  Paul Gortmaker	: Separate out Tx timeout code from Tx path.
  Paul Gortmaker	: Remove old unused single Tx buffer code.
  Hayato Fujiwara	: Add m32r support.
  Paul Gortmaker	: use skb_padto() instead of stack scratch area

  Sources:
  The National Semiconductor LAN Databook, and the 3Com 3c503 databook.

  */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <asm/system.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/crc32.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#define NS8390_CORE
#include "8390.h"

#define BUG_83C690

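/*
 * This file is not built standalone: board drivers (ne.c, wd.c, etc.)
 * define their port accessor flavour and then include this core
 * directly. A rough sketch (the macro bodies are illustrative, not
 * lifted from any one driver):
 *
 *	#define ei_inb(p)	inb(p)
 *	#define ei_outb(v, p)	outb(v, p)
 *	#include "lib8390.c"
 */
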
/* These are the operational function interfaces to board-specific
   routines.
	void reset_8390(struct net_device *dev)
		Resets the board associated with DEV, including a hardware reset of
		the 8390.  This is only called when there is a transmit timeout, and
		it is always followed by 8390_init().
	void block_output(struct net_device *dev, int count, const unsigned char *buf,
					  int start_page)
		Write the COUNT bytes of BUF to the packet buffer at START_PAGE.  The
		"page" value uses the 8390's 256-byte pages.
	void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
		Read the 4 byte, page aligned 8390 header. *If* there is a
		subsequent read, it will be of the rest of the packet.
	void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
		Read COUNT bytes from the packet buffer into the skb data area. Start
		reading from RING_OFFSET, the address as the 8390 sees it.  This will always
		follow the read of the 8390 header.
*/
#define ei_reset_8390 (ei_local->reset_8390)
#define ei_block_output (ei_local->block_output)
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)

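/*
 * Illustrative sketch (not from any in-tree board driver) of wiring the
 * four hooks above into ei_device during probe; the mmio_* names are
 * hypothetical placeholders for a driver's bus-specific helpers:
 *
 *	struct ei_device *ei_local = netdev_priv(dev);
 *
 *	ei_local->reset_8390   = mmio_reset_8390;
 *	ei_local->block_input  = mmio_block_input;
 *	ei_local->block_output = mmio_block_output;
 *	ei_local->get_8390_hdr = mmio_get_8390_hdr;
 */
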
/* use 0 for production, 1 for verification, >2 for debug */
#ifndef ei_debug
int ei_debug = 1;
#endif

/* Index to functions. */
static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
static void ei_receive(struct net_device *dev);
static void ei_rx_overrun(struct net_device *dev);

/* Routines generic to NS8390-based boards. */
static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
								int start_page);
static void do_set_multicast_list(struct net_device *dev);
static void __NS8390_init(struct net_device *dev, int startp);

/*
 *	SMP and the 8390 setup.
 *
 *	The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
 *	a page register that controls bank and packet buffer access. We guard
 *	this with ei_local->page_lock. Nobody should assume or set a page other
 *	than zero while the lock is not held. Lock holders must restore page 0
 *	before unlocking. Even pure readers must take the lock to guarantee
 *	they see page 0.
 *
 *	To make life difficult, the chip can also be very slow. We therefore can't
 *	just use spinlocks. For the longer lockups we disable the irq the device
 *	sits on and hold the lock. We must hold the lock because there is a dual
 *	processor case other than interrupts (get stats/set multicast list in
 *	parallel with each other and transmit).
 *
 *	Note: in theory we can just disable the irq on the card _but_ there is
 *	a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
 *	enter lock, take the queued irq. So we waddle instead of flying.
 *
 *	Finally, by special arrangement for the purpose of being generally
 *	annoying, the transmit function is called bh atomic. That places
 *	restrictions on the user context callers as disable_irq won't save
 *	them.
 *
 *	Additional explanation of problems with locking by Alan Cox:
 *
 *	"The author (me) didn't use spin_lock_irqsave because the slowness of the
 *	card means that approach caused horrible problems like losing serial data
 *	at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
 *	chips with FPGA front ends.
 *
 *	Ok the logic behind the 8390 is very simple:
 *
 *	Things to know
 *		- IRQ delivery is asynchronous to the PCI bus
 *		- Blocking the local CPU IRQ via spin locks was too slow
 *		- The chip has register windows needing locking work
 *
 *	So the path was once (I say once as people appear to have changed it
 *	in the mean time and it now looks rather bogus if the changes to use
 *	disable_irq_nosync_irqsave are disabling the local IRQ)
 *
 *
 *		Take the page lock
 *		Mask the IRQ on chip
 *		Disable the IRQ (but not mask locally- someone seems to have
 *			broken this with the lock validator stuff)
 *			[This must be _nosync as the page lock may otherwise
 *				deadlock us]
 *		Drop the page lock and turn IRQs back on
 *
 *		At this point an existing IRQ may still be running but we can't
 *		get a new one
 *
 *		Take the lock (so we know the IRQ has terminated) but don't mask
 *	the IRQs on the processor
 *		Set irqlock [for debug]
 *
 *		Transmit (slow as ****)
 *
 *		re-enable the IRQ
 *
 *
 *	We have to use disable_irq because otherwise you will get delayed
 *	interrupts on the APIC bus deadlocking the transmit path.
 *
 *	Quite hairy but the chip simply wasn't designed for SMP and you can't
 *	even ACK an interrupt without risking corrupting other parallel
 *	activities on the chip." [lkml, 25 Jul 2007]
 */
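
/*
 * The recipe above, reduced to a sketch; the real sequence lives in
 * __ei_start_xmit() below:
 *
 *	spin_lock_irqsave(&ei_local->page_lock, flags);
 *	ei_outb_p(0x00, e8390_base + EN0_IMR);	 (mask IRQs on the chip)
 *	spin_unlock_irqrestore(&ei_local->page_lock, flags);
 *
 *	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
 *	spin_lock(&ei_local->page_lock);  (any in-flight IRQ has now exited)
 *	... slow register window work ...
 *	ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
 *	spin_unlock(&ei_local->page_lock);
 *	enable_irq_lockdep_irqrestore(dev->irq, &flags);
 */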



/**
 * ei_open - Open/initialize the board.
 * @dev: network device to initialize
 *
 * This routine goes all-out, setting everything
 * up anew at each open, even though many of these registers should only
 * need to be set once at boot.
 */
static int __ei_open(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = netdev_priv(dev);

	if (dev->watchdog_timeo <= 0)
		dev->watchdog_timeo = TX_TIMEOUT;

	/*
	 *	Grab the page lock so we own the register set, then call
	 *	the init function.
	 */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 1);
	/* Set the flag before we drop the lock; that way the IRQ arrives
	   after it's set and we get no silly warnings */
	netif_start_queue(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	ei_local->irqlock = 0;
	return 0;
}

/**
 * ei_close - shut down network device
 * @dev: network device to close
 *
 * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
 */
static int __ei_close(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long flags;

	/*
	 *	Hold the page lock during close
	 */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 0);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	netif_stop_queue(dev);
	return 0;
}

/**
 * ei_tx_timeout - handle transmit time out condition
 * @dev: network device which has apparently fallen asleep
 *
 * Called by kernel when device never acknowledges a transmit has
 * completed (or failed) - i.e. never posted a Tx related interrupt.
 */

static void __ei_tx_timeout(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
	unsigned long flags;

	dev->stats.tx_errors++;

	spin_lock_irqsave(&ei_local->page_lock, flags);
	txsr = ei_inb(e8390_base+EN0_TSR);
	isr = ei_inb(e8390_base+EN0_ISR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
		   (txsr & ENTSR_ABT) ? "excess collisions." :
		   (isr) ? "lost interrupt?" : "cable problem?",
		   txsr, isr, tickssofar);

	if (!isr && !dev->stats.tx_packets) {
		/* The 8390 probably hasn't gotten on the cable yet. */
		ei_local->interface_num ^= 1;   /* Try a different xcvr.  */
	}

	/* Ugly but a reset can be slow, yet must be protected */

	disable_irq_nosync_lockdep(dev->irq);
	spin_lock(&ei_local->page_lock);

	/* Try to restart the card.  Perhaps the user has fixed something. */
	ei_reset_8390(dev);
	__NS8390_init(dev, 1);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep(dev->irq);
	netif_wake_queue(dev);
}

/**
 * ei_start_xmit - begin packet transmission
 * @skb: packet to be sent
 * @dev: network device to which packet is sent
 *
 * Sends a packet to an 8390 network device.
 */

static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int send_length = skb->len, output_page;
	unsigned long flags;
	char buf[ETH_ZLEN];
	char *data = skb->data;

	if (skb->len < ETH_ZLEN) {
		memset(buf, 0, ETH_ZLEN);	/* more efficient than doing just the needed bits */
		memcpy(buf, data, skb->len);
		send_length = ETH_ZLEN;
		data = buf;
	}
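	/* ETH_ZLEN is 60, the minimum Ethernet frame length excluding the
	   FCS; runts are zero-padded via the stack copy above so the chip
	   never transmits an undersized frame. */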

	/* Mask interrupts from the ethercard.
	   SMP: We have to grab the lock here, otherwise the IRQ handler
	   on another CPU can flip window and race the IRQ mask set. If we
	   don't lock, we end up trashing the mcast filter, not disabling
	   irqs. */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_outb_p(0x00, e8390_base + EN0_IMR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);


	/*
	 *	Slow phase with lock held.
	 */

	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);

	spin_lock(&ei_local->page_lock);

	ei_local->irqlock = 1;

	/*
	 * We have two Tx slots available for use. Find the first free
	 * slot, and then perform some sanity checks. With two Tx bufs,
	 * you get very close to transmitting back-to-back packets. With
	 * only one Tx buf, the transmitter sits idle while you reload the
	 * card, leaving a substantial gap between each transmitted packet.
	 */
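	/*
	 * Slot state encoding used below: tx1/tx2 hold 0 while the slot
	 * is free, the queued length while a packet waits in it, and -1
	 * while it is on the wire; lasttx records which buffer went out
	 * last (-1/-2 here, or 1/2 from the Tx interrupt path), or 10/20
	 * once the transmitter has gone idle.
	 */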

	if (ei_local->tx1 == 0) {
		output_page = ei_local->tx_start_page;
		ei_local->tx1 = send_length;
		if (ei_debug && ei_local->tx2 > 0)
			netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx2, ei_local->lasttx, ei_local->txing);
	} else if (ei_local->tx2 == 0) {
		output_page = ei_local->tx_start_page + TX_PAGES/2;
		ei_local->tx2 = send_length;
		if (ei_debug && ei_local->tx1 > 0)
			netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx1, ei_local->lasttx, ei_local->txing);
	} else {			/* We should never get here. */
		if (ei_debug)
			netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
				   ei_local->tx1, ei_local->tx2, ei_local->lasttx);
		ei_local->irqlock = 0;
		netif_stop_queue(dev);
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		spin_unlock(&ei_local->page_lock);
		enable_irq_lockdep_irqrestore(dev->irq, &flags);
		dev->stats.tx_errors++;
		return NETDEV_TX_BUSY;
	}

	/*
	 * Okay, now upload the packet and trigger a send if the transmitter
	 * isn't already sending. If it is busy, the interrupt handler will
	 * trigger the send later, upon receiving a Tx done interrupt.
	 */

	ei_block_output(dev, send_length, data, output_page);

	if (!ei_local->txing) {
		ei_local->txing = 1;
		NS8390_trigger_send(dev, send_length, output_page);
		if (output_page == ei_local->tx_start_page) {
			ei_local->tx1 = -1;
			ei_local->lasttx = -1;
		} else {
			ei_local->tx2 = -1;
			ei_local->lasttx = -2;
		}
	} else
		ei_local->txqueue++;

	if (ei_local->tx1 && ei_local->tx2)
		netif_stop_queue(dev);
	else
		netif_start_queue(dev);

	/* Turn 8390 interrupts back on. */
	ei_local->irqlock = 0;
	ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep_irqrestore(dev->irq, &flags);
	skb_tx_timestamp(skb);
	dev_kfree_skb(skb);
	dev->stats.tx_bytes += send_length;

	return NETDEV_TX_OK;
}

/**
 * ei_interrupt - handle the interrupts from an 8390
 * @irq: interrupt number
 * @dev_id: a pointer to the net_device
 *
 * Handle the ether interface interrupts. We pull packets from
 * the 8390 via the card specific functions and fire them at the networking
 * stack. We also handle transmit completions and wake the transmit path if
 * necessary, and we update the counters and do other housekeeping as
 * needed.
 */

static irqreturn_t __ei_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	unsigned long e8390_base = dev->base_addr;
	int interrupts, nr_serviced = 0;
	struct ei_device *ei_local = netdev_priv(dev);

	/*
	 *	Protect the irq test too.
	 */

	spin_lock(&ei_local->page_lock);

	if (ei_local->irqlock) {
		/*
		 * This might just be an interrupt for a PCI device sharing
		 * this line
		 */
		netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
			   ei_inb_p(e8390_base + EN0_ISR),
			   ei_inb_p(e8390_base + EN0_IMR));
		spin_unlock(&ei_local->page_lock);
		return IRQ_NONE;
	}

	/* Change to page 0 and read the intr status reg. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
	if (ei_debug > 3)
		netdev_dbg(dev, "interrupt(isr=%#2.2x)\n",
			   ei_inb_p(e8390_base + EN0_ISR));

	/* !!Assumption!! -- we stay in page 0.	 Don't break this. */
	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
	       ++nr_serviced < MAX_SERVICE) {
		if (!netif_running(dev)) {
			netdev_warn(dev, "interrupt from stopped card\n");
			/* rmk - acknowledge the interrupts */
			ei_outb_p(interrupts, e8390_base + EN0_ISR);
			interrupts = 0;
			break;
		}
		if (interrupts & ENISR_OVER)
			ei_rx_overrun(dev);
		else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
			/* Got a good (?) packet. */
			ei_receive(dev);
		}
		/* Push the next to-transmit packet through. */
		if (interrupts & ENISR_TX)
			ei_tx_intr(dev);
		else if (interrupts & ENISR_TX_ERR)
			ei_tx_err(dev);

		if (interrupts & ENISR_COUNTERS) {
			dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
			dev->stats.rx_crc_errors   += ei_inb_p(e8390_base + EN0_COUNTER1);
			dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2);
			ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
		}

		/* Ignore any RDC interrupts that make it back to here. */
		if (interrupts & ENISR_RDC)
			ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);

		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
	}

	if (interrupts && ei_debug) {
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
		if (nr_serviced >= MAX_SERVICE) {
			/* 0xFF is valid for a card removal */
			if (interrupts != 0xFF)
				netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
					    interrupts);
			ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
		} else {
			netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
			ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
		}
	}
	spin_unlock(&ei_local->page_lock);
	return IRQ_RETVAL(nr_serviced > 0);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void __ei_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	__ei_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
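
/*
 * Illustrative only: board drivers register the exported interrupt
 * wrapper themselves, typically with IRQF_SHARED since 8390 clones
 * often sit on shared lines:
 *
 *	ret = request_irq(dev->irq, ei_interrupt, IRQF_SHARED,
 *			  dev->name, dev);
 */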

/**
 * ei_tx_err - handle transmitter error
 * @dev: network device which threw the exception
 *
 * A transmitter error has happened. Most likely excess collisions (which
 * is a fairly normal condition). If the error is one where the Tx will
 * have been aborted, we try and send another one right away, instead of
 * letting the failed packet sit and collect dust in the Tx buffer. This
 * is a much better solution as it avoids kernel based Tx timeouts, and
 * an unnecessary card reset.
 *
 * Called with lock held.
 */

static void ei_tx_err(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	/* ei_local is used on some platforms via the EI_SHIFT macro */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
	unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);

#ifdef VERBOSE_ERROR_DUMP
	netdev_dbg(dev, "transmitter error (%#2x):", txsr);
	if (txsr & ENTSR_ABT)
		pr_cont(" excess-collisions ");
	if (txsr & ENTSR_ND)
		pr_cont(" non-deferral ");
	if (txsr & ENTSR_CRS)
		pr_cont(" lost-carrier ");
	if (txsr & ENTSR_FU)
		pr_cont(" FIFO-underrun ");
	if (txsr & ENTSR_CDH)
		pr_cont(" lost-heartbeat ");
	pr_cont("\n");
#endif

	ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */

	if (tx_was_aborted)
		ei_tx_intr(dev);
	else {
		dev->stats.tx_errors++;
		if (txsr & ENTSR_CRS)
			dev->stats.tx_carrier_errors++;
		if (txsr & ENTSR_CDH)
			dev->stats.tx_heartbeat_errors++;
		if (txsr & ENTSR_OWC)
			dev->stats.tx_window_errors++;
	}
}

/**
 * ei_tx_intr - transmit interrupt handler
 * @dev: network device for which tx intr is handled
 *
 * We have finished a transmit: check for errors and then trigger the next
 * packet to be sent. Called with lock held.
 */

static void ei_tx_intr(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int status = ei_inb(e8390_base + EN0_TSR);

	ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */

	/*
	 * There are two Tx buffers, see which one finished, and trigger
	 * the send of another one if it exists.
	 */
	ei_local->txqueue--;

	if (ei_local->tx1 < 0) {
		if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
			pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
			       ei_local->name, ei_local->lasttx, ei_local->tx1);
		ei_local->tx1 = 0;
		if (ei_local->tx2 > 0) {
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
			dev->trans_start = jiffies;
			ei_local->tx2 = -1;
			ei_local->lasttx = 2;
		} else {
			ei_local->lasttx = 20;
			ei_local->txing = 0;
		}
	} else if (ei_local->tx2 < 0) {
		if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
			pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
			       ei_local->name, ei_local->lasttx, ei_local->tx2);
		ei_local->tx2 = 0;
		if (ei_local->tx1 > 0) {
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
			dev->trans_start = jiffies;
			ei_local->tx1 = -1;
			ei_local->lasttx = 1;
		} else {
			ei_local->lasttx = 10;
			ei_local->txing = 0;
		}
	} /* else
		netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
			    ei_local->lasttx);
*/

	/* Minimize Tx latency: update the statistics after we restart TXing. */
	if (status & ENTSR_COL)
		dev->stats.collisions++;
	if (status & ENTSR_PTX)
		dev->stats.tx_packets++;
	else {
		dev->stats.tx_errors++;
		if (status & ENTSR_ABT) {
			dev->stats.tx_aborted_errors++;
			dev->stats.collisions += 16;
		}
		if (status & ENTSR_CRS)
			dev->stats.tx_carrier_errors++;
		if (status & ENTSR_FU)
			dev->stats.tx_fifo_errors++;
		if (status & ENTSR_CDH)
			dev->stats.tx_heartbeat_errors++;
		if (status & ENTSR_OWC)
			dev->stats.tx_window_errors++;
	}
	netif_wake_queue(dev);
}

/**
 * ei_receive - receive some packets
 * @dev: network device with which receive will be run
 *
 * We have a good packet(s), get it/them out of the buffers.
 * Called with lock held.
 */

static void ei_receive(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned char rxing_page, this_frame, next_frame;
	unsigned short current_offset;
	int rx_pkt_count = 0;
	struct e8390_pkt_hdr rx_frame;
	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;

	while (++rx_pkt_count < 10) {
		int pkt_len, pkt_stat;

		/* Get the rx page (incoming packet pointer). */
		ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
		rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
		ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);

		/* Remove one frame from the ring.  Boundary is always a page behind. */
		this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
		if (this_frame >= ei_local->stop_page)
			this_frame = ei_local->rx_start_page;

		/* Someday we'll omit the previous, iff we never get this message.
		   (There is at least one clone claimed to have a problem.)

		   Keep quiet if it looks like a card removal. One problem here
		   is that some clones crash in roughly the same way.
		 */
		if (ei_debug > 0 &&
		    this_frame != ei_local->current_page &&
		    (this_frame != 0x0 || rxing_page != 0xFF))
			netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
				   this_frame, ei_local->current_page);

		if (this_frame == rxing_page)	/* Read all the frames? */
			break;				/* Done for now */

		current_offset = this_frame << 8;
		ei_get_8390_hdr(dev, &rx_frame, this_frame);

		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
		pkt_stat = rx_frame.status;

		next_frame = this_frame + 1 + ((pkt_len+4)>>8);
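		/* Worked example: a full 1514-byte frame plus the 4-byte
		   ring header is 1518 bytes, i.e. this page and five more
		   256-byte pages, so next_frame = this_frame + 6. */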

		/* Check for bogosity warned by 3c503 book: the status byte is never
		   written.  This happened a lot during testing! This code should be
		   cleaned up someday. */
		if (rx_frame.next != next_frame &&
		    rx_frame.next != next_frame + 1 &&
		    rx_frame.next != next_frame - num_rx_pages &&
		    rx_frame.next != next_frame + 1 - num_rx_pages) {
			ei_local->current_page = rxing_page;
			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
			dev->stats.rx_errors++;
			continue;
		}

		if (pkt_len < 60 || pkt_len > 1518) {
			if (ei_debug)
				netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
					   rx_frame.count, rx_frame.status,
					   rx_frame.next);
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
		} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len+2);
			if (skb == NULL) {
				if (ei_debug > 1)
					netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
						   pkt_len);
				dev->stats.rx_dropped++;
				break;
			} else {
				skb_reserve(skb, 2);	/* IP headers on 16 byte boundaries */
				skb_put(skb, pkt_len);	/* Make room */
				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
				skb->protocol = eth_type_trans(skb, dev);
				if (!skb_defer_rx_timestamp(skb))
					netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				if (pkt_stat & ENRSR_PHY)
					dev->stats.multicast++;
			}
		} else {
			if (ei_debug)
				netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
					   rx_frame.status, rx_frame.next,
					   rx_frame.count);
			dev->stats.rx_errors++;
			/* NB: The NIC counts CRC, frame and missed errors. */
			if (pkt_stat & ENRSR_FO)
				dev->stats.rx_fifo_errors++;
		}
		next_frame = rx_frame.next;

		/* This _should_ never happen: it's here for avoiding bad clones. */
		if (next_frame >= ei_local->stop_page) {
			netdev_notice(dev, "next frame inconsistency, %#2x\n",
				      next_frame);
			next_frame = ei_local->rx_start_page;
		}
		ei_local->current_page = next_frame;
		ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
	}

	/* We used to also ack ENISR_OVER here, but that would sometimes mask
	   a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
	ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
}

/**
 * ei_rx_overrun - handle receiver overrun
 * @dev: network device which threw exception
 *
 * We have a receiver overrun: we have to kick the 8390 to get it started
 * again. Problem is that you have to kick it exactly as NS prescribes in
 * the updated datasheets, or "the NIC may act in an unpredictable manner."
 * This includes causing "the NIC to defer indefinitely when it is stopped
 * on a busy network."  Ugh.
 * Called with lock held. Don't call this with the interrupts off or your
 * computer will hate you - it takes 10ms or so.
 */

static void ei_rx_overrun(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	unsigned char was_txing, must_resend = 0;
	/* ei_local is used on some platforms via the EI_SHIFT macro */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);

	/*
	 * Record whether a Tx was in progress and then issue the
	 * stop command.
	 */
	was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	if (ei_debug > 1)
		netdev_dbg(dev, "Receiver overrun\n");
	dev->stats.rx_over_errors++;

	/*
	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
	 * Early datasheets said to poll the reset bit, but now they say that
	 * it "is not a reliable indicator and subsequently should be ignored."
	 * We wait at least 10ms.
	 */

	mdelay(10);

	/*
	 * Reset RBCR[01] back to zero as per magic incantation.
	 */
	ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
	ei_outb_p(0x00, e8390_base+EN0_RCNTHI);

	/*
	 * See if any Tx was interrupted or not. According to NS, this
	 * step is vital, and skipping it will cause no end of havoc.
	 */

	if (was_txing) {
		unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
		if (!tx_completed)
			must_resend = 1;
	}

	/*
	 * Have to enter loopback mode and then restart the NIC before
	 * you are allowed to slurp packets up off the ring.
	 */
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);

	/*
	 * Clear the Rx ring of all the debris, and ack the interrupt.
	 */
	ei_receive(dev);
	ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);

	/*
	 * Leave loopback mode, and resend any packet that got stopped.
	 */
	ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
	if (must_resend)
		ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
}

/*
 *	Collect the stats. This is called unlocked and from several contexts.
 */

static struct net_device_stats *__ei_get_stats(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long flags;

	/* If the card is stopped, just return the present stats. */
	if (!netif_running(dev))
		return &dev->stats;

	spin_lock_irqsave(&ei_local->page_lock, flags);
	/* Read the counter registers, assuming we are in page 0. */
	dev->stats.rx_frame_errors  += ei_inb_p(ioaddr + EN0_COUNTER0);
	dev->stats.rx_crc_errors    += ei_inb_p(ioaddr + EN0_COUNTER1);
	dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	return &dev->stats;
}

/*
 * Form the 64 bit 8390 multicast table from the linked list of addresses
 * associated with this dev structure.
 */

static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev) {
		u32 crc = ether_crc(ETH_ALEN, ha->addr);
		/*
		 * The 8390 uses the 6 most significant bits of the
		 * CRC to index the multicast table.
		 */
		bits[crc>>29] |= (1<<((crc>>26)&7));
	}
}
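
/*
 * Worked example for the indexing above: a CRC whose top three bits are
 * 101b and next three bits are 011b selects filter byte 5 (crc >> 29)
 * and bit 3 ((crc >> 26) & 7), i.e. bits[5] |= 1 << 3.
 */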

/**
 * do_set_multicast_list - set/clear multicast filter
 * @dev: net device for which multicast filter is adjusted
 *
 *	Set or clear the multicast filter for this adaptor. May be called
 *	from a BH in 2.1.x. Must be called with lock held.
 */

static void do_set_multicast_list(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	int i;
	struct ei_device *ei_local = netdev_priv(dev);

	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
		memset(ei_local->mcfilter, 0, 8);
		if (!netdev_mc_empty(dev))
			make_mc_bits(ei_local->mcfilter, dev);
	} else
		memset(ei_local->mcfilter, 0xFF, 8);	/* mcast set to accept-all */

	/*
	 * DP8390 manuals don't specify any magic sequence for altering
	 * the multicast regs on an already running card. To be safe, we
	 * ensure multicast mode is off prior to loading up the new hash
	 * table. If this proves to be not enough, we can always resort
	 * to stopping the NIC, loading the table and then restarting.
	 *
	 * Bug Alert!  The MC regs on the SMC 83C690 (SMC Elite and SMC
	 * Elite16) appear to be write-only. The NS 8390 data sheet lists
	 * them as r/w so this is a bug.  The SMC 83C790 (SMC Ultra and
	 * Ultra32 EISA) appears to have this bug fixed.
	 */

	if (netif_running(dev))
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
	for (i = 0; i < 8; i++) {
		ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
#ifndef BUG_83C690
		if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i])
			netdev_err(dev, "Multicast filter read/write mismap %d\n",
				   i);
#endif
	}
	ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);

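	/* DP8390 RCR bits, assuming the standard register layout: 0x08
	   (AM) accepts multicast frames matching the hash, 0x10 (PRO)
	   accepts all physical addresses, so 0x18 means promiscuous plus
	   all-multicast. */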
	if (dev->flags&IFF_PROMISC)
		ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
	else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
		ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
	else
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
}

/*
 *	Called without lock held. This is invoked from user context and may
 *	run in parallel with just about everything else. It's also fairly
 *	quick and not called too often, but it must protect against both bh
 *	and irq users.
 */

static void __ei_set_multicast_list(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = netdev_priv(dev);

	spin_lock_irqsave(&ei_local->page_lock, flags);
	do_set_multicast_list(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
}

/**
 * ethdev_setup - init rest of 8390 device struct
 * @dev: network device structure to init
 *
 * Initialize the rest of the 8390 device structure.  Do NOT __init
 * this, as it is used by 8390 based modular drivers too.
 */

static void ethdev_setup(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	if (ei_debug > 1)
		printk(version);

	ether_setup(dev);

	spin_lock_init(&ei_local->page_lock);
}

/**
 * alloc_ei_netdev - alloc_etherdev counterpart for 8390
 * @size: extra bytes to allocate
 *
 * Allocate 8390-specific net_device.
 */
static struct net_device *____alloc_ei_netdev(int size)
{
	return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
				ethdev_setup);
}
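
/*
 * Illustrative usage from a board driver's probe path (the my_board
 * names are hypothetical):
 *
 *	dev = ____alloc_ei_netdev(sizeof(struct my_board_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->base_addr = ioaddr;
 *	dev->irq = irq;
 *	... fill in the ei_device hooks and page layout ...
 *	err = register_netdev(dev);
 */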




/* This page of functions should be 8390 generic */
/* Follow National Semi's recommendations for initializing the "NIC". */

/**
 * NS8390_init - initialize 8390 hardware
 * @dev: network device to initialize
 * @startp: boolean.  non-zero value to initiate chip processing
 *
 *	Must be called with lock held.
 */

static void __NS8390_init(struct net_device *dev, int startp)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int i;
	int endcfg = ei_local->word16
	    ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
	    : 0x48;

	if (sizeof(struct e8390_pkt_hdr) != 4)
		panic("8390.c: header struct mispacked\n");
	/* Follow National Semi's recommendations for initing the DP83902. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
	ei_outb_p(endcfg, e8390_base + EN0_DCFG);	/* 0x48 or 0x49 */
	/* Clear the remote byte count registers. */
	ei_outb_p(0x00,  e8390_base + EN0_RCNTLO);
	ei_outb_p(0x00,  e8390_base + EN0_RCNTHI);
	/* Set to monitor and loopback mode -- this is vital! */
	ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
	/* Set the transmit page and receive ring. */
	ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
	ei_local->tx1 = ei_local->tx2 = 0;
	ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
	ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY);	/* 3c503 says 0x3f, NS says 0x26 */
	ei_local->current_page = ei_local->rx_start_page;		/* assert boundary+1 */
	ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
	/* Clear the pending interrupts and mask. */
	ei_outb_p(0xFF, e8390_base + EN0_ISR);
	ei_outb_p(0x00,  e8390_base + EN0_IMR);

	/* Copy the station address into the DS8390 registers. */

	ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
	for (i = 0; i < 6; i++) {
		ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
		if (ei_debug > 1 &&
		    ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
			netdev_err(dev, "Hw. address read/write mismap %d\n", i);
	}

	ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	ei_local->tx1 = ei_local->tx2 = 0;
	ei_local->txing = 0;

	if (startp) {
		ei_outb_p(0xff,  e8390_base + EN0_ISR);
		ei_outb_p(ENISR_ALL,  e8390_base + EN0_IMR);
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
		ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
		/* 3c503 TechMan says rxconfig only after the NIC is started. */
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on,  */
		do_set_multicast_list(dev);	/* (re)load the mcast table */
	}
}

/* Trigger a transmit start, assuming the length is valid.
   Always called with the page lock held */

static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
								int start_page)
{
	unsigned long e8390_base = dev->base_addr;
	/* ei_local is used on some platforms via the EI_SHIFT macro */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);

	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);

	if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) {
		netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
		return;
	}
	ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
	ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
	ei_outb_p(start_page, e8390_base + EN0_TPSR);
	ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
}