xref: /openbmc/linux/drivers/net/ethernet/8390/lib8390.c (revision 2b2706aaaefee49ba0030bc679865db178e89e11)
1644570b8SJeff Kirsher /* 8390.c: A general NS8390 ethernet driver core for linux. */
2644570b8SJeff Kirsher /*
3644570b8SJeff Kirsher 	Written 1992-94 by Donald Becker.
4644570b8SJeff Kirsher 
5644570b8SJeff Kirsher 	Copyright 1993 United States Government as represented by the
6644570b8SJeff Kirsher 	Director, National Security Agency.
7644570b8SJeff Kirsher 
8644570b8SJeff Kirsher 	This software may be used and distributed according to the terms
9644570b8SJeff Kirsher 	of the GNU General Public License, incorporated herein by reference.
10644570b8SJeff Kirsher 
11644570b8SJeff Kirsher 	The author may be reached as becker@scyld.com, or C/O
12644570b8SJeff Kirsher 	Scyld Computing Corporation
13644570b8SJeff Kirsher 	410 Severn Ave., Suite 210
14644570b8SJeff Kirsher 	Annapolis MD 21403
15644570b8SJeff Kirsher 
16644570b8SJeff Kirsher 
17644570b8SJeff Kirsher   This is the chip-specific code for many 8390-based ethernet adaptors.
18644570b8SJeff Kirsher   This is not a complete driver, it must be combined with board-specific
19644570b8SJeff Kirsher   code such as ne.c, wd.c, 3c503.c, etc.
20644570b8SJeff Kirsher 
21644570b8SJeff Kirsher   Seeing how at least eight drivers use this code, (not counting the
22644570b8SJeff Kirsher   PCMCIA ones either) it is easy to break some card by what seems like
23644570b8SJeff Kirsher   a simple innocent change. Please contact me or Donald if you think
24644570b8SJeff Kirsher   you have found something that needs changing. -- PG
25644570b8SJeff Kirsher 
26644570b8SJeff Kirsher 
27644570b8SJeff Kirsher   Changelog:
28644570b8SJeff Kirsher 
29644570b8SJeff Kirsher   Paul Gortmaker	: remove set_bit lock, other cleanups.
30644570b8SJeff Kirsher   Paul Gortmaker	: add ei_get_8390_hdr() so we can pass skb's to
31644570b8SJeff Kirsher 			  ei_block_input() for eth_io_copy_and_sum().
32644570b8SJeff Kirsher   Paul Gortmaker	: exchange static int ei_pingpong for a #define,
33644570b8SJeff Kirsher 			  also add better Tx error handling.
34644570b8SJeff Kirsher   Paul Gortmaker	: rewrite Rx overrun handling as per NS specs.
35644570b8SJeff Kirsher   Alexey Kuznetsov	: use the 8390's six bit hash multicast filter.
36644570b8SJeff Kirsher   Paul Gortmaker	: tweak ANK's above multicast changes a bit.
37644570b8SJeff Kirsher   Paul Gortmaker	: update packet statistics for v2.1.x
38644570b8SJeff Kirsher   Alan Cox		: support arbitrary stupid port mappings on the
39644570b8SJeff Kirsher 			  68K Macintosh. Support >16bit I/O spaces
40644570b8SJeff Kirsher   Paul Gortmaker	: add kmod support for auto-loading of the 8390
41644570b8SJeff Kirsher 			  module by all drivers that require it.
42644570b8SJeff Kirsher   Alan Cox		: Spinlocking work, added 'BUG_83C690'
43644570b8SJeff Kirsher   Paul Gortmaker	: Separate out Tx timeout code from Tx path.
44644570b8SJeff Kirsher   Paul Gortmaker	: Remove old unused single Tx buffer code.
45644570b8SJeff Kirsher   Hayato Fujiwara	: Add m32r support.
46644570b8SJeff Kirsher   Paul Gortmaker	: use skb_padto() instead of stack scratch area
47644570b8SJeff Kirsher 
48644570b8SJeff Kirsher   Sources:
49644570b8SJeff Kirsher   The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
50644570b8SJeff Kirsher 
51644570b8SJeff Kirsher   */
52644570b8SJeff Kirsher 
53*2b2706aaSArmin Wolf #include <linux/build_bug.h>
54644570b8SJeff Kirsher #include <linux/module.h>
55644570b8SJeff Kirsher #include <linux/kernel.h>
56644570b8SJeff Kirsher #include <linux/jiffies.h>
57644570b8SJeff Kirsher #include <linux/fs.h>
58644570b8SJeff Kirsher #include <linux/types.h>
59644570b8SJeff Kirsher #include <linux/string.h>
60644570b8SJeff Kirsher #include <linux/bitops.h>
61644570b8SJeff Kirsher #include <linux/uaccess.h>
62644570b8SJeff Kirsher #include <linux/io.h>
63644570b8SJeff Kirsher #include <asm/irq.h>
64644570b8SJeff Kirsher #include <linux/delay.h>
65644570b8SJeff Kirsher #include <linux/errno.h>
66644570b8SJeff Kirsher #include <linux/fcntl.h>
67644570b8SJeff Kirsher #include <linux/in.h>
68644570b8SJeff Kirsher #include <linux/interrupt.h>
69644570b8SJeff Kirsher #include <linux/init.h>
70644570b8SJeff Kirsher #include <linux/crc32.h>
71644570b8SJeff Kirsher 
72644570b8SJeff Kirsher #include <linux/netdevice.h>
73644570b8SJeff Kirsher #include <linux/etherdevice.h>
74644570b8SJeff Kirsher 
75644570b8SJeff Kirsher #define NS8390_CORE
76644570b8SJeff Kirsher #include "8390.h"
77644570b8SJeff Kirsher 
78644570b8SJeff Kirsher #define BUG_83C690
79644570b8SJeff Kirsher 
80644570b8SJeff Kirsher /* These are the operational function interfaces to board-specific
81644570b8SJeff Kirsher    routines.
82644570b8SJeff Kirsher 	void reset_8390(struct net_device *dev)
83644570b8SJeff Kirsher 		Resets the board associated with DEV, including a hardware reset of
84644570b8SJeff Kirsher 		the 8390.  This is only called when there is a transmit timeout, and
85644570b8SJeff Kirsher 		it is always followed by 8390_init().
86644570b8SJeff Kirsher 	void block_output(struct net_device *dev, int count, const unsigned char *buf,
87644570b8SJeff Kirsher 					  int start_page)
88644570b8SJeff Kirsher 		Write the COUNT bytes of BUF to the packet buffer at START_PAGE.  The
89644570b8SJeff Kirsher 		"page" value uses the 8390's 256-byte pages.
90644570b8SJeff Kirsher 	void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
91644570b8SJeff Kirsher 		Read the 4 byte, page aligned 8390 header. *If* there is a
92644570b8SJeff Kirsher 		subsequent read, it will be of the rest of the packet.
93644570b8SJeff Kirsher 	void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
94644570b8SJeff Kirsher 		Read COUNT bytes from the packet buffer into the skb data area. Start
95644570b8SJeff Kirsher 		reading from RING_OFFSET, the address as the 8390 sees it.  This will always
96644570b8SJeff Kirsher 		follow the read of the 8390 header.
97644570b8SJeff Kirsher */
98644570b8SJeff Kirsher #define ei_reset_8390 (ei_local->reset_8390)
99644570b8SJeff Kirsher #define ei_block_output (ei_local->block_output)
100644570b8SJeff Kirsher #define ei_block_input (ei_local->block_input)
101644570b8SJeff Kirsher #define ei_get_8390_hdr (ei_local->get_8390_hdr)
102644570b8SJeff Kirsher 
103644570b8SJeff Kirsher /* Index to functions. */
104644570b8SJeff Kirsher static void ei_tx_intr(struct net_device *dev);
105644570b8SJeff Kirsher static void ei_tx_err(struct net_device *dev);
106644570b8SJeff Kirsher static void ei_receive(struct net_device *dev);
107644570b8SJeff Kirsher static void ei_rx_overrun(struct net_device *dev);
108644570b8SJeff Kirsher 
109644570b8SJeff Kirsher /* Routines generic to NS8390-based boards. */
110644570b8SJeff Kirsher static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
111644570b8SJeff Kirsher 								int start_page);
112644570b8SJeff Kirsher static void do_set_multicast_list(struct net_device *dev);
113644570b8SJeff Kirsher static void __NS8390_init(struct net_device *dev, int startp);
114644570b8SJeff Kirsher 
115c45f812fSMatthew Whitehead static unsigned version_printed;
116c45f812fSMatthew Whitehead static u32 msg_enable;
117d3757ba4SJoe Perches module_param(msg_enable, uint, 0444);
118c45f812fSMatthew Whitehead MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
119c45f812fSMatthew Whitehead 
120644570b8SJeff Kirsher /*
121644570b8SJeff Kirsher  *	SMP and the 8390 setup.
122644570b8SJeff Kirsher  *
123644570b8SJeff Kirsher  *	The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
124644570b8SJeff Kirsher  *	a page register that controls bank and packet buffer access. We guard
125644570b8SJeff Kirsher  *	this with ei_local->page_lock. Nobody should assume or set the page other
126644570b8SJeff Kirsher  *	than zero when the lock is not held. Lock holders must restore page 0
127644570b8SJeff Kirsher  *	before unlocking. Even pure readers must take the lock to protect in
128644570b8SJeff Kirsher  *	page 0.
129644570b8SJeff Kirsher  *
130644570b8SJeff Kirsher  *	To make life difficult the chip can also be very slow. We therefore can't
131644570b8SJeff Kirsher  *	just use spinlocks. For the longer lockups we disable the irq the device
132644570b8SJeff Kirsher  *	sits on and hold the lock. We must hold the lock because there is a dual
133644570b8SJeff Kirsher  *	processor case other than interrupts (get stats/set multicast list in
134644570b8SJeff Kirsher  *	parallel with each other and transmit).
135644570b8SJeff Kirsher  *
136644570b8SJeff Kirsher  *	Note: in theory we can just disable the irq on the card _but_ there is
137644570b8SJeff Kirsher  *	a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
138644570b8SJeff Kirsher  *	enter lock, take the queued irq. So we waddle instead of flying.
139644570b8SJeff Kirsher  *
140644570b8SJeff Kirsher  *	Finally by special arrangement for the purpose of being generally
141644570b8SJeff Kirsher  *	annoying the transmit function is called bh atomic. That places
142644570b8SJeff Kirsher  *	restrictions on the user context callers as disable_irq won't save
143644570b8SJeff Kirsher  *	them.
144644570b8SJeff Kirsher  *
145644570b8SJeff Kirsher  *	Additional explanation of problems with locking by Alan Cox:
146644570b8SJeff Kirsher  *
147644570b8SJeff Kirsher  *	"The author (me) didn't use spin_lock_irqsave because the slowness of the
148644570b8SJeff Kirsher  *	card means that approach caused horrible problems like losing serial data
149644570b8SJeff Kirsher  *	at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
150644570b8SJeff Kirsher  *	chips with FPGA front ends.
151644570b8SJeff Kirsher  *
152644570b8SJeff Kirsher  *	Ok the logic behind the 8390 is very simple:
153644570b8SJeff Kirsher  *
154644570b8SJeff Kirsher  *	Things to know
155644570b8SJeff Kirsher  *		- IRQ delivery is asynchronous to the PCI bus
156644570b8SJeff Kirsher  *		- Blocking the local CPU IRQ via spin locks was too slow
157644570b8SJeff Kirsher  *		- The chip has register windows needing locking work
158644570b8SJeff Kirsher  *
159644570b8SJeff Kirsher  *	So the path was once (I say once as people appear to have changed it
160644570b8SJeff Kirsher  *	in the mean time and it now looks rather bogus if the changes to use
161644570b8SJeff Kirsher  *	disable_irq_nosync_irqsave are disabling the local IRQ)
162644570b8SJeff Kirsher  *
163644570b8SJeff Kirsher  *
164644570b8SJeff Kirsher  *		Take the page lock
165644570b8SJeff Kirsher  *		Mask the IRQ on chip
166644570b8SJeff Kirsher  *		Disable the IRQ (but not mask locally- someone seems to have
167644570b8SJeff Kirsher  *			broken this with the lock validator stuff)
168644570b8SJeff Kirsher  *			[This must be _nosync as the page lock may otherwise
169644570b8SJeff Kirsher  *				deadlock us]
170644570b8SJeff Kirsher  *		Drop the page lock and turn IRQs back on
171644570b8SJeff Kirsher  *
172644570b8SJeff Kirsher  *		At this point an existing IRQ may still be running but we can't
173644570b8SJeff Kirsher  *		get a new one
174644570b8SJeff Kirsher  *
175644570b8SJeff Kirsher  *		Take the lock (so we know the IRQ has terminated) but don't mask
176644570b8SJeff Kirsher  *	the IRQs on the processor
177644570b8SJeff Kirsher  *		Set irqlock [for debug]
178644570b8SJeff Kirsher  *
179644570b8SJeff Kirsher  *		Transmit (slow as ****)
180644570b8SJeff Kirsher  *
181644570b8SJeff Kirsher  *		re-enable the IRQ
182644570b8SJeff Kirsher  *
183644570b8SJeff Kirsher  *
184644570b8SJeff Kirsher  *	We have to use disable_irq because otherwise you will get delayed
185644570b8SJeff Kirsher  *	interrupts on the APIC bus deadlocking the transmit path.
186644570b8SJeff Kirsher  *
187644570b8SJeff Kirsher  *	Quite hairy but the chip simply wasn't designed for SMP and you can't
188644570b8SJeff Kirsher  *	even ACK an interrupt without risking corrupting other parallel
189644570b8SJeff Kirsher  *	activities on the chip." [lkml, 25 Jul 2007]
190644570b8SJeff Kirsher  */
191644570b8SJeff Kirsher 
192644570b8SJeff Kirsher 
193644570b8SJeff Kirsher 
/**
 * __ei_open - Open/initialize the board
 * @dev: network device to initialize
 *
 * This routine goes all-out, setting everything
 * up anew at each open, even though many of these registers should only
 * need to be set once at boot.
 *
 * Returns 0 (cannot fail).
 */
static int __ei_open(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = netdev_priv(dev);

	/* Fall back to the driver default if the board code set no timeout. */
	if (dev->watchdog_timeo <= 0)
		dev->watchdog_timeo = TX_TIMEOUT;

	/*
	 *	Grab the page lock so we own the register set, then call
	 *	the init function.
	 */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 1);
	/* Set the flag before we drop the lock, That way the IRQ arrives
	   after its set and we get no silly warnings */
	netif_start_queue(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	ei_local->irqlock = 0;	/* allow __ei_interrupt() to service the chip */
	return 0;
}
224644570b8SJeff Kirsher 
/**
 * __ei_close - shut down network device
 * @dev: network device to close
 *
 * Opposite of __ei_open(). Only used when "ifconfig <devname> down" is done.
 *
 * Returns 0 (cannot fail).
 */
static int __ei_close(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long flags;

	/*
	 *	Hold the page lock during close
	 */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	/* startp == 0: program the chip but leave it stopped. */
	__NS8390_init(dev, 0);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	netif_stop_queue(dev);
	return 0;
}
246644570b8SJeff Kirsher 
/**
 * __ei_tx_timeout - handle transmit time out condition
 * @dev: network device which has apparently fallen asleep
 * @txqueue: index of the timed-out queue (unused; 8390 has one queue)
 *
 * Called by kernel when device never acknowledges a transmit has
 * completed (or failed) - i.e. never posted a Tx related interrupt.
 * Logs the chip state, then resets and re-initializes the board.
 */

static void __ei_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
	unsigned long flags;

	dev->stats.tx_errors++;

	/* Snapshot TSR/ISR under the page lock for the diagnostic below. */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	txsr = ei_inb(e8390_base+EN0_TSR);
	isr = ei_inb(e8390_base+EN0_ISR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
		   (txsr & ENTSR_ABT) ? "excess collisions." :
		   (isr) ? "lost interrupt?" : "cable problem?",
		   txsr, isr, tickssofar);

	if (!isr && !dev->stats.tx_packets) {
		/* The 8390 probably hasn't gotten on the cable yet. */
		ei_local->interface_num ^= 1;   /* Try a different xcvr.  */
	}

	/* Ugly but a reset can be slow, yet must be protected */

	disable_irq_nosync_lockdep(dev->irq);
	spin_lock(&ei_local->page_lock);

	/* Try to restart the card.  Perhaps the user has fixed something. */
	ei_reset_8390(dev);
	__NS8390_init(dev, 1);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep(dev->irq);
	netif_wake_queue(dev);
}
292644570b8SJeff Kirsher 
/**
 * __ei_start_xmit - begin packet transmission
 * @skb: packet to be sent
 * @dev: network device to which packet is sent
 *
 * Sends a packet to an 8390 network device.
 *
 * Returns NETDEV_TX_OK on success, or NETDEV_TX_BUSY in the
 * "should never happen" case where both on-card Tx buffers are in use.
 */

static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int send_length = skb->len, output_page;
	unsigned long flags;
	char buf[ETH_ZLEN];
	char *data = skb->data;

	/* Zero-pad undersized frames to the ethernet minimum using an
	   on-stack scratch buffer; the skb itself is left untouched. */
	if (skb->len < ETH_ZLEN) {
		memset(buf, 0, ETH_ZLEN);	/* more efficient than doing just the needed bits */
		memcpy(buf, data, skb->len);
		send_length = ETH_ZLEN;
		data = buf;
	}

	/* Mask interrupts from the ethercard.
	   SMP: We have to grab the lock here otherwise the IRQ handler
	   on another CPU can flip window and race the IRQ mask set. We end
	   up trashing the mcast filter not disabling irqs if we don't lock */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_outb_p(0x00, e8390_base + EN0_IMR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);


	/*
	 *	Slow phase with lock held.
	 */

	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);

	spin_lock(&ei_local->page_lock);

	/* irqlock set: any interrupt arriving now is a shared-line stray
	   and will be rejected by __ei_interrupt(). */
	ei_local->irqlock = 1;

	/*
	 * We have two Tx slots available for use. Find the first free
	 * slot, and then perform some sanity checks. With two Tx bufs,
	 * you get very close to transmitting back-to-back packets. With
	 * only one Tx buf, the transmitter sits idle while you reload the
	 * card, leaving a substantial gap between each transmitted packet.
	 */

	if (ei_local->tx1 == 0) {
		output_page = ei_local->tx_start_page;
		ei_local->tx1 = send_length;
		if ((netif_msg_tx_queued(ei_local)) &&
		    ei_local->tx2 > 0)
			netdev_dbg(dev,
				   "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx2, ei_local->lasttx, ei_local->txing);
	} else if (ei_local->tx2 == 0) {
		output_page = ei_local->tx_start_page + TX_PAGES/2;
		ei_local->tx2 = send_length;
		if ((netif_msg_tx_queued(ei_local)) &&
		    ei_local->tx1 > 0)
			netdev_dbg(dev,
				   "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx1, ei_local->lasttx, ei_local->txing);
	} else {			/* We should never get here. */
		netif_dbg(ei_local, tx_err, dev,
			  "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
			  ei_local->tx1, ei_local->tx2, ei_local->lasttx);
		ei_local->irqlock = 0;
		netif_stop_queue(dev);
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		spin_unlock(&ei_local->page_lock);
		enable_irq_lockdep_irqrestore(dev->irq, &flags);
		dev->stats.tx_errors++;
		return NETDEV_TX_BUSY;
	}

	/*
	 * Okay, now upload the packet and trigger a send if the transmitter
	 * isn't already sending. If it is busy, the interrupt handler will
	 * trigger the send later, upon receiving a Tx done interrupt.
	 */

	ei_block_output(dev, send_length, data, output_page);

	if (!ei_local->txing) {
		ei_local->txing = 1;
		NS8390_trigger_send(dev, send_length, output_page);
		/* Mark the slot now on the wire; lasttx records which one
		   so ei_tx_intr() can free it when the Tx completes. */
		if (output_page == ei_local->tx_start_page) {
			ei_local->tx1 = -1;
			ei_local->lasttx = -1;
		} else {
			ei_local->tx2 = -1;
			ei_local->lasttx = -2;
		}
	} else
		ei_local->txqueue++;

	/* Stop the queue only when both slots are loaded. */
	if (ei_local->tx1 && ei_local->tx2)
		netif_stop_queue(dev);
	else
		netif_start_queue(dev);

	/* Turn 8390 interrupts back on. */
	ei_local->irqlock = 0;
	ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep_irqrestore(dev->irq, &flags);
	skb_tx_timestamp(skb);
	dev_consume_skb_any(skb);
	dev->stats.tx_bytes += send_length;

	return NETDEV_TX_OK;
}
413644570b8SJeff Kirsher 
/**
 * __ei_interrupt - handle the interrupts from an 8390
 * @irq: interrupt number
 * @dev_id: a pointer to the net_device
 *
 * Handle the ether interface interrupts. We pull packets from
 * the 8390 via the card specific functions and fire them at the networking
 * stack. We also handle transmit completions and wake the transmit path if
 * necessary. We also update the counters and do other housekeeping as
 * needed.
 *
 * Returns IRQ_HANDLED if at least one chip interrupt was serviced,
 * IRQ_NONE otherwise (stray or masked interrupt on a shared line).
 */

static irqreturn_t __ei_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	unsigned long e8390_base = dev->base_addr;
	int interrupts, nr_serviced = 0;
	struct ei_device *ei_local = netdev_priv(dev);

	/*
	 *	Protect the irq test too.
	 */

	spin_lock(&ei_local->page_lock);

	if (ei_local->irqlock) {
		/*
		 * This might just be an interrupt for a PCI device sharing
		 * this line
		 */
		netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
			   ei_inb_p(e8390_base + EN0_ISR),
			   ei_inb_p(e8390_base + EN0_IMR));
		spin_unlock(&ei_local->page_lock);
		return IRQ_NONE;
	}

	/* Change to page 0 and read the intr status reg. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
	netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
		  ei_inb_p(e8390_base + EN0_ISR));

	/* !!Assumption!! -- we stay in page 0.	 Don't break this. */
	/* Service events until the ISR is clear, bounded by MAX_SERVICE
	   so a storming chip cannot wedge the CPU in this handler. */
	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
	       ++nr_serviced < MAX_SERVICE) {
		if (!netif_running(dev)) {
			netdev_warn(dev, "interrupt from stopped card\n");
			/* rmk - acknowledge the interrupts */
			ei_outb_p(interrupts, e8390_base + EN0_ISR);
			interrupts = 0;
			break;
		}
		/* Overrun must be handled before normal receive. */
		if (interrupts & ENISR_OVER)
			ei_rx_overrun(dev);
		else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
			/* Got a good (?) packet. */
			ei_receive(dev);
		}
		/* Push the next to-transmit packet through. */
		if (interrupts & ENISR_TX)
			ei_tx_intr(dev);
		else if (interrupts & ENISR_TX_ERR)
			ei_tx_err(dev);

		if (interrupts & ENISR_COUNTERS) {
			/* Reading the tally counters also clears them. */
			dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
			dev->stats.rx_crc_errors   += ei_inb_p(e8390_base + EN0_COUNTER1);
			dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2);
			ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
		}

		/* Ignore any RDC interrupts that make it back to here. */
		if (interrupts & ENISR_RDC)
			ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);

		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
	}

	/* Anything still pending here is either excess work or an
	   interrupt source we don't recognise; ack it and report. */
	if (interrupts && (netif_msg_intr(ei_local))) {
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
		if (nr_serviced >= MAX_SERVICE) {
			/* 0xFF is valid for a card removal */
			if (interrupts != 0xFF)
				netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
					    interrupts);
			ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
		} else {
			netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
			ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
		}
	}
	spin_unlock(&ei_local->page_lock);
	return IRQ_RETVAL(nr_serviced > 0);
}
508644570b8SJeff Kirsher 
509644570b8SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: run the interrupt handler with the device IRQ
 * disabled so netconsole/kgdb can drive the NIC without interrupts. */
static void __ei_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	__ei_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
516644570b8SJeff Kirsher #endif
517644570b8SJeff Kirsher 
/**
 * ei_tx_err - handle transmitter error
 * @dev: network device which threw the exception
 *
 * A transmitter error has happened. Most likely excess collisions (which
 * is a fairly normal condition). If the error is one where the Tx will
 * have been aborted, we try and send another one right away, instead of
 * letting the failed packet sit and collect dust in the Tx buffer. This
 * is a much better solution as it avoids kernel based Tx timeouts, and
 * an unnecessary card reset.
 *
 * Called with lock held.
 */

static void ei_tx_err(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	/* ei_local is used on some platforms via the EI_SHIFT macro */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
	unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
	/* ABT (excess collisions) or FU (FIFO underrun) means the frame
	   never made it out and a retransmit is worthwhile. */
	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);

#ifdef VERBOSE_ERROR_DUMP
	netdev_dbg(dev, "transmitter error (%#2x):", txsr);
	if (txsr & ENTSR_ABT)
		pr_cont(" excess-collisions ");
	if (txsr & ENTSR_ND)
		pr_cont(" non-deferral ");
	if (txsr & ENTSR_CRS)
		pr_cont(" lost-carrier ");
	if (txsr & ENTSR_FU)
		pr_cont(" FIFO-underrun ");
	if (txsr & ENTSR_CDH)
		pr_cont(" lost-heartbeat ");
	pr_cont("\n");
#endif

	ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */

	if (tx_was_aborted)
		/* Treat as a (failed) completion so the next packet goes out. */
		ei_tx_intr(dev);
	else {
		/* Non-fatal error: account it per cause, nothing to resend. */
		dev->stats.tx_errors++;
		if (txsr & ENTSR_CRS)
			dev->stats.tx_carrier_errors++;
		if (txsr & ENTSR_CDH)
			dev->stats.tx_heartbeat_errors++;
		if (txsr & ENTSR_OWC)
			dev->stats.tx_window_errors++;
	}
}
569644570b8SJeff Kirsher 
570644570b8SJeff Kirsher /**
571644570b8SJeff Kirsher  * ei_tx_intr - transmit interrupt handler
572644570b8SJeff Kirsher  * @dev: network device for which tx intr is handled
573644570b8SJeff Kirsher  *
574644570b8SJeff Kirsher  * We have finished a transmit: check for errors and then trigger the next
575644570b8SJeff Kirsher  * packet to be sent. Called with lock held.
576644570b8SJeff Kirsher  */
577644570b8SJeff Kirsher 
578644570b8SJeff Kirsher static void ei_tx_intr(struct net_device *dev)
579644570b8SJeff Kirsher {
580644570b8SJeff Kirsher 	unsigned long e8390_base = dev->base_addr;
581644570b8SJeff Kirsher 	struct ei_device *ei_local = netdev_priv(dev);
582644570b8SJeff Kirsher 	int status = ei_inb(e8390_base + EN0_TSR);
583644570b8SJeff Kirsher 
584644570b8SJeff Kirsher 	ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
585644570b8SJeff Kirsher 
586644570b8SJeff Kirsher 	/*
587644570b8SJeff Kirsher 	 * There are two Tx buffers, see which one finished, and trigger
588644570b8SJeff Kirsher 	 * the send of another one if it exists.
589644570b8SJeff Kirsher 	 */
590644570b8SJeff Kirsher 	ei_local->txqueue--;
591644570b8SJeff Kirsher 
592644570b8SJeff Kirsher 	if (ei_local->tx1 < 0) {
593644570b8SJeff Kirsher 		if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
594644570b8SJeff Kirsher 			pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
595644570b8SJeff Kirsher 			       ei_local->name, ei_local->lasttx, ei_local->tx1);
596644570b8SJeff Kirsher 		ei_local->tx1 = 0;
597644570b8SJeff Kirsher 		if (ei_local->tx2 > 0) {
598644570b8SJeff Kirsher 			ei_local->txing = 1;
599644570b8SJeff Kirsher 			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
600860e9538SFlorian Westphal 			netif_trans_update(dev);
601e7fee115SJoe Perches 			ei_local->tx2 = -1;
602644570b8SJeff Kirsher 			ei_local->lasttx = 2;
603e7fee115SJoe Perches 		} else {
604e7fee115SJoe Perches 			ei_local->lasttx = 20;
605e7fee115SJoe Perches 			ei_local->txing = 0;
606e7fee115SJoe Perches 		}
607644570b8SJeff Kirsher 	} else if (ei_local->tx2 < 0) {
608644570b8SJeff Kirsher 		if (ei_local->lasttx != 2  &&  ei_local->lasttx != -2)
609644570b8SJeff Kirsher 			pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
610644570b8SJeff Kirsher 			       ei_local->name, ei_local->lasttx, ei_local->tx2);
611644570b8SJeff Kirsher 		ei_local->tx2 = 0;
612644570b8SJeff Kirsher 		if (ei_local->tx1 > 0) {
613644570b8SJeff Kirsher 			ei_local->txing = 1;
614644570b8SJeff Kirsher 			NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
615860e9538SFlorian Westphal 			netif_trans_update(dev);
616644570b8SJeff Kirsher 			ei_local->tx1 = -1;
617644570b8SJeff Kirsher 			ei_local->lasttx = 1;
618e7fee115SJoe Perches 		} else {
619e7fee115SJoe Perches 			ei_local->lasttx = 10;
620e7fee115SJoe Perches 			ei_local->txing = 0;
621e7fee115SJoe Perches 		}
622644570b8SJeff Kirsher 	} /* else
623644570b8SJeff Kirsher 		netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
624644570b8SJeff Kirsher 			    ei_local->lasttx);
625644570b8SJeff Kirsher */
626644570b8SJeff Kirsher 
627644570b8SJeff Kirsher 	/* Minimize Tx latency: update the statistics after we restart TXing. */
628644570b8SJeff Kirsher 	if (status & ENTSR_COL)
629644570b8SJeff Kirsher 		dev->stats.collisions++;
630644570b8SJeff Kirsher 	if (status & ENTSR_PTX)
631644570b8SJeff Kirsher 		dev->stats.tx_packets++;
632644570b8SJeff Kirsher 	else {
633644570b8SJeff Kirsher 		dev->stats.tx_errors++;
634644570b8SJeff Kirsher 		if (status & ENTSR_ABT) {
635644570b8SJeff Kirsher 			dev->stats.tx_aborted_errors++;
636644570b8SJeff Kirsher 			dev->stats.collisions += 16;
637644570b8SJeff Kirsher 		}
638644570b8SJeff Kirsher 		if (status & ENTSR_CRS)
639644570b8SJeff Kirsher 			dev->stats.tx_carrier_errors++;
640644570b8SJeff Kirsher 		if (status & ENTSR_FU)
641644570b8SJeff Kirsher 			dev->stats.tx_fifo_errors++;
642644570b8SJeff Kirsher 		if (status & ENTSR_CDH)
643644570b8SJeff Kirsher 			dev->stats.tx_heartbeat_errors++;
644644570b8SJeff Kirsher 		if (status & ENTSR_OWC)
645644570b8SJeff Kirsher 			dev->stats.tx_window_errors++;
646644570b8SJeff Kirsher 	}
647644570b8SJeff Kirsher 	netif_wake_queue(dev);
648644570b8SJeff Kirsher }
649644570b8SJeff Kirsher 
650644570b8SJeff Kirsher /**
651644570b8SJeff Kirsher  * ei_receive - receive some packets
652644570b8SJeff Kirsher  * @dev: network device with which receive will be run
653644570b8SJeff Kirsher  *
654644570b8SJeff Kirsher  * We have a good packet(s), get it/them out of the buffers.
655644570b8SJeff Kirsher  * Called with lock held.
656644570b8SJeff Kirsher  */
657644570b8SJeff Kirsher 
658644570b8SJeff Kirsher static void ei_receive(struct net_device *dev)
659644570b8SJeff Kirsher {
660644570b8SJeff Kirsher 	unsigned long e8390_base = dev->base_addr;
661644570b8SJeff Kirsher 	struct ei_device *ei_local = netdev_priv(dev);
662644570b8SJeff Kirsher 	unsigned char rxing_page, this_frame, next_frame;
663644570b8SJeff Kirsher 	unsigned short current_offset;
664644570b8SJeff Kirsher 	int rx_pkt_count = 0;
665644570b8SJeff Kirsher 	struct e8390_pkt_hdr rx_frame;
666644570b8SJeff Kirsher 	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
667644570b8SJeff Kirsher 
668644570b8SJeff Kirsher 	while (++rx_pkt_count < 10) {
669644570b8SJeff Kirsher 		int pkt_len, pkt_stat;
670644570b8SJeff Kirsher 
671644570b8SJeff Kirsher 		/* Get the rx page (incoming packet pointer). */
672644570b8SJeff Kirsher 		ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
673644570b8SJeff Kirsher 		rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
674644570b8SJeff Kirsher 		ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
675644570b8SJeff Kirsher 
676644570b8SJeff Kirsher 		/* Remove one frame from the ring.  Boundary is always a page behind. */
677644570b8SJeff Kirsher 		this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
678644570b8SJeff Kirsher 		if (this_frame >= ei_local->stop_page)
679644570b8SJeff Kirsher 			this_frame = ei_local->rx_start_page;
680644570b8SJeff Kirsher 
681644570b8SJeff Kirsher 		/* Someday we'll omit the previous, iff we never get this message.
682644570b8SJeff Kirsher 		   (There is at least one clone claimed to have a problem.)
683644570b8SJeff Kirsher 
684644570b8SJeff Kirsher 		   Keep quiet if it looks like a card removal. One problem here
685644570b8SJeff Kirsher 		   is that some clones crash in roughly the same way.
686644570b8SJeff Kirsher 		 */
687c45f812fSMatthew Whitehead 		if ((netif_msg_rx_status(ei_local)) &&
688644570b8SJeff Kirsher 		    this_frame != ei_local->current_page &&
689644570b8SJeff Kirsher 		    (this_frame != 0x0 || rxing_page != 0xFF))
690c45f812fSMatthew Whitehead 			netdev_err(dev,
691c45f812fSMatthew Whitehead 				   "mismatched read page pointers %2x vs %2x\n",
692644570b8SJeff Kirsher 				   this_frame, ei_local->current_page);
693644570b8SJeff Kirsher 
694644570b8SJeff Kirsher 		if (this_frame == rxing_page)	/* Read all the frames? */
695644570b8SJeff Kirsher 			break;				/* Done for now */
696644570b8SJeff Kirsher 
697644570b8SJeff Kirsher 		current_offset = this_frame << 8;
698644570b8SJeff Kirsher 		ei_get_8390_hdr(dev, &rx_frame, this_frame);
699644570b8SJeff Kirsher 
700644570b8SJeff Kirsher 		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
701644570b8SJeff Kirsher 		pkt_stat = rx_frame.status;
702644570b8SJeff Kirsher 
703644570b8SJeff Kirsher 		next_frame = this_frame + 1 + ((pkt_len+4)>>8);
704644570b8SJeff Kirsher 
705644570b8SJeff Kirsher 		/* Check for bogosity warned by 3c503 book: the status byte is never
706644570b8SJeff Kirsher 		   written.  This happened a lot during testing! This code should be
707644570b8SJeff Kirsher 		   cleaned up someday. */
708644570b8SJeff Kirsher 		if (rx_frame.next != next_frame &&
709644570b8SJeff Kirsher 		    rx_frame.next != next_frame + 1 &&
710644570b8SJeff Kirsher 		    rx_frame.next != next_frame - num_rx_pages &&
711644570b8SJeff Kirsher 		    rx_frame.next != next_frame + 1 - num_rx_pages) {
712644570b8SJeff Kirsher 			ei_local->current_page = rxing_page;
713644570b8SJeff Kirsher 			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
714644570b8SJeff Kirsher 			dev->stats.rx_errors++;
715644570b8SJeff Kirsher 			continue;
716644570b8SJeff Kirsher 		}
717644570b8SJeff Kirsher 
718644570b8SJeff Kirsher 		if (pkt_len < 60  ||  pkt_len > 1518) {
719c45f812fSMatthew Whitehead 			netif_dbg(ei_local, rx_status, dev,
720c45f812fSMatthew Whitehead 				  "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
721644570b8SJeff Kirsher 				  rx_frame.count, rx_frame.status,
722644570b8SJeff Kirsher 				  rx_frame.next);
723644570b8SJeff Kirsher 			dev->stats.rx_errors++;
724644570b8SJeff Kirsher 			dev->stats.rx_length_errors++;
725644570b8SJeff Kirsher 		} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
726644570b8SJeff Kirsher 			struct sk_buff *skb;
727644570b8SJeff Kirsher 
7281d266430SPradeep A Dalvi 			skb = netdev_alloc_skb(dev, pkt_len + 2);
729644570b8SJeff Kirsher 			if (skb == NULL) {
730c45f812fSMatthew Whitehead 				netif_err(ei_local, rx_err, dev,
731c45f812fSMatthew Whitehead 					  "Couldn't allocate a sk_buff of size %d\n",
732644570b8SJeff Kirsher 					  pkt_len);
733644570b8SJeff Kirsher 				dev->stats.rx_dropped++;
734644570b8SJeff Kirsher 				break;
735644570b8SJeff Kirsher 			} else {
736644570b8SJeff Kirsher 				skb_reserve(skb, 2);	/* IP headers on 16 byte boundaries */
737644570b8SJeff Kirsher 				skb_put(skb, pkt_len);	/* Make room */
738644570b8SJeff Kirsher 				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
739644570b8SJeff Kirsher 				skb->protocol = eth_type_trans(skb, dev);
740644570b8SJeff Kirsher 				if (!skb_defer_rx_timestamp(skb))
741644570b8SJeff Kirsher 					netif_rx(skb);
742644570b8SJeff Kirsher 				dev->stats.rx_packets++;
743644570b8SJeff Kirsher 				dev->stats.rx_bytes += pkt_len;
744644570b8SJeff Kirsher 				if (pkt_stat & ENRSR_PHY)
745644570b8SJeff Kirsher 					dev->stats.multicast++;
746644570b8SJeff Kirsher 			}
747644570b8SJeff Kirsher 		} else {
748c45f812fSMatthew Whitehead 			netif_err(ei_local, rx_err, dev,
749c45f812fSMatthew Whitehead 				  "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
750644570b8SJeff Kirsher 				  rx_frame.status, rx_frame.next,
751644570b8SJeff Kirsher 				  rx_frame.count);
752644570b8SJeff Kirsher 			dev->stats.rx_errors++;
753644570b8SJeff Kirsher 			/* NB: The NIC counts CRC, frame and missed errors. */
754644570b8SJeff Kirsher 			if (pkt_stat & ENRSR_FO)
755644570b8SJeff Kirsher 				dev->stats.rx_fifo_errors++;
756644570b8SJeff Kirsher 		}
757644570b8SJeff Kirsher 		next_frame = rx_frame.next;
758644570b8SJeff Kirsher 
759644570b8SJeff Kirsher 		/* This _should_ never happen: it's here for avoiding bad clones. */
760644570b8SJeff Kirsher 		if (next_frame >= ei_local->stop_page) {
761644570b8SJeff Kirsher 			netdev_notice(dev, "next frame inconsistency, %#2x\n",
762644570b8SJeff Kirsher 				      next_frame);
763644570b8SJeff Kirsher 			next_frame = ei_local->rx_start_page;
764644570b8SJeff Kirsher 		}
765644570b8SJeff Kirsher 		ei_local->current_page = next_frame;
766644570b8SJeff Kirsher 		ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
767644570b8SJeff Kirsher 	}
768644570b8SJeff Kirsher 
769644570b8SJeff Kirsher 	/* We used to also ack ENISR_OVER here, but that would sometimes mask
770644570b8SJeff Kirsher 	   a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
771644570b8SJeff Kirsher 	ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
772644570b8SJeff Kirsher }
773644570b8SJeff Kirsher 
774644570b8SJeff Kirsher /**
775644570b8SJeff Kirsher  * ei_rx_overrun - handle receiver overrun
776644570b8SJeff Kirsher  * @dev: network device which threw exception
777644570b8SJeff Kirsher  *
778644570b8SJeff Kirsher  * We have a receiver overrun: we have to kick the 8390 to get it started
779644570b8SJeff Kirsher  * again. Problem is that you have to kick it exactly as NS prescribes in
780644570b8SJeff Kirsher  * the updated datasheets, or "the NIC may act in an unpredictable manner."
781644570b8SJeff Kirsher  * This includes causing "the NIC to defer indefinitely when it is stopped
782644570b8SJeff Kirsher  * on a busy network."  Ugh.
783644570b8SJeff Kirsher  * Called with lock held. Don't call this with the interrupts off or your
784644570b8SJeff Kirsher  * computer will hate you - it takes 10ms or so.
785644570b8SJeff Kirsher  */
786644570b8SJeff Kirsher 
787644570b8SJeff Kirsher static void ei_rx_overrun(struct net_device *dev)
788644570b8SJeff Kirsher {
789644570b8SJeff Kirsher 	unsigned long e8390_base = dev->base_addr;
790644570b8SJeff Kirsher 	unsigned char was_txing, must_resend = 0;
791644570b8SJeff Kirsher 	/* ei_local is used on some platforms via the EI_SHIFT macro */
792644570b8SJeff Kirsher 	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
793644570b8SJeff Kirsher 
794644570b8SJeff Kirsher 	/*
795644570b8SJeff Kirsher 	 * Record whether a Tx was in progress and then issue the
796644570b8SJeff Kirsher 	 * stop command.
797644570b8SJeff Kirsher 	 */
798644570b8SJeff Kirsher 	was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
799644570b8SJeff Kirsher 	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
800644570b8SJeff Kirsher 
801c45f812fSMatthew Whitehead 	netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
802644570b8SJeff Kirsher 	dev->stats.rx_over_errors++;
803644570b8SJeff Kirsher 
804644570b8SJeff Kirsher 	/*
805644570b8SJeff Kirsher 	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
806644570b8SJeff Kirsher 	 * Early datasheets said to poll the reset bit, but now they say that
807644570b8SJeff Kirsher 	 * it "is not a reliable indicator and subsequently should be ignored."
808644570b8SJeff Kirsher 	 * We wait at least 10ms.
809644570b8SJeff Kirsher 	 */
810644570b8SJeff Kirsher 
811644570b8SJeff Kirsher 	mdelay(10);
812644570b8SJeff Kirsher 
813644570b8SJeff Kirsher 	/*
814644570b8SJeff Kirsher 	 * Reset RBCR[01] back to zero as per magic incantation.
815644570b8SJeff Kirsher 	 */
816644570b8SJeff Kirsher 	ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
817644570b8SJeff Kirsher 	ei_outb_p(0x00, e8390_base+EN0_RCNTHI);
818644570b8SJeff Kirsher 
819644570b8SJeff Kirsher 	/*
820644570b8SJeff Kirsher 	 * See if any Tx was interrupted or not. According to NS, this
821644570b8SJeff Kirsher 	 * step is vital, and skipping it will cause no end of havoc.
822644570b8SJeff Kirsher 	 */
823644570b8SJeff Kirsher 
824644570b8SJeff Kirsher 	if (was_txing) {
825644570b8SJeff Kirsher 		unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
826644570b8SJeff Kirsher 		if (!tx_completed)
827644570b8SJeff Kirsher 			must_resend = 1;
828644570b8SJeff Kirsher 	}
829644570b8SJeff Kirsher 
830644570b8SJeff Kirsher 	/*
831644570b8SJeff Kirsher 	 * Have to enter loopback mode and then restart the NIC before
832644570b8SJeff Kirsher 	 * you are allowed to slurp packets up off the ring.
833644570b8SJeff Kirsher 	 */
834644570b8SJeff Kirsher 	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
835644570b8SJeff Kirsher 	ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
836644570b8SJeff Kirsher 
837644570b8SJeff Kirsher 	/*
838644570b8SJeff Kirsher 	 * Clear the Rx ring of all the debris, and ack the interrupt.
839644570b8SJeff Kirsher 	 */
840644570b8SJeff Kirsher 	ei_receive(dev);
841644570b8SJeff Kirsher 	ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);
842644570b8SJeff Kirsher 
843644570b8SJeff Kirsher 	/*
844644570b8SJeff Kirsher 	 * Leave loopback mode, and resend any packet that got stopped.
845644570b8SJeff Kirsher 	 */
846644570b8SJeff Kirsher 	ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
847644570b8SJeff Kirsher 	if (must_resend)
848644570b8SJeff Kirsher 		ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
849644570b8SJeff Kirsher }
850644570b8SJeff Kirsher 
851644570b8SJeff Kirsher /*
852644570b8SJeff Kirsher  *	Collect the stats. This is called unlocked and from several contexts.
853644570b8SJeff Kirsher  */
854644570b8SJeff Kirsher 
855644570b8SJeff Kirsher static struct net_device_stats *__ei_get_stats(struct net_device *dev)
856644570b8SJeff Kirsher {
857644570b8SJeff Kirsher 	unsigned long ioaddr = dev->base_addr;
858644570b8SJeff Kirsher 	struct ei_device *ei_local = netdev_priv(dev);
859644570b8SJeff Kirsher 	unsigned long flags;
860644570b8SJeff Kirsher 
861644570b8SJeff Kirsher 	/* If the card is stopped, just return the present stats. */
862644570b8SJeff Kirsher 	if (!netif_running(dev))
863644570b8SJeff Kirsher 		return &dev->stats;
864644570b8SJeff Kirsher 
865644570b8SJeff Kirsher 	spin_lock_irqsave(&ei_local->page_lock, flags);
866644570b8SJeff Kirsher 	/* Read the counter registers, assuming we are in page 0. */
867644570b8SJeff Kirsher 	dev->stats.rx_frame_errors  += ei_inb_p(ioaddr + EN0_COUNTER0);
868644570b8SJeff Kirsher 	dev->stats.rx_crc_errors    += ei_inb_p(ioaddr + EN0_COUNTER1);
869644570b8SJeff Kirsher 	dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2);
870644570b8SJeff Kirsher 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
871644570b8SJeff Kirsher 
872644570b8SJeff Kirsher 	return &dev->stats;
873644570b8SJeff Kirsher }
874644570b8SJeff Kirsher 
875644570b8SJeff Kirsher /*
876644570b8SJeff Kirsher  * Form the 64 bit 8390 multicast table from the linked list of addresses
877644570b8SJeff Kirsher  * associated with this dev structure.
878644570b8SJeff Kirsher  */
879644570b8SJeff Kirsher 
880644570b8SJeff Kirsher static inline void make_mc_bits(u8 *bits, struct net_device *dev)
881644570b8SJeff Kirsher {
882644570b8SJeff Kirsher 	struct netdev_hw_addr *ha;
883644570b8SJeff Kirsher 
884644570b8SJeff Kirsher 	netdev_for_each_mc_addr(ha, dev) {
885644570b8SJeff Kirsher 		u32 crc = ether_crc(ETH_ALEN, ha->addr);
886644570b8SJeff Kirsher 		/*
887644570b8SJeff Kirsher 		 * The 8390 uses the 6 most significant bits of the
888644570b8SJeff Kirsher 		 * CRC to index the multicast table.
889644570b8SJeff Kirsher 		 */
890644570b8SJeff Kirsher 		bits[crc>>29] |= (1<<((crc>>26)&7));
891644570b8SJeff Kirsher 	}
892644570b8SJeff Kirsher }
893644570b8SJeff Kirsher 
894644570b8SJeff Kirsher /**
895644570b8SJeff Kirsher  * do_set_multicast_list - set/clear multicast filter
896644570b8SJeff Kirsher  * @dev: net device for which multicast filter is adjusted
897644570b8SJeff Kirsher  *
898644570b8SJeff Kirsher  *	Set or clear the multicast filter for this adaptor. May be called
899644570b8SJeff Kirsher  *	from a BH in 2.1.x. Must be called with lock held.
900644570b8SJeff Kirsher  */
901644570b8SJeff Kirsher 
902644570b8SJeff Kirsher static void do_set_multicast_list(struct net_device *dev)
903644570b8SJeff Kirsher {
904644570b8SJeff Kirsher 	unsigned long e8390_base = dev->base_addr;
905644570b8SJeff Kirsher 	int i;
906644570b8SJeff Kirsher 	struct ei_device *ei_local = netdev_priv(dev);
907644570b8SJeff Kirsher 
908644570b8SJeff Kirsher 	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
909644570b8SJeff Kirsher 		memset(ei_local->mcfilter, 0, 8);
910644570b8SJeff Kirsher 		if (!netdev_mc_empty(dev))
911644570b8SJeff Kirsher 			make_mc_bits(ei_local->mcfilter, dev);
912644570b8SJeff Kirsher 	} else
913644570b8SJeff Kirsher 		memset(ei_local->mcfilter, 0xFF, 8);	/* mcast set to accept-all */
914644570b8SJeff Kirsher 
915644570b8SJeff Kirsher 	/*
916644570b8SJeff Kirsher 	 * DP8390 manuals don't specify any magic sequence for altering
917644570b8SJeff Kirsher 	 * the multicast regs on an already running card. To be safe, we
918644570b8SJeff Kirsher 	 * ensure multicast mode is off prior to loading up the new hash
919644570b8SJeff Kirsher 	 * table. If this proves to be not enough, we can always resort
920644570b8SJeff Kirsher 	 * to stopping the NIC, loading the table and then restarting.
921644570b8SJeff Kirsher 	 *
922644570b8SJeff Kirsher 	 * Bug Alert!  The MC regs on the SMC 83C690 (SMC Elite and SMC
923644570b8SJeff Kirsher 	 * Elite16) appear to be write-only. The NS 8390 data sheet lists
924644570b8SJeff Kirsher 	 * them as r/w so this is a bug.  The SMC 83C790 (SMC Ultra and
925644570b8SJeff Kirsher 	 * Ultra32 EISA) appears to have this bug fixed.
926644570b8SJeff Kirsher 	 */
927644570b8SJeff Kirsher 
928644570b8SJeff Kirsher 	if (netif_running(dev))
929644570b8SJeff Kirsher 		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
930644570b8SJeff Kirsher 	ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
931644570b8SJeff Kirsher 	for (i = 0; i < 8; i++) {
932644570b8SJeff Kirsher 		ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
933644570b8SJeff Kirsher #ifndef BUG_83C690
934644570b8SJeff Kirsher 		if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i])
935644570b8SJeff Kirsher 			netdev_err(dev, "Multicast filter read/write mismap %d\n",
936644570b8SJeff Kirsher 				   i);
937644570b8SJeff Kirsher #endif
938644570b8SJeff Kirsher 	}
939644570b8SJeff Kirsher 	ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
940644570b8SJeff Kirsher 
941644570b8SJeff Kirsher 	if (dev->flags&IFF_PROMISC)
942644570b8SJeff Kirsher 		ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
943644570b8SJeff Kirsher 	else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
944644570b8SJeff Kirsher 		ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
945644570b8SJeff Kirsher 	else
946644570b8SJeff Kirsher 		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
947644570b8SJeff Kirsher }
948644570b8SJeff Kirsher 
949644570b8SJeff Kirsher /*
950644570b8SJeff Kirsher  *	Called without lock held. This is invoked from user context and may
951644570b8SJeff Kirsher  *	be parallel to just about everything else. Its also fairly quick and
952644570b8SJeff Kirsher  *	not called too often. Must protect against both bh and irq users
953644570b8SJeff Kirsher  */
954644570b8SJeff Kirsher 
955644570b8SJeff Kirsher static void __ei_set_multicast_list(struct net_device *dev)
956644570b8SJeff Kirsher {
957644570b8SJeff Kirsher 	unsigned long flags;
958644570b8SJeff Kirsher 	struct ei_device *ei_local = netdev_priv(dev);
959644570b8SJeff Kirsher 
960644570b8SJeff Kirsher 	spin_lock_irqsave(&ei_local->page_lock, flags);
961644570b8SJeff Kirsher 	do_set_multicast_list(dev);
962644570b8SJeff Kirsher 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
963644570b8SJeff Kirsher }
964644570b8SJeff Kirsher 
965644570b8SJeff Kirsher /**
966644570b8SJeff Kirsher  * ethdev_setup - init rest of 8390 device struct
967644570b8SJeff Kirsher  * @dev: network device structure to init
968644570b8SJeff Kirsher  *
969644570b8SJeff Kirsher  * Initialize the rest of the 8390 device structure.  Do NOT __init
970644570b8SJeff Kirsher  * this, as it is used by 8390 based modular drivers too.
971644570b8SJeff Kirsher  */
972644570b8SJeff Kirsher 
973644570b8SJeff Kirsher static void ethdev_setup(struct net_device *dev)
974644570b8SJeff Kirsher {
975644570b8SJeff Kirsher 	struct ei_device *ei_local = netdev_priv(dev);
976c45f812fSMatthew Whitehead 
977c45f812fSMatthew Whitehead 	if ((msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
978c45f812fSMatthew Whitehead 		pr_info("%s", version);
979644570b8SJeff Kirsher 
980644570b8SJeff Kirsher 	ether_setup(dev);
981644570b8SJeff Kirsher 
982644570b8SJeff Kirsher 	spin_lock_init(&ei_local->page_lock);
983646fe03bSFinn Thain 
984646fe03bSFinn Thain 	ei_local->msg_enable = msg_enable;
985644570b8SJeff Kirsher }
986644570b8SJeff Kirsher 
987644570b8SJeff Kirsher /**
988644570b8SJeff Kirsher  * alloc_ei_netdev - alloc_etherdev counterpart for 8390
989644570b8SJeff Kirsher  * @size: extra bytes to allocate
990644570b8SJeff Kirsher  *
991644570b8SJeff Kirsher  * Allocate 8390-specific net_device.
992644570b8SJeff Kirsher  */
993644570b8SJeff Kirsher static struct net_device *____alloc_ei_netdev(int size)
994644570b8SJeff Kirsher {
995644570b8SJeff Kirsher 	return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
996c835a677STom Gundersen 			    NET_NAME_UNKNOWN, ethdev_setup);
997644570b8SJeff Kirsher }
998644570b8SJeff Kirsher 
999644570b8SJeff Kirsher 
1000644570b8SJeff Kirsher 
1001644570b8SJeff Kirsher 
1002644570b8SJeff Kirsher /* This page of functions should be 8390 generic */
1003644570b8SJeff Kirsher /* Follow National Semi's recommendations for initializing the "NIC". */
1004644570b8SJeff Kirsher 
1005644570b8SJeff Kirsher /**
1006644570b8SJeff Kirsher  * NS8390_init - initialize 8390 hardware
1007644570b8SJeff Kirsher  * @dev: network device to initialize
1008644570b8SJeff Kirsher  * @startp: boolean.  non-zero value to initiate chip processing
1009644570b8SJeff Kirsher  *
1010644570b8SJeff Kirsher  *	Must be called with lock held.
1011644570b8SJeff Kirsher  */
1012644570b8SJeff Kirsher 
1013644570b8SJeff Kirsher static void __NS8390_init(struct net_device *dev, int startp)
1014644570b8SJeff Kirsher {
1015644570b8SJeff Kirsher 	unsigned long e8390_base = dev->base_addr;
1016644570b8SJeff Kirsher 	struct ei_device *ei_local = netdev_priv(dev);
1017644570b8SJeff Kirsher 	int i;
1018644570b8SJeff Kirsher 	int endcfg = ei_local->word16
1019644570b8SJeff Kirsher 	    ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
1020644570b8SJeff Kirsher 	    : 0x48;
1021644570b8SJeff Kirsher 
1022*2b2706aaSArmin Wolf 	BUILD_BUG_ON(sizeof(struct e8390_pkt_hdr) != 4);
1023644570b8SJeff Kirsher 	/* Follow National Semi's recommendations for initing the DP83902. */
1024644570b8SJeff Kirsher 	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
1025644570b8SJeff Kirsher 	ei_outb_p(endcfg, e8390_base + EN0_DCFG);	/* 0x48 or 0x49 */
1026644570b8SJeff Kirsher 	/* Clear the remote byte count registers. */
1027644570b8SJeff Kirsher 	ei_outb_p(0x00,  e8390_base + EN0_RCNTLO);
1028644570b8SJeff Kirsher 	ei_outb_p(0x00,  e8390_base + EN0_RCNTHI);
1029644570b8SJeff Kirsher 	/* Set to monitor and loopback mode -- this is vital!. */
1030644570b8SJeff Kirsher 	ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
1031644570b8SJeff Kirsher 	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
1032644570b8SJeff Kirsher 	/* Set the transmit page and receive ring. */
1033644570b8SJeff Kirsher 	ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
1034644570b8SJeff Kirsher 	ei_local->tx1 = ei_local->tx2 = 0;
1035644570b8SJeff Kirsher 	ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
1036644570b8SJeff Kirsher 	ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY);	/* 3c503 says 0x3f,NS0x26*/
1037644570b8SJeff Kirsher 	ei_local->current_page = ei_local->rx_start_page;		/* assert boundary+1 */
1038644570b8SJeff Kirsher 	ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
1039644570b8SJeff Kirsher 	/* Clear the pending interrupts and mask. */
1040644570b8SJeff Kirsher 	ei_outb_p(0xFF, e8390_base + EN0_ISR);
1041644570b8SJeff Kirsher 	ei_outb_p(0x00,  e8390_base + EN0_IMR);
1042644570b8SJeff Kirsher 
1043644570b8SJeff Kirsher 	/* Copy the station address into the DS8390 registers. */
1044644570b8SJeff Kirsher 
1045644570b8SJeff Kirsher 	ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
1046644570b8SJeff Kirsher 	for (i = 0; i < 6; i++) {
1047644570b8SJeff Kirsher 		ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
1048c45f812fSMatthew Whitehead 		if ((netif_msg_probe(ei_local)) &&
1049644570b8SJeff Kirsher 		    ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
1050c45f812fSMatthew Whitehead 			netdev_err(dev,
1051c45f812fSMatthew Whitehead 				   "Hw. address read/write mismap %d\n", i);
1052644570b8SJeff Kirsher 	}
1053644570b8SJeff Kirsher 
1054644570b8SJeff Kirsher 	ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
1055644570b8SJeff Kirsher 	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
1056644570b8SJeff Kirsher 
1057644570b8SJeff Kirsher 	ei_local->tx1 = ei_local->tx2 = 0;
1058644570b8SJeff Kirsher 	ei_local->txing = 0;
1059644570b8SJeff Kirsher 
1060644570b8SJeff Kirsher 	if (startp) {
1061644570b8SJeff Kirsher 		ei_outb_p(0xff,  e8390_base + EN0_ISR);
1062644570b8SJeff Kirsher 		ei_outb_p(ENISR_ALL,  e8390_base + EN0_IMR);
1063644570b8SJeff Kirsher 		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
1064644570b8SJeff Kirsher 		ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
1065644570b8SJeff Kirsher 		/* 3c503 TechMan says rxconfig only after the NIC is started. */
1066644570b8SJeff Kirsher 		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on,  */
1067644570b8SJeff Kirsher 		do_set_multicast_list(dev);	/* (re)load the mcast table */
1068644570b8SJeff Kirsher 	}
1069644570b8SJeff Kirsher }
1070644570b8SJeff Kirsher 
1071644570b8SJeff Kirsher /* Trigger a transmit start, assuming the length is valid.
1072644570b8SJeff Kirsher    Always called with the page lock held */
1073644570b8SJeff Kirsher 
1074644570b8SJeff Kirsher static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1075644570b8SJeff Kirsher 								int start_page)
1076644570b8SJeff Kirsher {
1077644570b8SJeff Kirsher 	unsigned long e8390_base = dev->base_addr;
1078644570b8SJeff Kirsher 	struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
1079644570b8SJeff Kirsher 
1080644570b8SJeff Kirsher 	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
1081644570b8SJeff Kirsher 
1082644570b8SJeff Kirsher 	if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) {
1083644570b8SJeff Kirsher 		netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
1084644570b8SJeff Kirsher 		return;
1085644570b8SJeff Kirsher 	}
1086644570b8SJeff Kirsher 	ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
1087644570b8SJeff Kirsher 	ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
1088644570b8SJeff Kirsher 	ei_outb_p(start_page, e8390_base + EN0_TPSR);
1089644570b8SJeff Kirsher 	ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
1090644570b8SJeff Kirsher }
1091