1*dc3eb2f4SBagas Sanjaya // SPDX-License-Identifier: GPL-1.0+
2*dc3eb2f4SBagas Sanjaya
3644570b8SJeff Kirsher /* 8390.c: A general NS8390 ethernet driver core for linux. */
4644570b8SJeff Kirsher /*
5644570b8SJeff Kirsher Written 1992-94 by Donald Becker.
6644570b8SJeff Kirsher
7644570b8SJeff Kirsher Copyright 1993 United States Government as represented by the
8644570b8SJeff Kirsher Director, National Security Agency.
9644570b8SJeff Kirsher
10644570b8SJeff Kirsher The author may be reached as becker@scyld.com, or C/O
11644570b8SJeff Kirsher Scyld Computing Corporation
12644570b8SJeff Kirsher 410 Severn Ave., Suite 210
13644570b8SJeff Kirsher Annapolis MD 21403
14644570b8SJeff Kirsher
15644570b8SJeff Kirsher
16644570b8SJeff Kirsher This is the chip-specific code for many 8390-based ethernet adaptors.
17644570b8SJeff Kirsher This is not a complete driver, it must be combined with board-specific
18644570b8SJeff Kirsher code such as ne.c, wd.c, 3c503.c, etc.
19644570b8SJeff Kirsher
20644570b8SJeff Kirsher Seeing how at least eight drivers use this code, (not counting the
21644570b8SJeff Kirsher PCMCIA ones either) it is easy to break some card by what seems like
22644570b8SJeff Kirsher a simple innocent change. Please contact me or Donald if you think
23644570b8SJeff Kirsher you have found something that needs changing. -- PG
24644570b8SJeff Kirsher
25644570b8SJeff Kirsher
26644570b8SJeff Kirsher Changelog:
27644570b8SJeff Kirsher
28644570b8SJeff Kirsher Paul Gortmaker : remove set_bit lock, other cleanups.
29644570b8SJeff Kirsher Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
30644570b8SJeff Kirsher ei_block_input() for eth_io_copy_and_sum().
31644570b8SJeff Kirsher Paul Gortmaker : exchange static int ei_pingpong for a #define,
32644570b8SJeff Kirsher also add better Tx error handling.
33644570b8SJeff Kirsher Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
34644570b8SJeff Kirsher Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
35644570b8SJeff Kirsher Paul Gortmaker : tweak ANK's above multicast changes a bit.
36644570b8SJeff Kirsher Paul Gortmaker : update packet statistics for v2.1.x
37644570b8SJeff Kirsher Alan Cox : support arbitrary stupid port mappings on the
38644570b8SJeff Kirsher 68K Macintosh. Support >16bit I/O spaces
39644570b8SJeff Kirsher Paul Gortmaker : add kmod support for auto-loading of the 8390
40644570b8SJeff Kirsher module by all drivers that require it.
41644570b8SJeff Kirsher Alan Cox : Spinlocking work, added 'BUG_83C690'
42644570b8SJeff Kirsher Paul Gortmaker : Separate out Tx timeout code from Tx path.
43644570b8SJeff Kirsher Paul Gortmaker : Remove old unused single Tx buffer code.
44644570b8SJeff Kirsher Hayato Fujiwara : Add m32r support.
45644570b8SJeff Kirsher Paul Gortmaker : use skb_padto() instead of stack scratch area
46644570b8SJeff Kirsher
47644570b8SJeff Kirsher Sources:
48644570b8SJeff Kirsher The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
49644570b8SJeff Kirsher
50644570b8SJeff Kirsher */
51644570b8SJeff Kirsher
522b2706aaSArmin Wolf #include <linux/build_bug.h>
53644570b8SJeff Kirsher #include <linux/module.h>
54644570b8SJeff Kirsher #include <linux/kernel.h>
55644570b8SJeff Kirsher #include <linux/jiffies.h>
56644570b8SJeff Kirsher #include <linux/fs.h>
57644570b8SJeff Kirsher #include <linux/types.h>
58644570b8SJeff Kirsher #include <linux/string.h>
59644570b8SJeff Kirsher #include <linux/bitops.h>
60644570b8SJeff Kirsher #include <linux/uaccess.h>
61644570b8SJeff Kirsher #include <linux/io.h>
62644570b8SJeff Kirsher #include <asm/irq.h>
63644570b8SJeff Kirsher #include <linux/delay.h>
64644570b8SJeff Kirsher #include <linux/errno.h>
65644570b8SJeff Kirsher #include <linux/fcntl.h>
66644570b8SJeff Kirsher #include <linux/in.h>
67644570b8SJeff Kirsher #include <linux/interrupt.h>
68644570b8SJeff Kirsher #include <linux/init.h>
69644570b8SJeff Kirsher #include <linux/crc32.h>
70644570b8SJeff Kirsher
71644570b8SJeff Kirsher #include <linux/netdevice.h>
72644570b8SJeff Kirsher #include <linux/etherdevice.h>
73644570b8SJeff Kirsher
74644570b8SJeff Kirsher #define NS8390_CORE
75644570b8SJeff Kirsher #include "8390.h"
76644570b8SJeff Kirsher
77644570b8SJeff Kirsher #define BUG_83C690
78644570b8SJeff Kirsher
79644570b8SJeff Kirsher /* These are the operational function interfaces to board-specific
80644570b8SJeff Kirsher routines.
81644570b8SJeff Kirsher void reset_8390(struct net_device *dev)
82644570b8SJeff Kirsher Resets the board associated with DEV, including a hardware reset of
83644570b8SJeff Kirsher the 8390. This is only called when there is a transmit timeout, and
84644570b8SJeff Kirsher it is always followed by 8390_init().
85644570b8SJeff Kirsher void block_output(struct net_device *dev, int count, const unsigned char *buf,
86644570b8SJeff Kirsher int start_page)
87644570b8SJeff Kirsher Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
88644570b8SJeff Kirsher "page" value uses the 8390's 256-byte pages.
89644570b8SJeff Kirsher void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
90644570b8SJeff Kirsher Read the 4 byte, page aligned 8390 header. *If* there is a
91644570b8SJeff Kirsher subsequent read, it will be of the rest of the packet.
92644570b8SJeff Kirsher void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
93644570b8SJeff Kirsher Read COUNT bytes from the packet buffer into the skb data area. Start
94644570b8SJeff Kirsher reading from RING_OFFSET, the address as the 8390 sees it. This will always
95644570b8SJeff Kirsher follow the read of the 8390 header.
96644570b8SJeff Kirsher */
97644570b8SJeff Kirsher #define ei_reset_8390 (ei_local->reset_8390)
98644570b8SJeff Kirsher #define ei_block_output (ei_local->block_output)
99644570b8SJeff Kirsher #define ei_block_input (ei_local->block_input)
100644570b8SJeff Kirsher #define ei_get_8390_hdr (ei_local->get_8390_hdr)
101644570b8SJeff Kirsher
102644570b8SJeff Kirsher /* Index to functions. */
103644570b8SJeff Kirsher static void ei_tx_intr(struct net_device *dev);
104644570b8SJeff Kirsher static void ei_tx_err(struct net_device *dev);
105644570b8SJeff Kirsher static void ei_receive(struct net_device *dev);
106644570b8SJeff Kirsher static void ei_rx_overrun(struct net_device *dev);
107644570b8SJeff Kirsher
108644570b8SJeff Kirsher /* Routines generic to NS8390-based boards. */
109644570b8SJeff Kirsher static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
110644570b8SJeff Kirsher int start_page);
111644570b8SJeff Kirsher static void do_set_multicast_list(struct net_device *dev);
112644570b8SJeff Kirsher static void __NS8390_init(struct net_device *dev, int startp);
113644570b8SJeff Kirsher
114c45f812fSMatthew Whitehead static unsigned version_printed;
115360f8987SArmin Wolf static int msg_enable;
116360f8987SArmin Wolf static const int default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_RX_ERR |
117360f8987SArmin Wolf NETIF_MSG_TX_ERR);
118360f8987SArmin Wolf module_param(msg_enable, int, 0444);
119c45f812fSMatthew Whitehead MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
120c45f812fSMatthew Whitehead
121644570b8SJeff Kirsher /*
122644570b8SJeff Kirsher * SMP and the 8390 setup.
123644570b8SJeff Kirsher *
124644570b8SJeff Kirsher * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
125644570b8SJeff Kirsher * a page register that controls bank and packet buffer access. We guard
126644570b8SJeff Kirsher * this with ei_local->page_lock. Nobody should assume or set the page other
127644570b8SJeff Kirsher * than zero when the lock is not held. Lock holders must restore page 0
128644570b8SJeff Kirsher * before unlocking. Even pure readers must take the lock to protect in
129644570b8SJeff Kirsher * page 0.
130644570b8SJeff Kirsher *
131644570b8SJeff Kirsher * To make life difficult the chip can also be very slow. We therefore can't
132644570b8SJeff Kirsher * just use spinlocks. For the longer lockups we disable the irq the device
133644570b8SJeff Kirsher * sits on and hold the lock. We must hold the lock because there is a dual
134644570b8SJeff Kirsher * processor case other than interrupts (get stats/set multicast list in
135644570b8SJeff Kirsher * parallel with each other and transmit).
136644570b8SJeff Kirsher *
137644570b8SJeff Kirsher * Note: in theory we can just disable the irq on the card _but_ there is
138644570b8SJeff Kirsher * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
139644570b8SJeff Kirsher * enter lock, take the queued irq. So we waddle instead of flying.
140644570b8SJeff Kirsher *
141644570b8SJeff Kirsher * Finally by special arrangement for the purpose of being generally
142644570b8SJeff Kirsher * annoying the transmit function is called bh atomic. That places
143644570b8SJeff Kirsher * restrictions on the user context callers as disable_irq won't save
144644570b8SJeff Kirsher * them.
145644570b8SJeff Kirsher *
146644570b8SJeff Kirsher * Additional explanation of problems with locking by Alan Cox:
147644570b8SJeff Kirsher *
148644570b8SJeff Kirsher * "The author (me) didn't use spin_lock_irqsave because the slowness of the
149644570b8SJeff Kirsher * card means that approach caused horrible problems like losing serial data
150644570b8SJeff Kirsher * at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
151644570b8SJeff Kirsher * chips with FPGA front ends.
152644570b8SJeff Kirsher *
153644570b8SJeff Kirsher * Ok the logic behind the 8390 is very simple:
154644570b8SJeff Kirsher *
155644570b8SJeff Kirsher * Things to know
156644570b8SJeff Kirsher * - IRQ delivery is asynchronous to the PCI bus
157644570b8SJeff Kirsher * - Blocking the local CPU IRQ via spin locks was too slow
158644570b8SJeff Kirsher * - The chip has register windows needing locking work
159644570b8SJeff Kirsher *
160644570b8SJeff Kirsher * So the path was once (I say once as people appear to have changed it
161644570b8SJeff Kirsher * in the mean time and it now looks rather bogus if the changes to use
162644570b8SJeff Kirsher * disable_irq_nosync_irqsave are disabling the local IRQ)
163644570b8SJeff Kirsher *
164644570b8SJeff Kirsher *
165644570b8SJeff Kirsher * Take the page lock
166644570b8SJeff Kirsher * Mask the IRQ on chip
167644570b8SJeff Kirsher * Disable the IRQ (but not mask locally- someone seems to have
168644570b8SJeff Kirsher * broken this with the lock validator stuff)
169644570b8SJeff Kirsher * [This must be _nosync as the page lock may otherwise
170644570b8SJeff Kirsher * deadlock us]
171644570b8SJeff Kirsher * Drop the page lock and turn IRQs back on
172644570b8SJeff Kirsher *
173644570b8SJeff Kirsher * At this point an existing IRQ may still be running but we can't
174644570b8SJeff Kirsher * get a new one
175644570b8SJeff Kirsher *
176644570b8SJeff Kirsher * Take the lock (so we know the IRQ has terminated) but don't mask
177644570b8SJeff Kirsher * the IRQs on the processor
178644570b8SJeff Kirsher * Set irqlock [for debug]
179644570b8SJeff Kirsher *
180644570b8SJeff Kirsher * Transmit (slow as ****)
181644570b8SJeff Kirsher *
182644570b8SJeff Kirsher * re-enable the IRQ
183644570b8SJeff Kirsher *
184644570b8SJeff Kirsher *
185644570b8SJeff Kirsher * We have to use disable_irq because otherwise you will get delayed
186644570b8SJeff Kirsher * interrupts on the APIC bus deadlocking the transmit path.
187644570b8SJeff Kirsher *
188644570b8SJeff Kirsher * Quite hairy but the chip simply wasn't designed for SMP and you can't
189644570b8SJeff Kirsher * even ACK an interrupt without risking corrupting other parallel
190644570b8SJeff Kirsher * activities on the chip." [lkml, 25 Jul 2007]
191644570b8SJeff Kirsher */
192644570b8SJeff Kirsher
193644570b8SJeff Kirsher
194644570b8SJeff Kirsher
195644570b8SJeff Kirsher /**
196644570b8SJeff Kirsher * ei_open - Open/initialize the board.
197644570b8SJeff Kirsher * @dev: network device to initialize
198644570b8SJeff Kirsher *
199644570b8SJeff Kirsher * This routine goes all-out, setting everything
200644570b8SJeff Kirsher * up anew at each open, even though many of these registers should only
201644570b8SJeff Kirsher * need to be set once at boot.
202644570b8SJeff Kirsher */
static int __ei_open(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = netdev_priv(dev);

	/* Give drivers that never set a watchdog a sane default timeout. */
	if (dev->watchdog_timeo <= 0)
		dev->watchdog_timeo = TX_TIMEOUT;

	/*
	 * Grab the page lock so we own the register set, then call
	 * the init function.
	 */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 1);
	/* Set the flag before we drop the lock, That way the IRQ arrives
	   after its set and we get no silly warnings */
	netif_start_queue(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	/* irqlock is a debug aid used by __ei_interrupt() to detect IRQs
	 * arriving while the chip's interrupt mask is supposed to be off;
	 * clear it only once the device is fully initialised.
	 */
	ei_local->irqlock = 0;
	return 0;
}
225644570b8SJeff Kirsher
226644570b8SJeff Kirsher /**
227644570b8SJeff Kirsher * ei_close - shut down network device
228644570b8SJeff Kirsher * @dev: network device to close
229644570b8SJeff Kirsher *
230644570b8SJeff Kirsher * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
231644570b8SJeff Kirsher */
__ei_close(struct net_device * dev)232644570b8SJeff Kirsher static int __ei_close(struct net_device *dev)
233644570b8SJeff Kirsher {
234644570b8SJeff Kirsher struct ei_device *ei_local = netdev_priv(dev);
235644570b8SJeff Kirsher unsigned long flags;
236644570b8SJeff Kirsher
237644570b8SJeff Kirsher /*
238644570b8SJeff Kirsher * Hold the page lock during close
239644570b8SJeff Kirsher */
240644570b8SJeff Kirsher
241644570b8SJeff Kirsher spin_lock_irqsave(&ei_local->page_lock, flags);
242644570b8SJeff Kirsher __NS8390_init(dev, 0);
243644570b8SJeff Kirsher spin_unlock_irqrestore(&ei_local->page_lock, flags);
244644570b8SJeff Kirsher netif_stop_queue(dev);
245644570b8SJeff Kirsher return 0;
246644570b8SJeff Kirsher }
247644570b8SJeff Kirsher
248644570b8SJeff Kirsher /**
249644570b8SJeff Kirsher * ei_tx_timeout - handle transmit time out condition
250644570b8SJeff Kirsher * @dev: network device which has apparently fallen asleep
251644570b8SJeff Kirsher *
252644570b8SJeff Kirsher * Called by kernel when device never acknowledges a transmit has
253644570b8SJeff Kirsher * completed (or failed) - i.e. never posted a Tx related interrupt.
254644570b8SJeff Kirsher */
255644570b8SJeff Kirsher
/* Handle a watchdog-detected transmit timeout: log diagnostics, then
 * reset and reinitialise the card.  @txqueue is required by the
 * ndo_tx_timeout prototype but unused here (the 8390 is single-queue).
 */
static void __ei_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
	unsigned long flags;

	dev->stats.tx_errors++;

	/* Snapshot TSR/ISR under the page lock purely for the debug print. */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	txsr = ei_inb(e8390_base+EN0_TSR);
	isr = ei_inb(e8390_base+EN0_ISR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
		   (txsr & ENTSR_ABT) ? "excess collisions." :
		   (isr) ? "lost interrupt?" : "cable problem?",
		   txsr, isr, tickssofar);

	if (!isr && !dev->stats.tx_packets) {
		/* The 8390 probably hasn't gotten on the cable yet. */
		ei_local->interface_num ^= 1;	/* Try a different xcvr. */
	}

	/* Ugly but a reset can be slow, yet must be protected */

	/* _nosync: we may already hold resources the running handler wants;
	 * taking page_lock afterwards guarantees any in-flight handler has
	 * finished before we touch the chip (see locking essay above).
	 */
	disable_irq_nosync_lockdep(dev->irq);
	spin_lock(&ei_local->page_lock);

	/* Try to restart the card. Perhaps the user has fixed something. */
	ei_reset_8390(dev);
	__NS8390_init(dev, 1);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep(dev->irq);
	netif_wake_queue(dev);
}
293644570b8SJeff Kirsher
294644570b8SJeff Kirsher /**
295644570b8SJeff Kirsher * ei_start_xmit - begin packet transmission
296644570b8SJeff Kirsher * @skb: packet to be sent
297644570b8SJeff Kirsher * @dev: network device to which packet is sent
298644570b8SJeff Kirsher *
299644570b8SJeff Kirsher * Sends a packet to an 8390 network device.
300644570b8SJeff Kirsher */
301644570b8SJeff Kirsher
/* Queue one skb for transmission on an 8390.  Returns NETDEV_TX_OK on
 * success (skb consumed) or NETDEV_TX_BUSY if both Tx slots are full
 * (should not happen; callers requeue the skb).
 */
static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int send_length = skb->len, output_page;
	unsigned long flags;
	char buf[ETH_ZLEN];
	char *data = skb->data;

	/* Pad runt frames to the Ethernet minimum via a zeroed stack copy. */
	if (skb->len < ETH_ZLEN) {
		memset(buf, 0, ETH_ZLEN);	/* more efficient than doing just the needed bits */
		memcpy(buf, data, skb->len);
		send_length = ETH_ZLEN;
		data = buf;
	}

	/* Mask interrupts from the ethercard.
	   SMP: We have to grab the lock here otherwise the IRQ handler
	   on another CPU can flip window and race the IRQ mask set. We end
	   up trashing the mcast filter not disabling irqs if we don't lock */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_outb_p(0x00, e8390_base + EN0_IMR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);


	/*
	 * Slow phase with lock held.
	 */

	/* IMR is now 0, so only a stale in-flight IRQ can race us; taking
	 * page_lock below waits that handler out (see locking essay above).
	 */
	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);

	spin_lock(&ei_local->page_lock);

	/* Debug aid: flags to __ei_interrupt() that IRQs are meant to be off. */
	ei_local->irqlock = 1;

	/*
	 * We have two Tx slots available for use. Find the first free
	 * slot, and then perform some sanity checks. With two Tx bufs,
	 * you get very close to transmitting back-to-back packets. With
	 * only one Tx buf, the transmitter sits idle while you reload the
	 * card, leaving a substantial gap between each transmitted packet.
	 */

	/* tx1/tx2 hold the queued length for each slot: 0 = free,
	 * >0 = loaded but not yet sent, -1 = currently transmitting.
	 */
	if (ei_local->tx1 == 0) {
		output_page = ei_local->tx_start_page;
		ei_local->tx1 = send_length;
		if ((netif_msg_tx_queued(ei_local)) &&
		    ei_local->tx2 > 0)
			netdev_dbg(dev,
				   "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx2, ei_local->lasttx, ei_local->txing);
	} else if (ei_local->tx2 == 0) {
		output_page = ei_local->tx_start_page + TX_PAGES/2;
		ei_local->tx2 = send_length;
		if ((netif_msg_tx_queued(ei_local)) &&
		    ei_local->tx1 > 0)
			netdev_dbg(dev,
				   "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx1, ei_local->lasttx, ei_local->txing);
	} else {			/* We should never get here. */
		netif_dbg(ei_local, tx_err, dev,
			  "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
			  ei_local->tx1, ei_local->tx2, ei_local->lasttx);
		ei_local->irqlock = 0;
		netif_stop_queue(dev);
		/* Re-enable chip interrupts before unwinding the lock/IRQ pair. */
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		spin_unlock(&ei_local->page_lock);
		enable_irq_lockdep_irqrestore(dev->irq, &flags);
		dev->stats.tx_errors++;
		return NETDEV_TX_BUSY;
	}

	/*
	 * Okay, now upload the packet and trigger a send if the transmitter
	 * isn't already sending. If it is busy, the interrupt handler will
	 * trigger the send later, upon receiving a Tx done interrupt.
	 */

	ei_block_output(dev, send_length, data, output_page);

	if (!ei_local->txing) {
		ei_local->txing = 1;
		NS8390_trigger_send(dev, send_length, output_page);
		/* Mark the now-transmitting slot (-1) and remember which one
		 * (lasttx) so ei_tx_intr() can tell them apart.
		 */
		if (output_page == ei_local->tx_start_page) {
			ei_local->tx1 = -1;
			ei_local->lasttx = -1;
		} else {
			ei_local->tx2 = -1;
			ei_local->lasttx = -2;
		}
	} else
		ei_local->txqueue++;

	/* Both slots occupied: stall the stack until a Tx-done IRQ frees one. */
	if (ei_local->tx1 && ei_local->tx2)
		netif_stop_queue(dev);
	else
		netif_start_queue(dev);

	/* Turn 8390 interrupts back on. */
	ei_local->irqlock = 0;
	ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep_irqrestore(dev->irq, &flags);
	skb_tx_timestamp(skb);
	dev_consume_skb_any(skb);
	dev->stats.tx_bytes += send_length;

	return NETDEV_TX_OK;
}
414644570b8SJeff Kirsher
415644570b8SJeff Kirsher /**
416644570b8SJeff Kirsher * ei_interrupt - handle the interrupts from an 8390
417644570b8SJeff Kirsher * @irq: interrupt number
418644570b8SJeff Kirsher * @dev_id: a pointer to the net_device
419644570b8SJeff Kirsher *
420644570b8SJeff Kirsher * Handle the ether interface interrupts. We pull packets from
421644570b8SJeff Kirsher * the 8390 via the card specific functions and fire them at the networking
422644570b8SJeff Kirsher * stack. We also handle transmit completions and wake the transmit path if
423644570b8SJeff Kirsher * necessary. We also update the counters and do other housekeeping as
424644570b8SJeff Kirsher * needed.
425644570b8SJeff Kirsher */
426644570b8SJeff Kirsher
static irqreturn_t __ei_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	unsigned long e8390_base = dev->base_addr;
	int interrupts, nr_serviced = 0;
	struct ei_device *ei_local = netdev_priv(dev);

	/*
	 * Protect the irq test too.
	 */

	spin_lock(&ei_local->page_lock);

	if (ei_local->irqlock) {
		/*
		 * This might just be an interrupt for a PCI device sharing
		 * this line
		 */
		netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
			   ei_inb_p(e8390_base + EN0_ISR),
			   ei_inb_p(e8390_base + EN0_IMR));
		spin_unlock(&ei_local->page_lock);
		/* Not ours (or a driver bug): let a sharing handler claim it. */
		return IRQ_NONE;
	}

	/* Change to page 0 and read the intr status reg. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
	netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
		  ei_inb_p(e8390_base + EN0_ISR));

	/* !!Assumption!! -- we stay in page 0.	 Don't break this. */
	/* Loop until the chip reports no pending causes, bounded by
	 * MAX_SERVICE so a wedged card cannot livelock the CPU.
	 */
	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
	       ++nr_serviced < MAX_SERVICE) {
		if (!netif_running(dev)) {
			netdev_warn(dev, "interrupt from stopped card\n");
			/* rmk - acknowledge the interrupts */
			ei_outb_p(interrupts, e8390_base + EN0_ISR);
			interrupts = 0;
			break;
		}
		/* Overrun takes priority: it needs the NS-specified recovery
		 * sequence before normal receive processing can resume.
		 */
		if (interrupts & ENISR_OVER)
			ei_rx_overrun(dev);
		else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
			/* Got a good (?) packet. */
			ei_receive(dev);
		}
		/* Push the next to-transmit packet through. */
		if (interrupts & ENISR_TX)
			ei_tx_intr(dev);
		else if (interrupts & ENISR_TX_ERR)
			ei_tx_err(dev);

		if (interrupts & ENISR_COUNTERS) {
			/* Reading the tally counters clears them on the chip. */
			dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
			dev->stats.rx_crc_errors   += ei_inb_p(e8390_base + EN0_COUNTER1);
			dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2);
			ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
		}

		/* Ignore any RDC interrupts that make it back to here. */
		if (interrupts & ENISR_RDC)
			ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);

		/* Restore page 0 in case a service routine switched windows. */
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
	}

	/* Still-pending causes after the loop: either too much work or an
	 * interrupt source we do not understand; ack and move on.
	 */
	if (interrupts && (netif_msg_intr(ei_local))) {
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
		if (nr_serviced >= MAX_SERVICE) {
			/* 0xFF is valid for a card removal */
			if (interrupts != 0xFF)
				netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
					    interrupts);
			ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
		} else {
			netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
			ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
		}
	}
	spin_unlock(&ei_local->page_lock);
	return IRQ_RETVAL(nr_serviced > 0);
}
509644570b8SJeff Kirsher
510644570b8SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
__ei_poll(struct net_device * dev)511644570b8SJeff Kirsher static void __ei_poll(struct net_device *dev)
512644570b8SJeff Kirsher {
513644570b8SJeff Kirsher disable_irq(dev->irq);
514644570b8SJeff Kirsher __ei_interrupt(dev->irq, dev);
515644570b8SJeff Kirsher enable_irq(dev->irq);
516644570b8SJeff Kirsher }
517644570b8SJeff Kirsher #endif
518644570b8SJeff Kirsher
519644570b8SJeff Kirsher /**
520644570b8SJeff Kirsher * ei_tx_err - handle transmitter error
521644570b8SJeff Kirsher * @dev: network device which threw the exception
522644570b8SJeff Kirsher *
523644570b8SJeff Kirsher * A transmitter error has happened. Most likely excess collisions (which
524644570b8SJeff Kirsher * is a fairly normal condition). If the error is one where the Tx will
525644570b8SJeff Kirsher * have been aborted, we try and send another one right away, instead of
526644570b8SJeff Kirsher * letting the failed packet sit and collect dust in the Tx buffer. This
527644570b8SJeff Kirsher * is a much better solution as it avoids kernel based Tx timeouts, and
528644570b8SJeff Kirsher * an unnecessary card reset.
529644570b8SJeff Kirsher *
530644570b8SJeff Kirsher * Called with lock held.
531644570b8SJeff Kirsher */
532644570b8SJeff Kirsher
static void ei_tx_err(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	/* ei_local is used on some platforms via the EI_SHIFT macro */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
	unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
	/* Abort / FIFO-underrun mean the frame never went out; retransmit. */
	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);

#ifdef VERBOSE_ERROR_DUMP
	netdev_dbg(dev, "transmitter error (%#2x):", txsr);
	if (txsr & ENTSR_ABT)
		pr_cont(" excess-collisions ");
	if (txsr & ENTSR_ND)
		pr_cont(" non-deferral ");
	if (txsr & ENTSR_CRS)
		pr_cont(" lost-carrier ");
	if (txsr & ENTSR_FU)
		pr_cont(" FIFO-underrun ");
	if (txsr & ENTSR_CDH)
		pr_cont(" lost-heartbeat ");
	pr_cont("\n");
#endif

	ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */

	if (tx_was_aborted)
		/* Reuse the normal Tx-done path to kick out the next frame
		 * immediately instead of waiting for a watchdog timeout.
		 */
		ei_tx_intr(dev);
	else {
		/* Frame made it onto the wire, possibly damaged: count it. */
		dev->stats.tx_errors++;
		if (txsr & ENTSR_CRS)
			dev->stats.tx_carrier_errors++;
		if (txsr & ENTSR_CDH)
			dev->stats.tx_heartbeat_errors++;
		if (txsr & ENTSR_OWC)
			dev->stats.tx_window_errors++;
	}
}
570644570b8SJeff Kirsher
571644570b8SJeff Kirsher /**
572644570b8SJeff Kirsher * ei_tx_intr - transmit interrupt handler
573644570b8SJeff Kirsher * @dev: network device for which tx intr is handled
574644570b8SJeff Kirsher *
575644570b8SJeff Kirsher * We have finished a transmit: check for errors and then trigger the next
576644570b8SJeff Kirsher * packet to be sent. Called with lock held.
577644570b8SJeff Kirsher */
578644570b8SJeff Kirsher
static void ei_tx_intr(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int status = ei_inb(e8390_base + EN0_TSR);

	ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */

	/*
	 * There are two Tx buffers, see which one finished, and trigger
	 * the send of another one if it exists.
	 *
	 * Buffer state encoding (from the code below): tx1/tx2 < 0 means
	 * "was being transmitted", > 0 means "loaded and waiting" (value
	 * is the frame length), 0 means free.  lasttx records which
	 * buffer was sent last (1/2, or 10/20 when the queue went idle,
	 * or negative transitional values).
	 */
	ei_local->txqueue--;

	if (ei_local->tx1 < 0) {
		/* Buffer 1 just completed; sanity-check the bookkeeping. */
		if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
			pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
			       ei_local->name, ei_local->lasttx, ei_local->tx1);
		ei_local->tx1 = 0;
		if (ei_local->tx2 > 0) {
			/* Buffer 2 is loaded: fire it.  It lives 6 pages
			   above the first buffer (page size per 8390
			   convention — confirm against board code). */
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
			netif_trans_update(dev);
			ei_local->tx2 = -1;
			ei_local->lasttx = 2;
		} else {
			/* Nothing queued: transmitter goes idle. */
			ei_local->lasttx = 20;
			ei_local->txing = 0;
		}
	} else if (ei_local->tx2 < 0) {
		/* Mirror image: buffer 2 completed, maybe fire buffer 1. */
		if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
			pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
			       ei_local->name, ei_local->lasttx, ei_local->tx2);
		ei_local->tx2 = 0;
		if (ei_local->tx1 > 0) {
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
			netif_trans_update(dev);
			ei_local->tx1 = -1;
			ei_local->lasttx = 1;
		} else {
			ei_local->lasttx = 10;
			ei_local->txing = 0;
		}
	} /* else
		netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
			    ei_local->lasttx);
	*/

	/* Minimize Tx latency: update the statistics after we restart TXing. */
	if (status & ENTSR_COL)
		dev->stats.collisions++;
	if (status & ENTSR_PTX)
		dev->stats.tx_packets++;
	else {
		dev->stats.tx_errors++;
		if (status & ENTSR_ABT) {
			/* Aborted after the maximum retry count. */
			dev->stats.tx_aborted_errors++;
			dev->stats.collisions += 16;
		}
		if (status & ENTSR_CRS)
			dev->stats.tx_carrier_errors++;
		if (status & ENTSR_FU)
			dev->stats.tx_fifo_errors++;
		if (status & ENTSR_CDH)
			dev->stats.tx_heartbeat_errors++;
		if (status & ENTSR_OWC)
			dev->stats.tx_window_errors++;
	}
	/* A Tx slot just freed up, so let the stack queue more frames. */
	netif_wake_queue(dev);
}
650644570b8SJeff Kirsher
651644570b8SJeff Kirsher /**
652644570b8SJeff Kirsher * ei_receive - receive some packets
653644570b8SJeff Kirsher * @dev: network device with which receive will be run
654644570b8SJeff Kirsher *
655644570b8SJeff Kirsher * We have a good packet(s), get it/them out of the buffers.
656644570b8SJeff Kirsher * Called with lock held.
657644570b8SJeff Kirsher */
658644570b8SJeff Kirsher
static void ei_receive(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned char rxing_page, this_frame, next_frame;
	unsigned short current_offset;
	int rx_pkt_count = 0;
	struct e8390_pkt_hdr rx_frame;
	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;

	/* Bound the work per call: at most 9 frames before returning. */
	while (++rx_pkt_count < 10) {
		int pkt_len, pkt_stat;

		/* Get the rx page (incoming packet pointer). */
		ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
		rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
		ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);

		/* Remove one frame from the ring.  Boundary is always a page behind. */
		this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
		if (this_frame >= ei_local->stop_page)
			this_frame = ei_local->rx_start_page;

		/* Someday we'll omit the previous, iff we never get this message.
		   (There is at least one clone claimed to have a problem.)

		   Keep quiet if it looks like a card removal. One problem here
		   is that some clones crash in roughly the same way.
		 */
		if ((netif_msg_rx_status(ei_local)) &&
		    this_frame != ei_local->current_page &&
		    (this_frame != 0x0 || rxing_page != 0xFF))
			netdev_err(dev,
				   "mismatched read page pointers %2x vs %2x\n",
				   this_frame, ei_local->current_page);

		if (this_frame == rxing_page)	/* Read all the frames? */
			break;			/* Done for now */

		/* Frame pages are 256 bytes: page number << 8 gives the
		   byte offset of the on-card packet header. */
		current_offset = this_frame << 8;
		ei_get_8390_hdr(dev, &rx_frame, this_frame);

		/* count includes the 4-byte 8390 header; strip it. */
		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
		pkt_stat = rx_frame.status;

		/* Page the frame should end on: header page + data pages. */
		next_frame = this_frame + 1 + ((pkt_len+4)>>8);

		/* Check for bogosity warned by 3c503 book: the status byte is never
		   written.  This happened a lot during testing! This code should be
		   cleaned up someday. */
		if (rx_frame.next != next_frame &&
		    rx_frame.next != next_frame + 1 &&
		    rx_frame.next != next_frame - num_rx_pages &&
		    rx_frame.next != next_frame + 1 - num_rx_pages) {
			/* Header looks corrupt: resync to the card's write
			   pointer and drop whatever was in between. */
			ei_local->current_page = rxing_page;
			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
			dev->stats.rx_errors++;
			continue;
		}

		/* Reject runts and oversized frames (Ethernet min/max). */
		if (pkt_len < 60 || pkt_len > 1518) {
			netif_dbg(ei_local, rx_status, dev,
				  "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
				  rx_frame.count, rx_frame.status,
				  rx_frame.next);
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
		} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb(dev, pkt_len + 2);
			if (skb == NULL) {
				netif_err(ei_local, rx_err, dev,
					  "Couldn't allocate a sk_buff of size %d\n",
					  pkt_len);
				dev->stats.rx_dropped++;
				/* Out of memory: stop draining the ring. */
				break;
			} else {
				skb_reserve(skb, 2);	/* IP headers on 16 byte boundaries */
				skb_put(skb, pkt_len);	/* Make room */
				/* Board-specific copy from card memory,
				   skipping the on-card packet header. */
				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
				skb->protocol = eth_type_trans(skb, dev);
				if (!skb_defer_rx_timestamp(skb))
					netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				if (pkt_stat & ENRSR_PHY)
					dev->stats.multicast++;
			}
		} else {
			netif_err(ei_local, rx_err, dev,
				  "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
				  rx_frame.status, rx_frame.next,
				  rx_frame.count);
			dev->stats.rx_errors++;
			/* NB: The NIC counts CRC, frame and missed errors. */
			if (pkt_stat & ENRSR_FO)
				dev->stats.rx_fifo_errors++;
		}
		/* Trust the chip's own next-frame pointer to advance. */
		next_frame = rx_frame.next;

		/* This _should_ never happen: it's here for avoiding bad clones. */
		if (next_frame >= ei_local->stop_page) {
			netdev_notice(dev, "next frame inconsistency, %#2x\n",
				      next_frame);
			next_frame = ei_local->rx_start_page;
		}
		/* Advance our copy and the boundary (kept one page back). */
		ei_local->current_page = next_frame;
		ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
	}

	/* We used to also ack ENISR_OVER here, but that would sometimes mask
	   a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
	ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
}
774644570b8SJeff Kirsher
775644570b8SJeff Kirsher /**
776644570b8SJeff Kirsher * ei_rx_overrun - handle receiver overrun
777644570b8SJeff Kirsher * @dev: network device which threw exception
778644570b8SJeff Kirsher *
779644570b8SJeff Kirsher * We have a receiver overrun: we have to kick the 8390 to get it started
780644570b8SJeff Kirsher * again. Problem is that you have to kick it exactly as NS prescribes in
781644570b8SJeff Kirsher * the updated datasheets, or "the NIC may act in an unpredictable manner."
782644570b8SJeff Kirsher * This includes causing "the NIC to defer indefinitely when it is stopped
783644570b8SJeff Kirsher * on a busy network." Ugh.
784644570b8SJeff Kirsher * Called with lock held. Don't call this with the interrupts off or your
785644570b8SJeff Kirsher * computer will hate you - it takes 10ms or so.
786644570b8SJeff Kirsher */
787644570b8SJeff Kirsher
static void ei_rx_overrun(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	unsigned char was_txing, must_resend = 0;
	/* ei_local is used on some platforms via the EI_SHIFT macro */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);

	/*
	 * Record whether a Tx was in progress and then issue the
	 * stop command.
	 */
	was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
	dev->stats.rx_over_errors++;

	/*
	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
	 * Early datasheets said to poll the reset bit, but now they say that
	 * it "is not a reliable indicator and subsequently should be ignored."
	 * We wait at least 10ms.
	 */

	mdelay(10);

	/*
	 * Reset RBCR[01] back to zero as per magic incantation.
	 */
	ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
	ei_outb_p(0x00, e8390_base+EN0_RCNTHI);

	/*
	 * See if any Tx was interrupted or not. According to NS, this
	 * step is vital, and skipping it will cause no end of havoc.
	 */

	if (was_txing) {
		/* If neither TX-done nor TX-error fired, the stop command
		   cut the transmit short and it must be re-issued. */
		unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
		if (!tx_completed)
			must_resend = 1;
	}

	/*
	 * Have to enter loopback mode and then restart the NIC before
	 * you are allowed to slurp packets up off the ring.
	 */
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);

	/*
	 * Clear the Rx ring of all the debris, and ack the interrupt.
	 */
	ei_receive(dev);
	ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);

	/*
	 * Leave loopback mode, and resend any packet that got stopped.
	 */
	ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
	if (must_resend)
		ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
}
851644570b8SJeff Kirsher
852644570b8SJeff Kirsher /*
853644570b8SJeff Kirsher * Collect the stats. This is called unlocked and from several contexts.
854644570b8SJeff Kirsher */
855644570b8SJeff Kirsher
/* Collect the stats from the NIC's tally counters into dev->stats and
   return it.  Safe to call while the interface is down. */
static struct net_device_stats *__ei_get_stats(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long flags;

	/* If the card is stopped, just return the present stats. */
	if (!netif_running(dev))
		return &dev->stats;

	spin_lock_irqsave(&ei_local->page_lock, flags);
	/* Read the counter registers, assuming we are in page 0. */
	/* NOTE(review): the += suggests these hardware counters are
	   clear-on-read — confirm against the DP8390 datasheet. */
	dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
	dev->stats.rx_crc_errors   += ei_inb_p(ioaddr + EN0_COUNTER1);
	dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	return &dev->stats;
}
875644570b8SJeff Kirsher
876644570b8SJeff Kirsher /*
877644570b8SJeff Kirsher * Form the 64 bit 8390 multicast table from the linked list of addresses
878644570b8SJeff Kirsher * associated with this dev structure.
879644570b8SJeff Kirsher */
880644570b8SJeff Kirsher
static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	/*
	 * The 8390 hashes each multicast address with the Ethernet CRC
	 * and uses the six most significant bits of the result to index
	 * its 64-bit multicast filter table.
	 */
	netdev_for_each_mc_addr(ha, dev) {
		u32 crc = ether_crc(ETH_ALEN, ha->addr);
		unsigned int index = crc >> 26;	/* top six CRC bits */

		bits[index >> 3] |= 1U << (index & 7);
	}
}
894644570b8SJeff Kirsher
895644570b8SJeff Kirsher /**
896644570b8SJeff Kirsher * do_set_multicast_list - set/clear multicast filter
897644570b8SJeff Kirsher * @dev: net device for which multicast filter is adjusted
898644570b8SJeff Kirsher *
899644570b8SJeff Kirsher * Set or clear the multicast filter for this adaptor. May be called
900644570b8SJeff Kirsher * from a BH in 2.1.x. Must be called with lock held.
901644570b8SJeff Kirsher */
902644570b8SJeff Kirsher
static void do_set_multicast_list(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	int i;
	struct ei_device *ei_local = netdev_priv(dev);

	/* Build the hash filter unless we are in promiscuous/all-multicast
	   mode, in which case set every filter bit. */
	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
		memset(ei_local->mcfilter, 0, 8);
		if (!netdev_mc_empty(dev))
			make_mc_bits(ei_local->mcfilter, dev);
	} else
		memset(ei_local->mcfilter, 0xFF, 8);	/* mcast set to accept-all */

	/*
	 * DP8390 manuals don't specify any magic sequence for altering
	 * the multicast regs on an already running card. To be safe, we
	 * ensure multicast mode is off prior to loading up the new hash
	 * table. If this proves to be not enough, we can always resort
	 * to stopping the NIC, loading the table and then restarting.
	 *
	 * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC
	 * Elite16) appear to be write-only. The NS 8390 data sheet lists
	 * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and
	 * Ultra32 EISA) appears to have this bug fixed.
	 */

	if (netif_running(dev))
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
	/* The multicast filter registers live on register page 1. */
	ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
	for (i = 0; i < 8; i++) {
		ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
#ifndef BUG_83C690
		/* Read back and verify each byte (skipped on chips with
		   write-only MC regs; see Bug Alert above). */
		if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i])
			netdev_err(dev, "Multicast filter read/write mismap %d\n",
				   i);
#endif
	}
	ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);

	/* Finally select the receive mode.  0x18/0x08 are RXCR mode bits —
	   presumably promiscuous and accept-multicast; confirm in 8390.h. */
	if (dev->flags&IFF_PROMISC)
		ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
	else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
		ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
	else
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
}
949644570b8SJeff Kirsher
950644570b8SJeff Kirsher /*
951644570b8SJeff Kirsher * Called without lock held. This is invoked from user context and may
952644570b8SJeff Kirsher * be parallel to just about everything else. Its also fairly quick and
953644570b8SJeff Kirsher * not called too often. Must protect against both bh and irq users
954644570b8SJeff Kirsher */
955644570b8SJeff Kirsher
__ei_set_multicast_list(struct net_device * dev)956644570b8SJeff Kirsher static void __ei_set_multicast_list(struct net_device *dev)
957644570b8SJeff Kirsher {
958644570b8SJeff Kirsher unsigned long flags;
959644570b8SJeff Kirsher struct ei_device *ei_local = netdev_priv(dev);
960644570b8SJeff Kirsher
961644570b8SJeff Kirsher spin_lock_irqsave(&ei_local->page_lock, flags);
962644570b8SJeff Kirsher do_set_multicast_list(dev);
963644570b8SJeff Kirsher spin_unlock_irqrestore(&ei_local->page_lock, flags);
964644570b8SJeff Kirsher }
965644570b8SJeff Kirsher
966644570b8SJeff Kirsher /**
967644570b8SJeff Kirsher * ethdev_setup - init rest of 8390 device struct
968644570b8SJeff Kirsher * @dev: network device structure to init
969644570b8SJeff Kirsher *
970644570b8SJeff Kirsher * Initialize the rest of the 8390 device structure. Do NOT __init
971644570b8SJeff Kirsher * this, as it is used by 8390 based modular drivers too.
972644570b8SJeff Kirsher */
973644570b8SJeff Kirsher
static void ethdev_setup(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);

	/* Fill in the generic Ethernet fields of the net_device. */
	ether_setup(dev);

	spin_lock_init(&ei_local->page_lock);

	/* msg_enable and default_msg_level are file-scope settings
	   (defined outside this view — presumably module parameters). */
	ei_local->msg_enable = netif_msg_init(msg_enable, default_msg_level);

	/* Print the version banner once, for the first device set up. */
	if (netif_msg_drv(ei_local) && (version_printed++ == 0))
		pr_info("%s", version);
}
987644570b8SJeff Kirsher
988644570b8SJeff Kirsher /**
989644570b8SJeff Kirsher * alloc_ei_netdev - alloc_etherdev counterpart for 8390
990644570b8SJeff Kirsher * @size: extra bytes to allocate
991644570b8SJeff Kirsher *
992644570b8SJeff Kirsher * Allocate 8390-specific net_device.
993644570b8SJeff Kirsher */
____alloc_ei_netdev(int size)994644570b8SJeff Kirsher static struct net_device *____alloc_ei_netdev(int size)
995644570b8SJeff Kirsher {
996644570b8SJeff Kirsher return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
997c835a677STom Gundersen NET_NAME_UNKNOWN, ethdev_setup);
998644570b8SJeff Kirsher }
999644570b8SJeff Kirsher
1000644570b8SJeff Kirsher
1001644570b8SJeff Kirsher
1002644570b8SJeff Kirsher
1003644570b8SJeff Kirsher /* This page of functions should be 8390 generic */
1004644570b8SJeff Kirsher /* Follow National Semi's recommendations for initializing the "NIC". */
1005644570b8SJeff Kirsher
1006644570b8SJeff Kirsher /**
1007644570b8SJeff Kirsher * NS8390_init - initialize 8390 hardware
1008644570b8SJeff Kirsher * @dev: network device to initialize
1009644570b8SJeff Kirsher * @startp: boolean. non-zero value to initiate chip processing
1010644570b8SJeff Kirsher *
1011644570b8SJeff Kirsher * Must be called with lock held.
1012644570b8SJeff Kirsher */
1013644570b8SJeff Kirsher
static void __NS8390_init(struct net_device *dev, int startp)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int i;
	/* Data config: word-wide transfers (plus byte order) only when the
	   board interface is 16 bit; 0x48 alone for 8-bit boards. */
	int endcfg = ei_local->word16
	    ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
	    : 0x48;

	/* The on-card packet header must be exactly 4 bytes (see the
	   sizeof(struct e8390_pkt_hdr) arithmetic in ei_receive()). */
	BUILD_BUG_ON(sizeof(struct e8390_pkt_hdr) != 4);
	/* Follow National Semi's recommendations for initing the DP83902. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
	ei_outb_p(endcfg, e8390_base + EN0_DCFG);	/* 0x48 or 0x49 */
	/* Clear the remote byte count registers. */
	ei_outb_p(0x00, e8390_base + EN0_RCNTLO);
	ei_outb_p(0x00, e8390_base + EN0_RCNTHI);
	/* Set to monitor and loopback mode -- this is vital!. */
	ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
	/* Set the transmit page and receive ring. */
	ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
	ei_local->tx1 = ei_local->tx2 = 0;
	ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
	ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY);	/* 3c503 says 0x3f,NS0x26*/
	ei_local->current_page = ei_local->rx_start_page;	/* assert boundary+1 */
	ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
	/* Clear the pending interrupts and mask. */
	ei_outb_p(0xFF, e8390_base + EN0_ISR);
	ei_outb_p(0x00, e8390_base + EN0_IMR);

	/* Copy the station address into the DS8390 registers. */

	ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
	for (i = 0; i < 6; i++) {
		ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
		/* Verify each byte by reading it back (probe-level debug). */
		if ((netif_msg_probe(ei_local)) &&
		    ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
			netdev_err(dev,
				   "Hw. address read/write mismap %d\n", i);
	}

	ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	/* Both Tx ping-pong buffers start out free, transmitter idle. */
	ei_local->tx1 = ei_local->tx2 = 0;
	ei_local->txing = 0;

	if (startp) {
		/* Bring the chip out of stop: ack everything, unmask all
		   interrupts, start, and enable the transmitter. */
		ei_outb_p(0xff, e8390_base + EN0_ISR);
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
		ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
		/* 3c503 TechMan says rxconfig only after the NIC is started. */
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on,  */
		do_set_multicast_list(dev);	/* (re)load the mcast table */
	}
}
1071644570b8SJeff Kirsher
1072644570b8SJeff Kirsher /* Trigger a transmit start, assuming the length is valid.
1073644570b8SJeff Kirsher Always called with the page lock held */
1074644570b8SJeff Kirsher
NS8390_trigger_send(struct net_device * dev,unsigned int length,int start_page)1075644570b8SJeff Kirsher static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1076644570b8SJeff Kirsher int start_page)
1077644570b8SJeff Kirsher {
1078644570b8SJeff Kirsher unsigned long e8390_base = dev->base_addr;
1079644570b8SJeff Kirsher struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
1080644570b8SJeff Kirsher
1081644570b8SJeff Kirsher ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
1082644570b8SJeff Kirsher
1083644570b8SJeff Kirsher if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) {
1084644570b8SJeff Kirsher netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
1085644570b8SJeff Kirsher return;
1086644570b8SJeff Kirsher }
1087644570b8SJeff Kirsher ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
1088644570b8SJeff Kirsher ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
1089644570b8SJeff Kirsher ei_outb_p(start_page, e8390_base + EN0_TPSR);
1090644570b8SJeff Kirsher ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
1091644570b8SJeff Kirsher }
1092