1 /* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
2 /*
3 	Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
4 
5 	Copyright 1994, 1995 Digital Equipment Corporation.	    [de4x5.c]
6 	Written/copyright 1994-2001 by Donald Becker.		    [tulip.c]
7 
8 	This software may be used and distributed according to the terms of
9 	the GNU General Public License (GPL), incorporated herein by reference.
10 	Drivers based on or derived from this code fall under the GPL and must
11 	retain the authorship, copyright and license notice.  This file is not
12 	a complete program and may only be used when the entire operating
13 	system is licensed under the GPL.
14 
15 	See the file COPYING in this distribution for more information.
16 
17 	TODO, in rough priority order:
18 	* Support forcing media type with a module parameter,
19 	  like dl2k.c/sundance.c
20 	* Constants (module parms?) for Rx work limit
21 	* Complete reset on PciErr
22 	* Jumbo frames / dev->change_mtu
23 	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 	* Implement Tx software interrupt mitigation via
26 	  Tx descriptor bit
27 
28  */
29 
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 
32 #define DRV_NAME		"de2104x"
33 #define DRV_RELDATE		"Mar 17, 2004"
34 
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/pci.h>
42 #include <linux/delay.h>
43 #include <linux/ethtool.h>
44 #include <linux/compiler.h>
45 #include <linux/rtnetlink.h>
46 #include <linux/crc32.h>
47 #include <linux/slab.h>
48 
49 #include <asm/io.h>
50 #include <asm/irq.h>
51 #include <linux/uaccess.h>
52 #include <asm/unaligned.h>
53 
54 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
55 MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
56 MODULE_LICENSE("GPL");
57 
58 static int debug = -1;
59 module_param (debug, int, 0);
60 MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
61 
62 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
63 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
64         defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
65         defined(__sh__) || defined(__mips__)
66 static int rx_copybreak = 1518;
67 #else
68 static int rx_copybreak = 100;
69 #endif
70 module_param (rx_copybreak, int, 0);
71 MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
72 
73 #define DE_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
74 				 NETIF_MSG_PROBE 	| \
75 				 NETIF_MSG_LINK		| \
76 				 NETIF_MSG_IFDOWN	| \
77 				 NETIF_MSG_IFUP		| \
78 				 NETIF_MSG_RX_ERR	| \
79 				 NETIF_MSG_TX_ERR)
80 
81 /* Descriptor skip length in 32 bit longwords. */
82 #ifndef CONFIG_DE2104X_DSL
83 #define DSL			0
84 #else
85 #define DSL			CONFIG_DE2104X_DSL
86 #endif
87 
88 #define DE_RX_RING_SIZE		64
89 #define DE_TX_RING_SIZE		64
90 #define DE_RING_BYTES		\
91 		((sizeof(struct de_desc) * DE_RX_RING_SIZE) +	\
92 		(sizeof(struct de_desc) * DE_TX_RING_SIZE))
93 #define NEXT_TX(N)		(((N) + 1) & (DE_TX_RING_SIZE - 1))
94 #define NEXT_RX(N)		(((N) + 1) & (DE_RX_RING_SIZE - 1))
95 #define TX_BUFFS_AVAIL(CP)					\
96 	(((CP)->tx_tail <= (CP)->tx_head) ?			\
97 	  (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head :	\
98 	  (CP)->tx_tail - (CP)->tx_head - 1)
99 
100 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
101 #define RX_OFFSET		2
102 
103 #define DE_SETUP_SKB		((struct sk_buff *) 1)
104 #define DE_DUMMY_SKB		((struct sk_buff *) 2)
105 #define DE_SETUP_FRAME_WORDS	96
106 #define DE_EEPROM_WORDS		256
107 #define DE_EEPROM_SIZE		(DE_EEPROM_WORDS * sizeof(u16))
108 #define DE_MAX_MEDIA		5
109 
110 #define DE_MEDIA_TP_AUTO	0
111 #define DE_MEDIA_BNC		1
112 #define DE_MEDIA_AUI		2
113 #define DE_MEDIA_TP		3
114 #define DE_MEDIA_TP_FD		4
115 #define DE_MEDIA_INVALID	DE_MAX_MEDIA
116 #define DE_MEDIA_FIRST		0
117 #define DE_MEDIA_LAST		(DE_MAX_MEDIA - 1)
118 #define DE_AUI_BNC		(SUPPORTED_AUI | SUPPORTED_BNC)
119 
120 #define DE_TIMER_LINK		(60 * HZ)
121 #define DE_TIMER_NO_LINK	(5 * HZ)
122 
123 #define DE_NUM_REGS		16
124 #define DE_REGS_SIZE		(DE_NUM_REGS * sizeof(u32))
125 #define DE_REGS_VER		1
126 
127 /* Time in jiffies before concluding the transmitter is hung. */
128 #define TX_TIMEOUT		(6*HZ)
129 
130 /* This is a mysterious value that can be written to CSR11 in the 21040 (only)
131    to support a pre-NWay full-duplex signaling mechanism using short frames.
132    No one knows what it should be, but if left at its default value some
133    10base2(!) packets trigger a full-duplex-request interrupt. */
134 #define FULL_DUPLEX_MAGIC	0x6969
135 
136 enum {
137 	/* NIC registers */
138 	BusMode			= 0x00,
139 	TxPoll			= 0x08,
140 	RxPoll			= 0x10,
141 	RxRingAddr		= 0x18,
142 	TxRingAddr		= 0x20,
143 	MacStatus		= 0x28,
144 	MacMode			= 0x30,
145 	IntrMask		= 0x38,
146 	RxMissed		= 0x40,
147 	ROMCmd			= 0x48,
148 	CSR11			= 0x58,
149 	SIAStatus		= 0x60,
150 	CSR13			= 0x68,
151 	CSR14			= 0x70,
152 	CSR15			= 0x78,
153 	PCIPM			= 0x40,
154 
155 	/* BusMode bits */
156 	CmdReset		= (1 << 0),
157 	CacheAlign16		= 0x00008000,
158 	BurstLen4		= 0x00000400,
159 	DescSkipLen		= (DSL << 2),
160 
161 	/* Rx/TxPoll bits */
162 	NormalTxPoll		= (1 << 0),
163 	NormalRxPoll		= (1 << 0),
164 
165 	/* Tx/Rx descriptor status bits */
166 	DescOwn			= (1 << 31),
167 	RxError			= (1 << 15),
168 	RxErrLong		= (1 << 7),
169 	RxErrCRC		= (1 << 1),
170 	RxErrFIFO		= (1 << 0),
171 	RxErrRunt		= (1 << 11),
172 	RxErrFrame		= (1 << 14),
173 	RingEnd			= (1 << 25),
174 	FirstFrag		= (1 << 29),
175 	LastFrag		= (1 << 30),
176 	TxError			= (1 << 15),
177 	TxFIFOUnder		= (1 << 1),
178 	TxLinkFail		= (1 << 2) | (1 << 10) | (1 << 11),
179 	TxMaxCol		= (1 << 8),
180 	TxOWC			= (1 << 9),
181 	TxJabber		= (1 << 14),
182 	SetupFrame		= (1 << 27),
183 	TxSwInt			= (1 << 31),
184 
185 	/* MacStatus bits */
186 	IntrOK			= (1 << 16),
187 	IntrErr			= (1 << 15),
188 	RxIntr			= (1 << 6),
189 	RxEmpty			= (1 << 7),
190 	TxIntr			= (1 << 0),
191 	TxEmpty			= (1 << 2),
192 	PciErr			= (1 << 13),
193 	TxState			= (1 << 22) | (1 << 21) | (1 << 20),
194 	RxState			= (1 << 19) | (1 << 18) | (1 << 17),
195 	LinkFail		= (1 << 12),
196 	LinkPass		= (1 << 4),
197 	RxStopped		= (1 << 8),
198 	TxStopped		= (1 << 1),
199 
200 	/* MacMode bits */
201 	TxEnable		= (1 << 13),
202 	RxEnable		= (1 << 1),
203 	RxTx			= TxEnable | RxEnable,
204 	FullDuplex		= (1 << 9),
205 	AcceptAllMulticast	= (1 << 7),
206 	AcceptAllPhys		= (1 << 6),
207 	BOCnt			= (1 << 5),
208 	MacModeClear		= (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
209 				  RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
210 
211 	/* ROMCmd bits */
212 	EE_SHIFT_CLK		= 0x02,	/* EEPROM shift clock. */
213 	EE_CS			= 0x01,	/* EEPROM chip select. */
214 	EE_DATA_WRITE		= 0x04,	/* Data from the Tulip to EEPROM. */
215 	EE_WRITE_0		= 0x01,
216 	EE_WRITE_1		= 0x05,
217 	EE_DATA_READ		= 0x08,	/* Data from the EEPROM chip. */
218 	EE_ENB			= (0x4800 | EE_CS),
219 
	/* The EEPROM commands include the always-set leading bit. */
221 	EE_READ_CMD		= 6,
222 
223 	/* RxMissed bits */
224 	RxMissedOver		= (1 << 16),
225 	RxMissedMask		= 0xffff,
226 
227 	/* SROM-related bits */
228 	SROMC0InfoLeaf		= 27,
229 	MediaBlockMask		= 0x3f,
230 	MediaCustomCSRs		= (1 << 6),
231 
232 	/* PCIPM bits */
233 	PM_Sleep		= (1 << 31),
234 	PM_Snooze		= (1 << 30),
235 	PM_Mask			= PM_Sleep | PM_Snooze,
236 
237 	/* SIAStatus bits */
238 	NWayState		= (1 << 14) | (1 << 13) | (1 << 12),
239 	NWayRestart		= (1 << 12),
240 	NonselPortActive	= (1 << 9),
241 	SelPortActive		= (1 << 8),
242 	LinkFailStatus		= (1 << 2),
243 	NetCxnErr		= (1 << 1),
244 };
245 
246 static const u32 de_intr_mask =
247 	IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
248 	LinkPass | LinkFail | PciErr;
249 
250 /*
251  * Set the programmable burst length to 4 longwords for all:
252  * DMA errors result without these values. Cache align 16 long.
253  */
254 static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
255 
/* One media block inside the SROM info leaf; __packed because this
 * mirrors the on-EEPROM byte layout exactly (no padding).
 */
struct de_srom_media_block {
	u8			opts;	/* media code + MediaCustomCSRs flag */
	u16			csr13;
	u16			csr14;
	u16			csr15;
} __packed;
262 
/* Header of the SROM info leaf; __packed to match the on-EEPROM layout. */
struct de_srom_info_leaf {
	u16			default_media;
	u8			n_blocks;	/* number of media blocks that follow */
	u8			unused;
} __packed;
268 
/* Hardware Rx/Tx descriptor, little-endian as consumed by the chip.
 * The optional skip words match the descriptor-skip-length (DSL) value
 * programmed into BusMode via DescSkipLen.
 */
struct de_desc {
	__le32			opts1;	/* status word; top bit is DescOwn */
	__le32			opts2;	/* control flags + buffer length */
	__le32			addr1;	/* DMA address of the data buffer */
	__le32			addr2;	/* second buffer address (not used by the code visible here) */
#if DSL
	__le32			skip[DSL];
#endif
};
278 
/* Per-media-type transceiver settings, written to CSR13/14/15 by
 * de_set_media().
 */
struct media_info {
	u16			type;	/* DE_MEDIA_xxx */
	u16			csr13;	/* value programmed into CSR13 */
	u16			csr14;	/* value programmed into CSR14 */
	u16			csr15;	/* value programmed into CSR15 */
};
285 
/* Per-descriptor bookkeeping: the skb (or a DE_SETUP_SKB/DE_DUMMY_SKB
 * sentinel on the Tx side) and its DMA mapping.
 */
struct ring_info {
	struct sk_buff		*skb;
	dma_addr_t		mapping;
};
290 
/* Per-adapter driver state, reached via netdev_priv(dev). */
struct de_private {
	unsigned		tx_head;	/* next Tx slot to fill */
	unsigned		tx_tail;	/* next Tx slot to reclaim */
	unsigned		rx_tail;	/* next Rx slot to examine */

	void			__iomem *regs;	/* mapped CSR region (dr32/dw32) */
	struct net_device	*dev;
	spinlock_t		lock;		/* guards Tx ring, stats sampling, media changes */

	struct de_desc		*rx_ring;
	struct de_desc		*tx_ring;	/* rings share one allocation (DE_RING_BYTES) */
	struct ring_info	tx_skb[DE_TX_RING_SIZE];
	struct ring_info	rx_skb[DE_RX_RING_SIZE];
	unsigned		rx_buf_sz;	/* size of each Rx DMA buffer */
	dma_addr_t		ring_dma;	/* DMA handle for the ring allocation */

	u32			msg_enable;	/* netif_msg_* log-level bitmap */

	struct pci_dev		*pdev;

	u16			setup_frame[DE_SETUP_FRAME_WORDS];	/* Rx filter setup frame */

	u32			media_type;	/* current DE_MEDIA_xxx */
	u32			media_supported;	/* SUPPORTED_* bitmap */
	u32			media_advertise;	/* ADVERTISED_* bitmap */
	struct media_info	media[DE_MAX_MEDIA];	/* per-media CSR13/14/15 values */
	struct timer_list	media_timer;	/* link poll / media hunt timer */

	u8			*ee_data;	/* EEPROM image (filled in outside this view) */
	unsigned		board_idx;
	unsigned		de21040 : 1;	/* chip is a 21040 (else 21041) */
	unsigned		media_lock : 1;	/* media type forced; don't auto-switch */
};
324 
325 
326 static void de_set_rx_mode (struct net_device *dev);
327 static void de_tx (struct de_private *de);
328 static void de_clean_rings (struct de_private *de);
329 static void de_media_interrupt (struct de_private *de, u32 status);
330 static void de21040_media_timer (struct timer_list *t);
331 static void de21041_media_timer (struct timer_list *t);
332 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
333 
334 
335 static const struct pci_device_id de_pci_tbl[] = {
336 	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
337 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
338 	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
339 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
340 	{ },
341 };
342 MODULE_DEVICE_TABLE(pci, de_pci_tbl);
343 
344 static const char * const media_name[DE_MAX_MEDIA] = {
345 	"10baseT auto",
346 	"BNC",
347 	"AUI",
348 	"10baseT-HD",
349 	"10baseT-FD"
350 };
351 
352 /* 21040 transceiver register settings:
353  * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
354 static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
355 static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
356 static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
357 
358 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
359 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
360 static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
361 /* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
362 static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
363 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
364 
365 
366 #define dr32(reg)	ioread32(de->regs + (reg))
367 #define dw32(reg, val)	iowrite32((val), de->regs + (reg))
368 
369 
/* Account a receive error reported in descriptor status word @status
 * for ring slot @rx_tail.  Called only from de_rx(); updates the
 * net_device error counters but does not touch the ring itself.
 */
static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
			    u32 status, u32 len)
{
	netif_dbg(de, rx_err, de->dev,
		  "rx err, slot %d status 0x%x len %d\n",
		  rx_tail, status, len);

	/* A clean single-buffer frame reads back as exactly 0x0300
	 * (FirstFrag | LastFrag) under this mask; anything else means
	 * the frame spanned multiple buffers.
	 */
	if ((status & 0x38000300) != 0x0300) {
		/* Ignore earlier buffers. */
		if ((status & 0xffff) != 0x7fff) {
			netif_warn(de, rx_err, de->dev,
				   "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
				   status);
			de->dev->stats.rx_length_errors++;
		}
	} else if (status & RxError) {
		/* There was a fatal error. */
		de->dev->stats.rx_errors++; /* end of a packet.*/
		/* 0x0890: runt/long and related length-error bits */
		if (status & 0x0890) de->dev->stats.rx_length_errors++;
		if (status & RxErrCRC) de->dev->stats.rx_crc_errors++;
		if (status & RxErrFIFO) de->dev->stats.rx_fifo_errors++;
	}
}
393 
/* Receive path: drain completed descriptors from the Rx ring.
 *
 * Called from the interrupt handler without de->lock (the Rx ring is
 * only touched here and at init/teardown).  Small frames
 * (<= rx_copybreak) are copied into a freshly allocated skb so the
 * original DMA buffer stays on the ring; larger frames are passed
 * upstream and replaced by a newly mapped buffer.  Every descriptor is
 * recycled and handed back to the chip at rx_next, even on error.
 */
static void de_rx (struct de_private *de)
{
	unsigned rx_tail = de->rx_tail;
	unsigned rx_work = DE_RX_RING_SIZE;	/* per-call work limit */
	unsigned drop = 0;
	int rc;

	while (--rx_work) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *copy_skb;
		unsigned copying_skb, buflen;

		skb = de->rx_skb[rx_tail].skb;
		BUG_ON(!skb);
		rmb();	/* read status only after observing ring state */
		status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
		if (status & DescOwn)
			break;	/* chip still owns this slot - done */

		/* the length is actually a 15 bit value here according
		 * to Table 4-1 in the DE2104x spec so mask is 0x7fff
		 */
		len = ((status >> 16) & 0x7fff) - 4;	/* strip trailing FCS */
		mapping = de->rx_skb[rx_tail].mapping;

		if (unlikely(drop)) {
			/* earlier alloc failure or netif_rx drop: discard
			 * the remainder of the batch but keep recycling
			 */
			de->dev->stats.rx_dropped++;
			goto rx_next;
		}

		if (unlikely((status & 0x38008300) != 0x0300)) {
			de_rx_err_acct(de, rx_tail, status, len);
			goto rx_next;
		}

		copying_skb = (len <= rx_copybreak);

		netif_dbg(de, rx_status, de->dev,
			  "rx slot %d status 0x%x len %d copying? %d\n",
			  rx_tail, status, len, copying_skb);

		buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
		copy_skb = netdev_alloc_skb(de->dev, buflen);
		if (unlikely(!copy_skb)) {
			de->dev->stats.rx_dropped++;
			drop = 1;
			rx_work = 100;
			goto rx_next;
		}

		if (!copying_skb) {
			/* hand the full buffer upstream; put the fresh one
			 * on the ring in its place
			 */
			pci_unmap_single(de->pdev, mapping,
					 buflen, PCI_DMA_FROMDEVICE);
			skb_put(skb, len);

			mapping =
			de->rx_skb[rx_tail].mapping =
				pci_map_single(de->pdev, copy_skb->data,
					       buflen, PCI_DMA_FROMDEVICE);
			de->rx_skb[rx_tail].skb = copy_skb;
		} else {
			/* copy-break: duplicate the small frame, keep the
			 * original DMA buffer mapped on the ring
			 */
			pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
			skb_reserve(copy_skb, RX_OFFSET);
			skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
						  len);
			pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		skb->protocol = eth_type_trans (skb, de->dev);

		de->dev->stats.rx_packets++;
		de->dev->stats.rx_bytes += skb->len;
		rc = netif_rx (skb);
		if (rc == NET_RX_DROP)
			drop = 1;

rx_next:
		/* recycle the descriptor: restore buffer size (plus the
		 * RingEnd marker on the final slot) and the buffer address,
		 * then hand ownership back; wmb() orders the body writes
		 * before the DescOwn handoff.
		 */
		if (rx_tail == (DE_RX_RING_SIZE - 1))
			de->rx_ring[rx_tail].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
		wmb();
		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
		rx_tail = NEXT_RX(rx_tail);
	}

	if (!rx_work)
		netdev_warn(de->dev, "rx work limit reached\n");

	de->rx_tail = rx_tail;
}
491 
/* Interrupt handler.  Acks all pending causes, runs the Rx path outside
 * the lock, then takes de->lock for Tx completion and link events.
 * PCI bus errors are cleared and logged only (full recovery is a
 * listed TODO at the top of this file).
 */
static irqreturn_t de_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct de_private *de = netdev_priv(dev);
	u32 status;

	status = dr32(MacStatus);
	/* no cause bits, or all-ones readback (device absent) -> not ours */
	if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
		return IRQ_NONE;

	netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n",
		  status, dr32(MacMode),
		  de->rx_tail, de->tx_head, de->tx_tail);

	/* ack everything we just sampled */
	dw32(MacStatus, status);

	if (status & (RxIntr | RxEmpty)) {
		de_rx(de);
		if (status & RxEmpty)
			dw32(RxPoll, NormalRxPoll);	/* restart Rx DMA */
	}

	spin_lock(&de->lock);

	if (status & (TxIntr | TxEmpty))
		de_tx(de);

	if (status & (LinkPass | LinkFail))
		de_media_interrupt(de, status);

	spin_unlock(&de->lock);

	if (status & PciErr) {
		u16 pci_status;

		/* writing PCI_STATUS back to itself clears the error bits */
		pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
		netdev_err(de->dev,
			   "PCI bus error, status=%08x, PCI status=%04x\n",
			   status, pci_status);
	}

	return IRQ_HANDLED;
}
536 
/* Reclaim completed Tx descriptors.  Called with de->lock held from the
 * interrupt handler.  Sentinel entries queued by __de_set_rx_mode()
 * (DE_DUMMY_SKB / DE_SETUP_SKB) are unmapped/skipped without skb
 * accounting; real frames are accounted from the last fragment's status
 * word.  Wakes the queue once a quarter of the ring is free again.
 */
static void de_tx (struct de_private *de)
{
	unsigned tx_head = de->tx_head;
	unsigned tx_tail = de->tx_tail;

	while (tx_tail != tx_head) {
		struct sk_buff *skb;
		u32 status;

		rmb();	/* read status only after observing tx_head */
		status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
		if (status & DescOwn)
			break;	/* chip still owns this slot - stop */

		skb = de->tx_skb[tx_tail].skb;
		BUG_ON(!skb);
		if (unlikely(skb == DE_DUMMY_SKB))
			goto next;	/* errata-workaround entry: nothing mapped */

		if (unlikely(skb == DE_SETUP_SKB)) {
			/* setup frame: unmap only, there is no skb to free */
			pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
					 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
			goto next;
		}

		pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
				 skb->len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & TxError) {
				netif_dbg(de, tx_err, de->dev,
					  "tx err, status 0x%x\n",
					  status);
				de->dev->stats.tx_errors++;
				if (status & TxOWC)
					de->dev->stats.tx_window_errors++;
				if (status & TxMaxCol)
					de->dev->stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					de->dev->stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					de->dev->stats.tx_fifo_errors++;
			} else {
				de->dev->stats.tx_packets++;
				de->dev->stats.tx_bytes += skb->len;
				netif_dbg(de, tx_done, de->dev,
					  "tx done, slot %d\n", tx_tail);
			}
			dev_consume_skb_irq(skb);
		}

next:
		de->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	de->tx_tail = tx_tail;

	/* wake the queue once at least a quarter of the ring is free */
	if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
		netif_wake_queue(de->dev);
}
599 
/* Queue one skb for transmission.  Frames are always single-fragment
 * here (FirstFrag | LastFrag on one descriptor).  The descriptor body
 * is published before DescOwn via wmb(), then TxPoll is kicked.
 * Returns NETDEV_TX_BUSY only in the race where the ring filled after
 * the core's queue-state check.
 */
static netdev_tx_t de_start_xmit (struct sk_buff *skb,
					struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	unsigned int entry, tx_free;
	u32 mapping, len, flags = FirstFrag | LastFrag;
	struct de_desc *txd;

	spin_lock_irq(&de->lock);

	tx_free = TX_BUFFS_AVAIL(de);
	if (tx_free == 0) {
		netif_stop_queue(dev);
		spin_unlock_irq(&de->lock);
		return NETDEV_TX_BUSY;
	}
	tx_free--;

	entry = de->tx_head;

	txd = &de->tx_ring[entry];

	len = skb->len;
	mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (entry == (DE_TX_RING_SIZE - 1))
		flags |= RingEnd;
	/* request a Tx interrupt when the ring becomes full or half-used */
	if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
		flags |= TxSwInt;
	flags |= len;
	txd->opts2 = cpu_to_le32(flags);
	txd->addr1 = cpu_to_le32(mapping);

	de->tx_skb[entry].skb = skb;
	de->tx_skb[entry].mapping = mapping;
	wmb();	/* descriptor body must be visible before DescOwn */

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	de->tx_head = NEXT_TX(entry);
	netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n",
		  entry, skb->len);

	if (tx_free == 0)
		netif_stop_queue(dev);

	spin_unlock_irq(&de->lock);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

	return NETDEV_TX_OK;
}
653 
654 /* Set or clear the multicast filter for this adaptor.
655    Note that we only use exclusion around actually queueing the
656    new frame, not around filling de->setup_frame.  This is non-deterministic
657    when re-entered but still correct. */
658 
/* Build a 512-bit hash-filter setup frame (used for > 14 multicast
 * addresses).  Each 16-bit word is stored twice because only the
 * low-address shortword of each 32-bit setup-frame entry is used by
 * the chip (see the note in __de_set_rx_mode).
 */
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u16 hash_table[32];
	struct netdev_hw_addr *ha;
	int i;
	u16 *eaddrs;

	memset(hash_table, 0, sizeof(hash_table));
	__set_bit_le(255, hash_table);			/* Broadcast entry */
	/* This should work on big-endian machines as well. */
	netdev_for_each_mc_addr(ha, dev) {
		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;

		__set_bit_le(index, hash_table);
	}

	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	/* jump to the station-address slot of the setup frame */
	setup_frm = &de->setup_frame[13*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
688 
/* Build a perfect-filter setup frame (used for <= 14 multicast
 * addresses): each address occupies one 16-entry filter slot, unused
 * slots are padded with the broadcast address, and the final slot
 * holds the station address.  Words are duplicated because only the
 * low-address shortword of each 32-bit entry is used by the chip.
 */
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u16 *eaddrs;

	/* We have <= 14 addresses so we can use the wonderful
	   16 address perfect filtering of the Tulip. */
	netdev_for_each_mc_addr(ha, dev) {
		eaddrs = (u16 *) ha->addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
	setup_frm = &de->setup_frame[15*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
713 
714 
/* Program the Rx filter (caller must hold de->lock).  Promiscuous and
 * all-multicast modes are handled purely through MacMode bits; all
 * other cases build a setup frame (perfect filter for <= 14 multicast
 * addresses, 512-bit hash otherwise) and queue it on the Tx ring for
 * the chip to consume.
 */
static void __de_set_rx_mode (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u32 macmode;
	unsigned int entry;
	u32 mapping;
	struct de_desc *txd;
	struct de_desc *dummy_txd = NULL;

	macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		macmode |= AcceptAllMulticast | AcceptAllPhys;
		goto out;
	}

	if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		macmode |= AcceptAllMulticast;
		goto out;
	}

	/* Note that only the low-address shortword of setup_frame is valid!
	   The values are doubled for big-endian architectures. */
	if (netdev_mc_count(dev) > 14)	/* Must use a multicast hash table. */
		build_setup_frame_hash (de->setup_frame, dev);
	else
		build_setup_frame_perfect (de->setup_frame, dev);

	/*
	 * Now add this frame to the Tx list.
	 */

	entry = de->tx_head;

	/* Avoid a chip errata by prefixing a dummy entry. */
	if (entry != 0) {
		de->tx_skb[entry].skb = DE_DUMMY_SKB;

		dummy_txd = &de->tx_ring[entry];
		dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
				   cpu_to_le32(RingEnd) : 0;
		dummy_txd->addr1 = 0;

		/* Must set DescOwned later to avoid race with chip */

		entry = NEXT_TX(entry);
	}

	de->tx_skb[entry].skb = DE_SETUP_SKB;
	de->tx_skb[entry].mapping = mapping =
	    pci_map_single (de->pdev, de->setup_frame,
			    sizeof (de->setup_frame), PCI_DMA_TODEVICE);

	/* Put the setup frame on the Tx list. */
	txd = &de->tx_ring[entry];
	if (entry == (DE_TX_RING_SIZE - 1))
		txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
	else
		txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
	txd->addr1 = cpu_to_le32(mapping);
	wmb();	/* descriptor body before ownership handoff */

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	/* only now hand over the dummy descriptor, so the chip never
	 * chases it into a half-built setup-frame entry
	 */
	if (dummy_txd) {
		dummy_txd->opts1 = cpu_to_le32(DescOwn);
		wmb();
	}

	de->tx_head = NEXT_TX(entry);

	if (TX_BUFFS_AVAIL(de) == 0)
		netif_stop_queue(dev);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

out:
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}
798 
799 static void de_set_rx_mode (struct net_device *dev)
800 {
801 	unsigned long flags;
802 	struct de_private *de = netdev_priv(dev);
803 
804 	spin_lock_irqsave (&de->lock, flags);
805 	__de_set_rx_mode(dev);
806 	spin_unlock_irqrestore (&de->lock, flags);
807 }
808 
809 static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
810 {
811 	if (unlikely(rx_missed & RxMissedOver))
812 		de->dev->stats.rx_missed_errors += RxMissedMask;
813 	else
814 		de->dev->stats.rx_missed_errors += (rx_missed & RxMissedMask);
815 }
816 
817 static void __de_get_stats(struct de_private *de)
818 {
819 	u32 tmp = dr32(RxMissed); /* self-clearing */
820 
821 	de_rx_missed(de, tmp);
822 }
823 
824 static struct net_device_stats *de_get_stats(struct net_device *dev)
825 {
826 	struct de_private *de = netdev_priv(dev);
827 
828 	/* The chip only need report frame silently dropped. */
829 	spin_lock_irq(&de->lock);
830  	if (netif_running(dev) && netif_device_present(dev))
831  		__de_get_stats(de);
832 	spin_unlock_irq(&de->lock);
833 
834 	return &dev->stats;
835 }
836 
837 static inline int de_is_running (struct de_private *de)
838 {
839 	return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
840 }
841 
/* Disable both DMA engines and wait (up to 1300 us, in 100 us steps)
 * for any in-flight frame to drain.  Warns if the chip never goes idle.
 */
static void de_stop_rxtx (struct de_private *de)
{
	u32 macmode;
	unsigned int i = 1300/100;	/* 100 us polls, 1300 us budget */

	macmode = dr32(MacMode);
	if (macmode & RxTx) {
		dw32(MacMode, macmode & ~RxTx);
		dr32(MacMode);	/* read back to flush the posted write */
	}

	/* wait until in-flight frame completes.
	 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
	 * Typically expect this loop to end in < 50 us on 100BT.
	 */
	while (--i) {
		if (!de_is_running(de))
			return;
		udelay(100);
	}

	netdev_warn(de->dev, "timeout expired, stopping DMA\n");
}
865 
866 static inline void de_start_rxtx (struct de_private *de)
867 {
868 	u32 macmode;
869 
870 	macmode = dr32(MacMode);
871 	if ((macmode & RxTx) != RxTx) {
872 		dw32(MacMode, macmode | RxTx);
873 		dr32(MacMode);
874 	}
875 }
876 
/* Quiesce the chip: mask all interrupt sources, stop the DMA engines,
 * ack any stale status bits, and reset the software ring indices.
 */
static void de_stop_hw (struct de_private *de)
{

	udelay(5);
	dw32(IntrMask, 0);	/* mask every interrupt source */

	de_stop_rxtx(de);

	/* writing the sampled status back acks all pending bits */
	dw32(MacStatus, dr32(MacStatus));

	udelay(10);

	/* rings are idle now; restart from slot 0 */
	de->rx_tail = 0;
	de->tx_head = de->tx_tail = 0;
}
892 
893 static void de_link_up(struct de_private *de)
894 {
895 	if (!netif_carrier_ok(de->dev)) {
896 		netif_carrier_on(de->dev);
897 		netif_info(de, link, de->dev, "link up, media %s\n",
898 			   media_name[de->media_type]);
899 	}
900 }
901 
902 static void de_link_down(struct de_private *de)
903 {
904 	if (netif_carrier_ok(de->dev)) {
905 		netif_carrier_off(de->dev);
906 		netif_info(de, link, de->dev, "link down\n");
907 	}
908 }
909 
/* Program the transceiver (CSR13/14/15) for de->media_type and set the
 * MacMode duplex bit to match.  The DMA engines should be stopped
 * before calling (a warning is logged otherwise).
 */
static void de_set_media (struct de_private *de)
{
	unsigned media = de->media_type;
	u32 macmode = dr32(MacMode);

	if (de_is_running(de))
		netdev_warn(de->dev, "chip is running while changing media!\n");

	if (de->de21040)
		dw32(CSR11, FULL_DUPLEX_MAGIC);	/* see FULL_DUPLEX_MAGIC comment */
	dw32(CSR13, 0); /* Reset phy */
	dw32(CSR14, de->media[media].csr14);
	dw32(CSR15, de->media[media].csr15);
	dw32(CSR13, de->media[media].csr13);	/* bring phy back up with new settings */

	/* must delay 10ms before writing to other registers,
	 * especially CSR6
	 */
	mdelay(10);

	if (media == DE_MEDIA_TP_FD)
		macmode |= FullDuplex;
	else
		macmode &= ~FullDuplex;

	netif_info(de, link, de->dev, "set link %s\n", media_name[media]);
	netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
		   dr32(MacMode), dr32(SIAStatus),
		   dr32(CSR13), dr32(CSR14), dr32(CSR15));
	netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
		   macmode, de->media[media].csr13,
		   de->media[media].csr14, de->media[media].csr15);
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}
945 
946 static void de_next_media (struct de_private *de, const u32 *media,
947 			   unsigned int n_media)
948 {
949 	unsigned int i;
950 
951 	for (i = 0; i < n_media; i++) {
952 		if (de_ok_to_advertise(de, media[i])) {
953 			de->media_type = media[i];
954 			return;
955 		}
956 	}
957 }
958 
/* 21040 link poll timer.  Re-arms itself: every DE_TIMER_LINK while the
 * link is up, every DE_TIMER_NO_LINK while hunting.  With no carrier
 * (and the media not locked by the user) it alternates between the
 * chip's two ports, AUI and TP.
 */
static void de21040_media_timer (struct timer_list *t)
{
	struct de_private *de = from_timer(de, t, media_timer);
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;
	unsigned long flags;

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		/* TP reports link via LinkFailStatus; AUI has no such bit */
		if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
			goto no_link_yet;

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			netif_info(de, timer, dev, "%s link ok, status %x\n",
				   media_name[de->media_type], status);
		return;
	}

	de_link_down(de);

	if (de->media_lock)
		return;

	/* alternate between the 21040's two ports: AUI <-> TP */
	if (de->media_type == DE_MEDIA_AUI) {
		static const u32 next_state = DE_MEDIA_TP;
		de_next_media(de, &next_state, 1);
	} else {
		static const u32 next_state = DE_MEDIA_AUI;
		de_next_media(de, &next_state, 1);
	}

	spin_lock_irqsave(&de->lock, flags);
	de_stop_rxtx(de);
	spin_unlock_irqrestore(&de->lock, flags);
	de_set_media(de);
	de_start_rxtx(de);

	/* deliberate fall-through: re-arm at the short no-link interval */
no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
		   media_name[de->media_type], status);
}
1009 
1010 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1011 {
1012 	switch (new_media) {
1013 	case DE_MEDIA_TP_AUTO:
1014 		if (!(de->media_advertise & ADVERTISED_Autoneg))
1015 			return 0;
1016 		if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1017 			return 0;
1018 		break;
1019 	case DE_MEDIA_BNC:
1020 		if (!(de->media_advertise & ADVERTISED_BNC))
1021 			return 0;
1022 		break;
1023 	case DE_MEDIA_AUI:
1024 		if (!(de->media_advertise & ADVERTISED_AUI))
1025 			return 0;
1026 		break;
1027 	case DE_MEDIA_TP:
1028 		if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1029 			return 0;
1030 		break;
1031 	case DE_MEDIA_TP_FD:
1032 		if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1033 			return 0;
1034 		break;
1035 	}
1036 
1037 	return 1;
1038 }
1039 
/*
 * de21041_media_timer - periodic media poll for the 21041.
 *
 * Reads the SIA status to decide whether the selected port has carrier.
 * While there is no link (and the media type is not locked by the user)
 * it walks through the advertised media types (TP auto / TP / AUI /
 * BNC), reprogramming the SIA each time, until one reports activity.
 * Re-arms itself with DE_TIMER_LINK while up, DE_TIMER_NO_LINK while
 * searching.
 */
static void de21041_media_timer (struct timer_list *t)
{
	struct de_private *de = from_timer(de, t, media_timer);
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;
	unsigned long flags;

	/* clear port active bits */
	dw32(SIAStatus, NonselPortActive | SelPortActive);

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		/* TP media additionally needs link integrity to pass */
		if ((de->media_type == DE_MEDIA_TP_AUTO ||
		     de->media_type == DE_MEDIA_TP ||
		     de->media_type == DE_MEDIA_TP_FD) &&
		    (status & LinkFailStatus))
			goto no_link_yet;

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			netif_info(de, timer, dev,
				   "%s link ok, mode %x status %x\n",
				   media_name[de->media_type],
				   dr32(MacMode), status);
		return;
	}

	de_link_down(de);

	/* if media type locked, don't switch media */
	if (de->media_lock)
		goto set_media;

	/* if activity detected, use that as hint for new media type */
	if (status & NonselPortActive) {
		unsigned int have_media = 1;

		/* if AUI/BNC selected, then activity is on TP port */
		if (de->media_type == DE_MEDIA_AUI ||
		    de->media_type == DE_MEDIA_BNC) {
			if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
				de->media_type = DE_MEDIA_TP_AUTO;
			else
				have_media = 0;
		}

		/* TP selected.  If there is only TP and BNC, then it's BNC */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
			 de_ok_to_advertise(de, DE_MEDIA_BNC))
			de->media_type = DE_MEDIA_BNC;

		/* TP selected.  If there is only TP and AUI, then it's AUI */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
			 de_ok_to_advertise(de, DE_MEDIA_AUI))
			de->media_type = DE_MEDIA_AUI;

		/* otherwise, ignore the hint */
		else
			have_media = 0;

		if (have_media)
			goto set_media;
	}

	/*
	 * Absent or ambiguous activity hint, move to next advertised
	 * media state.  If de->media_type is left unchanged, this
	 * simply resets the PHY and reloads the current media settings.
	 */
	if (de->media_type == DE_MEDIA_AUI) {
		static const u32 next_states[] = {
			DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else if (de->media_type == DE_MEDIA_BNC) {
		static const u32 next_states[] = {
			DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else {
		static const u32 next_states[] = {
			DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	}

set_media:
	/* quiesce DMA under the lock, then reprogram the SIA and restart */
	spin_lock_irqsave(&de->lock, flags);
	de_stop_rxtx(de);
	spin_unlock_irqrestore(&de->lock, flags);
	de_set_media(de);
	de_start_rxtx(de);

no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
		   media_name[de->media_type], status);
}
1145 
/*
 * Handle a LinkPass/LinkFail SIA event.  On LinkPass: if the current
 * media is AUI/BNC, switch back to TP autosense (unless the media type
 * is locked or TP is not advertised), then mark the link up.  On
 * LinkFail: mark the link down, but only when the current media is TP —
 * the AUI/BNC ports have no link-integrity signal here.
 */
static void de_media_interrupt (struct de_private *de, u32 status)
{
	if (status & LinkPass) {
		/* Ignore if current media is AUI or BNC and we can't use TP */
		if ((de->media_type == DE_MEDIA_AUI ||
		     de->media_type == DE_MEDIA_BNC) &&
		    (de->media_lock ||
		     !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
			return;
		/* If current media is not TP, change it to TP */
		if ((de->media_type == DE_MEDIA_AUI ||
		     de->media_type == DE_MEDIA_BNC)) {
			de->media_type = DE_MEDIA_TP_AUTO;
			de_stop_rxtx(de);
			de_set_media(de);
			de_start_rxtx(de);
		}
		de_link_up(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
		return;
	}

	/* caller only invokes us for LinkPass or LinkFail events */
	BUG_ON(!(status & LinkFail));
	/* Mark the link as down only if current media is TP */
	if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
	    de->media_type != DE_MEDIA_BNC) {
		de_link_down(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
	}
}
1176 
/*
 * Soft-reset the MAC through CSR0 (BusMode) and restore the cached bus
 * mode value.  Returns 0 on success, -EBUSY if the chip is absent/hung
 * or its RX/TX state machines are still running after the reset, and
 * -ENODEV if the chip reads back as all-ones (gone from the bus).
 */
static int de_reset_mac (struct de_private *de)
{
	u32 status, tmp;

	/*
	 * Reset MAC.  de4x5.c and tulip.c examined for "advice"
	 * in this area.
	 */

	if (dr32(BusMode) == 0xffffffff)
		return -EBUSY;

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	dw32 (BusMode, CmdReset);
	mdelay (1);

	dw32 (BusMode, de_bus_mode);
	mdelay (1);

	/* dummy reads give the chip time to settle after the reset */
	for (tmp = 0; tmp < 5; tmp++) {
		dr32 (BusMode);
		mdelay (1);
	}

	mdelay (1);

	status = dr32(MacStatus);
	if (status & (RxState | TxState))
		return -EBUSY;
	if (status == 0xffffffff)
		return -ENODEV;
	return 0;
}
1210 
/*
 * Bring a 21041 out of PCI power-management sleep/snooze by clearing
 * its PM mode bits.  The 21040 has no PM register, so it is skipped.
 */
static void de_adapter_wake (struct de_private *de)
{
	u32 pmctl;

	if (de->de21040)
		return;

	pci_read_config_dword(de->pdev, PCIPM, &pmctl);
	if (pmctl & PM_Mask) {
		pmctl &= ~PM_Mask;
		pci_write_config_dword(de->pdev, PCIPM, pmctl);

		/* de4x5.c delays, so we do too */
		msleep(10);
	}
}
1227 
/*
 * Put a 21041 into PCI power-management sleep mode, resetting the SIA
 * (CSR13) first so the link is dropped cleanly.  No-op on the 21040,
 * which has no PM register.
 */
static void de_adapter_sleep (struct de_private *de)
{
	u32 pmctl;

	if (de->de21040)
		return;

	dw32(CSR13, 0); /* Reset phy */
	pci_read_config_dword(de->pdev, PCIPM, &pmctl);
	pmctl |= PM_Sleep;
	pci_write_config_dword(de->pdev, PCIPM, pmctl);
}
1240 
/*
 * Full hardware (re)initialization: wake the chip, reset the MAC,
 * program the selected media into the SIA, load the descriptor ring
 * base addresses, start the RX/TX state machines, unmask interrupts
 * and program the RX filter.  Caller must already have valid rings
 * (de->ring_dma).  Returns 0 or the error from de_reset_mac().
 */
static int de_init_hw (struct de_private *de)
{
	struct net_device *dev = de->dev;
	u32 macmode;
	int rc;

	de_adapter_wake(de);

	/* preserve MacMode bits that survive reset, minus the clear mask */
	macmode = dr32(MacMode) & ~MacModeClear;

	rc = de_reset_mac(de);
	if (rc)
		return rc;

	de_set_media(de); /* reset phy */

	/* TX ring immediately follows the RX ring in the DMA area */
	dw32(RxRingAddr, de->ring_dma);
	dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));

	dw32(MacMode, RxTx | macmode);

	dr32(RxMissed); /* self-clearing */

	dw32(IntrMask, de_intr_mask);

	de_set_rx_mode(dev);

	return 0;
}
1270 
1271 static int de_refill_rx (struct de_private *de)
1272 {
1273 	unsigned i;
1274 
1275 	for (i = 0; i < DE_RX_RING_SIZE; i++) {
1276 		struct sk_buff *skb;
1277 
1278 		skb = netdev_alloc_skb(de->dev, de->rx_buf_sz);
1279 		if (!skb)
1280 			goto err_out;
1281 
1282 		de->rx_skb[i].mapping = pci_map_single(de->pdev,
1283 			skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1284 		de->rx_skb[i].skb = skb;
1285 
1286 		de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1287 		if (i == (DE_RX_RING_SIZE - 1))
1288 			de->rx_ring[i].opts2 =
1289 				cpu_to_le32(RingEnd | de->rx_buf_sz);
1290 		else
1291 			de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1292 		de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1293 		de->rx_ring[i].addr2 = 0;
1294 	}
1295 
1296 	return 0;
1297 
1298 err_out:
1299 	de_clean_rings(de);
1300 	return -ENOMEM;
1301 }
1302 
1303 static int de_init_rings (struct de_private *de)
1304 {
1305 	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1306 	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1307 
1308 	de->rx_tail = 0;
1309 	de->tx_head = de->tx_tail = 0;
1310 
1311 	return de_refill_rx (de);
1312 }
1313 
1314 static int de_alloc_rings (struct de_private *de)
1315 {
1316 	de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1317 	if (!de->rx_ring)
1318 		return -ENOMEM;
1319 	de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1320 	return de_init_rings(de);
1321 }
1322 
/*
 * Reclaim every buffer attached to the RX and TX rings.  Descriptors
 * are zeroed (with the RingEnd wrap marker restored) *before* the
 * unmap/free pass so that a still-running chip cannot use a descriptor
 * whose buffer is being torn down.  In-flight TX skbs are counted as
 * tx_dropped; setup frames and dummy placeholders are handled without
 * touching the stats.
 */
static void de_clean_rings (struct de_private *de)
{
	unsigned i;

	memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
	de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
	/* make the cleared descriptors visible before freeing buffers */
	wmb();
	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
	wmb();

	for (i = 0; i < DE_RX_RING_SIZE; i++) {
		if (de->rx_skb[i].skb) {
			pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
					 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(de->rx_skb[i].skb);
		}
	}

	for (i = 0; i < DE_TX_RING_SIZE; i++) {
		struct sk_buff *skb = de->tx_skb[i].skb;
		/* DE_DUMMY_SKB marks slots with no real buffer attached */
		if ((skb) && (skb != DE_DUMMY_SKB)) {
			if (skb != DE_SETUP_SKB) {
				de->dev->stats.tx_dropped++;
				pci_unmap_single(de->pdev,
					de->tx_skb[i].mapping,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb(skb);
			} else {
				/* setup frame: unmap only, nothing to free */
				pci_unmap_single(de->pdev,
					de->tx_skb[i].mapping,
					sizeof(de->setup_frame),
					PCI_DMA_TODEVICE);
			}
		}
	}

	memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
	memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
}
1363 
1364 static void de_free_rings (struct de_private *de)
1365 {
1366 	de_clean_rings(de);
1367 	pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
1368 	de->rx_ring = NULL;
1369 	de->tx_ring = NULL;
1370 }
1371 
/*
 * ndo_open: size the RX buffers for the current MTU, allocate the
 * rings, grab the (shared) IRQ, initialize the hardware and kick off
 * the media poll timer.  Interrupts are masked before request_irq()
 * so a shared-IRQ event cannot hit a half-initialized device.
 */
static int de_open (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	const int irq = de->pdev->irq;
	int rc;

	netif_dbg(de, ifup, dev, "enabling interface\n");

	de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	rc = de_alloc_rings(de);
	if (rc) {
		netdev_err(dev, "ring allocation failure, err=%d\n", rc);
		return rc;
	}

	dw32(IntrMask, 0);

	rc = request_irq(irq, de_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "IRQ %d request failure, err=%d\n", irq, rc);
		goto err_out_free;
	}

	rc = de_init_hw(de);
	if (rc) {
		netdev_err(dev, "h/w init failure, err=%d\n", rc);
		goto err_out_free_irq;
	}

	netif_start_queue(dev);
	mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);

	return 0;

err_out_free_irq:
	free_irq(irq, dev);
err_out_free:
	de_free_rings(de);
	return rc;
}
1413 
/*
 * ndo_stop: stop the media timer, halt the chip and the queue under
 * the lock, release the IRQ and rings, then put the chip to sleep.
 */
static int de_close (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	unsigned long flags;

	netif_dbg(de, ifdown, dev, "disabling interface\n");

	del_timer_sync(&de->media_timer);

	spin_lock_irqsave(&de->lock, flags);
	de_stop_hw(de);
	netif_stop_queue(dev);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&de->lock, flags);

	free_irq(de->pdev->irq, dev);

	de_free_rings(de);
	de_adapter_sleep(de);
	return 0;
}
1435 
/*
 * ndo_tx_timeout: the TX watchdog fired.  Quiesce everything (media
 * timer, IRQ, DMA), harvest the error counters, rebuild the rings from
 * scratch, reinitialize the hardware and reopen the queue.
 *
 * NOTE(review): the return values of de_init_rings()/de_init_hw() are
 * ignored here; if either fails the device stays wedged — consider
 * propagating or logging the error.
 */
static void de_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
	struct de_private *de = netdev_priv(dev);
	const int irq = de->pdev->irq;

	netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
		   dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
		   de->rx_tail, de->tx_head, de->tx_tail);

	del_timer_sync(&de->media_timer);

	disable_irq(irq);
	spin_lock_irq(&de->lock);

	de_stop_hw(de);
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	spin_unlock_irq(&de->lock);
	enable_irq(irq);

	/* Update the error counts. */
	__de_get_stats(de);

	/* ensure no handler is still running before touching the rings */
	synchronize_irq(irq);
	de_clean_rings(de);

	de_init_rings(de);

	de_init_hw(de);

	netif_wake_queue(dev);
}
1469 
1470 static void __de_get_regs(struct de_private *de, u8 *buf)
1471 {
1472 	int i;
1473 	u32 *rbuf = (u32 *)buf;
1474 
1475 	/* read all CSRs */
1476 	for (i = 0; i < DE_NUM_REGS; i++)
1477 		rbuf[i] = dr32(i * 8);
1478 
1479 	/* handle self-clearing RxMissed counter, CSR8 */
1480 	de_rx_missed(de, rbuf[8]);
1481 }
1482 
1483 static void __de_get_link_ksettings(struct de_private *de,
1484 				    struct ethtool_link_ksettings *cmd)
1485 {
1486 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1487 						de->media_supported);
1488 	cmd->base.phy_address = 0;
1489 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1490 						de->media_advertise);
1491 
1492 	switch (de->media_type) {
1493 	case DE_MEDIA_AUI:
1494 		cmd->base.port = PORT_AUI;
1495 		break;
1496 	case DE_MEDIA_BNC:
1497 		cmd->base.port = PORT_BNC;
1498 		break;
1499 	default:
1500 		cmd->base.port = PORT_TP;
1501 		break;
1502 	}
1503 
1504 	cmd->base.speed = 10;
1505 
1506 	if (dr32(MacMode) & FullDuplex)
1507 		cmd->base.duplex = DUPLEX_FULL;
1508 	else
1509 		cmd->base.duplex = DUPLEX_HALF;
1510 
1511 	if (de->media_lock)
1512 		cmd->base.autoneg = AUTONEG_DISABLE;
1513 	else
1514 		cmd->base.autoneg = AUTONEG_ENABLE;
1515 
1516 	/* ignore maxtxpkt, maxrxpkt for now */
1517 }
1518 
/*
 * Apply new link settings (caller holds de->lock).  Validates that the
 * requested speed/duplex/port/autoneg combination is expressible on
 * this chip and covered by the advertised/supported masks, maps it to
 * a DE_MEDIA_* type, then reprograms the SIA only if something actually
 * changed.  Returns 0 on success or -EINVAL on any invalid combination.
 */
static int __de_set_link_ksettings(struct de_private *de,
				   const struct ethtool_link_ksettings *cmd)
{
	u32 new_media;
	unsigned int media_lock;
	u8 duplex = cmd->base.duplex;
	u8 port = cmd->base.port;
	u8 autoneg = cmd->base.autoneg;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* these chips only do 10Mbps; BNC exists on the 21041 only */
	if (cmd->base.speed != 10)
		return -EINVAL;
	if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL)
		return -EINVAL;
	if (port != PORT_TP && port != PORT_AUI && port != PORT_BNC)
		return -EINVAL;
	if (de->de21040 && port == PORT_BNC)
		return -EINVAL;
	if (autoneg != AUTONEG_DISABLE && autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	if (advertising & ~de->media_supported)
		return -EINVAL;
	if (autoneg == AUTONEG_ENABLE &&
	    (!(advertising & ADVERTISED_Autoneg)))
		return -EINVAL;

	/* translate the requested port/duplex/autoneg to a media type */
	switch (port) {
	case PORT_AUI:
		new_media = DE_MEDIA_AUI;
		if (!(advertising & ADVERTISED_AUI))
			return -EINVAL;
		break;
	case PORT_BNC:
		new_media = DE_MEDIA_BNC;
		if (!(advertising & ADVERTISED_BNC))
			return -EINVAL;
		break;
	default:
		if (autoneg == AUTONEG_ENABLE)
			new_media = DE_MEDIA_TP_AUTO;
		else if (duplex == DUPLEX_FULL)
			new_media = DE_MEDIA_TP_FD;
		else
			new_media = DE_MEDIA_TP;
		if (!(advertising & ADVERTISED_TP))
			return -EINVAL;
		if (!(advertising & (ADVERTISED_10baseT_Full |
				     ADVERTISED_10baseT_Half)))
			return -EINVAL;
		break;
	}

	/* manual (non-autoneg) settings pin the media type */
	media_lock = (autoneg == AUTONEG_ENABLE) ? 0 : 1;

	if ((new_media == de->media_type) &&
	    (media_lock == de->media_lock) &&
	    (advertising == de->media_advertise))
		return 0; /* nothing to change */

	de_link_down(de);
	mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
	de_stop_rxtx(de);

	de->media_type = new_media;
	de->media_lock = media_lock;
	de->media_advertise = advertising;
	de_set_media(de);
	if (netif_running(de->dev))
		de_start_rxtx(de);

	return 0;
}
1594 
1595 static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1596 {
1597 	struct de_private *de = netdev_priv(dev);
1598 
1599 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1600 	strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
1601 }
1602 
/* ethtool: size in bytes of the register dump produced by de_get_regs() */
static int de_get_regs_len(struct net_device *dev)
{
	return DE_REGS_SIZE;
}
1607 
/* ethtool: read link settings, serialized against the driver by de->lock */
static int de_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{
	struct de_private *de = netdev_priv(dev);

	spin_lock_irq(&de->lock);
	__de_get_link_ksettings(de, cmd);
	spin_unlock_irq(&de->lock);

	return 0;
}
1619 
/* ethtool: apply link settings, serialized against the driver by de->lock */
static int de_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct de_private *de = netdev_priv(dev);
	int rc;

	spin_lock_irq(&de->lock);
	rc = __de_set_link_ksettings(de, cmd);
	spin_unlock_irq(&de->lock);

	return rc;
}
1632 
/* ethtool: return the driver's netif message-level bitmask */
static u32 de_get_msglevel(struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);

	return de->msg_enable;
}
1639 
/* ethtool: set the driver's netif message-level bitmask */
static void de_set_msglevel(struct net_device *dev, u32 msglvl)
{
	struct de_private *de = netdev_priv(dev);

	de->msg_enable = msglvl;
}
1646 
1647 static int de_get_eeprom(struct net_device *dev,
1648 			 struct ethtool_eeprom *eeprom, u8 *data)
1649 {
1650 	struct de_private *de = netdev_priv(dev);
1651 
1652 	if (!de->ee_data)
1653 		return -EOPNOTSUPP;
1654 	if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1655 	    (eeprom->len != DE_EEPROM_SIZE))
1656 		return -EINVAL;
1657 	memcpy(data, de->ee_data, eeprom->len);
1658 
1659 	return 0;
1660 }
1661 
/*
 * ethtool: restart NWay autonegotiation on the SIA.  Only valid while
 * the current media type is TP autosense; drops carrier first so the
 * media timer re-detects the link.
 */
static int de_nway_reset(struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u32 status;

	if (de->media_type != DE_MEDIA_TP_AUTO)
		return -EINVAL;
	if (netif_carrier_ok(de->dev))
		de_link_down(de);

	status = dr32(SIAStatus);
	dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
	netif_info(de, link, dev, "link nway restart, status %x,%x\n",
		   status, dr32(SIAStatus));
	return 0;
}
1678 
/* ethtool: dump all CSRs; the version word encodes the chip type in bit 0 */
static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *data)
{
	struct de_private *de = netdev_priv(dev);

	regs->version = (DE_REGS_VER << 2) | de->de21040;

	spin_lock_irq(&de->lock);
	__de_get_regs(de, data);
	spin_unlock_irq(&de->lock);
}
1690 
/* ethtool entry points shared by the 21040 and 21041 */
static const struct ethtool_ops de_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= de_get_drvinfo,
	.get_regs_len		= de_get_regs_len,
	.get_msglevel		= de_get_msglevel,
	.set_msglevel		= de_set_msglevel,
	.get_eeprom		= de_get_eeprom,
	.nway_reset		= de_nway_reset,
	.get_regs		= de_get_regs,
	.get_link_ksettings	= de_get_link_ksettings,
	.set_link_ksettings	= de_set_link_ksettings,
};
1703 
/*
 * Read the six MAC address bytes from the 21040's Ethernet-address ROM.
 * A negative value read from ROMCmd means the next byte is not ready
 * yet; each byte is polled for up to 100000 iterations before giving up
 * with a warning.
 */
static void de21040_get_mac_address(struct de_private *de)
{
	unsigned i;

	dw32 (ROMCmd, 0);	/* Reset the pointer with a dummy write. */
	udelay(5);

	for (i = 0; i < 6; i++) {
		int value, boguscnt = 100000;
		do {
			value = dr32(ROMCmd);
			rmb();
		} while (value < 0 && --boguscnt > 0);
		de->dev->dev_addr[i] = value;
		udelay(1);
		if (boguscnt <= 0)
			pr_warn("timeout reading 21040 MAC address byte %u\n",
				i);
	}
}
1724 
1725 static void de21040_get_media_info(struct de_private *de)
1726 {
1727 	unsigned int i;
1728 
1729 	de->media_type = DE_MEDIA_TP;
1730 	de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1731 			       SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1732 	de->media_advertise = de->media_supported;
1733 
1734 	for (i = 0; i < DE_MAX_MEDIA; i++) {
1735 		switch (i) {
1736 		case DE_MEDIA_AUI:
1737 		case DE_MEDIA_TP:
1738 		case DE_MEDIA_TP_FD:
1739 			de->media[i].type = i;
1740 			de->media[i].csr13 = t21040_csr13[i];
1741 			de->media[i].csr14 = t21040_csr14[i];
1742 			de->media[i].csr15 = t21040_csr15[i];
1743 			break;
1744 		default:
1745 			de->media[i].type = DE_MEDIA_INVALID;
1746 			break;
1747 		}
1748 	}
1749 }
1750 
/* Note: this routine returns extra data bits for size detection. */
/*
 * Bit-bang one 16-bit word out of the serial EEPROM at @location.
 * @addr_len is the number of address bits in the read command (6 or 8).
 * Every writel is followed by a readl, which flushes the posted write
 * before the clock line is toggled again.
 */
static unsigned tulip_read_eeprom(void __iomem *regs, int location,
				  int addr_len)
{
	int i;
	unsigned retval = 0;
	void __iomem *ee_addr = regs + ROMCmd;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	/* select the chip */
	writel(EE_ENB & ~EE_CS, ee_addr);
	writel(EE_ENB, ee_addr);

	/* Shift the read command bits out. */
	for (i = 4 + addr_len; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writel(EE_ENB | dataval, ee_addr);
		readl(ee_addr);
		writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
	}
	writel(EE_ENB, ee_addr);
	readl(ee_addr);

	/* clock the 16 data bits back in, MSB first */
	for (i = 16; i > 0; i--) {
		writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
		writel(EE_ENB, ee_addr);
		readl(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
1787 
/*
 * Parse the 21041's SROM: extract the MAC address, the default media
 * type from the controller-0 info leaf, and per-media CSR13/14/15
 * overrides from the media blocks.  Any CSR value not supplied by the
 * SROM is filled in from the t21041_csr* defaults; a malformed SROM
 * falls back to "all media supported" with default CSRs.  The raw SROM
 * contents are cached in de->ee_data for ethtool.
 */
static void de21041_get_srom_info(struct de_private *de)
{
	unsigned i, sa_offset = 0, ofs;
	u8 ee_data[DE_EEPROM_SIZE + 6] = {};
	/* probe the EEPROM's address width: 8-bit parts flag bit 0x40000 */
	unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
	struct de_srom_info_leaf *il;
	void *bufp;

	/* download entire eeprom */
	for (i = 0; i < DE_EEPROM_WORDS; i++)
		((__le16 *)ee_data)[i] =
			cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));

	/* DEC now has a specification but early board makers
	   just put the address in the first EEPROM locations. */
	/* This does  memcmp(eedata, eedata+16, 8) */

#ifndef CONFIG_MIPS_COBALT

	for (i = 0; i < 8; i ++)
		if (ee_data[i] != ee_data[16+i])
			sa_offset = 20;

#endif

	/* store MAC address */
	for (i = 0; i < 6; i ++)
		de->dev->dev_addr[i] = ee_data[i + sa_offset];

	/* get offset of controller 0 info leaf.  ignore 2nd byte. */
	ofs = ee_data[SROMC0InfoLeaf];
	if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
		goto bad_srom;

	/* get pointer to info leaf */
	il = (struct de_srom_info_leaf *) &ee_data[ofs];

	/* paranoia checks */
	if (il->n_blocks == 0)
		goto bad_srom;
	if ((sizeof(ee_data) - ofs) <
	    (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
		goto bad_srom;

	/* get default media type */
	switch (get_unaligned(&il->default_media)) {
	case 0x0001:  de->media_type = DE_MEDIA_BNC; break;
	case 0x0002:  de->media_type = DE_MEDIA_AUI; break;
	case 0x0204:  de->media_type = DE_MEDIA_TP_FD; break;
	default: de->media_type = DE_MEDIA_TP_AUTO; break;
	}

	if (netif_msg_probe(de))
		pr_info("de%d: SROM leaf offset %u, default media %s\n",
		       de->board_idx, ofs, media_name[de->media_type]);

	/* init SIA register values to defaults */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		de->media[i].type = DE_MEDIA_INVALID;
		de->media[i].csr13 = 0xffff;
		de->media[i].csr14 = 0xffff;
		de->media[i].csr15 = 0xffff;
	}

	/* parse media blocks to see what medias are supported,
	 * and if any custom CSR values are provided
	 */
	bufp = ((void *)il) + sizeof(*il);
	for (i = 0; i < il->n_blocks; i++) {
		struct de_srom_media_block *ib = bufp;
		unsigned idx;

		/* index based on media type in media block */
		switch(ib->opts & MediaBlockMask) {
		case 0: /* 10baseT */
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
					  | SUPPORTED_Autoneg;
			idx = DE_MEDIA_TP;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
			break;
		case 1: /* BNC */
			de->media_supported |= SUPPORTED_BNC;
			idx = DE_MEDIA_BNC;
			break;
		case 2: /* AUI */
			de->media_supported |= SUPPORTED_AUI;
			idx = DE_MEDIA_AUI;
			break;
		case 4: /* 10baseT-FD */
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
					  | SUPPORTED_Autoneg;
			idx = DE_MEDIA_TP_FD;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
			break;
		default:
			goto bad_srom;
		}

		de->media[idx].type = idx;

		if (netif_msg_probe(de))
			pr_info("de%d:   media block #%u: %s",
				de->board_idx, i,
				media_name[de->media[idx].type]);

		bufp += sizeof (ib->opts);

		/* media blocks are variable-length: custom CSRs add 3 words */
		if (ib->opts & MediaCustomCSRs) {
			de->media[idx].csr13 = get_unaligned(&ib->csr13);
			de->media[idx].csr14 = get_unaligned(&ib->csr14);
			de->media[idx].csr15 = get_unaligned(&ib->csr15);
			bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
				sizeof(ib->csr15);

			if (netif_msg_probe(de))
				pr_cont(" (%x,%x,%x)\n",
					de->media[idx].csr13,
					de->media[idx].csr14,
					de->media[idx].csr15);

		} else {
			if (netif_msg_probe(de))
				pr_cont("\n");
		}

		/* stop before running off the end of the SROM image */
		if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
			break;
	}

	de->media_advertise = de->media_supported;

fill_defaults:
	/* fill in defaults, for cases where custom CSRs not used */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		if (de->media[i].csr13 == 0xffff)
			de->media[i].csr13 = t21041_csr13[i];
		if (de->media[i].csr14 == 0xffff) {
			/* autonegotiation is broken at least on some chip
			   revisions - rev. 0x21 works, 0x11 does not */
			if (de->pdev->revision < 0x20)
				de->media[i].csr14 = t21041_csr14_brk[i];
			else
				de->media[i].csr14 = t21041_csr14[i];
		}
		if (de->media[i].csr15 == 0xffff)
			de->media[i].csr15 = t21041_csr15[i];
	}

	/* cache the raw SROM for ethtool -e; NULL on failure is tolerated */
	de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);

	return;

bad_srom:
	/* for error cases, it's ok to assume we support all these */
	for (i = 0; i < DE_MAX_MEDIA; i++)
		de->media[i].type = i;
	de->media_supported =
		SUPPORTED_10baseT_Half |
		SUPPORTED_10baseT_Full |
		SUPPORTED_Autoneg |
		SUPPORTED_TP |
		SUPPORTED_AUI |
		SUPPORTED_BNC;
	goto fill_defaults;
}
1953 
/* net_device callbacks shared by the 21040 and 21041 */
static const struct net_device_ops de_netdev_ops = {
	.ndo_open		= de_open,
	.ndo_stop		= de_close,
	.ndo_set_rx_mode	= de_set_rx_mode,
	.ndo_start_xmit		= de_start_xmit,
	.ndo_get_stats		= de_get_stats,
	.ndo_tx_timeout 	= de_tx_timeout,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
1964 
/*
 * PCI probe: allocate and initialize the net_device, map the MMIO CSR
 * window, reset the MAC, read the MAC address and media capabilities
 * (address ROM on the 21040, SROM on the 21041), register the network
 * interface, and finally put the chip back to sleep until it is opened.
 */
static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct de_private *de;
	int rc;
	void __iomem *regs;
	unsigned long pciaddr;
	static int board_idx = -1;

	board_idx++;

	/* allocate a new ethernet device structure, and fill in defaults */
	dev = alloc_etherdev(sizeof(struct de_private));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &de_netdev_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->ethtool_ops = &de_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	de = netdev_priv(dev);
	/* driver_data 0 in the PCI ID table marks the 21040 variant */
	de->de21040 = ent->driver_data == 0 ? 1 : 0;
	de->pdev = pdev;
	de->dev = dev;
	de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
	de->board_idx = board_idx;
	spin_lock_init (&de->lock);
	timer_setup(&de->media_timer,
		    de->de21040 ? de21040_media_timer : de21041_media_timer,
		    0);

	netif_carrier_off(dev);

	/* wake up device, assign resources */
	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	/* reserve PCI resources to ensure driver atomicity */
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable;

	/* check for invalid IRQ value */
	if (pdev->irq < 2) {
		rc = -EIO;
		pr_err("invalid irq (%d) for pci dev %s\n",
		       pdev->irq, pci_name(pdev));
		goto err_out_res;
	}

	/* obtain and check validity of PCI I/O address */
	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		pr_err("no MMIO resource for pci dev %s\n", pci_name(pdev));
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
		rc = -EIO;
		pr_err("MMIO resource (%llx) too small on pci dev %s\n",
		       (unsigned long long)pci_resource_len(pdev, 1),
		       pci_name(pdev));
		goto err_out_res;
	}

	/* remap CSR registers */
	regs = ioremap(pciaddr, DE_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
		       (unsigned long long)pci_resource_len(pdev, 1),
		       pciaddr, pci_name(pdev));
		goto err_out_res;
	}
	de->regs = regs;

	de_adapter_wake(de);

	/* make sure hardware is not running */
	rc = de_reset_mac(de);
	if (rc) {
		pr_err("Cannot reset MAC, pci dev %s\n", pci_name(pdev));
		goto err_out_iomap;
	}

	/* get MAC address, initialize default media type and
	 * get list of supported media
	 */
	if (de->de21040) {
		de21040_get_mac_address(de);
		de21040_get_media_info(de);
	} else {
		de21041_get_srom_info(de);
	}

	/* register new network interface with kernel */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	/* print info about board and interface just registered */
	netdev_info(dev, "%s at %p, %pM, IRQ %d\n",
		    de->de21040 ? "21040" : "21041",
		    regs, dev->dev_addr, pdev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable busmastering */
	pci_set_master(pdev);

	/* put adapter to sleep */
	de_adapter_sleep(de);

	return 0;

err_out_iomap:
	kfree(de->ee_data);
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);
	return rc;
}
2093 
/* PCI remove: tear down everything de_init_one() set up, in reverse. */
static void de_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct de_private *de = netdev_priv(dev);

	BUG_ON(!dev);
	unregister_netdev(dev);
	kfree(de->ee_data);
	iounmap(de->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
2107 
/*
 * PM suspend: if the interface is up, quiesce the chip the same way
 * de_tx_timeout() does (timer, IRQ, DMA), free the ring buffers and put
 * the chip into PM sleep; otherwise just detach the device.  Runs under
 * the RTNL lock to serialize against open/close.
 */
static int __maybe_unused de_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct de_private *de = netdev_priv(dev);

	rtnl_lock();
	if (netif_running (dev)) {
		const int irq = pdev->irq;

		del_timer_sync(&de->media_timer);

		disable_irq(irq);
		spin_lock_irq(&de->lock);

		de_stop_hw(de);
		netif_stop_queue(dev);
		netif_device_detach(dev);
		netif_carrier_off(dev);

		spin_unlock_irq(&de->lock);
		enable_irq(irq);

		/* Update the error counts. */
		__de_get_stats(de);

		/* no handler may still be running once we free the buffers */
		synchronize_irq(irq);
		de_clean_rings(de);

		de_adapter_sleep(de);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}
2144 
/*
 * PM resume: if the interface was running, re-enable bus mastering and
 * rebuild the rings and hardware state, then reattach the device.
 *
 * NOTE(review): the return values of de_init_rings()/de_init_hw() are
 * ignored; a failed resume would reattach a dead interface — consider
 * propagating the error.
 */
static int __maybe_unused de_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct de_private *de = netdev_priv(dev);

	rtnl_lock();
	/* already attached means suspend never detached us: nothing to do */
	if (netif_device_present(dev))
		goto out;
	if (!netif_running(dev))
		goto out_attach;
	pci_set_master(pdev);
	de_init_rings(de);
	de_init_hw(de);
out_attach:
	netif_device_attach(dev);
out:
	rtnl_unlock();
	return 0;
}
2165 
/* system-sleep power-management callbacks */
static SIMPLE_DEV_PM_OPS(de_pm_ops, de_suspend, de_resume);
2167 
/* PCI driver glue binding the de_pci_tbl IDs to probe/remove/PM */
static struct pci_driver de_driver = {
	.name		= DRV_NAME,
	.id_table	= de_pci_tbl,
	.probe		= de_init_one,
	.remove		= de_remove_one,
	.driver.pm	= &de_pm_ops,
};
2175 
/* module load/unload: just register/unregister the PCI driver */
static int __init de_init (void)
{
	return pci_register_driver(&de_driver);
}

static void __exit de_exit (void)
{
	pci_unregister_driver (&de_driver);
}

module_init(de_init);
module_exit(de_exit);
2188