1 /* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
2 /*
3 	Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
4 
5 	Copyright 1994, 1995 Digital Equipment Corporation.	    [de4x5.c]
6 	Written/copyright 1994-2001 by Donald Becker.		    [tulip.c]
7 
8 	This software may be used and distributed according to the terms of
9 	the GNU General Public License (GPL), incorporated herein by reference.
10 	Drivers based on or derived from this code fall under the GPL and must
11 	retain the authorship, copyright and license notice.  This file is not
12 	a complete program and may only be used when the entire operating
13 	system is licensed under the GPL.
14 
15 	See the file COPYING in this distribution for more information.
16 
17 	TODO, in rough priority order:
18 	* Support forcing media type with a module parameter,
19 	  like dl2k.c/sundance.c
20 	* Constants (module parms?) for Rx work limit
21 	* Complete reset on PciErr
22 	* Jumbo frames / dev->change_mtu
23 	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 	* Implement Tx software interrupt mitigation via
26 	  Tx descriptor bit
27 
28  */
29 
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 
32 #define DRV_NAME		"de2104x"
33 #define DRV_RELDATE		"Mar 17, 2004"
34 
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/pci.h>
42 #include <linux/delay.h>
43 #include <linux/ethtool.h>
44 #include <linux/compiler.h>
45 #include <linux/rtnetlink.h>
46 #include <linux/crc32.h>
47 #include <linux/slab.h>
48 
49 #include <asm/io.h>
50 #include <asm/irq.h>
51 #include <linux/uaccess.h>
52 #include <asm/unaligned.h>
53 
54 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
55 MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
56 MODULE_LICENSE("GPL");
57 
58 static int debug = -1;
59 module_param (debug, int, 0);
60 MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
61 
62 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
63 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
64         defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
65         defined(__sh__) || defined(__mips__)
66 static int rx_copybreak = 1518;
67 #else
68 static int rx_copybreak = 100;
69 #endif
70 module_param (rx_copybreak, int, 0);
71 MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
72 
73 #define DE_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
74 				 NETIF_MSG_PROBE 	| \
75 				 NETIF_MSG_LINK		| \
76 				 NETIF_MSG_IFDOWN	| \
77 				 NETIF_MSG_IFUP		| \
78 				 NETIF_MSG_RX_ERR	| \
79 				 NETIF_MSG_TX_ERR)
80 
81 /* Descriptor skip length in 32 bit longwords. */
82 #ifndef CONFIG_DE2104X_DSL
83 #define DSL			0
84 #else
85 #define DSL			CONFIG_DE2104X_DSL
86 #endif
87 
88 #define DE_RX_RING_SIZE		128
89 #define DE_TX_RING_SIZE		64
90 #define DE_RING_BYTES		\
91 		((sizeof(struct de_desc) * DE_RX_RING_SIZE) +	\
92 		(sizeof(struct de_desc) * DE_TX_RING_SIZE))
93 #define NEXT_TX(N)		(((N) + 1) & (DE_TX_RING_SIZE - 1))
94 #define NEXT_RX(N)		(((N) + 1) & (DE_RX_RING_SIZE - 1))
95 #define TX_BUFFS_AVAIL(CP)					\
96 	(((CP)->tx_tail <= (CP)->tx_head) ?			\
97 	  (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head :	\
98 	  (CP)->tx_tail - (CP)->tx_head - 1)
99 
100 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
101 #define RX_OFFSET		2
102 
103 #define DE_SETUP_SKB		((struct sk_buff *) 1)
104 #define DE_DUMMY_SKB		((struct sk_buff *) 2)
105 #define DE_SETUP_FRAME_WORDS	96
106 #define DE_EEPROM_WORDS		256
107 #define DE_EEPROM_SIZE		(DE_EEPROM_WORDS * sizeof(u16))
108 #define DE_MAX_MEDIA		5
109 
110 #define DE_MEDIA_TP_AUTO	0
111 #define DE_MEDIA_BNC		1
112 #define DE_MEDIA_AUI		2
113 #define DE_MEDIA_TP		3
114 #define DE_MEDIA_TP_FD		4
115 #define DE_MEDIA_INVALID	DE_MAX_MEDIA
116 #define DE_MEDIA_FIRST		0
117 #define DE_MEDIA_LAST		(DE_MAX_MEDIA - 1)
118 #define DE_AUI_BNC		(SUPPORTED_AUI | SUPPORTED_BNC)
119 
120 #define DE_TIMER_LINK		(60 * HZ)
121 #define DE_TIMER_NO_LINK	(5 * HZ)
122 
123 #define DE_NUM_REGS		16
124 #define DE_REGS_SIZE		(DE_NUM_REGS * sizeof(u32))
125 #define DE_REGS_VER		1
126 
127 /* Time in jiffies before concluding the transmitter is hung. */
128 #define TX_TIMEOUT		(6*HZ)
129 
130 /* This is a mysterious value that can be written to CSR11 in the 21040 (only)
131    to support a pre-NWay full-duplex signaling mechanism using short frames.
132    No one knows what it should be, but if left at its default value some
133    10base2(!) packets trigger a full-duplex-request interrupt. */
134 #define FULL_DUPLEX_MAGIC	0x6969
135 
136 enum {
137 	/* NIC registers */
138 	BusMode			= 0x00,
139 	TxPoll			= 0x08,
140 	RxPoll			= 0x10,
141 	RxRingAddr		= 0x18,
142 	TxRingAddr		= 0x20,
143 	MacStatus		= 0x28,
144 	MacMode			= 0x30,
145 	IntrMask		= 0x38,
146 	RxMissed		= 0x40,
147 	ROMCmd			= 0x48,
148 	CSR11			= 0x58,
149 	SIAStatus		= 0x60,
150 	CSR13			= 0x68,
151 	CSR14			= 0x70,
152 	CSR15			= 0x78,
153 	PCIPM			= 0x40,
154 
155 	/* BusMode bits */
156 	CmdReset		= (1 << 0),
157 	CacheAlign16		= 0x00008000,
158 	BurstLen4		= 0x00000400,
159 	DescSkipLen		= (DSL << 2),
160 
161 	/* Rx/TxPoll bits */
162 	NormalTxPoll		= (1 << 0),
163 	NormalRxPoll		= (1 << 0),
164 
165 	/* Tx/Rx descriptor status bits */
166 	DescOwn			= (1 << 31),
167 	RxError			= (1 << 15),
168 	RxErrLong		= (1 << 7),
169 	RxErrCRC		= (1 << 1),
170 	RxErrFIFO		= (1 << 0),
171 	RxErrRunt		= (1 << 11),
172 	RxErrFrame		= (1 << 14),
173 	RingEnd			= (1 << 25),
174 	FirstFrag		= (1 << 29),
175 	LastFrag		= (1 << 30),
176 	TxError			= (1 << 15),
177 	TxFIFOUnder		= (1 << 1),
178 	TxLinkFail		= (1 << 2) | (1 << 10) | (1 << 11),
179 	TxMaxCol		= (1 << 8),
180 	TxOWC			= (1 << 9),
181 	TxJabber		= (1 << 14),
182 	SetupFrame		= (1 << 27),
183 	TxSwInt			= (1 << 31),
184 
185 	/* MacStatus bits */
186 	IntrOK			= (1 << 16),
187 	IntrErr			= (1 << 15),
188 	RxIntr			= (1 << 6),
189 	RxEmpty			= (1 << 7),
190 	TxIntr			= (1 << 0),
191 	TxEmpty			= (1 << 2),
192 	PciErr			= (1 << 13),
193 	TxState			= (1 << 22) | (1 << 21) | (1 << 20),
194 	RxState			= (1 << 19) | (1 << 18) | (1 << 17),
195 	LinkFail		= (1 << 12),
196 	LinkPass		= (1 << 4),
197 	RxStopped		= (1 << 8),
198 	TxStopped		= (1 << 1),
199 
200 	/* MacMode bits */
201 	TxEnable		= (1 << 13),
202 	RxEnable		= (1 << 1),
203 	RxTx			= TxEnable | RxEnable,
204 	FullDuplex		= (1 << 9),
205 	AcceptAllMulticast	= (1 << 7),
206 	AcceptAllPhys		= (1 << 6),
207 	BOCnt			= (1 << 5),
208 	MacModeClear		= (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
209 				  RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
210 
211 	/* ROMCmd bits */
212 	EE_SHIFT_CLK		= 0x02,	/* EEPROM shift clock. */
213 	EE_CS			= 0x01,	/* EEPROM chip select. */
214 	EE_DATA_WRITE		= 0x04,	/* Data from the Tulip to EEPROM. */
215 	EE_WRITE_0		= 0x01,
216 	EE_WRITE_1		= 0x05,
217 	EE_DATA_READ		= 0x08,	/* Data from the EEPROM chip. */
218 	EE_ENB			= (0x4800 | EE_CS),
219 
	/* The EEPROM commands include the always-set leading bit. */
221 	EE_READ_CMD		= 6,
222 
223 	/* RxMissed bits */
224 	RxMissedOver		= (1 << 16),
225 	RxMissedMask		= 0xffff,
226 
227 	/* SROM-related bits */
228 	SROMC0InfoLeaf		= 27,
229 	MediaBlockMask		= 0x3f,
230 	MediaCustomCSRs		= (1 << 6),
231 
232 	/* PCIPM bits */
233 	PM_Sleep		= (1 << 31),
234 	PM_Snooze		= (1 << 30),
235 	PM_Mask			= PM_Sleep | PM_Snooze,
236 
237 	/* SIAStatus bits */
238 	NWayState		= (1 << 14) | (1 << 13) | (1 << 12),
239 	NWayRestart		= (1 << 12),
240 	NonselPortActive	= (1 << 9),
241 	SelPortActive		= (1 << 8),
242 	LinkFailStatus		= (1 << 2),
243 	NetCxnErr		= (1 << 1),
244 };
245 
246 static const u32 de_intr_mask =
247 	IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
248 	LinkPass | LinkFail | PciErr;
249 
250 /*
251  * Set the programmable burst length to 4 longwords for all:
252  * DMA errors result without these values. Cache align 16 long.
253  */
254 static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
255 
256 struct de_srom_media_block {
257 	u8			opts;
258 	u16			csr13;
259 	u16			csr14;
260 	u16			csr15;
261 } __packed;
262 
263 struct de_srom_info_leaf {
264 	u16			default_media;
265 	u8			n_blocks;
266 	u8			unused;
267 } __packed;
268 
269 struct de_desc {
270 	__le32			opts1;
271 	__le32			opts2;
272 	__le32			addr1;
273 	__le32			addr2;
274 #if DSL
275 	__le32			skip[DSL];
276 #endif
277 };
278 
279 struct media_info {
280 	u16			type;	/* DE_MEDIA_xxx */
281 	u16			csr13;
282 	u16			csr14;
283 	u16			csr15;
284 };
285 
286 struct ring_info {
287 	struct sk_buff		*skb;
288 	dma_addr_t		mapping;
289 };
290 
291 struct de_private {
292 	unsigned		tx_head;
293 	unsigned		tx_tail;
294 	unsigned		rx_tail;
295 
296 	void			__iomem *regs;
297 	struct net_device	*dev;
298 	spinlock_t		lock;
299 
300 	struct de_desc		*rx_ring;
301 	struct de_desc		*tx_ring;
302 	struct ring_info	tx_skb[DE_TX_RING_SIZE];
303 	struct ring_info	rx_skb[DE_RX_RING_SIZE];
304 	unsigned		rx_buf_sz;
305 	dma_addr_t		ring_dma;
306 
307 	u32			msg_enable;
308 
309 	struct pci_dev		*pdev;
310 
311 	u16			setup_frame[DE_SETUP_FRAME_WORDS];
312 
313 	u32			media_type;
314 	u32			media_supported;
315 	u32			media_advertise;
316 	struct media_info	media[DE_MAX_MEDIA];
317 	struct timer_list	media_timer;
318 
319 	u8			*ee_data;
320 	unsigned		board_idx;
321 	unsigned		de21040 : 1;
322 	unsigned		media_lock : 1;
323 };
324 
325 
326 static void de_set_rx_mode (struct net_device *dev);
327 static void de_tx (struct de_private *de);
328 static void de_clean_rings (struct de_private *de);
329 static void de_media_interrupt (struct de_private *de, u32 status);
330 static void de21040_media_timer (struct timer_list *t);
331 static void de21041_media_timer (struct timer_list *t);
332 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
333 
334 
335 static const struct pci_device_id de_pci_tbl[] = {
336 	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
337 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
338 	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
339 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
340 	{ },
341 };
342 MODULE_DEVICE_TABLE(pci, de_pci_tbl);
343 
344 static const char * const media_name[DE_MAX_MEDIA] = {
345 	"10baseT auto",
346 	"BNC",
347 	"AUI",
348 	"10baseT-HD",
349 	"10baseT-FD"
350 };
351 
352 /* 21040 transceiver register settings:
353  * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
354 static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
355 static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
356 static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
357 
358 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
359 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
360 static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
361 /* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
362 static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
363 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
364 
365 
366 #define dr32(reg)	ioread32(de->regs + (reg))
367 #define dw32(reg, val)	iowrite32((val), de->regs + (reg))
368 
369 
/* Account for a receive error reported in a descriptor status word.
 * A frame that spanned multiple buffers is counted as a length error;
 * otherwise the individual error bits are folded into the matching
 * netdev statistics counters.
 */
static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
			    u32 status, u32 len)
{
	struct net_device_stats *stats = &de->dev->stats;

	netif_dbg(de, rx_err, de->dev,
		  "rx err, slot %d status 0x%x len %d\n",
		  rx_tail, status, len);

	if ((status & 0x38000300) != 0x0300) {
		/* Not a normal completed frame.  Ignore earlier buffers,
		 * but report one whose length field overflowed. */
		if ((status & 0xffff) == 0x7fff)
			return;

		netif_warn(de, rx_err, de->dev,
			   "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
			   status);
		stats->rx_length_errors++;
		return;
	}

	if (!(status & RxError))
		return;

	/* A fatal error terminated this packet; classify it. */
	stats->rx_errors++;
	if (status & 0x0890)
		stats->rx_length_errors++;
	if (status & RxErrCRC)
		stats->rx_crc_errors++;
	if (status & RxErrFIFO)
		stats->rx_fifo_errors++;
}
393 
/* Harvest completed Rx descriptors: pass good frames up the stack and
 * re-arm each slot for the chip.  Frames no larger than rx_copybreak
 * are copied into a freshly allocated skb so the original DMA buffer
 * can be reused in place; larger frames are handed off directly and a
 * new buffer is mapped into the slot.  Called from the interrupt
 * handler (outside de->lock).
 */
static void de_rx (struct de_private *de)
{
	unsigned rx_tail = de->rx_tail;
	unsigned rx_work = DE_RX_RING_SIZE;	/* work budget: one ring's worth */
	unsigned drop = 0;	/* set once we decide to drop remaining frames */
	int rc;

	while (--rx_work) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *copy_skb;
		unsigned copying_skb, buflen;

		skb = de->rx_skb[rx_tail].skb;
		BUG_ON(!skb);
		rmb();		/* read descriptor status after ring state */
		status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
		if (status & DescOwn)	/* chip still owns this slot: done */
			break;

		/* the length is actually a 15 bit value here according
		 * to Table 4-1 in the DE2104x spec so mask is 0x7fff
		 */
		len = ((status >> 16) & 0x7fff) - 4;	/* strip trailing FCS */
		mapping = de->rx_skb[rx_tail].mapping;

		if (unlikely(drop)) {
			/* still recovering from an earlier allocation failure */
			de->dev->stats.rx_dropped++;
			goto rx_next;
		}

		/* anything other than a clean, single-buffer frame goes to
		 * the error-accounting path */
		if (unlikely((status & 0x38008300) != 0x0300)) {
			de_rx_err_acct(de, rx_tail, status, len);
			goto rx_next;
		}

		copying_skb = (len <= rx_copybreak);

		netif_dbg(de, rx_status, de->dev,
			  "rx slot %d status 0x%x len %d copying? %d\n",
			  rx_tail, status, len, copying_skb);

		buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
		copy_skb = netdev_alloc_skb(de->dev, buflen);
		if (unlikely(!copy_skb)) {
			/* out of memory: enter drop mode and shrink the
			 * remaining work budget */
			de->dev->stats.rx_dropped++;
			drop = 1;
			rx_work = 100;
			goto rx_next;
		}

		if (!copying_skb) {
			/* hand the DMA buffer itself up the stack; the new
			 * skb becomes the replacement ring buffer */
			dma_unmap_single(&de->pdev->dev, mapping, buflen,
					 DMA_FROM_DEVICE);
			skb_put(skb, len);

			mapping =
			de->rx_skb[rx_tail].mapping =
				dma_map_single(&de->pdev->dev, copy_skb->data,
					       buflen, DMA_FROM_DEVICE);
			de->rx_skb[rx_tail].skb = copy_skb;
		} else {
			/* small frame: copy it out of the DMA buffer */
			dma_sync_single_for_cpu(&de->pdev->dev, mapping, len,
						DMA_FROM_DEVICE);
			skb_reserve(copy_skb, RX_OFFSET);
			skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
						  len);
			dma_sync_single_for_device(&de->pdev->dev, mapping,
						   len, DMA_FROM_DEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		skb->protocol = eth_type_trans (skb, de->dev);

		de->dev->stats.rx_packets++;
		de->dev->stats.rx_bytes += skb->len;
		rc = netif_rx (skb);
		if (rc == NET_RX_DROP)
			drop = 1;

rx_next:
		/* re-arm the slot: size (+RingEnd on the final slot), then
		 * buffer address, then ownership back to the chip */
		if (rx_tail == (DE_RX_RING_SIZE - 1))
			de->rx_ring[rx_tail].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
		wmb();		/* descriptor fields visible before DescOwn */
		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
		rx_tail = NEXT_RX(rx_tail);
	}

	if (!rx_work)
		netdev_warn(de->dev, "rx work limit reached\n");

	de->rx_tail = rx_tail;
}
493 
/* Interrupt handler.  Acknowledges every status bit it observed, then
 * services Rx (outside de->lock), Tx and link events (under de->lock),
 * and finally reports/clears PCI bus errors.  Returns IRQ_NONE for
 * shared-IRQ events that are not ours.
 */
static irqreturn_t de_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct de_private *de = netdev_priv(dev);
	u32 status;

	status = dr32(MacStatus);
	/* no interrupt cause of ours, or chip gone (register reads as
	 * all-ones) */
	if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
		return IRQ_NONE;

	netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n",
		  status, dr32(MacMode),
		  de->rx_tail, de->tx_head, de->tx_tail);

	dw32(MacStatus, status);	/* write-1-to-clear: ack what we saw */

	if (status & (RxIntr | RxEmpty)) {
		de_rx(de);
		if (status & RxEmpty)	/* ring ran dry: restart Rx polling */
			dw32(RxPoll, NormalRxPoll);
	}

	spin_lock(&de->lock);

	if (status & (TxIntr | TxEmpty))
		de_tx(de);

	if (status & (LinkPass | LinkFail))
		de_media_interrupt(de, status);

	spin_unlock(&de->lock);

	if (status & PciErr) {
		u16 pci_status;

		/* reading and writing back PCI_STATUS clears the latched
		 * error bits */
		pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
		netdev_err(de->dev,
			   "PCI bus error, status=%08x, PCI status=%04x\n",
			   status, pci_status);
	}

	return IRQ_HANDLED;
}
538 
/* Reclaim completed Tx descriptors: unmap DMA buffers, account errors
 * and statistics for real frames, and free the skbs.  Dummy and setup
 * entries (queued by __de_set_rx_mode) carry sentinel skb pointers and
 * are handled specially.  Caller holds de->lock.
 */
static void de_tx (struct de_private *de)
{
	unsigned tx_head = de->tx_head;
	unsigned tx_tail = de->tx_tail;

	while (tx_tail != tx_head) {
		struct sk_buff *skb;
		u32 status;

		rmb();		/* read descriptor status after ring state */
		status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
		if (status & DescOwn)	/* chip not done with this slot yet */
			break;

		skb = de->tx_skb[tx_tail].skb;
		BUG_ON(!skb);
		if (unlikely(skb == DE_DUMMY_SKB))	/* errata filler entry */
			goto next;

		if (unlikely(skb == DE_SETUP_SKB)) {
			/* setup frame: just unmap the setup buffer */
			dma_unmap_single(&de->pdev->dev,
					 de->tx_skb[tx_tail].mapping,
					 sizeof(de->setup_frame),
					 DMA_TO_DEVICE);
			goto next;
		}

		dma_unmap_single(&de->pdev->dev, de->tx_skb[tx_tail].mapping,
				 skb->len, DMA_TO_DEVICE);

		if (status & LastFrag) {
			if (status & TxError) {
				netif_dbg(de, tx_err, de->dev,
					  "tx err, status 0x%x\n",
					  status);
				de->dev->stats.tx_errors++;
				if (status & TxOWC)
					de->dev->stats.tx_window_errors++;
				if (status & TxMaxCol)
					de->dev->stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					de->dev->stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					de->dev->stats.tx_fifo_errors++;
			} else {
				de->dev->stats.tx_packets++;
				de->dev->stats.tx_bytes += skb->len;
				netif_dbg(de, tx_done, de->dev,
					  "tx done, slot %d\n", tx_tail);
			}
			dev_consume_skb_irq(skb);
		}

next:
		de->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	de->tx_tail = tx_tail;

	/* restart the queue once at least a quarter of the ring is free */
	if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
		netif_wake_queue(de->dev);
}
603 
/* ndo_start_xmit: place one skb on the Tx ring as a single-fragment
 * descriptor and kick the transmitter.  Stops the queue when the ring
 * is (or becomes) full.
 */
static netdev_tx_t de_start_xmit (struct sk_buff *skb,
					struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	unsigned int entry, tx_free;
	/* NOTE(review): the DMA address is held in a u32 (the descriptor
	 * field is 32 bits wide); presumably the device is restricted to
	 * 32-bit DMA at probe time -- TODO confirm the DMA mask. */
	u32 mapping, len, flags = FirstFrag | LastFrag;
	struct de_desc *txd;

	spin_lock_irq(&de->lock);

	tx_free = TX_BUFFS_AVAIL(de);
	if (tx_free == 0) {
		/* ring full: stop the queue and let the stack retry */
		netif_stop_queue(dev);
		spin_unlock_irq(&de->lock);
		return NETDEV_TX_BUSY;
	}
	tx_free--;	/* account for the slot we are about to consume */

	entry = de->tx_head;

	txd = &de->tx_ring[entry];

	len = skb->len;
	/* NOTE(review): result is not checked with dma_mapping_error */
	mapping = dma_map_single(&de->pdev->dev, skb->data, len,
				 DMA_TO_DEVICE);
	if (entry == (DE_TX_RING_SIZE - 1))
		flags |= RingEnd;
	/* request a Tx interrupt when this fills the ring or leaves
	 * exactly half of it free */
	if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
		flags |= TxSwInt;
	flags |= len;
	txd->opts2 = cpu_to_le32(flags);
	txd->addr1 = cpu_to_le32(mapping);

	de->tx_skb[entry].skb = skb;
	de->tx_skb[entry].mapping = mapping;
	wmb();		/* descriptor fields visible before granting ownership */

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	de->tx_head = NEXT_TX(entry);
	netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n",
		  entry, skb->len);

	if (tx_free == 0)
		netif_stop_queue(dev);

	spin_unlock_irq(&de->lock);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

	return NETDEV_TX_OK;
}
658 
659 /* Set or clear the multicast filter for this adaptor.
660    Note that we only use exclusion around actually queueing the
661    new frame, not around filling de->setup_frame.  This is non-deterministic
662    when re-entered but still correct. */
663 
/* Build a setup frame using the chip's 512-bit multicast hash filter:
 * the hash table fills the leading entries and our unicast address goes
 * in the final perfect-filter entry.  Each 16-bit word is stored twice
 * because only the low-address shortword of each 32-bit setup-frame
 * slot is valid (see __de_set_rx_mode).
 */
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u16 hash_table[32];
	struct netdev_hw_addr *ha;
	const u16 *eaddrs;
	int i;

	memset(hash_table, 0, sizeof(hash_table));
	__set_bit_le(255, hash_table);			/* Broadcast entry */
	/* This should work on big-endian machines as well. */
	netdev_for_each_mc_addr(ha, dev) {
		/* 9-bit CRC index selects one of 512 hash bits */
		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;

		__set_bit_le(index, hash_table);
	}

	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	setup_frm = &de->setup_frame[13*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (const u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
693 
/* Build a setup frame using the chip's 16-entry perfect filter: one
 * entry per multicast address (the caller guarantees <= 14), broadcast
 * fill for unused entries, and our unicast address in the final entry.
 * Each 16-bit word is stored twice because only the low-address
 * shortword of each 32-bit setup-frame slot is valid.
 */
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	const u16 *eaddrs;

	/* We have <= 14 addresses so we can use the wonderful
	   16 address perfect filtering of the Tulip. */
	netdev_for_each_mc_addr(ha, dev) {
		eaddrs = (u16 *) ha->addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
	setup_frm = &de->setup_frame[15*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (const u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
718 
719 
/* Program the Rx filter.  Promiscuous and all-multicast modes need only
 * MacMode bit changes; otherwise a setup frame (perfect filter for
 * <= 14 multicast addresses, hash filter beyond that) is queued on the
 * Tx ring, prefixed with a dummy descriptor when required to dodge a
 * chip errata.  Caller holds de->lock.
 */
static void __de_set_rx_mode (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u32 macmode;
	unsigned int entry;
	u32 mapping;
	struct de_desc *txd;
	struct de_desc *dummy_txd = NULL;

	macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		macmode |= AcceptAllMulticast | AcceptAllPhys;
		goto out;
	}

	if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		macmode |= AcceptAllMulticast;
		goto out;
	}

	/* Note that only the low-address shortword of setup_frame is valid!
	   The values are doubled for big-endian architectures. */
	if (netdev_mc_count(dev) > 14)	/* Must use a multicast hash table. */
		build_setup_frame_hash (de->setup_frame, dev);
	else
		build_setup_frame_perfect (de->setup_frame, dev);

	/*
	 * Now add this frame to the Tx list.
	 */

	entry = de->tx_head;

	/* Avoid a chip errata by prefixing a dummy entry. */
	if (entry != 0) {
		de->tx_skb[entry].skb = DE_DUMMY_SKB;

		dummy_txd = &de->tx_ring[entry];
		dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
				   cpu_to_le32(RingEnd) : 0;
		dummy_txd->addr1 = 0;

		/* Must set DescOwned later to avoid race with chip */

		entry = NEXT_TX(entry);
	}

	de->tx_skb[entry].skb = DE_SETUP_SKB;
	de->tx_skb[entry].mapping = mapping =
	    dma_map_single(&de->pdev->dev, de->setup_frame,
			   sizeof(de->setup_frame), DMA_TO_DEVICE);

	/* Put the setup frame on the Tx list. */
	txd = &de->tx_ring[entry];
	if (entry == (DE_TX_RING_SIZE - 1))
		txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
	else
		txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
	txd->addr1 = cpu_to_le32(mapping);
	wmb();		/* descriptor contents visible before ownership */

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	/* only now hand the dummy entry to the chip, so it cannot race
	 * ahead of the completed setup-frame descriptor */
	if (dummy_txd) {
		dummy_txd->opts1 = cpu_to_le32(DescOwn);
		wmb();
	}

	de->tx_head = NEXT_TX(entry);

	if (TX_BUFFS_AVAIL(de) == 0)
		netif_stop_queue(dev);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

out:
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}
803 
de_set_rx_mode(struct net_device * dev)804 static void de_set_rx_mode (struct net_device *dev)
805 {
806 	unsigned long flags;
807 	struct de_private *de = netdev_priv(dev);
808 
809 	spin_lock_irqsave (&de->lock, flags);
810 	__de_set_rx_mode(dev);
811 	spin_unlock_irqrestore (&de->lock, flags);
812 }
813 
/* Fold the chip's missed-frame count into the netdev statistics; if the
 * hardware counter overflowed, credit the full counter range instead.
 */
static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
{
	u32 missed;

	if (rx_missed & RxMissedOver)
		missed = RxMissedMask;
	else
		missed = rx_missed & RxMissedMask;

	de->dev->stats.rx_missed_errors += missed;
}
821 
__de_get_stats(struct de_private * de)822 static void __de_get_stats(struct de_private *de)
823 {
824 	u32 tmp = dr32(RxMissed); /* self-clearing */
825 
826 	de_rx_missed(de, tmp);
827 }
828 
de_get_stats(struct net_device * dev)829 static struct net_device_stats *de_get_stats(struct net_device *dev)
830 {
831 	struct de_private *de = netdev_priv(dev);
832 
833 	/* The chip only need report frame silently dropped. */
834 	spin_lock_irq(&de->lock);
835 	if (netif_running(dev) && netif_device_present(dev))
836 		__de_get_stats(de);
837 	spin_unlock_irq(&de->lock);
838 
839 	return &dev->stats;
840 }
841 
de_is_running(struct de_private * de)842 static inline int de_is_running (struct de_private *de)
843 {
844 	return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
845 }
846 
/* Clear the Rx/Tx enable bits and poll until the chip's DMA engines
 * report stopped, warning if they fail to do so within the budget.
 */
static void de_stop_rxtx (struct de_private *de)
{
	u32 macmode;
	unsigned int i = 1300/100;	/* poll budget in 100us steps */

	macmode = dr32(MacMode);
	if (macmode & RxTx) {
		dw32(MacMode, macmode & ~RxTx);
		dr32(MacMode);	/* read back to flush the posted write */
	}

	/* wait until in-flight frame completes.
	 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
	 * Typically expect this loop to end in < 50 us on 100BT.
	 */
	while (--i) {
		if (!de_is_running(de))
			return;
		udelay(100);
	}

	netdev_warn(de->dev, "timeout expired, stopping DMA\n");
}
870 
de_start_rxtx(struct de_private * de)871 static inline void de_start_rxtx (struct de_private *de)
872 {
873 	u32 macmode;
874 
875 	macmode = dr32(MacMode);
876 	if ((macmode & RxTx) != RxTx) {
877 		dw32(MacMode, macmode | RxTx);
878 		dr32(MacMode);
879 	}
880 }
881 
/* Quiesce the chip: mask all interrupts, stop the DMA engines, clear
 * any pending status, and reset the software ring indices.
 */
static void de_stop_hw (struct de_private *de)
{

	udelay(5);
	dw32(IntrMask, 0);	/* mask all interrupt sources */

	de_stop_rxtx(de);

	dw32(MacStatus, dr32(MacStatus));	/* ack/clear pending status */

	udelay(10);

	de->rx_tail = 0;
	de->tx_head = de->tx_tail = 0;
}
897 
de_link_up(struct de_private * de)898 static void de_link_up(struct de_private *de)
899 {
900 	if (!netif_carrier_ok(de->dev)) {
901 		netif_carrier_on(de->dev);
902 		netif_info(de, link, de->dev, "link up, media %s\n",
903 			   media_name[de->media_type]);
904 	}
905 }
906 
de_link_down(struct de_private * de)907 static void de_link_down(struct de_private *de)
908 {
909 	if (netif_carrier_ok(de->dev)) {
910 		netif_carrier_off(de->dev);
911 		netif_info(de, link, de->dev, "link down\n");
912 	}
913 }
914 
/* Program the SIA registers (CSR13-15) and the MacMode duplex bit for
 * the currently selected media.  Resets the PHY first.  The caller is
 * expected to have stopped the chip; a warning is printed otherwise.
 */
static void de_set_media (struct de_private *de)
{
	unsigned media = de->media_type;
	u32 macmode = dr32(MacMode);

	if (de_is_running(de))
		netdev_warn(de->dev, "chip is running while changing media!\n");

	if (de->de21040)
		/* 21040 only; see the FULL_DUPLEX_MAGIC comment above */
		dw32(CSR11, FULL_DUPLEX_MAGIC);
	dw32(CSR13, 0); /* Reset phy */
	dw32(CSR14, de->media[media].csr14);
	dw32(CSR15, de->media[media].csr15);
	dw32(CSR13, de->media[media].csr13);	/* re-enable last */

	/* must delay 10ms before writing to other registers,
	 * especially CSR6
	 */
	mdelay(10);

	if (media == DE_MEDIA_TP_FD)
		macmode |= FullDuplex;
	else
		macmode &= ~FullDuplex;

	netif_info(de, link, de->dev, "set link %s\n", media_name[media]);
	netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
		   dr32(MacMode), dr32(SIAStatus),
		   dr32(CSR13), dr32(CSR14), dr32(CSR15));
	netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
		   macmode, de->media[media].csr13,
		   de->media[media].csr14, de->media[media].csr15);
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}
950 
de_next_media(struct de_private * de,const u32 * media,unsigned int n_media)951 static void de_next_media (struct de_private *de, const u32 *media,
952 			   unsigned int n_media)
953 {
954 	unsigned int i;
955 
956 	for (i = 0; i < n_media; i++) {
957 		if (de_ok_to_advertise(de, media[i])) {
958 			de->media_type = media[i];
959 			return;
960 		}
961 	}
962 }
963 
/* 21040 link monitor (periodic timer).  With carrier present the timer
 * is re-armed at the long interval; on carrier loss the driver toggles
 * between AUI and TP media (unless the user locked the media type) and
 * re-arms at the short no-link interval.
 */
static void de21040_media_timer (struct timer_list *t)
{
	struct de_private *de = from_timer(de, t, media_timer);
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;
	unsigned long flags;

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		/* non-AUI (TP) media must also pass the link test */
		if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
			goto no_link_yet;

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			netif_info(de, timer, dev, "%s link ok, status %x\n",
				   media_name[de->media_type], status);
		return;
	}

	de_link_down(de);

	/* NOTE(review): with the media type locked we return without
	 * re-arming the timer here -- confirm that is intended */
	if (de->media_lock)
		return;

	/* toggle between the two probeable 21040 media */
	if (de->media_type == DE_MEDIA_AUI) {
		static const u32 next_state = DE_MEDIA_TP;
		de_next_media(de, &next_state, 1);
	} else {
		static const u32 next_state = DE_MEDIA_AUI;
		de_next_media(de, &next_state, 1);
	}

	spin_lock_irqsave(&de->lock, flags);
	de_stop_rxtx(de);
	spin_unlock_irqrestore(&de->lock, flags);
	de_set_media(de);
	de_start_rxtx(de);

	/* deliberate fall-through: re-arm at the short interval */
no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
		   media_name[de->media_type], status);
}
1014 
/* Return nonzero if the given media type may be advertised, i.e. every
 * advertisement bit it requires is present in de->media_advertise.
 * Unrecognized media types are accepted.
 */
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
{
	const u32 adv = de->media_advertise;

	if (new_media == DE_MEDIA_TP_AUTO)
		return (adv & ADVERTISED_Autoneg) &&
		       (adv & (ADVERTISED_10baseT_Half |
			       ADVERTISED_10baseT_Full));
	if (new_media == DE_MEDIA_BNC)
		return !!(adv & ADVERTISED_BNC);
	if (new_media == DE_MEDIA_AUI)
		return !!(adv & ADVERTISED_AUI);
	if (new_media == DE_MEDIA_TP)
		return !!(adv & ADVERTISED_10baseT_Half);
	if (new_media == DE_MEDIA_TP_FD)
		return !!(adv & ADVERTISED_10baseT_Full);

	return 1;
}
1044 
/* 21041 media-poll timer, re-armed while the interface is up.
 * Reads SIA status for carrier: with a link, just re-arm and report.
 * Without one, try the "non-selected port active" hint to pick a new
 * media type, otherwise cycle through the advertised media states and
 * reprogram the SIA, retrying after DE_TIMER_NO_LINK.
 */
static void de21041_media_timer (struct timer_list *t)
{
	struct de_private *de = from_timer(de, t, media_timer);
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;
	unsigned long flags;

	/* clear port active bits */
	dw32(SIAStatus, NonselPortActive | SelPortActive);

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		/* TP media also reports explicit link state; treat a TP
		 * link failure as "no link yet" despite carrier */
		if ((de->media_type == DE_MEDIA_TP_AUTO ||
		     de->media_type == DE_MEDIA_TP ||
		     de->media_type == DE_MEDIA_TP_FD) &&
		    (status & LinkFailStatus))
			goto no_link_yet;

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			netif_info(de, timer, dev,
				   "%s link ok, mode %x status %x\n",
				   media_name[de->media_type],
				   dr32(MacMode), status);
		return;
	}

	de_link_down(de);

	/* if media type locked, don't switch media */
	if (de->media_lock)
		goto set_media;

	/* if activity detected, use that as hint for new media type */
	if (status & NonselPortActive) {
		unsigned int have_media = 1;

		/* if AUI/BNC selected, then activity is on TP port */
		if (de->media_type == DE_MEDIA_AUI ||
		    de->media_type == DE_MEDIA_BNC) {
			if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
				de->media_type = DE_MEDIA_TP_AUTO;
			else
				have_media = 0;
		}

		/* TP selected.  If there is only TP and BNC, then it's BNC */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
			 de_ok_to_advertise(de, DE_MEDIA_BNC))
			de->media_type = DE_MEDIA_BNC;

		/* TP selected.  If there is only TP and AUI, then it's AUI */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
			 de_ok_to_advertise(de, DE_MEDIA_AUI))
			de->media_type = DE_MEDIA_AUI;

		/* otherwise, ignore the hint */
		else
			have_media = 0;

		if (have_media)
			goto set_media;
	}

	/*
	 * Absent or ambiguous activity hint, move to next advertised
	 * media state.  If de->media_type is left unchanged, this
	 * simply resets the PHY and reloads the current media settings.
	 */
	if (de->media_type == DE_MEDIA_AUI) {
		static const u32 next_states[] = {
			DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else if (de->media_type == DE_MEDIA_BNC) {
		static const u32 next_states[] = {
			DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else {
		static const u32 next_states[] = {
			DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	}

set_media:
	/* halt the engines under the lock, then reprogram the SIA and
	   restart with the (possibly new) media selection */
	spin_lock_irqsave(&de->lock, flags);
	de_stop_rxtx(de);
	spin_unlock_irqrestore(&de->lock, flags);
	de_set_media(de);
	de_start_rxtx(de);

no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
		   media_name[de->media_type], status);
}
1150 
/* Handle the LinkPass/LinkFail interrupt status bits.  On LinkPass,
 * auto-switch AUI/BNC back to TP autoneg (unless media is locked or TP
 * is not advertised) and mark the link up.  On LinkFail, mark the link
 * down only when a TP medium is selected; AUI/BNC are left alone.
 * Exactly one of LinkPass/LinkFail must be set (BUG_ON otherwise).
 */
static void de_media_interrupt (struct de_private *de, u32 status)
{
	if (status & LinkPass) {
		/* Ignore if current media is AUI or BNC and we can't use TP */
		if ((de->media_type == DE_MEDIA_AUI ||
		     de->media_type == DE_MEDIA_BNC) &&
		    (de->media_lock ||
		     !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
			return;
		/* If current media is not TP, change it to TP */
		if ((de->media_type == DE_MEDIA_AUI ||
		     de->media_type == DE_MEDIA_BNC)) {
			de->media_type = DE_MEDIA_TP_AUTO;
			de_stop_rxtx(de);
			de_set_media(de);
			de_start_rxtx(de);
		}
		de_link_up(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
		return;
	}

	BUG_ON(!(status & LinkFail));
	/* Mark the link as down only if current media is TP */
	if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
	    de->media_type != DE_MEDIA_BNC) {
		de_link_down(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
	}
}
1181 
/* Soft-reset the MAC via CSR0 (BusMode) and restore the driver's bus
 * mode.  Returns 0 on success; -EBUSY if the chip reads all-ones up
 * front or the Rx/Tx engines are still running after reset; -ENODEV
 * if the chip has vanished from the bus (status reads all-ones).
 */
static int de_reset_mac (struct de_private *de)
{
	u32 status, tmp;

	/*
	 * Reset MAC.  de4x5.c and tulip.c examined for "advice"
	 * in this area.
	 */

	if (dr32(BusMode) == 0xffffffff)
		return -EBUSY;

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	dw32 (BusMode, CmdReset);
	mdelay (1);

	dw32 (BusMode, de_bus_mode);
	mdelay (1);

	/* dummy reads to let the reset settle (pattern borrowed from
	   de4x5/tulip, per the note above) */
	for (tmp = 0; tmp < 5; tmp++) {
		dr32 (BusMode);
		mdelay (1);
	}

	mdelay (1);

	status = dr32(MacStatus);
	if (status & (RxState | TxState))
		return -EBUSY;
	if (status == 0xffffffff)
		return -ENODEV;
	return 0;
}
1215 
/* Bring a 21041 out of its PCI power-management sleep/snooze state by
 * clearing the PM control bits.  The 21040 does not use this path.
 */
static void de_adapter_wake (struct de_private *de)
{
	u32 pmctl;

	if (de->de21040)
		return;

	pci_read_config_dword(de->pdev, PCIPM, &pmctl);
	if (pmctl & PM_Mask) {
		pmctl &= ~PM_Mask;
		pci_write_config_dword(de->pdev, PCIPM, pmctl);

		/* de4x5.c delays, so we do too */
		msleep(10);
	}
}
1232 
/* Put a 21041 into PCI power-management sleep: reset the PHY first so
 * the SIA is quiescent, then set the sleep bit.  No-op on the 21040.
 */
static void de_adapter_sleep (struct de_private *de)
{
	u32 pmctl;

	if (de->de21040)
		return;

	dw32(CSR13, 0); /* Reset phy */
	pci_read_config_dword(de->pdev, PCIPM, &pmctl);
	pmctl |= PM_Sleep;
	pci_write_config_dword(de->pdev, PCIPM, pmctl);
}
1245 
/* (Re)initialize the chip: wake it, reset the MAC while preserving
 * the MacMode bits outside MacModeClear, program the media/SIA, point
 * the chip at the descriptor rings, enable Rx/Tx, unmask interrupts
 * and load the Rx filter.  Returns 0 or the de_reset_mac() error.
 */
static int de_init_hw (struct de_private *de)
{
	struct net_device *dev = de->dev;
	u32 macmode;
	int rc;

	de_adapter_wake(de);

	/* carry over mode bits that we don't manage ourselves */
	macmode = dr32(MacMode) & ~MacModeClear;

	rc = de_reset_mac(de);
	if (rc)
		return rc;

	de_set_media(de); /* reset phy */

	/* Tx ring sits immediately after the Rx ring in the same
	   coherent allocation (see de_alloc_rings) */
	dw32(RxRingAddr, de->ring_dma);
	dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));

	dw32(MacMode, RxTx | macmode);

	dr32(RxMissed); /* self-clearing */

	dw32(IntrMask, de_intr_mask);

	de_set_rx_mode(dev);

	return 0;
}
1275 
de_refill_rx(struct de_private * de)1276 static int de_refill_rx (struct de_private *de)
1277 {
1278 	unsigned i;
1279 
1280 	for (i = 0; i < DE_RX_RING_SIZE; i++) {
1281 		struct sk_buff *skb;
1282 
1283 		skb = netdev_alloc_skb(de->dev, de->rx_buf_sz);
1284 		if (!skb)
1285 			goto err_out;
1286 
1287 		de->rx_skb[i].mapping = dma_map_single(&de->pdev->dev,
1288 						       skb->data,
1289 						       de->rx_buf_sz,
1290 						       DMA_FROM_DEVICE);
1291 		de->rx_skb[i].skb = skb;
1292 
1293 		de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1294 		if (i == (DE_RX_RING_SIZE - 1))
1295 			de->rx_ring[i].opts2 =
1296 				cpu_to_le32(RingEnd | de->rx_buf_sz);
1297 		else
1298 			de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1299 		de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1300 		de->rx_ring[i].addr2 = 0;
1301 	}
1302 
1303 	return 0;
1304 
1305 err_out:
1306 	de_clean_rings(de);
1307 	return -ENOMEM;
1308 }
1309 
de_init_rings(struct de_private * de)1310 static int de_init_rings (struct de_private *de)
1311 {
1312 	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1313 	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1314 
1315 	de->rx_tail = 0;
1316 	de->tx_head = de->tx_tail = 0;
1317 
1318 	return de_refill_rx (de);
1319 }
1320 
de_alloc_rings(struct de_private * de)1321 static int de_alloc_rings (struct de_private *de)
1322 {
1323 	de->rx_ring = dma_alloc_coherent(&de->pdev->dev, DE_RING_BYTES,
1324 					 &de->ring_dma, GFP_KERNEL);
1325 	if (!de->rx_ring)
1326 		return -ENOMEM;
1327 	de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1328 	return de_init_rings(de);
1329 }
1330 
/* Unwind both rings: wipe the descriptors (keeping the RingEnd wrap
 * bits), unmap and free every outstanding skb, and zero the per-slot
 * bookkeeping.  Unsent Tx packets count as dropped.  Callers stop the
 * hardware first (see de_close/de_tx_timeout/de_suspend).
 */
static void de_clean_rings (struct de_private *de)
{
	unsigned i;

	/* clear the descriptors before unmapping, with wmb() ordering
	   the descriptor writes ahead of the buffer teardown below */
	memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
	de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
	wmb();
	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
	wmb();

	for (i = 0; i < DE_RX_RING_SIZE; i++) {
		if (de->rx_skb[i].skb) {
			dma_unmap_single(&de->pdev->dev,
					 de->rx_skb[i].mapping, de->rx_buf_sz,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(de->rx_skb[i].skb);
		}
	}

	for (i = 0; i < DE_TX_RING_SIZE; i++) {
		struct sk_buff *skb = de->tx_skb[i].skb;
		/* DE_DUMMY_SKB slots own no buffer; DE_SETUP_SKB slots
		   map the setup frame rather than a real packet */
		if ((skb) && (skb != DE_DUMMY_SKB)) {
			if (skb != DE_SETUP_SKB) {
				de->dev->stats.tx_dropped++;
				dma_unmap_single(&de->pdev->dev,
						 de->tx_skb[i].mapping,
						 skb->len, DMA_TO_DEVICE);
				dev_kfree_skb(skb);
			} else {
				dma_unmap_single(&de->pdev->dev,
						 de->tx_skb[i].mapping,
						 sizeof(de->setup_frame),
						 DMA_TO_DEVICE);
			}
		}
	}

	memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
	memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
}
1372 
de_free_rings(struct de_private * de)1373 static void de_free_rings (struct de_private *de)
1374 {
1375 	de_clean_rings(de);
1376 	dma_free_coherent(&de->pdev->dev, DE_RING_BYTES, de->rx_ring,
1377 			  de->ring_dma);
1378 	de->rx_ring = NULL;
1379 	de->tx_ring = NULL;
1380 }
1381 
/* ndo_open: bring the interface up.  Allocates the descriptor rings,
 * masks chip interrupts before grabbing the (shared) IRQ, initializes
 * the hardware, then starts the Tx queue and the media-poll timer.
 * Returns 0 or a negative errno, unwinding on failure.
 */
static int de_open (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	const int irq = de->pdev->irq;
	int rc;

	netif_dbg(de, ifup, dev, "enabling interface\n");

	/* standard MTU fits PKT_BUF_SZ; larger MTUs get mtu + slack */
	de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	rc = de_alloc_rings(de);
	if (rc) {
		netdev_err(dev, "ring allocation failure, err=%d\n", rc);
		return rc;
	}

	/* keep the chip quiet until de_init_hw() unmasks interrupts */
	dw32(IntrMask, 0);

	rc = request_irq(irq, de_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "IRQ %d request failure, err=%d\n", irq, rc);
		goto err_out_free;
	}

	rc = de_init_hw(de);
	if (rc) {
		netdev_err(dev, "h/w init failure, err=%d\n", rc);
		goto err_out_free_irq;
	}

	netif_start_queue(dev);
	mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);

	return 0;

err_out_free_irq:
	free_irq(irq, dev);
err_out_free:
	de_free_rings(de);
	return rc;
}
1423 
/* ndo_stop: quiesce the device.  Stops the media timer, halts the
 * hardware and queue under the lock, releases the IRQ and rings, and
 * puts the 21041 back to sleep.  Always returns 0.
 */
static int de_close (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	unsigned long flags;

	netif_dbg(de, ifdown, dev, "disabling interface\n");

	del_timer_sync(&de->media_timer);

	spin_lock_irqsave(&de->lock, flags);
	de_stop_hw(de);
	netif_stop_queue(dev);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&de->lock, flags);

	/* no further interrupts can arrive once the IRQ is freed */
	free_irq(de->pdev->irq, dev);

	de_free_rings(de);
	de_adapter_sleep(de);
	return 0;
}
1445 
/* ndo_tx_timeout: recover from a hung transmitter.  Stops the timer
 * and hardware with the IRQ disabled, harvests stats, rebuilds both
 * rings from scratch, re-initializes the chip and restarts the queue.
 */
static void de_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
	struct de_private *de = netdev_priv(dev);
	const int irq = de->pdev->irq;

	netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
		   dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
		   de->rx_tail, de->tx_head, de->tx_tail);

	del_timer_sync(&de->media_timer);

	disable_irq(irq);
	spin_lock_irq(&de->lock);

	de_stop_hw(de);
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	spin_unlock_irq(&de->lock);
	enable_irq(irq);

	/* Update the error counts. */
	__de_get_stats(de);

	synchronize_irq(irq);
	de_clean_rings(de);

	/* NOTE(review): the return values of de_init_rings() and
	 * de_init_hw() are ignored here; a failure would leave the
	 * queue awake over dead hardware — TODO consider handling */
	de_init_rings(de);

	de_init_hw(de);

	netif_wake_queue(dev);
}
1479 
/* Snapshot every chip CSR into buf as u32s (registers are spaced 8
 * bytes apart).  CSR8 (RxMissed) clears on read, so the value just
 * captured is folded into the stats instead of being lost.  Caller
 * holds de->lock (see de_get_regs).
 */
static void __de_get_regs(struct de_private *de, u8 *buf)
{
	int i;
	u32 *rbuf = (u32 *)buf;

	/* read all CSRs */
	for (i = 0; i < DE_NUM_REGS; i++)
		rbuf[i] = dr32(i * 8);

	/* handle self-clearing RxMissed counter, CSR8 */
	de_rx_missed(de, rbuf[8]);
}
1492 
__de_get_link_ksettings(struct de_private * de,struct ethtool_link_ksettings * cmd)1493 static void __de_get_link_ksettings(struct de_private *de,
1494 				    struct ethtool_link_ksettings *cmd)
1495 {
1496 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1497 						de->media_supported);
1498 	cmd->base.phy_address = 0;
1499 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1500 						de->media_advertise);
1501 
1502 	switch (de->media_type) {
1503 	case DE_MEDIA_AUI:
1504 		cmd->base.port = PORT_AUI;
1505 		break;
1506 	case DE_MEDIA_BNC:
1507 		cmd->base.port = PORT_BNC;
1508 		break;
1509 	default:
1510 		cmd->base.port = PORT_TP;
1511 		break;
1512 	}
1513 
1514 	cmd->base.speed = 10;
1515 
1516 	if (dr32(MacMode) & FullDuplex)
1517 		cmd->base.duplex = DUPLEX_FULL;
1518 	else
1519 		cmd->base.duplex = DUPLEX_HALF;
1520 
1521 	if (de->media_lock)
1522 		cmd->base.autoneg = AUTONEG_DISABLE;
1523 	else
1524 		cmd->base.autoneg = AUTONEG_ENABLE;
1525 
1526 	/* ignore maxtxpkt, maxrxpkt for now */
1527 }
1528 
/* Validate and apply a new ethtool link configuration.  Only 10 Mb/s
 * is supported; port/duplex/autoneg requests are cross-checked against
 * the supported and advertised media masks.  On an actual change the
 * link is forced down, the media timer re-armed, and the SIA
 * reprogrammed (rx/tx restarted only if the device is up).  Returns 0
 * or -EINVAL.  Caller holds de->lock (see de_set_link_ksettings).
 */
static int __de_set_link_ksettings(struct de_private *de,
				   const struct ethtool_link_ksettings *cmd)
{
	u32 new_media;
	unsigned int media_lock;
	u8 duplex = cmd->base.duplex;
	u8 port = cmd->base.port;
	u8 autoneg = cmd->base.autoneg;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* reject anything this 10 Mb/s-only hardware cannot do */
	if (cmd->base.speed != 10)
		return -EINVAL;
	if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL)
		return -EINVAL;
	if (port != PORT_TP && port != PORT_AUI && port != PORT_BNC)
		return -EINVAL;
	if (de->de21040 && port == PORT_BNC)
		return -EINVAL;
	if (autoneg != AUTONEG_DISABLE && autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	if (advertising & ~de->media_supported)
		return -EINVAL;
	if (autoneg == AUTONEG_ENABLE &&
	    (!(advertising & ADVERTISED_Autoneg)))
		return -EINVAL;

	/* map the requested port to a media type, checking that the
	   matching modes are actually being advertised */
	switch (port) {
	case PORT_AUI:
		new_media = DE_MEDIA_AUI;
		if (!(advertising & ADVERTISED_AUI))
			return -EINVAL;
		break;
	case PORT_BNC:
		new_media = DE_MEDIA_BNC;
		if (!(advertising & ADVERTISED_BNC))
			return -EINVAL;
		break;
	default:
		if (autoneg == AUTONEG_ENABLE)
			new_media = DE_MEDIA_TP_AUTO;
		else if (duplex == DUPLEX_FULL)
			new_media = DE_MEDIA_TP_FD;
		else
			new_media = DE_MEDIA_TP;
		if (!(advertising & ADVERTISED_TP))
			return -EINVAL;
		if (!(advertising & (ADVERTISED_10baseT_Full |
				     ADVERTISED_10baseT_Half)))
			return -EINVAL;
		break;
	}

	/* disabling autoneg pins the media type (see media timers) */
	media_lock = (autoneg == AUTONEG_ENABLE) ? 0 : 1;

	if ((new_media == de->media_type) &&
	    (media_lock == de->media_lock) &&
	    (advertising == de->media_advertise))
		return 0; /* nothing to change */

	de_link_down(de);
	mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
	de_stop_rxtx(de);

	de->media_type = new_media;
	de->media_lock = media_lock;
	de->media_advertise = advertising;
	de_set_media(de);
	if (netif_running(de->dev))
		de_start_rxtx(de);

	return 0;
}
1604 
de_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)1605 static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1606 {
1607 	struct de_private *de = netdev_priv(dev);
1608 
1609 	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1610 	strscpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
1611 }
1612 
de_get_regs_len(struct net_device * dev)1613 static int de_get_regs_len(struct net_device *dev)
1614 {
1615 	return DE_REGS_SIZE;
1616 }
1617 
de_get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * cmd)1618 static int de_get_link_ksettings(struct net_device *dev,
1619 				 struct ethtool_link_ksettings *cmd)
1620 {
1621 	struct de_private *de = netdev_priv(dev);
1622 
1623 	spin_lock_irq(&de->lock);
1624 	__de_get_link_ksettings(de, cmd);
1625 	spin_unlock_irq(&de->lock);
1626 
1627 	return 0;
1628 }
1629 
de_set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)1630 static int de_set_link_ksettings(struct net_device *dev,
1631 				 const struct ethtool_link_ksettings *cmd)
1632 {
1633 	struct de_private *de = netdev_priv(dev);
1634 	int rc;
1635 
1636 	spin_lock_irq(&de->lock);
1637 	rc = __de_set_link_ksettings(de, cmd);
1638 	spin_unlock_irq(&de->lock);
1639 
1640 	return rc;
1641 }
1642 
de_get_msglevel(struct net_device * dev)1643 static u32 de_get_msglevel(struct net_device *dev)
1644 {
1645 	struct de_private *de = netdev_priv(dev);
1646 
1647 	return de->msg_enable;
1648 }
1649 
/* ethtool_ops.set_msglevel: store the netif message-level bitmask. */
static void de_set_msglevel(struct net_device *dev, u32 msglvl)
{
	struct de_private *priv = netdev_priv(dev);

	priv->msg_enable = msglvl;
}
1656 
/* ethtool_ops.get_eeprom: return the SROM image cached at probe time.
 * Only whole-EEPROM reads (offset 0, full length, magic 0) are
 * accepted; -EOPNOTSUPP if no image was cached.
 */
static int de_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct de_private *priv = netdev_priv(dev);

	if (!priv->ee_data)
		return -EOPNOTSUPP;
	if (eeprom->magic != 0 || eeprom->offset != 0 ||
	    eeprom->len != DE_EEPROM_SIZE)
		return -EINVAL;

	memcpy(data, priv->ee_data, eeprom->len);
	return 0;
}
1671 
/* ethtool_ops.nway_reset: restart TP autonegotiation by pulsing the
 * NWayRestart bit in the SIA status register (clearing the NWay state
 * bits first).  Only valid when the current media is TP autoneg.
 */
static int de_nway_reset(struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u32 status;

	if (de->media_type != DE_MEDIA_TP_AUTO)
		return -EINVAL;
	if (netif_carrier_ok(de->dev))
		de_link_down(de);

	status = dr32(SIAStatus);
	dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
	netif_info(de, link, dev, "link nway restart, status %x,%x\n",
		   status, dr32(SIAStatus));
	return 0;
}
1688 
de_get_regs(struct net_device * dev,struct ethtool_regs * regs,void * data)1689 static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1690 			void *data)
1691 {
1692 	struct de_private *de = netdev_priv(dev);
1693 
1694 	regs->version = (DE_REGS_VER << 2) | de->de21040;
1695 
1696 	spin_lock_irq(&de->lock);
1697 	__de_get_regs(de, data);
1698 	spin_unlock_irq(&de->lock);
1699 }
1700 
/* ethtool entry points; link settings and register dumps are guarded
 * by de->lock in the wrapper functions above.
 */
static const struct ethtool_ops de_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= de_get_drvinfo,
	.get_regs_len		= de_get_regs_len,
	.get_msglevel		= de_get_msglevel,
	.set_msglevel		= de_set_msglevel,
	.get_eeprom		= de_get_eeprom,
	.nway_reset		= de_nway_reset,
	.get_regs		= de_get_regs,
	.get_link_ksettings	= de_get_link_ksettings,
	.set_link_ksettings	= de_set_link_ksettings,
};
1713 
/* Read the six MAC address bytes from the 21040's address ROM via the
 * ROMCmd register.  After a dummy write resets the ROM pointer, each
 * byte is polled until the register reads non-negative (bounded by a
 * bogus-count); a timed-out byte is still stored after a warning.
 */
static void de21040_get_mac_address(struct de_private *de)
{
	u8 addr[ETH_ALEN];
	unsigned i;

	dw32 (ROMCmd, 0);	/* Reset the pointer with a dummy write. */
	udelay(5);

	for (i = 0; i < 6; i++) {
		int value, boguscnt = 100000;
		do {
			value = dr32(ROMCmd);
			rmb();
		} while (value < 0 && --boguscnt > 0);
		/* low 8 bits hold the address byte */
		addr[i] = value;
		udelay(1);
		if (boguscnt <= 0)
			pr_warn("timeout reading 21040 MAC address byte %u\n",
				i);
	}
	eth_hw_addr_set(de->dev, addr);
}
1736 
de21040_get_media_info(struct de_private * de)1737 static void de21040_get_media_info(struct de_private *de)
1738 {
1739 	unsigned int i;
1740 
1741 	de->media_type = DE_MEDIA_TP;
1742 	de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1743 			       SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1744 	de->media_advertise = de->media_supported;
1745 
1746 	for (i = 0; i < DE_MAX_MEDIA; i++) {
1747 		switch (i) {
1748 		case DE_MEDIA_AUI:
1749 		case DE_MEDIA_TP:
1750 		case DE_MEDIA_TP_FD:
1751 			de->media[i].type = i;
1752 			de->media[i].csr13 = t21040_csr13[i];
1753 			de->media[i].csr14 = t21040_csr14[i];
1754 			de->media[i].csr15 = t21040_csr15[i];
1755 			break;
1756 		default:
1757 			de->media[i].type = DE_MEDIA_INVALID;
1758 			break;
1759 		}
1760 	}
1761 }
1762 
/* Note: this routine returns extra data bits for size detection. */
/* Bit-bang one word from the serial EEPROM behind ROMCmd: assert chip
 * select, clock the read command and address out MSB-first, then
 * clock 16 data bits in.  The readl() after each writel() flushes the
 * posted write, pacing the bit-bang clock.  'addr_len' is the address
 * width in bits (6 or 8, see de21041_get_srom_info).
 */
static unsigned tulip_read_eeprom(void __iomem *regs, int location,
				  int addr_len)
{
	int i;
	unsigned retval = 0;
	void __iomem *ee_addr = regs + ROMCmd;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	writel(EE_ENB & ~EE_CS, ee_addr);
	writel(EE_ENB, ee_addr);

	/* Shift the read command bits out. */
	for (i = 4 + addr_len; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writel(EE_ENB | dataval, ee_addr);
		readl(ee_addr);
		writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
	}
	writel(EE_ENB, ee_addr);
	readl(ee_addr);

	/* shift the 16 data bits in, MSB first */
	for (i = 16; i > 0; i--) {
		writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
		writel(EE_ENB, ee_addr);
		readl(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
1799 
/* Parse the 21041 SROM: download the EEPROM image, extract the MAC
 * address (handling pre-spec boards that store it at offset 0),
 * determine the default media type and the supported media blocks
 * (with optional per-media CSR13-15 overrides), fill in table
 * defaults for anything unspecified, and cache the raw image for
 * ethtool.  On any parse error, fall back to claiming all media
 * types with default CSR values.
 */
static void de21041_get_srom_info(struct de_private *de)
{
	unsigned i, sa_offset = 0, ofs;
	u8 ee_data[DE_EEPROM_SIZE + 6] = {};
	/* probe the EEPROM's address width: 8-bit parts set bit 18 on
	   an over-long read */
	unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
	struct de_srom_info_leaf *il;
	void *bufp;

	/* download entire eeprom */
	for (i = 0; i < DE_EEPROM_WORDS; i++)
		((__le16 *)ee_data)[i] =
			cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));

	/* DEC now has a specification but early board makers
	   just put the address in the first EEPROM locations. */
	/* This does  memcmp(eedata, eedata+16, 8) */

#ifndef CONFIG_MIPS_COBALT

	for (i = 0; i < 8; i ++)
		if (ee_data[i] != ee_data[16+i])
			sa_offset = 20;

#endif

	/* store MAC address */
	eth_hw_addr_set(de->dev, &ee_data[sa_offset]);

	/* get offset of controller 0 info leaf.  ignore 2nd byte. */
	ofs = ee_data[SROMC0InfoLeaf];
	if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
		goto bad_srom;

	/* get pointer to info leaf */
	il = (struct de_srom_info_leaf *) &ee_data[ofs];

	/* paranoia checks */
	if (il->n_blocks == 0)
		goto bad_srom;
	if ((sizeof(ee_data) - ofs) <
	    (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
		goto bad_srom;

	/* get default media type */
	switch (get_unaligned(&il->default_media)) {
	case 0x0001:  de->media_type = DE_MEDIA_BNC; break;
	case 0x0002:  de->media_type = DE_MEDIA_AUI; break;
	case 0x0204:  de->media_type = DE_MEDIA_TP_FD; break;
	default: de->media_type = DE_MEDIA_TP_AUTO; break;
	}

	if (netif_msg_probe(de))
		pr_info("de%d: SROM leaf offset %u, default media %s\n",
		       de->board_idx, ofs, media_name[de->media_type]);

	/* init SIA register values to defaults */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		de->media[i].type = DE_MEDIA_INVALID;
		de->media[i].csr13 = 0xffff;
		de->media[i].csr14 = 0xffff;
		de->media[i].csr15 = 0xffff;
	}

	/* parse media blocks to see what medias are supported,
	 * and if any custom CSR values are provided
	 */
	bufp = ((void *)il) + sizeof(*il);
	for (i = 0; i < il->n_blocks; i++) {
		struct de_srom_media_block *ib = bufp;
		unsigned idx;

		/* index based on media type in media block */
		switch(ib->opts & MediaBlockMask) {
		case 0: /* 10baseT */
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
					  | SUPPORTED_Autoneg;
			idx = DE_MEDIA_TP;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
			break;
		case 1: /* BNC */
			de->media_supported |= SUPPORTED_BNC;
			idx = DE_MEDIA_BNC;
			break;
		case 2: /* AUI */
			de->media_supported |= SUPPORTED_AUI;
			idx = DE_MEDIA_AUI;
			break;
		case 4: /* 10baseT-FD */
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
					  | SUPPORTED_Autoneg;
			idx = DE_MEDIA_TP_FD;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
			break;
		default:
			goto bad_srom;
		}

		de->media[idx].type = idx;

		if (netif_msg_probe(de))
			pr_info("de%d:   media block #%u: %s",
				de->board_idx, i,
				media_name[de->media[idx].type]);

		bufp += sizeof (ib->opts);

		/* blocks with custom CSRs carry three extra u16 values */
		if (ib->opts & MediaCustomCSRs) {
			de->media[idx].csr13 = get_unaligned(&ib->csr13);
			de->media[idx].csr14 = get_unaligned(&ib->csr14);
			de->media[idx].csr15 = get_unaligned(&ib->csr15);
			bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
				sizeof(ib->csr15);

			if (netif_msg_probe(de))
				pr_cont(" (%x,%x,%x)\n",
					de->media[idx].csr13,
					de->media[idx].csr14,
					de->media[idx].csr15);

		} else {
			if (netif_msg_probe(de))
				pr_cont("\n");
		}

		/* stop before a block could run off the end of the image */
		if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
			break;
	}

	de->media_advertise = de->media_supported;

fill_defaults:
	/* fill in defaults, for cases where custom CSRs not used */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		if (de->media[i].csr13 == 0xffff)
			de->media[i].csr13 = t21041_csr13[i];
		if (de->media[i].csr14 == 0xffff) {
			/* autonegotiation is broken at least on some chip
			   revisions - rev. 0x21 works, 0x11 does not */
			if (de->pdev->revision < 0x20)
				de->media[i].csr14 = t21041_csr14_brk[i];
			else
				de->media[i].csr14 = t21041_csr14[i];
		}
		if (de->media[i].csr15 == 0xffff)
			de->media[i].csr15 = t21041_csr15[i];
	}

	/* NOTE(review): kmemdup() result deliberately unchecked — a NULL
	 * ee_data merely disables the ethtool EEPROM dump (see
	 * de_get_eeprom) */
	de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);

	return;

bad_srom:
	/* for error cases, it's ok to assume we support all these */
	for (i = 0; i < DE_MAX_MEDIA; i++)
		de->media[i].type = i;
	de->media_supported =
		SUPPORTED_10baseT_Half |
		SUPPORTED_10baseT_Full |
		SUPPORTED_Autoneg |
		SUPPORTED_TP |
		SUPPORTED_AUI |
		SUPPORTED_BNC;
	goto fill_defaults;
}
1964 
/* Core net_device callbacks for this driver (Tx, stats and the Rx
 * mode handler are defined elsewhere in the file).
 */
static const struct net_device_ops de_netdev_ops = {
	.ndo_open		= de_open,
	.ndo_stop		= de_close,
	.ndo_set_rx_mode	= de_set_rx_mode,
	.ndo_start_xmit		= de_start_xmit,
	.ndo_get_stats		= de_get_stats,
	.ndo_tx_timeout 	= de_tx_timeout,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
1975 
/* PCI probe: allocate and set up the netdev, claim PCI resources, map
 * the CSR window from BAR 1, reset the MAC to prove the chip is
 * present and idle, read the MAC address and media capabilities
 * (serial ROM on 21040, SROM on 21041), register the interface, and
 * leave the adapter asleep until it is opened.
 */
static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct de_private *de;
	int rc;
	void __iomem *regs;
	unsigned long pciaddr;
	/* probe-order index used only in log messages */
	static int board_idx = -1;

	board_idx++;

	/* allocate a new ethernet device structure, and fill in defaults */
	dev = alloc_etherdev(sizeof(struct de_private));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &de_netdev_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->ethtool_ops = &de_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	de = netdev_priv(dev);
	/* driver_data 0 in the PCI id table marks the 21040 */
	de->de21040 = ent->driver_data == 0 ? 1 : 0;
	de->pdev = pdev;
	de->dev = dev;
	de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
	de->board_idx = board_idx;
	spin_lock_init (&de->lock);
	timer_setup(&de->media_timer,
		    de->de21040 ? de21040_media_timer : de21041_media_timer,
		    0);

	netif_carrier_off(dev);

	/* wake up device, assign resources */
	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	/* reserve PCI resources to ensure driver atomicity */
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable;

	/* check for invalid IRQ value */
	if (pdev->irq < 2) {
		rc = -EIO;
		pr_err("invalid irq (%d) for pci dev %s\n",
		       pdev->irq, pci_name(pdev));
		goto err_out_res;
	}

	/* obtain and check validity of PCI I/O address */
	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		pr_err("no MMIO resource for pci dev %s\n", pci_name(pdev));
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
		rc = -EIO;
		pr_err("MMIO resource (%llx) too small on pci dev %s\n",
		       (unsigned long long)pci_resource_len(pdev, 1),
		       pci_name(pdev));
		goto err_out_res;
	}

	/* remap CSR registers */
	regs = ioremap(pciaddr, DE_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
		       (unsigned long long)pci_resource_len(pdev, 1),
		       pciaddr, pci_name(pdev));
		goto err_out_res;
	}
	de->regs = regs;

	de_adapter_wake(de);

	/* make sure hardware is not running */
	rc = de_reset_mac(de);
	if (rc) {
		pr_err("Cannot reset MAC, pci dev %s\n", pci_name(pdev));
		goto err_out_iomap;
	}

	/* get MAC address, initialize default media type and
	 * get list of supported media
	 */
	if (de->de21040) {
		de21040_get_mac_address(de);
		de21040_get_media_info(de);
	} else {
		de21041_get_srom_info(de);
	}

	/* register new network interface with kernel */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	/* print info about board and interface just registered */
	netdev_info(dev, "%s at %p, %pM, IRQ %d\n",
		    de->de21040 ? "21040" : "21041",
		    regs, dev->dev_addr, pdev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable busmastering */
	pci_set_master(pdev);

	/* put adapter to sleep */
	de_adapter_sleep(de);

	return 0;

err_out_iomap:
	kfree(de->ee_data);
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);
	return rc;
}
2104 
/* Device removal: undo everything de_init_one() did, in reverse order.
 * Unregister the netdev first so no new I/O can be started, then free
 * per-device buffers and release MMIO/PCI resources, and finally free
 * the net_device itself.
 */
static void de_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct de_private *de = netdev_priv(dev);

	BUG_ON(!dev);
	unregister_netdev(dev);
	/* ee_data is allocated during probe (presumably the SROM copy from
	 * de21041_get_srom_info() — confirm); kfree(NULL) is a no-op. */
	kfree(de->ee_data);
	iounmap(de->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
2118 
/* System-sleep suspend callback.
 *
 * If the interface is up: stop the media-poll timer, quiesce the MAC with
 * the IRQ masked and the private lock held, detach the device from the
 * network stack, drain any in-flight interrupt, free the DMA rings and
 * put the adapter to sleep.  If the interface is down, only detach it.
 * The RTNL serializes this against concurrent open/close.
 *
 * Always returns 0 (suspend cannot fail here).
 */
static int __maybe_unused de_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct de_private *de = netdev_priv(dev);

	rtnl_lock();
	if (netif_running (dev)) {
		const int irq = pdev->irq;

		/* stop periodic media polling before touching the hardware */
		del_timer_sync(&de->media_timer);

		/* keep the ISR out while the MAC is shut down */
		disable_irq(irq);
		spin_lock_irq(&de->lock);

		de_stop_hw(de);
		netif_stop_queue(dev);
		netif_device_detach(dev);
		netif_carrier_off(dev);

		spin_unlock_irq(&de->lock);
		enable_irq(irq);

		/* Update the error counts. */
		__de_get_stats(de);

		/* ensure any in-flight handler has finished before the
		 * rings it may touch are torn down */
		synchronize_irq(irq);
		de_clean_rings(de);

		de_adapter_sleep(de);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}
2155 
/* System-sleep resume callback — the reverse of de_suspend().
 *
 * If the device is still marked present, suspend never detached it and
 * there is nothing to do.  If the interface was down, it only needs to
 * be re-attached.  Otherwise re-enable bus mastering and rebuild the DMA
 * rings and hardware state before re-attaching.  Runs under the RTNL.
 *
 * Always returns 0.
 */
static int __maybe_unused de_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct de_private *de = netdev_priv(dev);

	rtnl_lock();
	if (netif_device_present(dev))
		goto out;
	if (!netif_running(dev))
		goto out_attach;
	/* bus mastering must be on before the chip is asked to DMA again */
	pci_set_master(pdev);
	de_init_rings(de);
	de_init_hw(de);
out_attach:
	netif_device_attach(dev);
out:
	rtnl_unlock();
	return 0;
}
2176 
/* Bind de_suspend/de_resume as the system-sleep PM callbacks. */
static SIMPLE_DEV_PM_OPS(de_pm_ops, de_suspend, de_resume);
2178 
/* PCI shutdown hook: bring the interface down cleanly so the chip is
 * quiescent across reboot/kexec.  dev_close() must run under the RTNL.
 */
static void de_shutdown(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);

	rtnl_lock();
	dev_close(ndev);
	rtnl_unlock();
}
2187 
/* PCI driver glue: probe/remove/shutdown entry points and PM ops for
 * every device ID listed in de_pci_tbl.
 */
static struct pci_driver de_driver = {
	.name		= DRV_NAME,
	.id_table	= de_pci_tbl,
	.probe		= de_init_one,
	.remove		= de_remove_one,
	.shutdown	= de_shutdown,
	.driver.pm	= &de_pm_ops,
};

/* Generates the module init/exit boilerplate that registers de_driver. */
module_pci_driver(de_driver);