/*
 * meth.c -- O2 Builtin 10/100 Ethernet driver
 *
 * Copyright (C) 2001-2003 Ilya Volynets
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/device.h>      /* struct device, et al */
#include <linux/netdevice.h>   /* struct net_device, and other headers */
#include <linux/etherdevice.h> /* eth_type_trans */
#include <linux/ip.h>          /* struct iphdr */
#include <linux/tcp.h>         /* struct tcphdr */
#include <linux/skbuff.h>
#include <linux/mii.h>         /* MII definitions */
#include <linux/crc32.h>

#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>

#include <asm/io.h>

#include "meth.h"

#ifndef MFE_DEBUG
#define MFE_DEBUG 0
#endif

#if MFE_DEBUG >= 1
#define DPRINTK(str, args...) printk(KERN_DEBUG "meth: %s: " str, __func__, ## args)
#define MFE_RX_DEBUG 2
#else
#define DPRINTK(str, args...)
#define MFE_RX_DEBUG 0
#endif
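
/*
 * MFE_DEBUG is normally supplied at build time.  A per-file kbuild flag,
 * for example (hypothetical for this tree)
 *
 *	CFLAGS_meth.o := -DMFE_DEBUG=2
 *
 * in the Makefile, is one way to get the verbose DPRINTK and PHY-probe
 * output below without editing this file.
 */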


static const char *meth_str = "SGI O2 Fast Ethernet";

/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
#define TX_TIMEOUT (400*HZ/1000)

static int timeout = TX_TIMEOUT;
module_param(timeout, int, 0);

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * MACE Ethernet uses a 64 element hash table based on the Ethernet CRC.
 */
#define METH_MCF_LIMIT 32

/*
 * This structure is private to each device. It is used to pass
 * packets in and out, so there is a place for a packet.
 */
struct meth_private {
	struct platform_device *pdev;

	/* in-memory copy of MAC Control register */
	u64 mac_ctrl;

	/* in-memory copy of DMA Control register */
	unsigned long dma_ctrl;
	/* address of PHY, used by mdio_* functions, initialized in mdio_probe */
	unsigned long phy_addr;
	tx_packet *tx_ring;
	dma_addr_t tx_ring_dma;
	struct sk_buff *tx_skbs[TX_RING_ENTRIES];
	dma_addr_t tx_skb_dmas[TX_RING_ENTRIES];
	unsigned long tx_read, tx_write, tx_count;

	rx_packet *rx_ring[RX_RING_ENTRIES];
	dma_addr_t rx_ring_dmas[RX_RING_ENTRIES];
	struct sk_buff *rx_skbs[RX_RING_ENTRIES];
	unsigned long rx_write;

	/* Multicast filter. */
	u64 mcast_filter;

	spinlock_t meth_lock;
};

static void meth_tx_timeout(struct net_device *dev);
static irqreturn_t meth_interrupt(int irq, void *dev_id);

/* global, initialized in ip32-setup.c */
char o2meth_eaddr[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

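/*
 * The MACE MAC address register takes all six octets packed into the low
 * 48 bits of one 64-bit word, most significant octet first.  For example
 * (illustrative address only), 08:00:69:12:34:56 is written as
 * 0x0000080069123456.
 */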
static inline void load_eaddr(struct net_device *dev)
{
	int i;
	u64 macaddr;

	DPRINTK("Loading MAC Address: %pM\n", dev->dev_addr);
	macaddr = 0;
	for (i = 0; i < 6; i++)
		macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);

	mace->eth.mac_addr = macaddr;
}

/*
 * Waits for BUSY status of mdio bus to clear
 */
#define WAIT_FOR_PHY(___rval)					\
	while ((___rval = mace->eth.phy_data) & MDIO_BUSY) {	\
		udelay(25);					\
	}
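
/*
 * An MDIO transaction on this hardware is: wait for any previous transaction
 * to finish (MDIO_BUSY clear), program the PHY/register select, trigger the
 * transfer, then wait again before reading the data back.  Note that
 * WAIT_FOR_PHY() spins without a timeout, which relies on the MACE always
 * eventually clearing MDIO_BUSY.
 */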
/* Read a PHY register; returns the value read. */
static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg)
{
	unsigned long rval;

	WAIT_FOR_PHY(rval);
	mace->eth.phy_regs = (priv->phy_addr << 5) | (phyreg & 0x1f);
	udelay(25);
	mace->eth.phy_trans_go = 1;
	udelay(25);
	WAIT_FOR_PHY(rval);
	return rval & MDIO_DATA_MASK;
}

static int mdio_probe(struct meth_private *priv)
{
	int i;
	unsigned long p2, p3, flags;

	/* check if phy is detected already (phy_addr is unsigned,
	 * so "not yet probed" is the wrapped-around -1) */
	if (priv->phy_addr < 32)
		return 0;
	spin_lock_irqsave(&priv->meth_lock, flags);
	for (i = 0; i < 32; ++i) {
		priv->phy_addr = i;
		p2 = mdio_read(priv, 2);
		p3 = mdio_read(priv, 3);
#if MFE_DEBUG >= 2
		switch ((p2 << 12) | (p3 >> 4)) {
		case PHY_QS6612X:
			DPRINTK("PHY is QS6612X\n");
			break;
		case PHY_ICS1889:
			DPRINTK("PHY is ICS1889\n");
			break;
		case PHY_ICS1890:
			DPRINTK("PHY is ICS1890\n");
			break;
		case PHY_DP83840:
			DPRINTK("PHY is DP83840\n");
			break;
		}
#endif
		if (p2 != 0xffff && p2 != 0x0000) {
			DPRINTK("PHY code: %x\n", (p2 << 12) | (p3 >> 4));
			break;
		}
	}
	spin_unlock_irqrestore(&priv->meth_lock, flags);
	if (priv->phy_addr < 32)
		return 0;
	DPRINTK("Oopsie! PHY is not known!\n");
	priv->phy_addr = -1;
	return -ENODEV;
}

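/*
 * The masks below are the standard MII advertisement bits (linux/mii.h):
 * 0x0380 covers ADVERTISE_100BASE4 | ADVERTISE_100FULL | ADVERTISE_100HALF,
 * 0x0100 is ADVERTISE_100FULL, and (negotiated & 0x01C0) == 0x0040 means
 * neither 100Mbit mode was negotiated but 10BASE-T full-duplex
 * (ADVERTISE_10FULL) was.
 */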
static void meth_check_link(struct net_device *dev)
{
	struct meth_private *priv = netdev_priv(dev);
	unsigned long mii_advertising = mdio_read(priv, 4);
	unsigned long mii_partner = mdio_read(priv, 5);
	unsigned long negotiated = mii_advertising & mii_partner;
	unsigned long duplex, speed;

	if (mii_partner == 0xffff)
		return;

	speed = (negotiated & 0x0380) ? METH_100MBIT : 0;
	duplex = ((negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040) ?
		 METH_PHY_FDX : 0;

	if ((priv->mac_ctrl & METH_PHY_FDX) ^ duplex) {
		DPRINTK("Setting %s-duplex\n", duplex ? "full" : "half");
		if (duplex)
			priv->mac_ctrl |= METH_PHY_FDX;
		else
			priv->mac_ctrl &= ~METH_PHY_FDX;
		mace->eth.mac_ctrl = priv->mac_ctrl;
	}

	if ((priv->mac_ctrl & METH_100MBIT) ^ speed) {
		DPRINTK("Setting %dMbs mode\n", speed ? 100 : 10);
		if (speed)
			priv->mac_ctrl |= METH_100MBIT;
		else
			priv->mac_ctrl &= ~METH_100MBIT;
		mace->eth.mac_ctrl = priv->mac_ctrl;
	}
}


static int meth_init_tx_ring(struct meth_private *priv)
{
	/* Init TX ring */
	priv->tx_ring = dma_alloc_coherent(&priv->pdev->dev,
			TX_RING_BUFFER_SIZE, &priv->tx_ring_dma, GFP_ATOMIC);
	if (!priv->tx_ring)
		return -ENOMEM;

	priv->tx_count = priv->tx_read = priv->tx_write = 0;
	mace->eth.tx_ring_base = priv->tx_ring_dma;
	/* Now init skb save area */
	memset(priv->tx_skbs, 0, sizeof(priv->tx_skbs));
	memset(priv->tx_skb_dmas, 0, sizeof(priv->tx_skb_dmas));
	return 0;
}

static int meth_init_rx_ring(struct meth_private *priv)
{
	int i;

	for (i = 0; i < RX_RING_ENTRIES; i++) {
		priv->rx_skbs[i] = alloc_skb(METH_RX_BUFF_SIZE, 0);
		/* 8byte status vector + 3quad padding + 2byte padding,
		 * to put data on 64bit aligned boundary */
		skb_reserve(priv->rx_skbs[i], METH_RX_HEAD);
		priv->rx_ring[i] = (rx_packet *)(priv->rx_skbs[i]->head);
		/* I'll need to re-sync it after each RX */
		priv->rx_ring_dmas[i] =
			dma_map_single(&priv->pdev->dev, priv->rx_ring[i],
				       METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
		mace->eth.rx_fifo = priv->rx_ring_dmas[i];
	}
	priv->rx_write = 0;
	return 0;
}

static void meth_free_tx_ring(struct meth_private *priv)
{
	int i;

	/* Remove any pending skb */
	for (i = 0; i < TX_RING_ENTRIES; i++) {
		if (priv->tx_skbs[i])
			dev_kfree_skb(priv->tx_skbs[i]);
		priv->tx_skbs[i] = NULL;
	}
	dma_free_coherent(&priv->pdev->dev, TX_RING_BUFFER_SIZE, priv->tx_ring,
	                  priv->tx_ring_dma);
}

/* Presumes RX DMA engine is stopped, and RX fifo ring is reset */
static void meth_free_rx_ring(struct meth_private *priv)
{
	int i;

	for (i = 0; i < RX_RING_ENTRIES; i++) {
		dma_unmap_single(&priv->pdev->dev, priv->rx_ring_dmas[i],
				 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
		priv->rx_ring[i] = NULL;
		priv->rx_ring_dmas[i] = 0;
		kfree_skb(priv->rx_skbs[i]);
	}
}

static int meth_reset(struct net_device *dev)
{
	struct meth_private *priv = netdev_priv(dev);

	/* Reset card */
	mace->eth.mac_ctrl = SGI_MAC_RESET;
	udelay(1);
	mace->eth.mac_ctrl = 0;
	udelay(25);

	/* Load ethernet address */
	load_eaddr(dev);
	/* Should load some "errata", but later */

	/* Check for device */
	if (mdio_probe(priv) < 0) {
		DPRINTK("Unable to find PHY\n");
		return -ENODEV;
	}

	/* Initial mode: 10 | Half-duplex | Accept normal packets */
	priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG;
	if (dev->flags & IFF_PROMISC)
		priv->mac_ctrl |= METH_PROMISC;
	mace->eth.mac_ctrl = priv->mac_ctrl;

	/* Autonegotiate speed and duplex mode */
	meth_check_link(dev);

	/* Now set dma control, but don't enable DMA, yet */
	priv->dma_ctrl = (4 << METH_RX_OFFSET_SHIFT) |
			 (RX_RING_ENTRIES << METH_RX_DEPTH_SHIFT);
	mace->eth.dma_ctrl = priv->dma_ctrl;

	return 0;
}

/*============End Helper Routines=====================*/

/*
 * Open and close
 */
static int meth_open(struct net_device *dev)
{
	struct meth_private *priv = netdev_priv(dev);
	int ret;

	priv->phy_addr = -1;    /* No PHY is known yet... */

	/* Initialize the hardware */
	ret = meth_reset(dev);
	if (ret < 0)
		return ret;

	/* Allocate the ring buffers */
	ret = meth_init_tx_ring(priv);
	if (ret < 0)
		return ret;
	ret = meth_init_rx_ring(priv);
	if (ret < 0)
		goto out_free_tx_ring;

	ret = request_irq(dev->irq, meth_interrupt, 0, meth_str, dev);
	if (ret) {
		printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
		goto out_free_rx_ring;
	}

	/* Start DMA */
	priv->dma_ctrl |= METH_DMA_TX_EN | /*METH_DMA_TX_INT_EN |*/
			  METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
	mace->eth.dma_ctrl = priv->dma_ctrl;

	DPRINTK("About to start queue\n");
	netif_start_queue(dev);

	return 0;

out_free_rx_ring:
	meth_free_rx_ring(priv);
out_free_tx_ring:
	meth_free_tx_ring(priv);

	return ret;
}

static int meth_release(struct net_device *dev)
{
	struct meth_private *priv = netdev_priv(dev);

	DPRINTK("Stopping queue\n");
	netif_stop_queue(dev); /* can't transmit any more */
	/* shut down DMA */
	priv->dma_ctrl &= ~(METH_DMA_TX_EN | METH_DMA_TX_INT_EN |
			    METH_DMA_RX_EN | METH_DMA_RX_INT_EN);
	mace->eth.dma_ctrl = priv->dma_ctrl;
	free_irq(dev->irq, dev);
	meth_free_tx_ring(priv);
	meth_free_rx_ring(priv);

	return 0;
}

/*
 * Receive a packet: retrieve, encapsulate and pass over to upper levels
 */
static void meth_rx(struct net_device *dev, unsigned long int_status)
{
	struct sk_buff *skb;
	unsigned long status, flags;
	struct meth_private *priv = netdev_priv(dev);
	unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8;

	spin_lock_irqsave(&priv->meth_lock, flags);
	priv->dma_ctrl &= ~METH_DMA_RX_INT_EN;
	mace->eth.dma_ctrl = priv->dma_ctrl;
	spin_unlock_irqrestore(&priv->meth_lock, flags);

	if (int_status & METH_INT_RX_UNDERFLOW)
		fifo_rptr = (fifo_rptr - 1) & 0x0f;
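	/*
	 * fifo_rptr (extracted from the interrupt status above) is the
	 * hardware's current read pointer into the RX FIFO; walk our
	 * software write pointer forward until the two meet, handing each
	 * completed frame up and queueing a fresh buffer in its place.
	 */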
	while (priv->rx_write != fifo_rptr) {
		dma_unmap_single(&priv->pdev->dev,
				 priv->rx_ring_dmas[priv->rx_write],
				 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
		status = priv->rx_ring[priv->rx_write]->status.raw;
#if MFE_DEBUG
		if (!(status & METH_RX_ST_VALID))
			DPRINTK("Not received? status=%016lx\n", status);
#endif
		if ((!(status & METH_RX_STATUS_ERRORS)) && (status & METH_RX_ST_VALID)) {
			int len = (status & 0xffff) - 4; /* omit CRC */
			/* length sanity check */
			if (len < 60 || len > 1518) {
				printk(KERN_DEBUG "%s: bogus packet size: %ld, status=%#2Lx.\n",
				       dev->name, priv->rx_write,
				       priv->rx_ring[priv->rx_write]->status.raw);
				dev->stats.rx_errors++;
				dev->stats.rx_length_errors++;
				skb = priv->rx_skbs[priv->rx_write];
			} else {
				skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC);
				if (!skb) {
					/* Ouch! No memory! Drop packet on the floor */
					DPRINTK("No mem: dropping packet\n");
					dev->stats.rx_dropped++;
					skb = priv->rx_skbs[priv->rx_write];
				} else {
					struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write];
					/* 8byte status vector + 3quad padding + 2byte padding,
					 * to put data on 64bit aligned boundary */
					skb_reserve(skb, METH_RX_HEAD);
					/* Write metadata, and then pass to the receive level */
					skb_put(skb_c, len);
					priv->rx_skbs[priv->rx_write] = skb;
					skb_c->protocol = eth_type_trans(skb_c, dev);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += len;
					netif_rx(skb_c);
				}
			}
		} else {
			dev->stats.rx_errors++;
			skb = priv->rx_skbs[priv->rx_write];
#if MFE_DEBUG > 0
			printk(KERN_WARNING "meth: RX error: status=0x%016lx\n", status);
			if (status & METH_RX_ST_RCV_CODE_VIOLATION)
				printk(KERN_WARNING "Receive Code Violation\n");
			if (status & METH_RX_ST_CRC_ERR)
				printk(KERN_WARNING "CRC error\n");
			if (status & METH_RX_ST_INV_PREAMBLE_CTX)
				printk(KERN_WARNING "Invalid Preamble Context\n");
			if (status & METH_RX_ST_LONG_EVT_SEEN)
				printk(KERN_WARNING "Long Event Seen...\n");
			if (status & METH_RX_ST_BAD_PACKET)
				printk(KERN_WARNING "Bad Packet\n");
			if (status & METH_RX_ST_CARRIER_EVT_SEEN)
				printk(KERN_WARNING "Carrier Event Seen\n");
#endif
		}
		priv->rx_ring[priv->rx_write] = (rx_packet *)skb->head;
		priv->rx_ring[priv->rx_write]->status.raw = 0;
		priv->rx_ring_dmas[priv->rx_write] =
			dma_map_single(&priv->pdev->dev,
				       priv->rx_ring[priv->rx_write],
				       METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
		mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
		ADVANCE_RX_PTR(priv->rx_write);
	}
	spin_lock_irqsave(&priv->meth_lock, flags);
	/* In case there was underflow, and Rx DMA was disabled */
	priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN;
	mace->eth.dma_ctrl = priv->dma_ctrl;
	mace->eth.int_stat = METH_INT_RX_THRESHOLD;
	spin_unlock_irqrestore(&priv->meth_lock, flags);
}

static int meth_tx_full(struct net_device *dev)
{
	struct meth_private *priv = netdev_priv(dev);

	return priv->tx_count >= TX_RING_ENTRIES - 1;
}

static void meth_tx_cleanup(struct net_device *dev, unsigned long int_status)
{
	struct meth_private *priv = netdev_priv(dev);
	unsigned long status, flags;
	struct sk_buff *skb;
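	/* The hardware's TX read pointer rides along in bits 16+ of the
	 * interrupt status word; every descriptor behind it is done. */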
	unsigned long rptr = (int_status & TX_INFO_RPTR) >> 16;

	spin_lock_irqsave(&priv->meth_lock, flags);

	/* Stop DMA notification */
	priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
	mace->eth.dma_ctrl = priv->dma_ctrl;

	while (priv->tx_read != rptr) {
		skb = priv->tx_skbs[priv->tx_read];
		status = priv->tx_ring[priv->tx_read].header.raw;
#if MFE_DEBUG >= 1
		if (priv->tx_read == priv->tx_write)
			DPRINTK("Auchi! tx_read=%lu, tx_write=%lu, rptr=%lu?\n",
				priv->tx_read, priv->tx_write, rptr);
#endif
		if (status & METH_TX_ST_DONE) {
			if (status & METH_TX_ST_SUCCESS) {
				dev->stats.tx_packets++;
				dev->stats.tx_bytes += skb->len;
			} else {
				dev->stats.tx_errors++;
#if MFE_DEBUG >= 1
				DPRINTK("TX error: status=%016lx <", status);
				if (status & METH_TX_ST_SUCCESS)
					printk(" SUCCESS");
				if (status & METH_TX_ST_TOOLONG)
					printk(" TOOLONG");
				if (status & METH_TX_ST_UNDERRUN)
					printk(" UNDERRUN");
				if (status & METH_TX_ST_EXCCOLL)
					printk(" EXCCOLL");
				if (status & METH_TX_ST_DEFER)
					printk(" DEFER");
				if (status & METH_TX_ST_LATECOLL)
					printk(" LATECOLL");
				printk(" >\n");
#endif
			}
		} else {
			DPRINTK("RPTR points us here, but packet not done?\n");
			break;
		}
		dev_consume_skb_irq(skb);
		priv->tx_skbs[priv->tx_read] = NULL;
		priv->tx_ring[priv->tx_read].header.raw = 0;
		priv->tx_read = (priv->tx_read + 1) & (TX_RING_ENTRIES - 1);
		priv->tx_count--;
	}

	/* wake up queue if it was stopped */
	if (netif_queue_stopped(dev) && !meth_tx_full(dev))
		netif_wake_queue(dev);

	mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT;
	spin_unlock_irqrestore(&priv->meth_lock, flags);
}

static void meth_error(struct net_device *dev, unsigned status)
{
	struct meth_private *priv = netdev_priv(dev);
	unsigned long flags;

	printk(KERN_WARNING "meth: error status: 0x%08x\n", status);
	/* check for errors too... */
	if (status & (METH_INT_TX_LINK_FAIL))
		printk(KERN_WARNING "meth: link failure\n");
	/* Should I do full reset in this case? */
	if (status & (METH_INT_MEM_ERROR))
		printk(KERN_WARNING "meth: memory error\n");
	if (status & (METH_INT_TX_ABORT))
		printk(KERN_WARNING "meth: aborted\n");
	if (status & (METH_INT_RX_OVERFLOW))
		printk(KERN_WARNING "meth: Rx overflow\n");
	if (status & (METH_INT_RX_UNDERFLOW)) {
		printk(KERN_WARNING "meth: Rx underflow\n");
		spin_lock_irqsave(&priv->meth_lock, flags);
		mace->eth.int_stat = METH_INT_RX_UNDERFLOW;
		/* more underflow interrupts will be delivered,
		 * effectively throwing us into an infinite loop.
		 * Thus I stop processing Rx in this case. */
		priv->dma_ctrl &= ~METH_DMA_RX_EN;
		mace->eth.dma_ctrl = priv->dma_ctrl;
		DPRINTK("Disabled meth Rx DMA temporarily\n");
		spin_unlock_irqrestore(&priv->meth_lock, flags);
	}
	mace->eth.int_stat = METH_INT_ERROR;
}

/*
 * The typical interrupt entry point
 */
static irqreturn_t meth_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct meth_private *priv = netdev_priv(dev);
	unsigned long status;

	status = mace->eth.int_stat;
	while (status & 0xff) {
		/* First handle errors - if we get Rx underflow,
		 * Rx DMA will be disabled, and the Rx handler will
		 * reenable it. I don't think it's possible to get Rx
		 * underflow without getting an Rx interrupt */
		if (status & METH_INT_ERROR)
			meth_error(dev, status);
		if (status & (METH_INT_TX_EMPTY | METH_INT_TX_PKT)) {
			/* a transmission is over: free the skb */
			meth_tx_cleanup(dev, status);
		}
		if (status & METH_INT_RX_THRESHOLD) {
			if (!(priv->dma_ctrl & METH_DMA_RX_INT_EN))
				break;
			/* send it to meth_rx for handling */
			meth_rx(dev, status);
		}
		status = mace->eth.int_stat;
	}

	return IRQ_HANDLED;
}

/*
 * Transmits packets that fit into the TX descriptor (i.e. <= 120 bytes)
 */
static void meth_tx_short_prepare(struct meth_private *priv,
				  struct sk_buff *skb)
{
	tx_packet *desc = &priv->tx_ring[priv->tx_write];
	int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;

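	/*
	 * A TX descriptor is 128 bytes: an 8-byte header followed by 120
	 * data bytes.  The header encodes the total length as (len - 1)
	 * and, in bits 16+, the data's offset within the descriptor
	 * (128 - len), so short packets are packed against the *end* of
	 * the descriptor -- hence the (120 - len) destination below.
	 */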
	desc->header.raw = METH_TX_CMD_INT_EN | (len - 1) | ((128 - len) << 16);
	/* maybe I should set the whole thing to 0 first... */
	skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len);
	if (skb->len < len)
		memset(desc->data.dt + 120 - len + skb->len, 0, len - skb->len);
}
#define TX_CATBUF1 BIT(25)
static void meth_tx_1page_prepare(struct meth_private *priv,
				  struct sk_buff *skb)
{
	tx_packet *desc = &priv->tx_ring[priv->tx_write];
	void *buffer_data = (void *)(((unsigned long)skb->data + 7) & ~7);
	int unaligned_len = (int)((unsigned long)buffer_data - (unsigned long)skb->data);
	int buffer_len = skb->len - unaligned_len;
	dma_addr_t catbuf;

	desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | (skb->len - 1);

	/* unaligned part */
	if (unaligned_len) {
		skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
			      unaligned_len);
		desc->header.raw |= (128 - unaligned_len) << 16;
	}

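	/*
	 * The catbuf ("concatenate buffer") pointer fields hold 8-byte
	 * aligned addresses, which is why the DMA address is shifted right
	 * by 3 below; the unaligned head of the skb was already copied
	 * into the descriptor itself above.
	 */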
	/* first page */
	catbuf = dma_map_single(&priv->pdev->dev, buffer_data, buffer_len,
				DMA_TO_DEVICE);
	desc->data.cat_buf[0].form.start_addr = catbuf >> 3;
	desc->data.cat_buf[0].form.len = buffer_len - 1;
}
#define TX_CATBUF2 BIT(26)
static void meth_tx_2page_prepare(struct meth_private *priv,
				  struct sk_buff *skb)
{
	tx_packet *desc = &priv->tx_ring[priv->tx_write];
	void *buffer1_data = (void *)(((unsigned long)skb->data + 7) & ~7);
	void *buffer2_data = (void *)PAGE_ALIGN((unsigned long)skb->data);
	int unaligned_len = (int)((unsigned long)buffer1_data - (unsigned long)skb->data);
	int buffer1_len = (int)((unsigned long)buffer2_data - (unsigned long)buffer1_data);
	int buffer2_len = skb->len - buffer1_len - unaligned_len;
	dma_addr_t catbuf1, catbuf2;

	desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2 | (skb->len - 1);
	/* unaligned part */
	if (unaligned_len) {
		skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
			      unaligned_len);
		desc->header.raw |= (128 - unaligned_len) << 16;
	}

	/* first page */
	catbuf1 = dma_map_single(&priv->pdev->dev, buffer1_data, buffer1_len,
				 DMA_TO_DEVICE);
	desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3;
	desc->data.cat_buf[0].form.len = buffer1_len - 1;
	/* second page */
	catbuf2 = dma_map_single(&priv->pdev->dev, buffer2_data, buffer2_len,
				 DMA_TO_DEVICE);
	desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3;
	desc->data.cat_buf[1].form.len = buffer2_len - 1;
}

static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
{
	/* Remember the skb, so we can free it at interrupt time */
	priv->tx_skbs[priv->tx_write] = skb;
	if (skb->len <= 120) {
		/* Whole packet fits into descriptor */
		meth_tx_short_prepare(priv, skb);
	} else if (PAGE_ALIGN((unsigned long)skb->data) !=
		   PAGE_ALIGN((unsigned long)skb->data + skb->len - 1)) {
		/* Packet crosses page boundary */
		meth_tx_2page_prepare(priv, skb);
	} else {
		/* Packet is in one page */
		meth_tx_1page_prepare(priv, skb);
	}
	priv->tx_write = (priv->tx_write + 1) & (TX_RING_ENTRIES - 1);
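	/* Publishing the new write pointer via tx_info appears to be what
	 * hands the fresh descriptor to the MACE TX DMA engine. */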
	mace->eth.tx_info = priv->tx_write;
	priv->tx_count++;
}

/*
 * Transmit a packet (called by the kernel)
 */
static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct meth_private *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->meth_lock, flags);
	/* Stop DMA notification */
	priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
	mace->eth.dma_ctrl = priv->dma_ctrl;

	meth_add_to_tx_ring(priv, skb);
	netif_trans_update(dev); /* save the timestamp */

	/* If TX ring is full, tell the upper layer to stop sending packets */
	if (meth_tx_full(dev)) {
		printk(KERN_DEBUG "TX full: stopping\n");
		netif_stop_queue(dev);
	}

	/* Restart DMA notification */
	priv->dma_ctrl |= METH_DMA_TX_INT_EN;
	mace->eth.dma_ctrl = priv->dma_ctrl;

	spin_unlock_irqrestore(&priv->meth_lock, flags);

	return NETDEV_TX_OK;
}

/*
 * Deal with a transmit timeout.
 */
static void meth_tx_timeout(struct net_device *dev)
{
	struct meth_private *priv = netdev_priv(dev);
	unsigned long flags;

	printk(KERN_WARNING "%s: transmit timed out\n", dev->name);

	/* Protect against concurrent rx interrupts */
	spin_lock_irqsave(&priv->meth_lock, flags);

	/* Try to reset the interface. */
	meth_reset(dev);

	dev->stats.tx_errors++;

	/* Clear all rings */
	meth_free_tx_ring(priv);
	meth_free_rx_ring(priv);
	meth_init_tx_ring(priv);
	meth_init_rx_ring(priv);

	/* Restart dma */
	priv->dma_ctrl |= METH_DMA_TX_EN | METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
	mace->eth.dma_ctrl = priv->dma_ctrl;

	/* Enable interrupt */
	spin_unlock_irqrestore(&priv->meth_lock, flags);

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}

/*
 * Ioctl commands
 */
static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* XXX Not yet implemented */
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
}

static void meth_set_rx_mode(struct net_device *dev)
{
	struct meth_private *priv = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	spin_lock_irqsave(&priv->meth_lock, flags);
	priv->mac_ctrl &= ~METH_PROMISC;

	if (dev->flags & IFF_PROMISC) {
		priv->mac_ctrl |= METH_PROMISC;
		priv->mcast_filter = 0xffffffffffffffffUL;
	} else if ((netdev_mc_count(dev) > METH_MCF_LIMIT) ||
		   (dev->flags & IFF_ALLMULTI)) {
		priv->mac_ctrl |= METH_ACCEPT_AMCAST;
		priv->mcast_filter = 0xffffffffffffffffUL;
	} else {
		struct netdev_hw_addr *ha;

		priv->mac_ctrl |= METH_ACCEPT_MCAST;

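		/*
		 * Hash each address into the 64-bit filter: the top six
		 * bits of the Ethernet CRC select one of 64 bit positions,
		 * i.e. ether_crc(ETH_ALEN, addr) >> 26 yields 0..63.
		 */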
		netdev_for_each_mc_addr(ha, dev)
			set_bit((ether_crc(ETH_ALEN, ha->addr) >> 26),
				(volatile unsigned long *)&priv->mcast_filter);
	}

	/* Write the changes to the chip registers. */
	mace->eth.mac_ctrl = priv->mac_ctrl;
	mace->eth.mcast_filter = priv->mcast_filter;

	/* Done! */
	spin_unlock_irqrestore(&priv->meth_lock, flags);
	netif_wake_queue(dev);
}

static const struct net_device_ops meth_netdev_ops = {
	.ndo_open		= meth_open,
	.ndo_stop		= meth_release,
	.ndo_start_xmit		= meth_tx,
	.ndo_do_ioctl		= meth_ioctl,
	.ndo_tx_timeout		= meth_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_rx_mode	= meth_set_rx_mode,
};

/*
 * The init function.
 */
static int meth_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct meth_private *priv;
	int err;

	dev = alloc_etherdev(sizeof(struct meth_private));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops		= &meth_netdev_ops;
	dev->watchdog_timeo	= timeout;
	dev->irq		= MACE_ETHERNET_IRQ;
	dev->base_addr		= (unsigned long)&mace->eth;
	memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);

	priv = netdev_priv(dev);
	priv->pdev = pdev;
	spin_lock_init(&priv->meth_lock);
	SET_NETDEV_DEV(dev, &pdev->dev);
	/* meth_remove() looks the net_device up via drvdata */
	platform_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}

	printk(KERN_INFO "%s: SGI MACE Ethernet rev. %d\n",
	       dev->name, (unsigned int)(mace->eth.mac_ctrl >> 29));
	return 0;
}

static int meth_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	free_netdev(dev);

	return 0;
}

static struct platform_driver meth_driver = {
	.probe	= meth_probe,
	.remove	= meth_remove,
	.driver = {
		.name	= "meth",
	}
};

module_platform_driver(meth_driver);

MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:meth");