/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <m@bues.ch>
 *
 * Distribute under GPL.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define DRV_MODULE_VERSION	"2.0"
#define DRV_DESCRIPTION		"Broadcom 44xx/47xx 10/100 PCI ethernet driver"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

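/* TX ring bookkeeping: tx_prod is where the driver inserts and tx_cons
 * is where the chip has completed.  TX_BUFFS_AVAIL computes the free
 * entries in the (power-of-two) ring while honoring the configured
 * tx_pending limit, and NEXT_TX wraps an index with a cheap mask
 * instead of a modulo.
 */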
#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))

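/* Each RX buffer starts with the chip-written rx_header (RX_HEADER_LEN
 * bytes); the extra 2 bytes of offset presumably keep the IP header
 * 4-byte aligned once the 14-byte Ethernet header is in place.
 */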
#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
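/* Example: loading the module with "modprobe b44 b44_debug=0x0007"
 * would enable only the NETIF_MSG_DRV, NETIF_MSG_PROBE and
 * NETIF_MSG_LINK message classes.
 */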

#ifdef CONFIG_B44_PCI
static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
	{ 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
	SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3
#define B44_CHIP_RESET_FULL	4
#define B44_CHIP_RESET_PARTIAL	5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_sync_size;
static int instance;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
				   dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
				dma_desc_sync_size, dir);
}

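/* Register accessors: thin wrappers around the SSB core read/write
 * helpers, so the rest of the driver stays bus-agnostic.
 */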
static inline u32 br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}

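/* Busy-wait until @bit of @reg is set (or cleared, when @clear is
 * nonzero).  Each iteration delays 10 usec, so @timeout is in roughly
 * 10 usec units.
 */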
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		if (net_ratelimit())
			netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
				   bit, reg, clear ? "clear" : "set");

		return -ENODEV;
	}
	return 0;
}

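/* The CAM holds the receive filter entries.  Each entry is a MAC
 * address split across two registers: DATA_LO carries the last four
 * bytes and DATA_HI the first two.  An access is kicked off via
 * CAM_CTRL and has completed once CAM_CTRL_BUSY clears.
 */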
static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
			    (index << CAM_CTRL_INDEX_SHIFT)));

	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

	val = br32(bp, B44_CAM_DATA_LO);

	data[2] = (val >> 24) & 0xFF;
	data[3] = (val >> 16) & 0xFF;
	data[4] = (val >> 8) & 0xFF;
	data[5] = (val >> 0) & 0xFF;

	val = br32(bp, B44_CAM_DATA_HI);

	data[0] = (val >> 8) & 0xFF;
	data[1] = (val >> 0) & 0xFF;
}

static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}

static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_writephy(bp, bp->phy_addr, reg, val);
}

/* miilib interface */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			 int val)
{
	struct b44 *bp = netdev_priv(dev);
	__b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			netdev_err(bp->dev, "PHY Reset would not complete\n");
			err = -ENODEV;
		}
	}

	return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)) {
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}

#ifdef CONFIG_BCM47XX
#include <asm/mach-bcm47xx/nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
	char buf[20];
	u32 val;
	int err;

	/*
	 * workaround for bad hardware design in Linksys WAP54G v1.0
	 * see https://dev.openwrt.org/ticket/146
	 * check and reset bit "isolate"
	 */
	if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
		return;
	if (simple_strtoul(buf, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	b44_wap54g10_workaround(bp);

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}

static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	/* Pad */
	reg += 8*4UL;

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
}

static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		netdev_info(bp->dev, "Link is down\n");
	} else {
		netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
			    (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
			    (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
			    (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
		bp->flags |= B44_FLAG_100_BASE_T;
		bp->flags |= B44_FLAG_FULL_DUPLEX;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			val |= TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
		if (bmsr & BMSR_JCD)
			netdev_warn(bp->dev, "Jabber detected in PHY\n");
	}
}

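/* Once-a-second timer: watch for PHY link changes and fold the
 * clear-on-read MIB counters into the software statistics.
 */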
static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

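/* Reclaim completed TX descriptors: read the hardware consumer index
 * from DMATX_STAT, unmap and free every skb up to it, and wake the
 * queue once enough entries are free again.
 */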
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		dma_unmap_single(bp->sdev->dma_dev,
				 rp->mapping,
				 skb->len,
				 DMA_TO_DEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
				 RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

	/* Hardware bug work-around: the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
	    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Sigh... */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping,
					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
					 RX_PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		bp->force_copybreak = 1;
	}

	rh = (struct rx_header *) skb->data;

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}

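/* Re-post an RX buffer whose packet was dropped: move the skb and its
 * DMA mapping from the source descriptor slot to the current producer
 * slot, so the buffer is reused without a fresh allocation.
 */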
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					  src_idx * sizeof(*src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
				   RX_PKT_BUF_SZ,
				   DMA_FROM_DEVICE);
}

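/* NAPI receive: walk the ring from rx_cons up to the hardware producer
 * index, passing large packets up in place (re-arming the slot with a
 * fresh buffer) and copying small ones into a new skb so the big
 * buffer can be recycled.
 */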
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
					RX_PKT_BUF_SZ,
					DMA_FROM_DEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

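		/* A zero length here means the completion raced ahead
		 * of the chip's write of rh->len; give that write a few
		 * microseconds to land before dropping the packet.
		 */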
		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			dma_unmap_single(bp->sdev->dma_dev, map,
					 skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}

static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
		bp->istat &= ~ISTAT_RFO;
		b44_disable_ints(bp);
		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete(napi);
		b44_enable_ints(bp);
	}

	return work_done;
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by
	 * hw/firmware, but it does not hide those bits in the status
	 * register itself.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			netdev_info(dev, "late interrupt\n");
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
			/* NOTE: These posted writes are flushed by the
			 *       readback of the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}

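/* Hot path: map the skb for DMA (copying through a GFP_DMA bounce
 * buffer if the mapping lands above the chip's 1GB window), fill in a
 * single descriptor and kick the TX DMA pointer.
 */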
static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;
	unsigned long flags;

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_out;
	}

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
		struct sk_buff *bounce_skb;

		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
					 DMA_TO_DEVICE);

		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
					 len, DMA_TO_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is brought up.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES, DMA_TO_DEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
	int size;

	size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, gfp);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, gfp);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
					 &bp->rx_ring_dma, gfp);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to dma_alloc_coherent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, gfp);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);

		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
		    rx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
					 &bp->tx_ring_dma, gfp);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to dma_alloc_coherent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, gfp);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);

		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
		    tx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
	struct ssb_device *sdev = bp->sdev;
	bool was_enabled;

	was_enabled = ssb_device_is_enabled(bp->sdev);

	ssb_device_enable(bp->sdev, 0);
	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

	if (was_enabled) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	}

	b44_clear_stats(bp);

	/*
	 * Don't enable the PHY if we are only doing a partial reset;
	 * we are probably going to power down.
	 */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	switch (sdev->bus->bustype) {
	case SSB_BUSTYPE_SSB:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
					B44_MDC_RATIO)
		     & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCI:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (0x0d & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}

	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	/* reset PHY */
	b44_phy_reset(bp);
	/* power down PHY */
	netdev_info(bp->dev, "powering down PHY\n");
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
	/* now reset the chip, but without enabling the MAC&PHY
	 * part of it. This has to be done _after_ we shut down the PHY */
	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);

	val = br32(bp, B44_RXCONFIG);
	if (!(val & RXCONFIG_CAM_ABSENT))
		__b44_set_mac_addr(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp, GFP_KERNEL);
	if (err)
		goto out;

	napi_enable(&bp->napi);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		napi_disable(&bp->napi);
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
	u32 i;
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
	}
}

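/* Build one Wake-on-LAN magic-packet pattern starting at @offset: six
 * 0xff sync bytes followed by repetitions of the MAC address, setting
 * the matching bits in @pmask as the pattern fills.  Returns the
 * pattern length minus one, which is what the WKUP_LEN register
 * expects.
 */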
static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
	int magicsync = 6;
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++)
		set_bit(len++, (unsigned long *) pmask);

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
		else
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <= 0)
			break;
		for (k = 0; k < ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				(j * ETH_ALEN) + k] = macaddr[k];
			set_bit(len++, (unsigned long *) pmask);
		}
	}
	return len - 1;
}

/* Set up magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern) {
		pr_err("Memory not available for WOL\n");
		return;
	}

	/* IPv4 magic packet pattern - pattern 0. */
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* IPv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these patterns' lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}

#ifdef CONFIG_B44_PCI
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 val;

	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
	}
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */

static void b44_setup_wol(struct b44 *bp)
{
	u32 val;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

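	/* B0 and later cores appear to have a built-in magic-packet
	 * matcher: program the station address and enable MPM.  Older
	 * cores emulate it with the pattern-match filter instead.
	 */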
	if (bp->flags & B44_FLAG_B0_ANDLATER) {
		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

	} else {
		b44_setup_pseudo_magicp(bp);
	}
	b44_setup_wol_pci(bp);
}

static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	napi_disable(&bp->napi);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}

static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes   = hwstat->rx_octets;
	nstat->tx_bytes   = hwstat->tx_octets;
	nstat->tx_errors  = (hwstat->tx_jabber_pkts +
			     hwstat->tx_oversize_pkts +
			     hwstat->tx_underruns +
			     hwstat->tx_excessive_cols +
			     hwstat->tx_late_cols);
	nstat->multicast  = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors   = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors  = hwstat->rx_align_errs;
	nstat->rx_crc_errors    = hwstat->rx_crc_errs;
	nstat->rx_errors        = (hwstat->rx_jabber_pkts +
				   hwstat->rx_oversize_pkts +
				   hwstat->rx_missed_pkts +
				   hwstat->rx_crc_align_errs +
				   hwstat->rx_undersize +
				   hwstat->rx_crc_errs +
				   hwstat->rx_align_errs +
				   hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
	/* Carrier lost counter seems to be broken for some devices */
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

	return nstat;
}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	int i, num_ents;

	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		if (i == num_ents)
			break;
		__b44_cam_write(bp, ha->addr, i++ + 1);
	}
	return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 1;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct ssb_bus *bus = bp->sdev->bus;

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	switch (bus->bustype) {
	case SSB_BUSTYPE_PCI:
		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_SSB:
		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}
}

static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
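	/* BMCR is deliberately read twice here; presumably the first
	 * read can return a stale value and only the second one is
	 * trusted.
	 */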
1775 	b44_readphy(bp, MII_BMCR, &bmcr);
1776 	b44_readphy(bp, MII_BMCR, &bmcr);
1777 	r = -EINVAL;
1778 	if (bmcr & BMCR_ANENABLE) {
1779 		b44_writephy(bp, MII_BMCR,
1780 			     bmcr | BMCR_ANRESTART);
1781 		r = 0;
1782 	}
1783 	spin_unlock_irq(&bp->lock);
1784 
1785 	return r;
1786 }
1787 
1788 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1789 {
1790 	struct b44 *bp = netdev_priv(dev);
1791 
1792 	cmd->supported = (SUPPORTED_Autoneg);
1793 	cmd->supported |= (SUPPORTED_100baseT_Half |
1794 			  SUPPORTED_100baseT_Full |
1795 			  SUPPORTED_10baseT_Half |
1796 			  SUPPORTED_10baseT_Full |
1797 			  SUPPORTED_MII);
1798 
1799 	cmd->advertising = 0;
1800 	if (bp->flags & B44_FLAG_ADV_10HALF)
1801 		cmd->advertising |= ADVERTISED_10baseT_Half;
1802 	if (bp->flags & B44_FLAG_ADV_10FULL)
1803 		cmd->advertising |= ADVERTISED_10baseT_Full;
1804 	if (bp->flags & B44_FLAG_ADV_100HALF)
1805 		cmd->advertising |= ADVERTISED_100baseT_Half;
1806 	if (bp->flags & B44_FLAG_ADV_100FULL)
1807 		cmd->advertising |= ADVERTISED_100baseT_Full;
1808 	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1809 	ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
1810 				    SPEED_100 : SPEED_10));
1811 	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1812 		DUPLEX_FULL : DUPLEX_HALF;
1813 	cmd->port = 0;
1814 	cmd->phy_address = bp->phy_addr;
1815 	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1816 		XCVR_INTERNAL : XCVR_EXTERNAL;
1817 	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1818 		AUTONEG_DISABLE : AUTONEG_ENABLE;
1819 	if (cmd->autoneg == AUTONEG_ENABLE)
1820 		cmd->advertising |= ADVERTISED_Autoneg;
1821 	if (!netif_running(dev)){
1822 		ethtool_cmd_speed_set(cmd, 0);
1823 		cmd->duplex = 0xff;
1824 	}
1825 	cmd->maxtxpkt = 0;
1826 	cmd->maxrxpkt = 0;
1827 	return 0;
1828 }
1829 
1830 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1831 {
1832 	struct b44 *bp = netdev_priv(dev);
1833 	u32 speed = ethtool_cmd_speed(cmd);
1834 
1835 	/* We do not support gigabit. */
1836 	if (cmd->autoneg == AUTONEG_ENABLE) {
1837 		if (cmd->advertising &
1838 		    (ADVERTISED_1000baseT_Half |
1839 		     ADVERTISED_1000baseT_Full))
1840 			return -EINVAL;
1841 	} else if ((speed != SPEED_100 &&
1842 		    speed != SPEED_10) ||
1843 		   (cmd->duplex != DUPLEX_HALF &&
1844 		    cmd->duplex != DUPLEX_FULL)) {
1845 			return -EINVAL;
1846 	}
1847 
1848 	spin_lock_irq(&bp->lock);
1849 
1850 	if (cmd->autoneg == AUTONEG_ENABLE) {
1851 		bp->flags &= ~(B44_FLAG_FORCE_LINK |
1852 			       B44_FLAG_100_BASE_T |
1853 			       B44_FLAG_FULL_DUPLEX |
1854 			       B44_FLAG_ADV_10HALF |
1855 			       B44_FLAG_ADV_10FULL |
1856 			       B44_FLAG_ADV_100HALF |
1857 			       B44_FLAG_ADV_100FULL);
1858 		if (cmd->advertising == 0) {
1859 			bp->flags |= (B44_FLAG_ADV_10HALF |
1860 				      B44_FLAG_ADV_10FULL |
1861 				      B44_FLAG_ADV_100HALF |
1862 				      B44_FLAG_ADV_100FULL);
1863 		} else {
1864 			if (cmd->advertising & ADVERTISED_10baseT_Half)
1865 				bp->flags |= B44_FLAG_ADV_10HALF;
1866 			if (cmd->advertising & ADVERTISED_10baseT_Full)
1867 				bp->flags |= B44_FLAG_ADV_10FULL;
1868 			if (cmd->advertising & ADVERTISED_100baseT_Half)
1869 				bp->flags |= B44_FLAG_ADV_100HALF;
1870 			if (cmd->advertising & ADVERTISED_100baseT_Full)
1871 				bp->flags |= B44_FLAG_ADV_100FULL;
1872 		}
1873 	} else {
1874 		bp->flags |= B44_FLAG_FORCE_LINK;
1875 		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1876 		if (speed == SPEED_100)
1877 			bp->flags |= B44_FLAG_100_BASE_T;
1878 		if (cmd->duplex == DUPLEX_FULL)
1879 			bp->flags |= B44_FLAG_FULL_DUPLEX;
1880 	}
1881 
1882 	if (netif_running(dev))
1883 		b44_setup_phy(bp);
1884 
1885 	spin_unlock_irq(&bp->lock);
1886 
1887 	return 0;
1888 }
1889 
1890 static void b44_get_ringparam(struct net_device *dev,
1891 			      struct ethtool_ringparam *ering)
1892 {
1893 	struct b44 *bp = netdev_priv(dev);
1894 
1895 	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1896 	ering->rx_pending = bp->rx_pending;
1897 
1898 	/* XXX ethtool lacks a tx_max_pending, oops... */
1899 }
1900 
static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

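/*
 * When pause autonegotiation is requested, the link must be renegotiated,
 * so the device is halted and re-initialized; otherwise the requested
 * RX/TX pause state is written straight to the hardware via
 * __b44_set_flow_ctrl().
 */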
static int b44_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

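/*
 * The ethtool statistics are exported as a flat array of u32 counters
 * starting at bp->hw_stats.tx_good_octets (see b44_get_ethtool_stats),
 * so b44_gstrings[] must stay in the same order as the counter members
 * of struct b44_hw_stats.
 */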
static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}

static int b44_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(b44_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}

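/* Only magic-packet wake-up is supported by this driver. */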
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	return 0;
}

static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
};

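/*
 * Only MII ioctls (SIOCGMIIPHY and friends) are serviced, via
 * generic_mii_ioctl(), and only while the interface is running.
 */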
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);
out:
	return err;
}

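/*
 * Fetch board-specific parameters (MAC address, PHY address, DMA
 * translation offset) from the SSB SPROM. On native SSB buses the
 * second Ethernet core uses the et1* SPROM fields instead of et0*.
 */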
static int __devinit b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	/* Some ROMs have buggy PHY addresses with the high
	 * bits set (sign extension?). Truncate them to a
	 * valid PHY address. */
	bp->phy_addr &= 0x1F;

	memcpy(bp->dev->dev_addr, addr, ETH_ALEN);

	if (!is_valid_ether_addr(bp->dev->dev_addr)) {
		pr_err("Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}

static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats		= b44_get_stats,
	.ndo_set_rx_mode	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};

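/*
 * Probe one 44xx Ethernet core: allocate the netdev, power up the SSB
 * bus, constrain DMA to the 30-bit window the core can address, read
 * the invariants from the SPROM, and register the device. The chip is
 * then reset so that the MAC registers become accessible, and the PHY
 * is reset once to detect whether one is actually present.
 */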
static int __devinit b44_init_one(struct ssb_device *sdev,
				  const struct ssb_device_id *ent)
{
	struct net_device *dev;
	struct b44 *bp;
	int err;

	instance++;

	pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll, 64);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->irq = sdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		goto err_out_free_dev;
	}

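	/* The 44xx DMA engine can address only the low 1 GB of physical
	 * memory, hence the 30-bit streaming and coherent DMA masks.
	 */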
	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
	    dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
		dev_err(sdev->dev,
			"Required 30BIT DMA mask unsupported by the system\n");
		err = -ENODEV;	/* err still held 0 from ssb_bus_powerup() */
		goto err_out_powerdown;
	}

	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting\n");
		goto err_out_powerdown;
	}

	netif_carrier_off(dev);

	ssb_set_drvdata(sdev, dev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	/* do a phy reset to test if there is an active phy */
	if (b44_phy_reset(bp) < 0)
		bp->phy_addr = B44_PHY_ADDR_NO_PHY;

	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);

	return 0;

err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	free_netdev(dev);

out:
	return err;
}

static void __devexit b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);

	unregister_netdev(dev);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}

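/*
 * Suspend: stop the timer, halt the MAC and free the rings under the
 * lock, then release the IRQ. If wake-on-LAN is enabled, the core is
 * partially re-initialized just enough to arm magic-packet matching
 * before the device drops to D3hot.
 */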
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}

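/*
 * Resume mirrors suspend: power the bus back up first, then rebuild the
 * rings and re-initialize the hardware before the shared IRQ is
 * re-requested and interrupts are enabled again.
 */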
static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&bp->lock);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	/*
	 * As a shared interrupt, the handler can be called immediately. To be
	 * able to check the interrupt status the hardware must already be
	 * powered back on (b44_init_hw).
	 */
	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_free_rings(bp);
		spin_unlock_irq(&bp->lock);
		return rc;
	}

	netif_device_attach(bp->dev);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}

static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

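/*
 * PCI-hosted SSB buses (e.g. BCM4401 cards) are reached through the
 * ssb_pcihost glue; it is registered/unregistered only when PCI support
 * is compiled in.
 */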
static inline int __init b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}

static inline void __exit b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}

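/*
 * DMA descriptors are synced to the device in units that must cover
 * both a whole descriptor and a whole cache line, hence the max() of
 * the cache alignment and sizeof(struct dma_desc) below.
 */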
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Set up parameters for syncing RX/TX DMA descriptors */
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size,
				   sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}

static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);