/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <m@bues.ch>
 * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Distribute under GPL.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
#include <linux/phy.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>


#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define DRV_DESCRIPTION		"Broadcom 44xx/47xx 10/100 PCI ethernet driver"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			ETH_ZLEN
#define B44_MAX_MTU			ETH_DATA_LEN

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
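
/* TX ring accounting: only tx_pending of the 512 slots are usable, so
 * TX_BUFFS_AVAIL() is the number of free usable slots between the
 * consumer and producer indices.  For example, with tx_pending = 511,
 * tx_cons = 5 and tx_prod = 10, five descriptors are in flight and
 * 5 + 511 - 10 = 506 slots remain free.
 */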

#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");


#ifdef CONFIG_B44_PCI
static const struct pci_device_id b44_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
	{ 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
	{},
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

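/* Reset levels passed to b44_init_hw() (and the related chip-reset
 * path); they select how much of the device state is torn down and
 * reprogrammed.
 */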
#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3
#define B44_CHIP_RESET_FULL	4
#define B44_CHIP_RESET_PARTIAL	5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_sync_size;
static int instance;

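/* ethtool statistics names; _B44() stringifies every counter listed in
 * B44_STAT_REG_DECLARE so the strings stay in sync with the hardware
 * counter list declared in b44.h.
 */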
static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
				   dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
				dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		if (net_ratelimit())
			netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
				   bit, reg, clear ? "clear" : "set");

		return -ENODEV;
	}
	return 0;
}

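/* Program one 6-byte MAC address into the receive CAM.  The address is
 * split across two registers: bytes 2-5 go into CAM_DATA_LO and bytes
 * 0-1 (plus the "valid" bit) into CAM_DATA_HI, then the write is kicked
 * off via CAM_CTRL and we wait for the busy bit to clear.
 */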
static inline void __b44_cam_write(struct b44 *bp,
				   const unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}

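/* MII management access: a read/write is started by composing a single
 * MDIO frame in B44_MDIO_DATA (start bits, opcode, PHY address,
 * register address, turnaround), and completion is signalled by the
 * EMAC_INT_MII bit in B44_EMAC_ISTAT.
 */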
static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;

	return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;

	return __b44_writephy(bp, bp->phy_addr, reg, val);
}

/* miilib interface */
static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
			       int val)
{
	struct b44 *bp = netdev_priv(dev);
	__b44_writephy(bp, phy_id, location, val);
}

static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = bus->priv;
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
				 u16 val)
{
	struct b44 *bp = bus->priv;
	return __b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;
	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			netdev_err(bp->dev, "PHY Reset would not complete\n");
			err = -ENODEV;
		}
	}

	return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)){
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}

#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
	char buf[20];
	u32 val;
	int err;

	/*
	 * workaround for bad hardware design in Linksys WAP54G v1.0
	 * see https://dev.openwrt.org/ticket/146
	 * check and reset bit "isolate"
	 */
	if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
		return;
	if (simple_strtoul(buf, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	b44_wap54g10_workaround(bp);

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}

static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u64 *val;

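	/* The counters in bp->hw_stats are declared in the same order as
	 * the corresponding hardware registers, so a single walking
	 * pointer can accumulate both the TX and RX MIB blocks.
	 */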
	val = &bp->hw_stats.tx_good_octets;
	u64_stats_update_begin(&bp->hw_stats.syncp);

	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	u64_stats_update_end(&bp->hw_stats.syncp);
}

static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		netdev_info(bp->dev, "Link is down\n");
	} else {
		netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
			    (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
			    (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
			    (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		bp->flags |= B44_FLAG_100_BASE_T;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
		if (bmsr & BMSR_JCD)
			netdev_warn(bp->dev, "Jabber detected in PHY\n");
	}
}

static void b44_timer(struct timer_list *t)
{
	struct b44 *bp = from_timer(bp, t, timer);

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;
	unsigned bytes_compl = 0, pkts_compl = 0;

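	/* DMATX_STAT reports the current descriptor as a byte offset into
	 * the ring; convert it to a descriptor index before walking the
	 * completed entries.
	 */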
	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		dma_unmap_single(bp->sdev->dma_dev,
				 rp->mapping,
				 skb->len,
				 DMA_TO_DEVICE);
		rp->skb = NULL;

		bytes_compl += skb->len;
		pkts_compl++;

		dev_consume_skb_irq(skb);
	}

	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  The chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 extra bytes
 * for the RX buffer, DMA map all of it, skb_reserve() the 30 bytes,
 * then point the chip at 30 bytes past where the rx_header will go.
 */
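/* Resulting receive buffer layout, with RX_PKT_OFFSET = RX_HEADER_LEN + 2
 * (the 2-byte pad keeps the IP header of the received frame word-aligned):
 *
 *   [ struct rx_header | pad | received frame (up to 1536 bytes) ]
 */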
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
				 RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Sigh... */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping,
					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
					 RX_PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		bp->force_copybreak = 1;
	}

	rh = (struct rx_header *) skb->data;

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					  src_idx * sizeof(*src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
				   RX_PKT_BUF_SZ,
				   DMA_FROM_DEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
					RX_PKT_BUF_SZ,
					DMA_FROM_DEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			dma_unmap_single(bp->sdev->dma_dev, map,
					 skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = napi_alloc_skb(&bp->napi, len);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}

static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
		bp->istat &= ~ISTAT_RFO;
		b44_disable_ints(bp);
		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		b44_enable_ints(bp);
	}

	return work_done;
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but doesn't mask off the bits.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			netdev_info(dev, "late interrupt\n");
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct b44 *bp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}

static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;
	unsigned long flags;

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_out;
	}

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
		struct sk_buff *bounce_skb;

		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
					 DMA_TO_DEVICE);

		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
					 len, DMA_TO_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_consume_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

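	/* Make sure the descriptor contents are visible to the device
	 * before the producer index is written below.
	 */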
1014adfc5217SJeff Kirsher 	wmb();
1015adfc5217SJeff Kirsher 
1016adfc5217SJeff Kirsher 	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1017adfc5217SJeff Kirsher 	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1018adfc5217SJeff Kirsher 		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1019adfc5217SJeff Kirsher 	if (bp->flags & B44_FLAG_REORDER_BUG)
1020adfc5217SJeff Kirsher 		br32(bp, B44_DMATX_PTR);
1021adfc5217SJeff Kirsher 
10225055544eSHauke Mehrtens 	netdev_sent_queue(dev, skb->len);
10235055544eSHauke Mehrtens 
1024adfc5217SJeff Kirsher 	if (TX_BUFFS_AVAIL(bp) < 1)
1025adfc5217SJeff Kirsher 		netif_stop_queue(dev);
1026adfc5217SJeff Kirsher 
1027adfc5217SJeff Kirsher out_unlock:
1028adfc5217SJeff Kirsher 	spin_unlock_irqrestore(&bp->lock, flags);
1029adfc5217SJeff Kirsher 
1030adfc5217SJeff Kirsher 	return rc;
1031adfc5217SJeff Kirsher 
1032adfc5217SJeff Kirsher err_out:
1033adfc5217SJeff Kirsher 	rc = NETDEV_TX_BUSY;
1034adfc5217SJeff Kirsher 	goto out_unlock;
1035adfc5217SJeff Kirsher }
1036adfc5217SJeff Kirsher 
b44_change_mtu(struct net_device * dev,int new_mtu)1037adfc5217SJeff Kirsher static int b44_change_mtu(struct net_device *dev, int new_mtu)
1038adfc5217SJeff Kirsher {
1039adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
1040adfc5217SJeff Kirsher 
1041adfc5217SJeff Kirsher 	if (!netif_running(dev)) {
1042adfc5217SJeff Kirsher 		/* We'll just catch it later when the
1043adfc5217SJeff Kirsher 		 * device is up'd.
1044adfc5217SJeff Kirsher 		 */
1045adfc5217SJeff Kirsher 		dev->mtu = new_mtu;
1046adfc5217SJeff Kirsher 		return 0;
1047adfc5217SJeff Kirsher 	}
1048adfc5217SJeff Kirsher 
1049adfc5217SJeff Kirsher 	spin_lock_irq(&bp->lock);
1050adfc5217SJeff Kirsher 	b44_halt(bp);
1051adfc5217SJeff Kirsher 	dev->mtu = new_mtu;
1052adfc5217SJeff Kirsher 	b44_init_rings(bp);
1053adfc5217SJeff Kirsher 	b44_init_hw(bp, B44_FULL_RESET);
1054adfc5217SJeff Kirsher 	spin_unlock_irq(&bp->lock);
1055adfc5217SJeff Kirsher 
1056adfc5217SJeff Kirsher 	b44_enable_ints(bp);
1057adfc5217SJeff Kirsher 
1058adfc5217SJeff Kirsher 	return 0;
1059adfc5217SJeff Kirsher }
1060adfc5217SJeff Kirsher 
1061adfc5217SJeff Kirsher /* Free up pending packets in all rx/tx rings.
1062adfc5217SJeff Kirsher  *
1063adfc5217SJeff Kirsher  * The chip has been shut down and the driver detached from
1064adfc5217SJeff Kirsher  * the networking, so no interrupts or new tx packets will
1065adfc5217SJeff Kirsher  * end up in the driver.  bp->lock is not held and we are not
1066adfc5217SJeff Kirsher  * in an interrupt context and thus may sleep.
1067adfc5217SJeff Kirsher  */
b44_free_rings(struct b44 * bp)1068adfc5217SJeff Kirsher static void b44_free_rings(struct b44 *bp)
1069adfc5217SJeff Kirsher {
1070adfc5217SJeff Kirsher 	struct ring_info *rp;
1071adfc5217SJeff Kirsher 	int i;
1072adfc5217SJeff Kirsher 
1073adfc5217SJeff Kirsher 	for (i = 0; i < B44_RX_RING_SIZE; i++) {
1074adfc5217SJeff Kirsher 		rp = &bp->rx_buffers[i];
1075adfc5217SJeff Kirsher 
1076adfc5217SJeff Kirsher 		if (rp->skb == NULL)
1077adfc5217SJeff Kirsher 			continue;
1078adfc5217SJeff Kirsher 		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1079adfc5217SJeff Kirsher 				 DMA_FROM_DEVICE);
1080adfc5217SJeff Kirsher 		dev_kfree_skb_any(rp->skb);
1081adfc5217SJeff Kirsher 		rp->skb = NULL;
1082adfc5217SJeff Kirsher 	}
1083adfc5217SJeff Kirsher 
1084adfc5217SJeff Kirsher 	/* XXX needs changes once NETIF_F_SG is set... */
1085adfc5217SJeff Kirsher 	for (i = 0; i < B44_TX_RING_SIZE; i++) {
1086adfc5217SJeff Kirsher 		rp = &bp->tx_buffers[i];
1087adfc5217SJeff Kirsher 
1088adfc5217SJeff Kirsher 		if (rp->skb == NULL)
1089adfc5217SJeff Kirsher 			continue;
1090adfc5217SJeff Kirsher 		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1091adfc5217SJeff Kirsher 				 DMA_TO_DEVICE);
1092adfc5217SJeff Kirsher 		dev_kfree_skb_any(rp->skb);
1093adfc5217SJeff Kirsher 		rp->skb = NULL;
1094adfc5217SJeff Kirsher 	}
1095adfc5217SJeff Kirsher }
1096adfc5217SJeff Kirsher 
1097adfc5217SJeff Kirsher /* Initialize tx/rx rings for packet processing.
1098adfc5217SJeff Kirsher  *
1099adfc5217SJeff Kirsher  * The chip has been shut down and the driver detached from
1100adfc5217SJeff Kirsher  * the networking stack, so no interrupts or new tx packets will
1101adfc5217SJeff Kirsher  * end up in the driver.
1102adfc5217SJeff Kirsher  */
1103adfc5217SJeff Kirsher static void b44_init_rings(struct b44 *bp)
1104adfc5217SJeff Kirsher {
1105adfc5217SJeff Kirsher 	int i;
1106adfc5217SJeff Kirsher 
1107adfc5217SJeff Kirsher 	b44_free_rings(bp);
1108adfc5217SJeff Kirsher 
1109adfc5217SJeff Kirsher 	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1110adfc5217SJeff Kirsher 	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1111adfc5217SJeff Kirsher 
1112adfc5217SJeff Kirsher 	if (bp->flags & B44_FLAG_RX_RING_HACK)
1113adfc5217SJeff Kirsher 		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
1114adfc5217SJeff Kirsher 					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1115adfc5217SJeff Kirsher 
1116adfc5217SJeff Kirsher 	if (bp->flags & B44_FLAG_TX_RING_HACK)
1117adfc5217SJeff Kirsher 		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
1118adfc5217SJeff Kirsher 					   DMA_TABLE_BYTES, DMA_TO_DEVICE);
1119adfc5217SJeff Kirsher 
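	/* Pre-fill the RX ring; if an allocation fails the loop below simply
	 * stops, leaving the ring partially filled (presumably it is topped
	 * up again from the RX completion path).
	 */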
1120adfc5217SJeff Kirsher 	for (i = 0; i < bp->rx_pending; i++) {
1121adfc5217SJeff Kirsher 		if (b44_alloc_rx_skb(bp, -1, i) < 0)
1122adfc5217SJeff Kirsher 			break;
1123adfc5217SJeff Kirsher 	}
1124adfc5217SJeff Kirsher }
1125adfc5217SJeff Kirsher 
1126adfc5217SJeff Kirsher /*
1127adfc5217SJeff Kirsher  * Must not be invoked with interrupt sources disabled and
1128adfc5217SJeff Kirsher  * the hardware shut down.
1129adfc5217SJeff Kirsher  */
1130adfc5217SJeff Kirsher static void b44_free_consistent(struct b44 *bp)
1131adfc5217SJeff Kirsher {
1132adfc5217SJeff Kirsher 	kfree(bp->rx_buffers);
1133adfc5217SJeff Kirsher 	bp->rx_buffers = NULL;
1134adfc5217SJeff Kirsher 	kfree(bp->tx_buffers);
1135adfc5217SJeff Kirsher 	bp->tx_buffers = NULL;
1136adfc5217SJeff Kirsher 	if (bp->rx_ring) {
1137adfc5217SJeff Kirsher 		if (bp->flags & B44_FLAG_RX_RING_HACK) {
1138adfc5217SJeff Kirsher 			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1139adfc5217SJeff Kirsher 					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1140adfc5217SJeff Kirsher 			kfree(bp->rx_ring);
1141adfc5217SJeff Kirsher 		} else
1142adfc5217SJeff Kirsher 			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1143adfc5217SJeff Kirsher 					  bp->rx_ring, bp->rx_ring_dma);
1144adfc5217SJeff Kirsher 		bp->rx_ring = NULL;
1145adfc5217SJeff Kirsher 		bp->flags &= ~B44_FLAG_RX_RING_HACK;
1146adfc5217SJeff Kirsher 	}
1147adfc5217SJeff Kirsher 	if (bp->tx_ring) {
1148adfc5217SJeff Kirsher 		if (bp->flags & B44_FLAG_TX_RING_HACK) {
1149adfc5217SJeff Kirsher 			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1150adfc5217SJeff Kirsher 					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
1151adfc5217SJeff Kirsher 			kfree(bp->tx_ring);
1152adfc5217SJeff Kirsher 		} else
1153adfc5217SJeff Kirsher 			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1154adfc5217SJeff Kirsher 					  bp->tx_ring, bp->tx_ring_dma);
1155adfc5217SJeff Kirsher 		bp->tx_ring = NULL;
1156adfc5217SJeff Kirsher 		bp->flags &= ~B44_FLAG_TX_RING_HACK;
1157adfc5217SJeff Kirsher 	}
1158adfc5217SJeff Kirsher }
1159adfc5217SJeff Kirsher 
1160adfc5217SJeff Kirsher /*
1161adfc5217SJeff Kirsher  * Must not be invoked with interrupt sources disabled and
1162adfc5217SJeff Kirsher  * the hardware shut down.  Can sleep.
1163adfc5217SJeff Kirsher  */
1164adfc5217SJeff Kirsher static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1165adfc5217SJeff Kirsher {
1166adfc5217SJeff Kirsher 	int size;
1167adfc5217SJeff Kirsher 
1168adfc5217SJeff Kirsher 	size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
1169adfc5217SJeff Kirsher 	bp->rx_buffers = kzalloc(size, gfp);
1170adfc5217SJeff Kirsher 	if (!bp->rx_buffers)
1171adfc5217SJeff Kirsher 		goto out_err;
1172adfc5217SJeff Kirsher 
1173adfc5217SJeff Kirsher 	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1174adfc5217SJeff Kirsher 	bp->tx_buffers = kzalloc(size, gfp);
1175adfc5217SJeff Kirsher 	if (!bp->tx_buffers)
1176adfc5217SJeff Kirsher 		goto out_err;
1177adfc5217SJeff Kirsher 
1178adfc5217SJeff Kirsher 	size = DMA_TABLE_BYTES;
1179adfc5217SJeff Kirsher 	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1180adfc5217SJeff Kirsher 					 &bp->rx_ring_dma, gfp);
1181adfc5217SJeff Kirsher 	if (!bp->rx_ring) {
11828b58cba4SCai Huoqing 		/* Allocation may have failed due to dma_alloc_coherent
1183adfc5217SJeff Kirsher 		   insisting on use of GFP_DMA, which is more restrictive
1184adfc5217SJeff Kirsher 		   than necessary...  */
1185adfc5217SJeff Kirsher 		struct dma_desc *rx_ring;
1186adfc5217SJeff Kirsher 		dma_addr_t rx_ring_dma;
1187adfc5217SJeff Kirsher 
1188adfc5217SJeff Kirsher 		rx_ring = kzalloc(size, gfp);
1189adfc5217SJeff Kirsher 		if (!rx_ring)
1190adfc5217SJeff Kirsher 			goto out_err;
1191adfc5217SJeff Kirsher 
1192adfc5217SJeff Kirsher 		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1193adfc5217SJeff Kirsher 					     DMA_TABLE_BYTES,
1194adfc5217SJeff Kirsher 					     DMA_BIDIRECTIONAL);
1195adfc5217SJeff Kirsher 
1196adfc5217SJeff Kirsher 		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1197adfc5217SJeff Kirsher 			rx_ring_dma + size > DMA_BIT_MASK(30)) {
1198adfc5217SJeff Kirsher 			kfree(rx_ring);
1199adfc5217SJeff Kirsher 			goto out_err;
1200adfc5217SJeff Kirsher 		}
1201adfc5217SJeff Kirsher 
1202adfc5217SJeff Kirsher 		bp->rx_ring = rx_ring;
1203adfc5217SJeff Kirsher 		bp->rx_ring_dma = rx_ring_dma;
1204adfc5217SJeff Kirsher 		bp->flags |= B44_FLAG_RX_RING_HACK;
1205adfc5217SJeff Kirsher 	}
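	/* The *_RING_HACK flags mark descriptor tables obtained via
	 * kzalloc() + dma_map_single() instead of dma_alloc_coherent();
	 * b44_init_rings() and b44_free_consistent() must then use
	 * dma_sync/dma_unmap rather than dma_free_coherent() for them.
	 */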
1206adfc5217SJeff Kirsher 
1207adfc5217SJeff Kirsher 	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1208adfc5217SJeff Kirsher 					 &bp->tx_ring_dma, gfp);
1209adfc5217SJeff Kirsher 	if (!bp->tx_ring) {
1210adfc5217SJeff Kirsher 		/* Allocation may have failed due to dma_alloc_coherent
1211adfc5217SJeff Kirsher 		   insisting on use of GFP_DMA, which is more restrictive
1212adfc5217SJeff Kirsher 		   than necessary...  */
1213adfc5217SJeff Kirsher 		struct dma_desc *tx_ring;
1214adfc5217SJeff Kirsher 		dma_addr_t tx_ring_dma;
1215adfc5217SJeff Kirsher 
1216adfc5217SJeff Kirsher 		tx_ring = kzalloc(size, gfp);
1217adfc5217SJeff Kirsher 		if (!tx_ring)
1218adfc5217SJeff Kirsher 			goto out_err;
1219adfc5217SJeff Kirsher 
1220adfc5217SJeff Kirsher 		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1221adfc5217SJeff Kirsher 					     DMA_TABLE_BYTES,
1222adfc5217SJeff Kirsher 					     DMA_TO_DEVICE);
1223adfc5217SJeff Kirsher 
1224adfc5217SJeff Kirsher 		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1225adfc5217SJeff Kirsher 			tx_ring_dma + size > DMA_BIT_MASK(30)) {
1226adfc5217SJeff Kirsher 			kfree(tx_ring);
1227adfc5217SJeff Kirsher 			goto out_err;
1228adfc5217SJeff Kirsher 		}
1229adfc5217SJeff Kirsher 
1230adfc5217SJeff Kirsher 		bp->tx_ring = tx_ring;
1231adfc5217SJeff Kirsher 		bp->tx_ring_dma = tx_ring_dma;
1232adfc5217SJeff Kirsher 		bp->flags |= B44_FLAG_TX_RING_HACK;
1233adfc5217SJeff Kirsher 	}
1234adfc5217SJeff Kirsher 
1235adfc5217SJeff Kirsher 	return 0;
1236adfc5217SJeff Kirsher 
1237adfc5217SJeff Kirsher out_err:
1238adfc5217SJeff Kirsher 	b44_free_consistent(bp);
1239adfc5217SJeff Kirsher 	return -ENOMEM;
1240adfc5217SJeff Kirsher }
1241adfc5217SJeff Kirsher 
1242adfc5217SJeff Kirsher /* bp->lock is held. */
1243adfc5217SJeff Kirsher static void b44_clear_stats(struct b44 *bp)
1244adfc5217SJeff Kirsher {
1245adfc5217SJeff Kirsher 	unsigned long reg;
1246adfc5217SJeff Kirsher 
1247adfc5217SJeff Kirsher 	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1248adfc5217SJeff Kirsher 	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1249adfc5217SJeff Kirsher 		br32(bp, reg);
1250adfc5217SJeff Kirsher 	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1251adfc5217SJeff Kirsher 		br32(bp, reg);
1252adfc5217SJeff Kirsher }
1253adfc5217SJeff Kirsher 
1254adfc5217SJeff Kirsher /* bp->lock is held. */
1255adfc5217SJeff Kirsher static void b44_chip_reset(struct b44 *bp, int reset_kind)
1256adfc5217SJeff Kirsher {
1257adfc5217SJeff Kirsher 	struct ssb_device *sdev = bp->sdev;
1258adfc5217SJeff Kirsher 	bool was_enabled;
1259adfc5217SJeff Kirsher 
1260adfc5217SJeff Kirsher 	was_enabled = ssb_device_is_enabled(bp->sdev);
1261adfc5217SJeff Kirsher 
1262adfc5217SJeff Kirsher 	ssb_device_enable(bp->sdev, 0);
1263adfc5217SJeff Kirsher 	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
1264adfc5217SJeff Kirsher 
1265adfc5217SJeff Kirsher 	if (was_enabled) {
1266adfc5217SJeff Kirsher 		bw32(bp, B44_RCV_LAZY, 0);
1267adfc5217SJeff Kirsher 		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1268adfc5217SJeff Kirsher 		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1269adfc5217SJeff Kirsher 		bw32(bp, B44_DMATX_CTRL, 0);
1270adfc5217SJeff Kirsher 		bp->tx_prod = bp->tx_cons = 0;
1271adfc5217SJeff Kirsher 		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1272adfc5217SJeff Kirsher 			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1273adfc5217SJeff Kirsher 				     100, 0);
1274adfc5217SJeff Kirsher 		}
1275adfc5217SJeff Kirsher 		bw32(bp, B44_DMARX_CTRL, 0);
1276adfc5217SJeff Kirsher 		bp->rx_prod = bp->rx_cons = 0;
1277adfc5217SJeff Kirsher 	}
1278adfc5217SJeff Kirsher 
1279adfc5217SJeff Kirsher 	b44_clear_stats(bp);
1280adfc5217SJeff Kirsher 
1281adfc5217SJeff Kirsher 	/*
1282adfc5217SJeff Kirsher 	 * Don't enable the PHY if we are doing a partial reset;
1283adfc5217SJeff Kirsher 	 * we are probably going to power down.
1284adfc5217SJeff Kirsher 	 */
1285adfc5217SJeff Kirsher 	if (reset_kind == B44_CHIP_RESET_PARTIAL)
1286adfc5217SJeff Kirsher 		return;
1287adfc5217SJeff Kirsher 
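	/* Program the MDIO clock divider: on a native SSB bus it is derived
	 * from the backplane clock and B44_MDC_RATIO, while on PCI a fixed
	 * divider (the 0x0d value below) is used.
	 */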
1288adfc5217SJeff Kirsher 	switch (sdev->bus->bustype) {
1289adfc5217SJeff Kirsher 	case SSB_BUSTYPE_SSB:
1290adfc5217SJeff Kirsher 		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1291adfc5217SJeff Kirsher 		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
1292adfc5217SJeff Kirsher 					B44_MDC_RATIO)
1293adfc5217SJeff Kirsher 		     & MDIO_CTRL_MAXF_MASK)));
1294adfc5217SJeff Kirsher 		break;
1295adfc5217SJeff Kirsher 	case SSB_BUSTYPE_PCI:
1296adfc5217SJeff Kirsher 		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1297adfc5217SJeff Kirsher 		     (0x0d & MDIO_CTRL_MAXF_MASK)));
1298adfc5217SJeff Kirsher 		break;
1299adfc5217SJeff Kirsher 	case SSB_BUSTYPE_PCMCIA:
1300adfc5217SJeff Kirsher 	case SSB_BUSTYPE_SDIO:
1301adfc5217SJeff Kirsher 		WARN_ON(1); /* A device with this bus does not exist. */
1302adfc5217SJeff Kirsher 		break;
1303adfc5217SJeff Kirsher 	}
1304adfc5217SJeff Kirsher 
1305adfc5217SJeff Kirsher 	br32(bp, B44_MDIO_CTRL);
1306adfc5217SJeff Kirsher 
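	/* DEVCTRL_IPP apparently signals an internal PHY; when it is clear,
	 * the external PHY is selected via ENET_CTRL_EPSEL and the
	 * B44_FLAG_EXTERNAL_PHY flag is set for the rest of the driver.
	 */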
1307adfc5217SJeff Kirsher 	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1308adfc5217SJeff Kirsher 		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1309adfc5217SJeff Kirsher 		br32(bp, B44_ENET_CTRL);
1310d6194195SHauke Mehrtens 		bp->flags |= B44_FLAG_EXTERNAL_PHY;
1311adfc5217SJeff Kirsher 	} else {
1312adfc5217SJeff Kirsher 		u32 val = br32(bp, B44_DEVCTRL);
1313adfc5217SJeff Kirsher 
1314adfc5217SJeff Kirsher 		if (val & DEVCTRL_EPR) {
1315adfc5217SJeff Kirsher 			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1316adfc5217SJeff Kirsher 			br32(bp, B44_DEVCTRL);
1317adfc5217SJeff Kirsher 			udelay(100);
1318adfc5217SJeff Kirsher 		}
1319d6194195SHauke Mehrtens 		bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
1320adfc5217SJeff Kirsher 	}
1321adfc5217SJeff Kirsher }
1322adfc5217SJeff Kirsher 
1323adfc5217SJeff Kirsher /* bp->lock is held. */
1324adfc5217SJeff Kirsher static void b44_halt(struct b44 *bp)
1325adfc5217SJeff Kirsher {
1326adfc5217SJeff Kirsher 	b44_disable_ints(bp);
1327adfc5217SJeff Kirsher 	/* reset PHY */
1328adfc5217SJeff Kirsher 	b44_phy_reset(bp);
1329adfc5217SJeff Kirsher 	/* power down PHY */
1330adfc5217SJeff Kirsher 	netdev_info(bp->dev, "powering down PHY\n");
1331adfc5217SJeff Kirsher 	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1332adfc5217SJeff Kirsher 	/* now reset the chip, but without enabling the MAC & PHY
1333adfc5217SJeff Kirsher 	 * part of it. This has to be done _after_ we shut down the PHY. */
1334bea69c47SHauke Mehrtens 	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1335bea69c47SHauke Mehrtens 		b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1336bea69c47SHauke Mehrtens 	else
1337adfc5217SJeff Kirsher 		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1338adfc5217SJeff Kirsher }
1339adfc5217SJeff Kirsher 
1340adfc5217SJeff Kirsher /* bp->lock is held. */
1341adfc5217SJeff Kirsher static void __b44_set_mac_addr(struct b44 *bp)
1342adfc5217SJeff Kirsher {
1343adfc5217SJeff Kirsher 	bw32(bp, B44_CAM_CTRL, 0);
1344adfc5217SJeff Kirsher 	if (!(bp->dev->flags & IFF_PROMISC)) {
1345adfc5217SJeff Kirsher 		u32 val;
1346adfc5217SJeff Kirsher 
1347adfc5217SJeff Kirsher 		__b44_cam_write(bp, bp->dev->dev_addr, 0);
1348adfc5217SJeff Kirsher 		val = br32(bp, B44_CAM_CTRL);
1349adfc5217SJeff Kirsher 		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1350adfc5217SJeff Kirsher 	}
1351adfc5217SJeff Kirsher }
1352adfc5217SJeff Kirsher 
1353adfc5217SJeff Kirsher static int b44_set_mac_addr(struct net_device *dev, void *p)
1354adfc5217SJeff Kirsher {
1355adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
1356adfc5217SJeff Kirsher 	struct sockaddr *addr = p;
1357adfc5217SJeff Kirsher 	u32 val;
1358adfc5217SJeff Kirsher 
1359adfc5217SJeff Kirsher 	if (netif_running(dev))
1360adfc5217SJeff Kirsher 		return -EBUSY;
1361adfc5217SJeff Kirsher 
1362adfc5217SJeff Kirsher 	if (!is_valid_ether_addr(addr->sa_data))
1363adfc5217SJeff Kirsher 		return -EINVAL;
1364adfc5217SJeff Kirsher 
1365a05e4c0aSJakub Kicinski 	eth_hw_addr_set(dev, addr->sa_data);
1366adfc5217SJeff Kirsher 
1367adfc5217SJeff Kirsher 	spin_lock_irq(&bp->lock);
1368adfc5217SJeff Kirsher 
1369adfc5217SJeff Kirsher 	val = br32(bp, B44_RXCONFIG);
1370adfc5217SJeff Kirsher 	if (!(val & RXCONFIG_CAM_ABSENT))
1371adfc5217SJeff Kirsher 		__b44_set_mac_addr(bp);
1372adfc5217SJeff Kirsher 
1373adfc5217SJeff Kirsher 	spin_unlock_irq(&bp->lock);
1374adfc5217SJeff Kirsher 
1375adfc5217SJeff Kirsher 	return 0;
1376adfc5217SJeff Kirsher }
1377adfc5217SJeff Kirsher 
1378adfc5217SJeff Kirsher /* Called at device open time to get the chip ready for
1379adfc5217SJeff Kirsher  * packet processing.  Invoked with bp->lock held.
1380adfc5217SJeff Kirsher  */
1381adfc5217SJeff Kirsher static void __b44_set_rx_mode(struct net_device *);
1382adfc5217SJeff Kirsher static void b44_init_hw(struct b44 *bp, int reset_kind)
1383adfc5217SJeff Kirsher {
1384adfc5217SJeff Kirsher 	u32 val;
1385adfc5217SJeff Kirsher 
1386adfc5217SJeff Kirsher 	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1387adfc5217SJeff Kirsher 	if (reset_kind == B44_FULL_RESET) {
1388adfc5217SJeff Kirsher 		b44_phy_reset(bp);
1389adfc5217SJeff Kirsher 		b44_setup_phy(bp);
1390adfc5217SJeff Kirsher 	}
1391adfc5217SJeff Kirsher 
1392adfc5217SJeff Kirsher 	/* Enable CRC32, set proper LED modes and power on PHY */
1393adfc5217SJeff Kirsher 	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1394adfc5217SJeff Kirsher 	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1395adfc5217SJeff Kirsher 
1396adfc5217SJeff Kirsher 	/* This sets the MAC address too.  */
1397adfc5217SJeff Kirsher 	__b44_set_rx_mode(bp->dev);
1398adfc5217SJeff Kirsher 
1399adfc5217SJeff Kirsher 	/* MTU + eth header + possible VLAN tag + struct rx_header */
1400adfc5217SJeff Kirsher 	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1401adfc5217SJeff Kirsher 	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1402adfc5217SJeff Kirsher 
1403adfc5217SJeff Kirsher 	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1404adfc5217SJeff Kirsher 	if (reset_kind == B44_PARTIAL_RESET) {
1405adfc5217SJeff Kirsher 		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1406adfc5217SJeff Kirsher 				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1407adfc5217SJeff Kirsher 	} else {
1408adfc5217SJeff Kirsher 		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1409adfc5217SJeff Kirsher 		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1410adfc5217SJeff Kirsher 		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1411adfc5217SJeff Kirsher 				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1412adfc5217SJeff Kirsher 		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1413adfc5217SJeff Kirsher 
1414adfc5217SJeff Kirsher 		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1415adfc5217SJeff Kirsher 		bp->rx_prod = bp->rx_pending;
1416adfc5217SJeff Kirsher 
1417adfc5217SJeff Kirsher 		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1418adfc5217SJeff Kirsher 	}
1419adfc5217SJeff Kirsher 
1420adfc5217SJeff Kirsher 	val = br32(bp, B44_ENET_CTRL);
1421adfc5217SJeff Kirsher 	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
14225055544eSHauke Mehrtens 
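	/* Reset the byte-queue-limit accounting to match the freshly
	 * initialized TX ring; this pairs with the netdev_sent_queue() call
	 * in the transmit path.
	 */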
14235055544eSHauke Mehrtens 	netdev_reset_queue(bp->dev);
1424adfc5217SJeff Kirsher }
1425adfc5217SJeff Kirsher 
1426adfc5217SJeff Kirsher static int b44_open(struct net_device *dev)
1427adfc5217SJeff Kirsher {
1428adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
1429adfc5217SJeff Kirsher 	int err;
1430adfc5217SJeff Kirsher 
1431adfc5217SJeff Kirsher 	err = b44_alloc_consistent(bp, GFP_KERNEL);
1432adfc5217SJeff Kirsher 	if (err)
1433adfc5217SJeff Kirsher 		goto out;
1434adfc5217SJeff Kirsher 
1435adfc5217SJeff Kirsher 	napi_enable(&bp->napi);
1436adfc5217SJeff Kirsher 
1437adfc5217SJeff Kirsher 	b44_init_rings(bp);
1438adfc5217SJeff Kirsher 	b44_init_hw(bp, B44_FULL_RESET);
1439adfc5217SJeff Kirsher 
1440adfc5217SJeff Kirsher 	b44_check_phy(bp);
1441adfc5217SJeff Kirsher 
1442adfc5217SJeff Kirsher 	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1443adfc5217SJeff Kirsher 	if (unlikely(err < 0)) {
1444adfc5217SJeff Kirsher 		napi_disable(&bp->napi);
1445adfc5217SJeff Kirsher 		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1446adfc5217SJeff Kirsher 		b44_free_rings(bp);
1447adfc5217SJeff Kirsher 		b44_free_consistent(bp);
1448adfc5217SJeff Kirsher 		goto out;
1449adfc5217SJeff Kirsher 	}
1450adfc5217SJeff Kirsher 
1451e99e88a9SKees Cook 	timer_setup(&bp->timer, b44_timer, 0);
1452adfc5217SJeff Kirsher 	bp->timer.expires = jiffies + HZ;
1453adfc5217SJeff Kirsher 	add_timer(&bp->timer);
1454adfc5217SJeff Kirsher 
1455adfc5217SJeff Kirsher 	b44_enable_ints(bp);
145625d54fe5SHauke Mehrtens 
145725d54fe5SHauke Mehrtens 	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
145851f141beSPhilippe Reynes 		phy_start(dev->phydev);
145925d54fe5SHauke Mehrtens 
1460adfc5217SJeff Kirsher 	netif_start_queue(dev);
1461adfc5217SJeff Kirsher out:
1462adfc5217SJeff Kirsher 	return err;
1463adfc5217SJeff Kirsher }
1464adfc5217SJeff Kirsher 
1465adfc5217SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
1466adfc5217SJeff Kirsher /*
1467adfc5217SJeff Kirsher  * Polling receive - used by netconsole and other diagnostic tools
1468adfc5217SJeff Kirsher  * to allow network i/o with interrupts disabled.
1469adfc5217SJeff Kirsher  */
1470adfc5217SJeff Kirsher static void b44_poll_controller(struct net_device *dev)
1471adfc5217SJeff Kirsher {
1472adfc5217SJeff Kirsher 	disable_irq(dev->irq);
1473adfc5217SJeff Kirsher 	b44_interrupt(dev->irq, dev);
1474adfc5217SJeff Kirsher 	enable_irq(dev->irq);
1475adfc5217SJeff Kirsher }
1476adfc5217SJeff Kirsher #endif
1477adfc5217SJeff Kirsher 
1478adfc5217SJeff Kirsher static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1479adfc5217SJeff Kirsher {
1480adfc5217SJeff Kirsher 	u32 i;
1481adfc5217SJeff Kirsher 	u32 *pattern = (u32 *) pp;
1482adfc5217SJeff Kirsher 
1483adfc5217SJeff Kirsher 	for (i = 0; i < bytes; i += sizeof(u32)) {
1484adfc5217SJeff Kirsher 		bw32(bp, B44_FILT_ADDR, table_offset + i);
1485adfc5217SJeff Kirsher 		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1486adfc5217SJeff Kirsher 	}
1487adfc5217SJeff Kirsher }
1488adfc5217SJeff Kirsher 
148976660757SJakub Kicinski static int b44_magic_pattern(const u8 *macaddr, u8 *ppattern, u8 *pmask,
149076660757SJakub Kicinski 			     int offset)
1491adfc5217SJeff Kirsher {
1492adfc5217SJeff Kirsher 	int magicsync = 6;
1493adfc5217SJeff Kirsher 	int k, j, len = offset;
1494adfc5217SJeff Kirsher 	int ethaddr_bytes = ETH_ALEN;
1495adfc5217SJeff Kirsher 
1496adfc5217SJeff Kirsher 	memset(ppattern + offset, 0xff, magicsync);
1497f11421baSFenghua Yu 	for (j = 0; j < magicsync; j++) {
1498f11421baSFenghua Yu 		pmask[len >> 3] |= BIT(len & 7);
1499f11421baSFenghua Yu 		len++;
1500f11421baSFenghua Yu 	}
1501adfc5217SJeff Kirsher 
1502adfc5217SJeff Kirsher 	for (j = 0; j < B44_MAX_PATTERNS; j++) {
1503adfc5217SJeff Kirsher 		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1504adfc5217SJeff Kirsher 			ethaddr_bytes = ETH_ALEN;
1505adfc5217SJeff Kirsher 		else
1506adfc5217SJeff Kirsher 			ethaddr_bytes = B44_PATTERN_SIZE - len;
1507adfc5217SJeff Kirsher 		if (ethaddr_bytes <= 0)
1508adfc5217SJeff Kirsher 			break;
1509adfc5217SJeff Kirsher 		for (k = 0; k < ethaddr_bytes; k++) {
1510adfc5217SJeff Kirsher 			ppattern[offset + magicsync +
1511adfc5217SJeff Kirsher 				(j * ETH_ALEN) + k] = macaddr[k];
1512f11421baSFenghua Yu 			pmask[len >> 3] |= BIT(len & 7);
1513f11421baSFenghua Yu 			len++;
1514adfc5217SJeff Kirsher 		}
1515adfc5217SJeff Kirsher 	}
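	/* Return the pattern length minus one, as the hardware expects
	 * (see the B44_WKUP_LEN programming in b44_setup_pseudo_magicp()).
	 */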
1516adfc5217SJeff Kirsher 	return len - 1;
1517adfc5217SJeff Kirsher }
1518adfc5217SJeff Kirsher 
1519adfc5217SJeff Kirsher /* Setup magic packet patterns in the b44 WOL
1520adfc5217SJeff Kirsher  * pattern matching filter.
1521adfc5217SJeff Kirsher  */
1522adfc5217SJeff Kirsher static void b44_setup_pseudo_magicp(struct b44 *bp)
1523adfc5217SJeff Kirsher {
1524adfc5217SJeff Kirsher 
1525adfc5217SJeff Kirsher 	u32 val;
1526adfc5217SJeff Kirsher 	int plen0, plen1, plen2;
1527adfc5217SJeff Kirsher 	u8 *pwol_pattern;
1528adfc5217SJeff Kirsher 	u8 pwol_mask[B44_PMASK_SIZE];
1529adfc5217SJeff Kirsher 
1530adfc5217SJeff Kirsher 	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1531b2adaca9SJoe Perches 	if (!pwol_pattern)
1532adfc5217SJeff Kirsher 		return;
1533adfc5217SJeff Kirsher 
1534adfc5217SJeff Kirsher 	/* IPv4 magic packet pattern - pattern 0. */
1535adfc5217SJeff Kirsher 	memset(pwol_mask, 0, B44_PMASK_SIZE);
1536adfc5217SJeff Kirsher 	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1537adfc5217SJeff Kirsher 				  B44_ETHIPV4UDP_HLEN);
1538adfc5217SJeff Kirsher 
1539adfc5217SJeff Kirsher 	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1540adfc5217SJeff Kirsher 	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1541adfc5217SJeff Kirsher 
1542adfc5217SJeff Kirsher 	/* Raw Ethernet II magic packet pattern - pattern 1 */
1543adfc5217SJeff Kirsher 	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1544adfc5217SJeff Kirsher 	memset(pwol_mask, 0, B44_PMASK_SIZE);
1545adfc5217SJeff Kirsher 	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1546adfc5217SJeff Kirsher 				  ETH_HLEN);
1547adfc5217SJeff Kirsher 
1548adfc5217SJeff Kirsher 	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1549adfc5217SJeff Kirsher 		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
1550adfc5217SJeff Kirsher 	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1551adfc5217SJeff Kirsher 		       B44_PMASK_BASE + B44_PMASK_SIZE);
1552adfc5217SJeff Kirsher 
1553adfc5217SJeff Kirsher 	/* IPv6 magic packet pattern - pattern 2 */
1554adfc5217SJeff Kirsher 	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1555adfc5217SJeff Kirsher 	memset(pwol_mask, 0, B44_PMASK_SIZE);
1556adfc5217SJeff Kirsher 	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1557adfc5217SJeff Kirsher 				  B44_ETHIPV6UDP_HLEN);
1558adfc5217SJeff Kirsher 
1559adfc5217SJeff Kirsher 	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1560adfc5217SJeff Kirsher 		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1561adfc5217SJeff Kirsher 	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1562adfc5217SJeff Kirsher 		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1563adfc5217SJeff Kirsher 
1564adfc5217SJeff Kirsher 	kfree(pwol_pattern);
1565adfc5217SJeff Kirsher 
1566adfc5217SJeff Kirsher 	/* set these patterns' lengths: one less than each real length */
1567adfc5217SJeff Kirsher 	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1568adfc5217SJeff Kirsher 	bw32(bp, B44_WKUP_LEN, val);
1569adfc5217SJeff Kirsher 
1570adfc5217SJeff Kirsher 	/* enable wakeup pattern matching */
1571adfc5217SJeff Kirsher 	val = br32(bp, B44_DEVCTRL);
1572adfc5217SJeff Kirsher 	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1573adfc5217SJeff Kirsher 
1574adfc5217SJeff Kirsher }
1575adfc5217SJeff Kirsher 
1576adfc5217SJeff Kirsher #ifdef CONFIG_B44_PCI
1577adfc5217SJeff Kirsher static void b44_setup_wol_pci(struct b44 *bp)
1578adfc5217SJeff Kirsher {
1579adfc5217SJeff Kirsher 	u16 val;
1580adfc5217SJeff Kirsher 
1581adfc5217SJeff Kirsher 	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1582adfc5217SJeff Kirsher 		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1583adfc5217SJeff Kirsher 		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1584adfc5217SJeff Kirsher 		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1585adfc5217SJeff Kirsher 	}
1586adfc5217SJeff Kirsher }
1587adfc5217SJeff Kirsher #else
1588adfc5217SJeff Kirsher static inline void b44_setup_wol_pci(struct b44 *bp) { }
1589adfc5217SJeff Kirsher #endif /* CONFIG_B44_PCI */
1590adfc5217SJeff Kirsher 
1591adfc5217SJeff Kirsher static void b44_setup_wol(struct b44 *bp)
1592adfc5217SJeff Kirsher {
1593adfc5217SJeff Kirsher 	u32 val;
1594adfc5217SJeff Kirsher 
1595adfc5217SJeff Kirsher 	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1596adfc5217SJeff Kirsher 
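	/* Cores flagged B44_FLAG_B0_ANDLATER can match magic packets in
	 * hardware (DEVCTRL_MPM); older cores fall back to the pseudo-magic
	 * pattern filter below.
	 */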
1597adfc5217SJeff Kirsher 	if (bp->flags & B44_FLAG_B0_ANDLATER) {
1598adfc5217SJeff Kirsher 
1599adfc5217SJeff Kirsher 		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1600adfc5217SJeff Kirsher 
1601adfc5217SJeff Kirsher 		val = bp->dev->dev_addr[2] << 24 |
1602adfc5217SJeff Kirsher 			bp->dev->dev_addr[3] << 16 |
1603adfc5217SJeff Kirsher 			bp->dev->dev_addr[4] << 8 |
1604adfc5217SJeff Kirsher 			bp->dev->dev_addr[5];
1605adfc5217SJeff Kirsher 		bw32(bp, B44_ADDR_LO, val);
1606adfc5217SJeff Kirsher 
1607adfc5217SJeff Kirsher 		val = bp->dev->dev_addr[0] << 8 |
1608adfc5217SJeff Kirsher 			bp->dev->dev_addr[1];
1609adfc5217SJeff Kirsher 		bw32(bp, B44_ADDR_HI, val);
1610adfc5217SJeff Kirsher 
1611adfc5217SJeff Kirsher 		val = br32(bp, B44_DEVCTRL);
1612adfc5217SJeff Kirsher 		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1613adfc5217SJeff Kirsher 
1614adfc5217SJeff Kirsher 	} else {
1615adfc5217SJeff Kirsher 		b44_setup_pseudo_magicp(bp);
1616adfc5217SJeff Kirsher 	}
1617adfc5217SJeff Kirsher 	b44_setup_wol_pci(bp);
1618adfc5217SJeff Kirsher }
1619adfc5217SJeff Kirsher 
1620adfc5217SJeff Kirsher static int b44_close(struct net_device *dev)
1621adfc5217SJeff Kirsher {
1622adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
1623adfc5217SJeff Kirsher 
1624adfc5217SJeff Kirsher 	netif_stop_queue(dev);
1625adfc5217SJeff Kirsher 
162625d54fe5SHauke Mehrtens 	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
162751f141beSPhilippe Reynes 		phy_stop(dev->phydev);
162825d54fe5SHauke Mehrtens 
1629adfc5217SJeff Kirsher 	napi_disable(&bp->napi);
1630adfc5217SJeff Kirsher 
1631adfc5217SJeff Kirsher 	del_timer_sync(&bp->timer);
1632adfc5217SJeff Kirsher 
1633adfc5217SJeff Kirsher 	spin_lock_irq(&bp->lock);
1634adfc5217SJeff Kirsher 
1635adfc5217SJeff Kirsher 	b44_halt(bp);
1636adfc5217SJeff Kirsher 	b44_free_rings(bp);
1637adfc5217SJeff Kirsher 	netif_carrier_off(dev);
1638adfc5217SJeff Kirsher 
1639adfc5217SJeff Kirsher 	spin_unlock_irq(&bp->lock);
1640adfc5217SJeff Kirsher 
1641adfc5217SJeff Kirsher 	free_irq(dev->irq, dev);
1642adfc5217SJeff Kirsher 
1643adfc5217SJeff Kirsher 	if (bp->flags & B44_FLAG_WOL_ENABLE) {
1644adfc5217SJeff Kirsher 		b44_init_hw(bp, B44_PARTIAL_RESET);
1645adfc5217SJeff Kirsher 		b44_setup_wol(bp);
1646adfc5217SJeff Kirsher 	}
1647adfc5217SJeff Kirsher 
1648adfc5217SJeff Kirsher 	b44_free_consistent(bp);
1649adfc5217SJeff Kirsher 
1650adfc5217SJeff Kirsher 	return 0;
1651adfc5217SJeff Kirsher }
1652adfc5217SJeff Kirsher 
1653bc1f4470Sstephen hemminger static void b44_get_stats64(struct net_device *dev,
1654eeda8585SKevin Groeneveld 			    struct rtnl_link_stats64 *nstat)
1655adfc5217SJeff Kirsher {
1656adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
1657adfc5217SJeff Kirsher 	struct b44_hw_stats *hwstat = &bp->hw_stats;
1658eeda8585SKevin Groeneveld 	unsigned int start;
1659adfc5217SJeff Kirsher 
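	/* Snapshot the 64-bit counters under the u64_stats seqcount so a
	 * concurrent stats update cannot yield a torn or mixed reading.
	 */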
1660eeda8585SKevin Groeneveld 	do {
1661068c38adSThomas Gleixner 		start = u64_stats_fetch_begin(&hwstat->syncp);
1662eeda8585SKevin Groeneveld 
1663eeda8585SKevin Groeneveld 		/* Convert HW stats into rtnl_link_stats64 stats. */
1664adfc5217SJeff Kirsher 		nstat->rx_packets = hwstat->rx_pkts;
1665adfc5217SJeff Kirsher 		nstat->tx_packets = hwstat->tx_pkts;
1666adfc5217SJeff Kirsher 		nstat->rx_bytes   = hwstat->rx_octets;
1667adfc5217SJeff Kirsher 		nstat->tx_bytes   = hwstat->tx_octets;
1668adfc5217SJeff Kirsher 		nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1669adfc5217SJeff Kirsher 				     hwstat->tx_oversize_pkts +
1670adfc5217SJeff Kirsher 				     hwstat->tx_underruns +
1671adfc5217SJeff Kirsher 				     hwstat->tx_excessive_cols +
1672adfc5217SJeff Kirsher 				     hwstat->tx_late_cols);
16730bc9b73bSMark Einon 		nstat->multicast  = hwstat->rx_multicast_pkts;
1674adfc5217SJeff Kirsher 		nstat->collisions = hwstat->tx_total_cols;
1675adfc5217SJeff Kirsher 
1676adfc5217SJeff Kirsher 		nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1677adfc5217SJeff Kirsher 					   hwstat->rx_undersize);
1678adfc5217SJeff Kirsher 		nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1679adfc5217SJeff Kirsher 		nstat->rx_frame_errors  = hwstat->rx_align_errs;
1680adfc5217SJeff Kirsher 		nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1681adfc5217SJeff Kirsher 		nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1682adfc5217SJeff Kirsher 					   hwstat->rx_oversize_pkts +
1683adfc5217SJeff Kirsher 					   hwstat->rx_missed_pkts +
1684adfc5217SJeff Kirsher 					   hwstat->rx_crc_align_errs +
1685adfc5217SJeff Kirsher 					   hwstat->rx_undersize +
1686adfc5217SJeff Kirsher 					   hwstat->rx_crc_errs +
1687adfc5217SJeff Kirsher 					   hwstat->rx_align_errs +
1688adfc5217SJeff Kirsher 					   hwstat->rx_symbol_errs);
1689adfc5217SJeff Kirsher 
1690adfc5217SJeff Kirsher 		nstat->tx_aborted_errors = hwstat->tx_underruns;
1691adfc5217SJeff Kirsher #if 0
1692adfc5217SJeff Kirsher 		/* Carrier lost counter seems to be broken for some devices */
1693adfc5217SJeff Kirsher 		nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1694adfc5217SJeff Kirsher #endif
1695068c38adSThomas Gleixner 	} while (u64_stats_fetch_retry(&hwstat->syncp, start));
1696adfc5217SJeff Kirsher 
1697adfc5217SJeff Kirsher }
1698adfc5217SJeff Kirsher 
1699adfc5217SJeff Kirsher static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1700adfc5217SJeff Kirsher {
1701adfc5217SJeff Kirsher 	struct netdev_hw_addr *ha;
1702adfc5217SJeff Kirsher 	int i, num_ents;
1703adfc5217SJeff Kirsher 
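	/* CAM entry 0 holds the unicast MAC (see __b44_set_mac_addr()), so
	 * multicast addresses start at entry 1; the value returned is the
	 * first unused CAM index, which the caller clears up to entry 63.
	 */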
1704adfc5217SJeff Kirsher 	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1705adfc5217SJeff Kirsher 	i = 0;
1706adfc5217SJeff Kirsher 	netdev_for_each_mc_addr(ha, dev) {
1707adfc5217SJeff Kirsher 		if (i == num_ents)
1708adfc5217SJeff Kirsher 			break;
1709adfc5217SJeff Kirsher 		__b44_cam_write(bp, ha->addr, i++ + 1);
1710adfc5217SJeff Kirsher 	}
1711adfc5217SJeff Kirsher 	return i+1;
1712adfc5217SJeff Kirsher }
1713adfc5217SJeff Kirsher 
1714adfc5217SJeff Kirsher static void __b44_set_rx_mode(struct net_device *dev)
1715adfc5217SJeff Kirsher {
1716adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
1717adfc5217SJeff Kirsher 	u32 val;
1718adfc5217SJeff Kirsher 
1719adfc5217SJeff Kirsher 	val = br32(bp, B44_RXCONFIG);
1720adfc5217SJeff Kirsher 	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1721adfc5217SJeff Kirsher 	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1722adfc5217SJeff Kirsher 		val |= RXCONFIG_PROMISC;
1723adfc5217SJeff Kirsher 		bw32(bp, B44_RXCONFIG, val);
1724adfc5217SJeff Kirsher 	} else {
1725adfc5217SJeff Kirsher 		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1726adfc5217SJeff Kirsher 		int i = 1;
1727adfc5217SJeff Kirsher 
1728adfc5217SJeff Kirsher 		__b44_set_mac_addr(bp);
1729adfc5217SJeff Kirsher 
1730adfc5217SJeff Kirsher 		if ((dev->flags & IFF_ALLMULTI) ||
1731adfc5217SJeff Kirsher 		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1732adfc5217SJeff Kirsher 			val |= RXCONFIG_ALLMULTI;
1733adfc5217SJeff Kirsher 		else
1734adfc5217SJeff Kirsher 			i = __b44_load_mcast(bp, dev);
1735adfc5217SJeff Kirsher 
1736adfc5217SJeff Kirsher 		for (; i < 64; i++)
1737adfc5217SJeff Kirsher 			__b44_cam_write(bp, zero, i);
1738adfc5217SJeff Kirsher 
1739adfc5217SJeff Kirsher 		bw32(bp, B44_RXCONFIG, val);
1740adfc5217SJeff Kirsher 		val = br32(bp, B44_CAM_CTRL);
1741adfc5217SJeff Kirsher 		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1742adfc5217SJeff Kirsher 	}
1743adfc5217SJeff Kirsher }
1744adfc5217SJeff Kirsher 
1745adfc5217SJeff Kirsher static void b44_set_rx_mode(struct net_device *dev)
1746adfc5217SJeff Kirsher {
1747adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
1748adfc5217SJeff Kirsher 
1749adfc5217SJeff Kirsher 	spin_lock_irq(&bp->lock);
1750adfc5217SJeff Kirsher 	__b44_set_rx_mode(dev);
1751adfc5217SJeff Kirsher 	spin_unlock_irq(&bp->lock);
1752adfc5217SJeff Kirsher }
1753adfc5217SJeff Kirsher 
1754adfc5217SJeff Kirsher static u32 b44_get_msglevel(struct net_device *dev)
1755adfc5217SJeff Kirsher {
1756adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
1757adfc5217SJeff Kirsher 	return bp->msg_enable;
1758adfc5217SJeff Kirsher }
1759adfc5217SJeff Kirsher 
1760adfc5217SJeff Kirsher static void b44_set_msglevel(struct net_device *dev, u32 value)
1761adfc5217SJeff Kirsher {
1762adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
1763adfc5217SJeff Kirsher 	bp->msg_enable = value;
1764adfc5217SJeff Kirsher }
1765adfc5217SJeff Kirsher 
1766adfc5217SJeff Kirsher static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1767adfc5217SJeff Kirsher {
1768adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
1769adfc5217SJeff Kirsher 	struct ssb_bus *bus = bp->sdev->bus;
1770adfc5217SJeff Kirsher 
1771f029c781SWolfram Sang 	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1772adfc5217SJeff Kirsher 	switch (bus->bustype) {
1773adfc5217SJeff Kirsher 	case SSB_BUSTYPE_PCI:
1774f029c781SWolfram Sang 		strscpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1775adfc5217SJeff Kirsher 		break;
1776adfc5217SJeff Kirsher 	case SSB_BUSTYPE_SSB:
1777f029c781SWolfram Sang 		strscpy(info->bus_info, "SSB", sizeof(info->bus_info));
1778adfc5217SJeff Kirsher 		break;
1779adfc5217SJeff Kirsher 	case SSB_BUSTYPE_PCMCIA:
1780adfc5217SJeff Kirsher 	case SSB_BUSTYPE_SDIO:
1781adfc5217SJeff Kirsher 		WARN_ON(1); /* A device with this bus does not exist. */
1782adfc5217SJeff Kirsher 		break;
1783adfc5217SJeff Kirsher 	}
1784adfc5217SJeff Kirsher }
1785adfc5217SJeff Kirsher 
1786adfc5217SJeff Kirsher static int b44_nway_reset(struct net_device *dev)
1787adfc5217SJeff Kirsher {
1788adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
1789adfc5217SJeff Kirsher 	u32 bmcr;
1790adfc5217SJeff Kirsher 	int r;
1791adfc5217SJeff Kirsher 
1792adfc5217SJeff Kirsher 	spin_lock_irq(&bp->lock);
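	/* BMCR is read twice, presumably to discard a stale latched value so
	 * the BMCR_ANENABLE test below sees the current state.
	 */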
1793adfc5217SJeff Kirsher 	b44_readphy(bp, MII_BMCR, &bmcr);
1794adfc5217SJeff Kirsher 	b44_readphy(bp, MII_BMCR, &bmcr);
1795adfc5217SJeff Kirsher 	r = -EINVAL;
17969944d203SArtem Chernyshev 	if (bmcr & BMCR_ANENABLE)
17979944d203SArtem Chernyshev 		r = b44_writephy(bp, MII_BMCR,
1798adfc5217SJeff Kirsher 				 bmcr | BMCR_ANRESTART);
1799adfc5217SJeff Kirsher 	spin_unlock_irq(&bp->lock);
1800adfc5217SJeff Kirsher 
1801adfc5217SJeff Kirsher 	return r;
1802adfc5217SJeff Kirsher }
1803adfc5217SJeff Kirsher 
18042406e5d4SPhilippe Reynes static int b44_get_link_ksettings(struct net_device *dev,
18052406e5d4SPhilippe Reynes 				  struct ethtool_link_ksettings *cmd)
1806adfc5217SJeff Kirsher {
1807adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
18082406e5d4SPhilippe Reynes 	u32 supported, advertising;
1809adfc5217SJeff Kirsher 
181086f4ea63SHauke Mehrtens 	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
181151f141beSPhilippe Reynes 		BUG_ON(!dev->phydev);
18125514174fSyuval.shaia@oracle.com 		phy_ethtool_ksettings_get(dev->phydev, cmd);
18135514174fSyuval.shaia@oracle.com 
18145514174fSyuval.shaia@oracle.com 		return 0;
181586f4ea63SHauke Mehrtens 	}
181686f4ea63SHauke Mehrtens 
18172406e5d4SPhilippe Reynes 	supported = (SUPPORTED_Autoneg);
18182406e5d4SPhilippe Reynes 	supported |= (SUPPORTED_100baseT_Half |
1819adfc5217SJeff Kirsher 		      SUPPORTED_100baseT_Full |
1820adfc5217SJeff Kirsher 		      SUPPORTED_10baseT_Half |
1821adfc5217SJeff Kirsher 		      SUPPORTED_10baseT_Full |
1822adfc5217SJeff Kirsher 		      SUPPORTED_MII);
1823adfc5217SJeff Kirsher 
18242406e5d4SPhilippe Reynes 	advertising = 0;
1825adfc5217SJeff Kirsher 	if (bp->flags & B44_FLAG_ADV_10HALF)
18262406e5d4SPhilippe Reynes 		advertising |= ADVERTISED_10baseT_Half;
1827adfc5217SJeff Kirsher 	if (bp->flags & B44_FLAG_ADV_10FULL)
18282406e5d4SPhilippe Reynes 		advertising |= ADVERTISED_10baseT_Full;
1829adfc5217SJeff Kirsher 	if (bp->flags & B44_FLAG_ADV_100HALF)
18302406e5d4SPhilippe Reynes 		advertising |= ADVERTISED_100baseT_Half;
1831adfc5217SJeff Kirsher 	if (bp->flags & B44_FLAG_ADV_100FULL)
18322406e5d4SPhilippe Reynes 		advertising |= ADVERTISED_100baseT_Full;
18332406e5d4SPhilippe Reynes 	advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
18342406e5d4SPhilippe Reynes 	cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
18352406e5d4SPhilippe Reynes 		SPEED_100 : SPEED_10;
18362406e5d4SPhilippe Reynes 	cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1837adfc5217SJeff Kirsher 		DUPLEX_FULL : DUPLEX_HALF;
18382406e5d4SPhilippe Reynes 	cmd->base.port = 0;
18392406e5d4SPhilippe Reynes 	cmd->base.phy_address = bp->phy_addr;
18402406e5d4SPhilippe Reynes 	cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1841adfc5217SJeff Kirsher 		AUTONEG_DISABLE : AUTONEG_ENABLE;
18422406e5d4SPhilippe Reynes 	if (cmd->base.autoneg == AUTONEG_ENABLE)
18432406e5d4SPhilippe Reynes 		advertising |= ADVERTISED_Autoneg;
18442406e5d4SPhilippe Reynes 
18452406e5d4SPhilippe Reynes 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
18462406e5d4SPhilippe Reynes 						supported);
18472406e5d4SPhilippe Reynes 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
18482406e5d4SPhilippe Reynes 						advertising);
18492406e5d4SPhilippe Reynes 
1850adfc5217SJeff Kirsher 	if (!netif_running(dev)) {
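		/* Interface is down: report no speed and DUPLEX_UNKNOWN (0xff)
		 * rather than a stale link state.
		 */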
18512406e5d4SPhilippe Reynes 		cmd->base.speed = 0;
18522406e5d4SPhilippe Reynes 		cmd->base.duplex = 0xff;
1853adfc5217SJeff Kirsher 	}
18542406e5d4SPhilippe Reynes 
1855adfc5217SJeff Kirsher 	return 0;
1856adfc5217SJeff Kirsher }
1857adfc5217SJeff Kirsher 
18582406e5d4SPhilippe Reynes static int b44_set_link_ksettings(struct net_device *dev,
18592406e5d4SPhilippe Reynes 				  const struct ethtool_link_ksettings *cmd)
1860adfc5217SJeff Kirsher {
1861adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
186286f4ea63SHauke Mehrtens 	u32 speed;
186386f4ea63SHauke Mehrtens 	int ret;
18642406e5d4SPhilippe Reynes 	u32 advertising;
186586f4ea63SHauke Mehrtens 
186686f4ea63SHauke Mehrtens 	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
186751f141beSPhilippe Reynes 		BUG_ON(!dev->phydev);
186886f4ea63SHauke Mehrtens 		spin_lock_irq(&bp->lock);
186986f4ea63SHauke Mehrtens 		if (netif_running(dev))
187086f4ea63SHauke Mehrtens 			b44_setup_phy(bp);
187186f4ea63SHauke Mehrtens 
18722406e5d4SPhilippe Reynes 		ret = phy_ethtool_ksettings_set(dev->phydev, cmd);
187386f4ea63SHauke Mehrtens 
187486f4ea63SHauke Mehrtens 		spin_unlock_irq(&bp->lock);
187586f4ea63SHauke Mehrtens 
187686f4ea63SHauke Mehrtens 		return ret;
187786f4ea63SHauke Mehrtens 	}
187886f4ea63SHauke Mehrtens 
18792406e5d4SPhilippe Reynes 	speed = cmd->base.speed;
18802406e5d4SPhilippe Reynes 
18812406e5d4SPhilippe Reynes 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
18822406e5d4SPhilippe Reynes 						cmd->link_modes.advertising);
1883adfc5217SJeff Kirsher 
1884adfc5217SJeff Kirsher 	/* We do not support gigabit. */
18852406e5d4SPhilippe Reynes 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
18862406e5d4SPhilippe Reynes 		if (advertising &
1887adfc5217SJeff Kirsher 		    (ADVERTISED_1000baseT_Half |
1888adfc5217SJeff Kirsher 		     ADVERTISED_1000baseT_Full))
1889adfc5217SJeff Kirsher 			return -EINVAL;
1890adfc5217SJeff Kirsher 	} else if ((speed != SPEED_100 &&
1891adfc5217SJeff Kirsher 		    speed != SPEED_10) ||
18922406e5d4SPhilippe Reynes 		   (cmd->base.duplex != DUPLEX_HALF &&
18932406e5d4SPhilippe Reynes 		    cmd->base.duplex != DUPLEX_FULL)) {
1894adfc5217SJeff Kirsher 			return -EINVAL;
1895adfc5217SJeff Kirsher 	}
1896adfc5217SJeff Kirsher 
1897adfc5217SJeff Kirsher 	spin_lock_irq(&bp->lock);
1898adfc5217SJeff Kirsher 
18992406e5d4SPhilippe Reynes 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
1900adfc5217SJeff Kirsher 		bp->flags &= ~(B44_FLAG_FORCE_LINK |
1901adfc5217SJeff Kirsher 			       B44_FLAG_100_BASE_T |
1902adfc5217SJeff Kirsher 			       B44_FLAG_FULL_DUPLEX |
1903adfc5217SJeff Kirsher 			       B44_FLAG_ADV_10HALF |
1904adfc5217SJeff Kirsher 			       B44_FLAG_ADV_10FULL |
1905adfc5217SJeff Kirsher 			       B44_FLAG_ADV_100HALF |
1906adfc5217SJeff Kirsher 			       B44_FLAG_ADV_100FULL);
19072406e5d4SPhilippe Reynes 		if (advertising == 0) {
1908adfc5217SJeff Kirsher 			bp->flags |= (B44_FLAG_ADV_10HALF |
1909adfc5217SJeff Kirsher 				      B44_FLAG_ADV_10FULL |
1910adfc5217SJeff Kirsher 				      B44_FLAG_ADV_100HALF |
1911adfc5217SJeff Kirsher 				      B44_FLAG_ADV_100FULL);
1912adfc5217SJeff Kirsher 		} else {
19132406e5d4SPhilippe Reynes 			if (advertising & ADVERTISED_10baseT_Half)
1914adfc5217SJeff Kirsher 				bp->flags |= B44_FLAG_ADV_10HALF;
19152406e5d4SPhilippe Reynes 			if (advertising & ADVERTISED_10baseT_Full)
1916adfc5217SJeff Kirsher 				bp->flags |= B44_FLAG_ADV_10FULL;
19172406e5d4SPhilippe Reynes 			if (advertising & ADVERTISED_100baseT_Half)
1918adfc5217SJeff Kirsher 				bp->flags |= B44_FLAG_ADV_100HALF;
19192406e5d4SPhilippe Reynes 			if (advertising & ADVERTISED_100baseT_Full)
1920adfc5217SJeff Kirsher 				bp->flags |= B44_FLAG_ADV_100FULL;
1921adfc5217SJeff Kirsher 		}
1922adfc5217SJeff Kirsher 	} else {
1923adfc5217SJeff Kirsher 		bp->flags |= B44_FLAG_FORCE_LINK;
1924adfc5217SJeff Kirsher 		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1925adfc5217SJeff Kirsher 		if (speed == SPEED_100)
1926adfc5217SJeff Kirsher 			bp->flags |= B44_FLAG_100_BASE_T;
19272406e5d4SPhilippe Reynes 		if (cmd->base.duplex == DUPLEX_FULL)
1928adfc5217SJeff Kirsher 			bp->flags |= B44_FLAG_FULL_DUPLEX;
1929adfc5217SJeff Kirsher 	}
1930adfc5217SJeff Kirsher 
1931adfc5217SJeff Kirsher 	if (netif_running(dev))
1932adfc5217SJeff Kirsher 		b44_setup_phy(bp);
1933adfc5217SJeff Kirsher 
1934adfc5217SJeff Kirsher 	spin_unlock_irq(&bp->lock);
1935adfc5217SJeff Kirsher 
1936adfc5217SJeff Kirsher 	return 0;
1937adfc5217SJeff Kirsher }
1938adfc5217SJeff Kirsher 
1939adfc5217SJeff Kirsher static void b44_get_ringparam(struct net_device *dev,
194074624944SHao Chen 			      struct ethtool_ringparam *ering,
194174624944SHao Chen 			      struct kernel_ethtool_ringparam *kernel_ering,
194274624944SHao Chen 			      struct netlink_ext_ack *extack)
1943adfc5217SJeff Kirsher {
1944adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
1945adfc5217SJeff Kirsher 
1946adfc5217SJeff Kirsher 	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1947adfc5217SJeff Kirsher 	ering->rx_pending = bp->rx_pending;
1948adfc5217SJeff Kirsher 
1949adfc5217SJeff Kirsher 	/* XXX ethtool lacks a tx_max_pending, oops... */
1950adfc5217SJeff Kirsher }
1951adfc5217SJeff Kirsher 
1952adfc5217SJeff Kirsher static int b44_set_ringparam(struct net_device *dev,
195374624944SHao Chen 			     struct ethtool_ringparam *ering,
195474624944SHao Chen 			     struct kernel_ethtool_ringparam *kernel_ering,
195574624944SHao Chen 			     struct netlink_ext_ack *extack)
1956adfc5217SJeff Kirsher {
1957adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
1958adfc5217SJeff Kirsher 
1959adfc5217SJeff Kirsher 	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1960adfc5217SJeff Kirsher 	    (ering->rx_mini_pending != 0) ||
1961adfc5217SJeff Kirsher 	    (ering->rx_jumbo_pending != 0) ||
1962adfc5217SJeff Kirsher 	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
1963adfc5217SJeff Kirsher 		return -EINVAL;
1964adfc5217SJeff Kirsher 
1965adfc5217SJeff Kirsher 	spin_lock_irq(&bp->lock);
1966adfc5217SJeff Kirsher 
1967adfc5217SJeff Kirsher 	bp->rx_pending = ering->rx_pending;
1968adfc5217SJeff Kirsher 	bp->tx_pending = ering->tx_pending;
1969adfc5217SJeff Kirsher 
1970adfc5217SJeff Kirsher 	b44_halt(bp);
1971adfc5217SJeff Kirsher 	b44_init_rings(bp);
1972adfc5217SJeff Kirsher 	b44_init_hw(bp, B44_FULL_RESET);
1973adfc5217SJeff Kirsher 	netif_wake_queue(bp->dev);
1974adfc5217SJeff Kirsher 	spin_unlock_irq(&bp->lock);
1975adfc5217SJeff Kirsher 
1976adfc5217SJeff Kirsher 	b44_enable_ints(bp);
1977adfc5217SJeff Kirsher 
1978adfc5217SJeff Kirsher 	return 0;
1979adfc5217SJeff Kirsher }
1980adfc5217SJeff Kirsher 
1981adfc5217SJeff Kirsher static void b44_get_pauseparam(struct net_device *dev,
1982adfc5217SJeff Kirsher 				struct ethtool_pauseparam *epause)
1983adfc5217SJeff Kirsher {
1984adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
1985adfc5217SJeff Kirsher 
1986adfc5217SJeff Kirsher 	epause->autoneg =
1987adfc5217SJeff Kirsher 		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1988adfc5217SJeff Kirsher 	epause->rx_pause =
1989adfc5217SJeff Kirsher 		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
1990adfc5217SJeff Kirsher 	epause->tx_pause =
1991adfc5217SJeff Kirsher 		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
1992adfc5217SJeff Kirsher }
1993adfc5217SJeff Kirsher 
1994adfc5217SJeff Kirsher static int b44_set_pauseparam(struct net_device *dev,
1995adfc5217SJeff Kirsher 				struct ethtool_pauseparam *epause)
1996adfc5217SJeff Kirsher {
1997adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
1998adfc5217SJeff Kirsher 
1999adfc5217SJeff Kirsher 	spin_lock_irq(&bp->lock);
2000adfc5217SJeff Kirsher 	if (epause->autoneg)
2001adfc5217SJeff Kirsher 		bp->flags |= B44_FLAG_PAUSE_AUTO;
2002adfc5217SJeff Kirsher 	else
2003adfc5217SJeff Kirsher 		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
2004adfc5217SJeff Kirsher 	if (epause->rx_pause)
2005adfc5217SJeff Kirsher 		bp->flags |= B44_FLAG_RX_PAUSE;
2006adfc5217SJeff Kirsher 	else
2007adfc5217SJeff Kirsher 		bp->flags &= ~B44_FLAG_RX_PAUSE;
2008adfc5217SJeff Kirsher 	if (epause->tx_pause)
2009adfc5217SJeff Kirsher 		bp->flags |= B44_FLAG_TX_PAUSE;
2010adfc5217SJeff Kirsher 	else
2011adfc5217SJeff Kirsher 		bp->flags &= ~B44_FLAG_TX_PAUSE;
2012*fefe98d3SPeter Münster 	if (netif_running(dev)) {
2013adfc5217SJeff Kirsher 		if (bp->flags & B44_FLAG_PAUSE_AUTO) {
2014adfc5217SJeff Kirsher 			b44_halt(bp);
2015adfc5217SJeff Kirsher 			b44_init_rings(bp);
2016adfc5217SJeff Kirsher 			b44_init_hw(bp, B44_FULL_RESET);
2017adfc5217SJeff Kirsher 		} else {
2018adfc5217SJeff Kirsher 			__b44_set_flow_ctrl(bp, bp->flags);
2019adfc5217SJeff Kirsher 		}
2020*fefe98d3SPeter Münster 	}
2021adfc5217SJeff Kirsher 	spin_unlock_irq(&bp->lock);
2022adfc5217SJeff Kirsher 
2023adfc5217SJeff Kirsher 	b44_enable_ints(bp);
2024adfc5217SJeff Kirsher 
2025adfc5217SJeff Kirsher 	return 0;
2026adfc5217SJeff Kirsher }
2027adfc5217SJeff Kirsher 
2028adfc5217SJeff Kirsher static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2029adfc5217SJeff Kirsher {
2030adfc5217SJeff Kirsher 	switch (stringset) {
2031adfc5217SJeff Kirsher 	case ETH_SS_STATS:
2032adfc5217SJeff Kirsher 		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
2033adfc5217SJeff Kirsher 		break;
2034adfc5217SJeff Kirsher 	}
2035adfc5217SJeff Kirsher }
2036adfc5217SJeff Kirsher 
2037adfc5217SJeff Kirsher static int b44_get_sset_count(struct net_device *dev, int sset)
2038adfc5217SJeff Kirsher {
2039adfc5217SJeff Kirsher 	switch (sset) {
2040adfc5217SJeff Kirsher 	case ETH_SS_STATS:
2041adfc5217SJeff Kirsher 		return ARRAY_SIZE(b44_gstrings);
2042adfc5217SJeff Kirsher 	default:
2043adfc5217SJeff Kirsher 		return -EOPNOTSUPP;
2044adfc5217SJeff Kirsher 	}
2045adfc5217SJeff Kirsher }
2046adfc5217SJeff Kirsher 
2047adfc5217SJeff Kirsher static void b44_get_ethtool_stats(struct net_device *dev,
2048adfc5217SJeff Kirsher 				  struct ethtool_stats *stats, u64 *data)
2049adfc5217SJeff Kirsher {
2050adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
2051eeda8585SKevin Groeneveld 	struct b44_hw_stats *hwstat = &bp->hw_stats;
2052eeda8585SKevin Groeneveld 	u64 *data_src, *data_dst;
2053eeda8585SKevin Groeneveld 	unsigned int start;
2054adfc5217SJeff Kirsher 	u32 i;
2055adfc5217SJeff Kirsher 
2056adfc5217SJeff Kirsher 	spin_lock_irq(&bp->lock);
2057adfc5217SJeff Kirsher 	b44_stats_update(bp);
2058eeda8585SKevin Groeneveld 	spin_unlock_irq(&bp->lock);
2059eeda8585SKevin Groeneveld 
2060eeda8585SKevin Groeneveld 	do {
2061eeda8585SKevin Groeneveld 		data_src = &hwstat->tx_good_octets;
2062eeda8585SKevin Groeneveld 		data_dst = data;
2063068c38adSThomas Gleixner 		start = u64_stats_fetch_begin(&hwstat->syncp);
2064adfc5217SJeff Kirsher 
2065adfc5217SJeff Kirsher 		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
2066eeda8585SKevin Groeneveld 			*data_dst++ = *data_src++;
2067adfc5217SJeff Kirsher 
2068068c38adSThomas Gleixner 	} while (u64_stats_fetch_retry(&hwstat->syncp, start));
2069adfc5217SJeff Kirsher }
2070adfc5217SJeff Kirsher 
2071adfc5217SJeff Kirsher static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2072adfc5217SJeff Kirsher {
2073adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
2074adfc5217SJeff Kirsher 
2075adfc5217SJeff Kirsher 	wol->supported = WAKE_MAGIC;
2076adfc5217SJeff Kirsher 	if (bp->flags & B44_FLAG_WOL_ENABLE)
2077adfc5217SJeff Kirsher 		wol->wolopts = WAKE_MAGIC;
2078adfc5217SJeff Kirsher 	else
2079adfc5217SJeff Kirsher 		wol->wolopts = 0;
2080adfc5217SJeff Kirsher 	memset(&wol->sopass, 0, sizeof(wol->sopass));
2081adfc5217SJeff Kirsher }
2082adfc5217SJeff Kirsher 
2083adfc5217SJeff Kirsher static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2084adfc5217SJeff Kirsher {
2085adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
2086adfc5217SJeff Kirsher 
2087adfc5217SJeff Kirsher 	spin_lock_irq(&bp->lock);
2088adfc5217SJeff Kirsher 	if (wol->wolopts & WAKE_MAGIC)
2089adfc5217SJeff Kirsher 		bp->flags |= B44_FLAG_WOL_ENABLE;
2090adfc5217SJeff Kirsher 	else
2091adfc5217SJeff Kirsher 		bp->flags &= ~B44_FLAG_WOL_ENABLE;
2092adfc5217SJeff Kirsher 	spin_unlock_irq(&bp->lock);
2093adfc5217SJeff Kirsher 
20945580373fSAndrey Skvortsov 	device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
2095adfc5217SJeff Kirsher 	return 0;
2096adfc5217SJeff Kirsher }
2097adfc5217SJeff Kirsher 
2098adfc5217SJeff Kirsher static const struct ethtool_ops b44_ethtool_ops = {
2099adfc5217SJeff Kirsher 	.get_drvinfo		= b44_get_drvinfo,
2100adfc5217SJeff Kirsher 	.nway_reset		= b44_nway_reset,
2101adfc5217SJeff Kirsher 	.get_link		= ethtool_op_get_link,
2102adfc5217SJeff Kirsher 	.get_wol		= b44_get_wol,
2103adfc5217SJeff Kirsher 	.set_wol		= b44_set_wol,
2104adfc5217SJeff Kirsher 	.get_ringparam		= b44_get_ringparam,
2105adfc5217SJeff Kirsher 	.set_ringparam		= b44_set_ringparam,
2106adfc5217SJeff Kirsher 	.get_pauseparam		= b44_get_pauseparam,
2107adfc5217SJeff Kirsher 	.set_pauseparam		= b44_set_pauseparam,
2108adfc5217SJeff Kirsher 	.get_msglevel		= b44_get_msglevel,
2109adfc5217SJeff Kirsher 	.set_msglevel		= b44_set_msglevel,
2110adfc5217SJeff Kirsher 	.get_strings		= b44_get_strings,
2111adfc5217SJeff Kirsher 	.get_sset_count		= b44_get_sset_count,
2112adfc5217SJeff Kirsher 	.get_ethtool_stats	= b44_get_ethtool_stats,
21132406e5d4SPhilippe Reynes 	.get_link_ksettings	= b44_get_link_ksettings,
21142406e5d4SPhilippe Reynes 	.set_link_ksettings	= b44_set_link_ksettings,
2115adfc5217SJeff Kirsher };
2116adfc5217SJeff Kirsher 
2117adfc5217SJeff Kirsher static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2118adfc5217SJeff Kirsher {
2119adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
2120adfc5217SJeff Kirsher 	int err = -EINVAL;
2121adfc5217SJeff Kirsher 
2122adfc5217SJeff Kirsher 	if (!netif_running(dev))
2123adfc5217SJeff Kirsher 		goto out;
2124adfc5217SJeff Kirsher 
2125adfc5217SJeff Kirsher 	spin_lock_irq(&bp->lock);
212686f4ea63SHauke Mehrtens 	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
212751f141beSPhilippe Reynes 		BUG_ON(!dev->phydev);
212851f141beSPhilippe Reynes 		err = phy_mii_ioctl(dev->phydev, ifr, cmd);
212986f4ea63SHauke Mehrtens 	} else {
213086f4ea63SHauke Mehrtens 		err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
213186f4ea63SHauke Mehrtens 	}
2132adfc5217SJeff Kirsher 	spin_unlock_irq(&bp->lock);
2133adfc5217SJeff Kirsher out:
2134adfc5217SJeff Kirsher 	return err;
2135adfc5217SJeff Kirsher }
2136adfc5217SJeff Kirsher 
213723971887SBill Pemberton static int b44_get_invariants(struct b44 *bp)
2138adfc5217SJeff Kirsher {
2139adfc5217SJeff Kirsher 	struct ssb_device *sdev = bp->sdev;
2140adfc5217SJeff Kirsher 	int err = 0;
2141adfc5217SJeff Kirsher 	u8 *addr;
2142adfc5217SJeff Kirsher 
2143adfc5217SJeff Kirsher 	bp->dma_offset = ssb_dma_translation(sdev);
2144adfc5217SJeff Kirsher 
2145adfc5217SJeff Kirsher 	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2146adfc5217SJeff Kirsher 	    instance > 1) {
2147adfc5217SJeff Kirsher 		addr = sdev->bus->sprom.et1mac;
2148adfc5217SJeff Kirsher 		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2149adfc5217SJeff Kirsher 	} else {
2150adfc5217SJeff Kirsher 		addr = sdev->bus->sprom.et0mac;
2151adfc5217SJeff Kirsher 		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2152adfc5217SJeff Kirsher 	}
2153adfc5217SJeff Kirsher 	/* Some ROMs have buggy PHY addresses with the high
2154adfc5217SJeff Kirsher 	 * bits set (sign extension?). Truncate them to a
2155adfc5217SJeff Kirsher 	 * valid PHY address. */
2156adfc5217SJeff Kirsher 	bp->phy_addr &= 0x1F;
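	/*
	 * Worked example of the mask above (the SPROM value is hypothetical):
	 * a buggy et0phyaddr of 0xffe1 becomes 0xffe1 & 0x1f == 0x01, i.e. a
	 * legal MDIO address in the 0-31 range.
	 */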
2157adfc5217SJeff Kirsher 
2158a96d317fSJakub Kicinski 	eth_hw_addr_set(bp->dev, addr);
2159adfc5217SJeff Kirsher 
2160adfc5217SJeff Kirsher 	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2161adfc5217SJeff Kirsher 		pr_err("Invalid MAC address found in EEPROM\n");
2162adfc5217SJeff Kirsher 		return -EINVAL;
2163adfc5217SJeff Kirsher 	}
2164adfc5217SJeff Kirsher 
2165adfc5217SJeff Kirsher 	bp->imask = IMASK_DEF;
2166adfc5217SJeff Kirsher 
2167adfc5217SJeff Kirsher 	/* XXX - really required?
2168adfc5217SJeff Kirsher 	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
2169adfc5217SJeff Kirsher 	*/
2170adfc5217SJeff Kirsher 
2171adfc5217SJeff Kirsher 	if (bp->sdev->id.revision >= 7)
2172adfc5217SJeff Kirsher 		bp->flags |= B44_FLAG_B0_ANDLATER;
2173adfc5217SJeff Kirsher 
2174adfc5217SJeff Kirsher 	return err;
2175adfc5217SJeff Kirsher }
2176adfc5217SJeff Kirsher 
2177adfc5217SJeff Kirsher static const struct net_device_ops b44_netdev_ops = {
2178adfc5217SJeff Kirsher 	.ndo_open		= b44_open,
2179adfc5217SJeff Kirsher 	.ndo_stop		= b44_close,
2180adfc5217SJeff Kirsher 	.ndo_start_xmit		= b44_start_xmit,
2181eeda8585SKevin Groeneveld 	.ndo_get_stats64	= b44_get_stats64,
2182afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= b44_set_rx_mode,
2183adfc5217SJeff Kirsher 	.ndo_set_mac_address	= b44_set_mac_addr,
2184adfc5217SJeff Kirsher 	.ndo_validate_addr	= eth_validate_addr,
2185a7605370SArnd Bergmann 	.ndo_eth_ioctl		= b44_ioctl,
2186adfc5217SJeff Kirsher 	.ndo_tx_timeout		= b44_tx_timeout,
2187adfc5217SJeff Kirsher 	.ndo_change_mtu		= b44_change_mtu,
2188adfc5217SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
2189adfc5217SJeff Kirsher 	.ndo_poll_controller	= b44_poll_controller,
2190adfc5217SJeff Kirsher #endif
2191adfc5217SJeff Kirsher };
2192adfc5217SJeff Kirsher 
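/*
 * b44_adjust_link() is the link-change callback passed to phy_connect() in
 * b44_register_phy_one() below.  phylib invokes it whenever the negotiated
 * link state changes; the MAC's TX_CTRL_DUPLEX bit is then updated with a
 * read-modify-write of B44_TX_CTRL so the MAC duplex follows the PHY.
 */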
219386f4ea63SHauke Mehrtens static void b44_adjust_link(struct net_device *dev)
219486f4ea63SHauke Mehrtens {
219586f4ea63SHauke Mehrtens 	struct b44 *bp = netdev_priv(dev);
219651f141beSPhilippe Reynes 	struct phy_device *phydev = dev->phydev;
2197ebe65334SJason Yan 	bool status_changed = false;
219886f4ea63SHauke Mehrtens 
219986f4ea63SHauke Mehrtens 	BUG_ON(!phydev);
220086f4ea63SHauke Mehrtens 
220186f4ea63SHauke Mehrtens 	if (bp->old_link != phydev->link) {
2202ebe65334SJason Yan 		status_changed = true;
220386f4ea63SHauke Mehrtens 		bp->old_link = phydev->link;
220486f4ea63SHauke Mehrtens 	}
220586f4ea63SHauke Mehrtens 
220686f4ea63SHauke Mehrtens 	/* reflect duplex change */
220786f4ea63SHauke Mehrtens 	if (phydev->link) {
220886f4ea63SHauke Mehrtens 		if ((phydev->duplex == DUPLEX_HALF) &&
220986f4ea63SHauke Mehrtens 		    (bp->flags & B44_FLAG_FULL_DUPLEX)) {
2210ebe65334SJason Yan 			status_changed = true;
221186f4ea63SHauke Mehrtens 			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
221286f4ea63SHauke Mehrtens 		} else if ((phydev->duplex == DUPLEX_FULL) &&
221386f4ea63SHauke Mehrtens 			   !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
2214ebe65334SJason Yan 			status_changed = true;
221586f4ea63SHauke Mehrtens 			bp->flags |= B44_FLAG_FULL_DUPLEX;
221686f4ea63SHauke Mehrtens 		}
221786f4ea63SHauke Mehrtens 	}
221886f4ea63SHauke Mehrtens 
221986f4ea63SHauke Mehrtens 	if (status_changed) {
22201d3f41eaSHauke Mehrtens 		u32 val = br32(bp, B44_TX_CTRL);
22211d3f41eaSHauke Mehrtens 		if (bp->flags & B44_FLAG_FULL_DUPLEX)
22221d3f41eaSHauke Mehrtens 			val |= TX_CTRL_DUPLEX;
22231d3f41eaSHauke Mehrtens 		else
22241d3f41eaSHauke Mehrtens 			val &= ~TX_CTRL_DUPLEX;
22251d3f41eaSHauke Mehrtens 		bw32(bp, B44_TX_CTRL, val);
222686f4ea63SHauke Mehrtens 		phy_print_status(phydev);
222786f4ea63SHauke Mehrtens 	}
222886f4ea63SHauke Mehrtens }
222986f4ea63SHauke Mehrtens 
223086f4ea63SHauke Mehrtens static int b44_register_phy_one(struct b44 *bp)
223186f4ea63SHauke Mehrtens {
22323c1bcc86SAndrew Lunn 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
223386f4ea63SHauke Mehrtens 	struct mii_bus *mii_bus;
223486f4ea63SHauke Mehrtens 	struct ssb_device *sdev = bp->sdev;
223586f4ea63SHauke Mehrtens 	struct phy_device *phydev;
223686f4ea63SHauke Mehrtens 	char bus_id[MII_BUS_ID_SIZE + 3];
2237b04138b3SHauke Mehrtens 	struct ssb_sprom *sprom = &sdev->bus->sprom;
223886f4ea63SHauke Mehrtens 	int err;
223986f4ea63SHauke Mehrtens 
224086f4ea63SHauke Mehrtens 	mii_bus = mdiobus_alloc();
224186f4ea63SHauke Mehrtens 	if (!mii_bus) {
224286f4ea63SHauke Mehrtens 		dev_err(sdev->dev, "mdiobus_alloc() failed\n");
224386f4ea63SHauke Mehrtens 		err = -ENOMEM;
224486f4ea63SHauke Mehrtens 		goto err_out;
224586f4ea63SHauke Mehrtens 	}
224686f4ea63SHauke Mehrtens 
224786f4ea63SHauke Mehrtens 	mii_bus->priv = bp;
224886f4ea63SHauke Mehrtens 	mii_bus->read = b44_mdio_read_phylib;
224986f4ea63SHauke Mehrtens 	mii_bus->write = b44_mdio_write_phylib;
225086f4ea63SHauke Mehrtens 	mii_bus->name = "b44_eth_mii";
225186f4ea63SHauke Mehrtens 	mii_bus->parent = sdev->dev;
225286f4ea63SHauke Mehrtens 	mii_bus->phy_mask = ~(1 << bp->phy_addr);
225386f4ea63SHauke Mehrtens 	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);
225486f4ea63SHauke Mehrtens 
225586f4ea63SHauke Mehrtens 	bp->mii_bus = mii_bus;
225686f4ea63SHauke Mehrtens 
225786f4ea63SHauke Mehrtens 	err = mdiobus_register(mii_bus);
225886f4ea63SHauke Mehrtens 	if (err) {
225986f4ea63SHauke Mehrtens 		dev_err(sdev->dev, "failed to register MII bus\n");
2260e7f4dc35SAndrew Lunn 		goto err_out_mdiobus;
226186f4ea63SHauke Mehrtens 	}
226286f4ea63SHauke Mehrtens 
22637f854420SAndrew Lunn 	if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) &&
2264b04138b3SHauke Mehrtens 	    (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
2265b04138b3SHauke Mehrtens 
2266b04138b3SHauke Mehrtens 		dev_info(sdev->dev,
2267b04138b3SHauke Mehrtens 			 "could not find PHY at %i, use fixed one\n",
2268b04138b3SHauke Mehrtens 			 bp->phy_addr);
2269b04138b3SHauke Mehrtens 
2270b04138b3SHauke Mehrtens 		bp->phy_addr = 0;
2271b04138b3SHauke Mehrtens 		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
2272b04138b3SHauke Mehrtens 			 bp->phy_addr);
2273b04138b3SHauke Mehrtens 	} else {
2274b04138b3SHauke Mehrtens 		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
2275b04138b3SHauke Mehrtens 			 bp->phy_addr);
2276b04138b3SHauke Mehrtens 	}
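	/*
	 * With PHY_ID_FMT ("%s:%02x") the bus_id built above looks like
	 * "fixed-0:00" on the switch fallback path, or e.g. "1:18" for a PHY
	 * at address 0x18 on the MDIO bus of instance 1 (the concrete values
	 * are illustrative).
	 */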
227786f4ea63SHauke Mehrtens 
227886f4ea63SHauke Mehrtens 	phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
227986f4ea63SHauke Mehrtens 			     PHY_INTERFACE_MODE_MII);
228086f4ea63SHauke Mehrtens 	if (IS_ERR(phydev)) {
228186f4ea63SHauke Mehrtens 		dev_err(sdev->dev, "could not attach PHY at %i\n",
228286f4ea63SHauke Mehrtens 			bp->phy_addr);
228386f4ea63SHauke Mehrtens 		err = PTR_ERR(phydev);
228486f4ea63SHauke Mehrtens 		goto err_out_mdiobus_unregister;
228586f4ea63SHauke Mehrtens 	}
228686f4ea63SHauke Mehrtens 
228786f4ea63SHauke Mehrtens 	/* mask with MAC supported features */
22883c1bcc86SAndrew Lunn 	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
22893c1bcc86SAndrew Lunn 	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
22903c1bcc86SAndrew Lunn 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
22913c1bcc86SAndrew Lunn 	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
22923c1bcc86SAndrew Lunn 	linkmode_and(phydev->supported, phydev->supported, mask);
22933c1bcc86SAndrew Lunn 	linkmode_copy(phydev->advertising, phydev->supported);
229486f4ea63SHauke Mehrtens 
229586f4ea63SHauke Mehrtens 	bp->old_link = 0;
2296e5a03bfdSAndrew Lunn 	bp->phy_addr = phydev->mdio.addr;
229786f4ea63SHauke Mehrtens 
22982220943aSAndrew Lunn 	phy_attached_info(phydev);
229986f4ea63SHauke Mehrtens 
230086f4ea63SHauke Mehrtens 	return 0;
230186f4ea63SHauke Mehrtens 
230286f4ea63SHauke Mehrtens err_out_mdiobus_unregister:
230386f4ea63SHauke Mehrtens 	mdiobus_unregister(mii_bus);
230486f4ea63SHauke Mehrtens 
230586f4ea63SHauke Mehrtens err_out_mdiobus:
230686f4ea63SHauke Mehrtens 	mdiobus_free(mii_bus);
230786f4ea63SHauke Mehrtens 
230886f4ea63SHauke Mehrtens err_out:
230986f4ea63SHauke Mehrtens 	return err;
231086f4ea63SHauke Mehrtens }
231186f4ea63SHauke Mehrtens 
231286f4ea63SHauke Mehrtens static void b44_unregister_phy_one(struct b44 *bp)
231386f4ea63SHauke Mehrtens {
231451f141beSPhilippe Reynes 	struct net_device *dev = bp->dev;
231586f4ea63SHauke Mehrtens 	struct mii_bus *mii_bus = bp->mii_bus;
231686f4ea63SHauke Mehrtens 
231751f141beSPhilippe Reynes 	phy_disconnect(dev->phydev);
231886f4ea63SHauke Mehrtens 	mdiobus_unregister(mii_bus);
231986f4ea63SHauke Mehrtens 	mdiobus_free(mii_bus);
232086f4ea63SHauke Mehrtens }
232186f4ea63SHauke Mehrtens 
232223971887SBill Pemberton static int b44_init_one(struct ssb_device *sdev,
2323adfc5217SJeff Kirsher 			const struct ssb_device_id *ent)
2324adfc5217SJeff Kirsher {
2325adfc5217SJeff Kirsher 	struct net_device *dev;
2326adfc5217SJeff Kirsher 	struct b44 *bp;
2327adfc5217SJeff Kirsher 	int err;
2328adfc5217SJeff Kirsher 
2329adfc5217SJeff Kirsher 	instance++;
2330adfc5217SJeff Kirsher 
2331adfc5217SJeff Kirsher 	dev = alloc_etherdev(sizeof(*bp));
2332adfc5217SJeff Kirsher 	if (!dev) {
2333adfc5217SJeff Kirsher 		err = -ENOMEM;
2334adfc5217SJeff Kirsher 		goto out;
2335adfc5217SJeff Kirsher 	}
2336adfc5217SJeff Kirsher 
2337adfc5217SJeff Kirsher 	SET_NETDEV_DEV(dev, sdev->dev);
2338adfc5217SJeff Kirsher 
2339adfc5217SJeff Kirsher 	/* No interesting netdevice features in this card... */
2340adfc5217SJeff Kirsher 	dev->features |= 0;
2341adfc5217SJeff Kirsher 
2342adfc5217SJeff Kirsher 	bp = netdev_priv(dev);
2343adfc5217SJeff Kirsher 	bp->sdev = sdev;
2344adfc5217SJeff Kirsher 	bp->dev = dev;
2345adfc5217SJeff Kirsher 	bp->force_copybreak = 0;
2346adfc5217SJeff Kirsher 
2347adfc5217SJeff Kirsher 	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2348adfc5217SJeff Kirsher 
2349adfc5217SJeff Kirsher 	spin_lock_init(&bp->lock);
2350e43c9f23SFlorian Fainelli 	u64_stats_init(&bp->hw_stats.syncp);
2351adfc5217SJeff Kirsher 
2352adfc5217SJeff Kirsher 	bp->rx_pending = B44_DEF_RX_RING_PENDING;
2353adfc5217SJeff Kirsher 	bp->tx_pending = B44_DEF_TX_RING_PENDING;
2354adfc5217SJeff Kirsher 
2355adfc5217SJeff Kirsher 	dev->netdev_ops = &b44_netdev_ops;
2356b48b89f9SJakub Kicinski 	netif_napi_add(dev, &bp->napi, b44_poll);
2357adfc5217SJeff Kirsher 	dev->watchdog_timeo = B44_TX_TIMEOUT;
2358e1c6dccaSJarod Wilson 	dev->min_mtu = B44_MIN_MTU;
2359e1c6dccaSJarod Wilson 	dev->max_mtu = B44_MAX_MTU;
2360adfc5217SJeff Kirsher 	dev->irq = sdev->irq;
23617ad24ea4SWilfried Klaebe 	dev->ethtool_ops = &b44_ethtool_ops;
2362adfc5217SJeff Kirsher 
2363adfc5217SJeff Kirsher 	err = ssb_bus_powerup(sdev->bus, 0);
2364adfc5217SJeff Kirsher 	if (err) {
2365adfc5217SJeff Kirsher 		dev_err(sdev->dev,
2366adfc5217SJeff Kirsher 			"Failed to powerup the bus\n");
2367adfc5217SJeff Kirsher 		goto err_out_free_dev;
2368adfc5217SJeff Kirsher 	}
2369adfc5217SJeff Kirsher 
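	/*
	 * DMA_BIT_MASK(30) is 0x3fffffff: the b44 DMA engine can only reach
	 * the first 1 GiB of bus addresses, which is also why the RX path
	 * falls back to GFP_DMA bounce buffers when a mapping lands above
	 * that limit.
	 */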
23707b027c24SZhang Changzhong 	err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
23717b027c24SZhang Changzhong 	if (err) {
2372adfc5217SJeff Kirsher 		dev_err(sdev->dev,
2373adfc5217SJeff Kirsher 			"Required 30BIT DMA mask unsupported by the system\n");
2374adfc5217SJeff Kirsher 		goto err_out_powerdown;
2375adfc5217SJeff Kirsher 	}
2376adfc5217SJeff Kirsher 
2377adfc5217SJeff Kirsher 	err = b44_get_invariants(bp);
2378adfc5217SJeff Kirsher 	if (err) {
2379adfc5217SJeff Kirsher 		dev_err(sdev->dev,
2380adfc5217SJeff Kirsher 			"Problem fetching invariants of chip, aborting\n");
2381adfc5217SJeff Kirsher 		goto err_out_powerdown;
2382adfc5217SJeff Kirsher 	}
2383adfc5217SJeff Kirsher 
23847befa6abSHauke Mehrtens 	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
23857befa6abSHauke Mehrtens 		dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
23867befa6abSHauke Mehrtens 		err = -ENODEV;
23877befa6abSHauke Mehrtens 		goto err_out_powerdown;
23887befa6abSHauke Mehrtens 	}
23897befa6abSHauke Mehrtens 
2390adfc5217SJeff Kirsher 	bp->mii_if.dev = dev;
2391348baa6cSHauke Mehrtens 	bp->mii_if.mdio_read = b44_mdio_read_mii;
2392348baa6cSHauke Mehrtens 	bp->mii_if.mdio_write = b44_mdio_write_mii;
2393adfc5217SJeff Kirsher 	bp->mii_if.phy_id = bp->phy_addr;
2394adfc5217SJeff Kirsher 	bp->mii_if.phy_id_mask = 0x1f;
2395adfc5217SJeff Kirsher 	bp->mii_if.reg_num_mask = 0x1f;
2396adfc5217SJeff Kirsher 
2397adfc5217SJeff Kirsher 	/* By default, advertise all speed/duplex settings. */
2398adfc5217SJeff Kirsher 	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2399adfc5217SJeff Kirsher 		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2400adfc5217SJeff Kirsher 
2401adfc5217SJeff Kirsher 	/* By default, auto-negotiate PAUSE. */
2402adfc5217SJeff Kirsher 	bp->flags |= B44_FLAG_PAUSE_AUTO;
2403adfc5217SJeff Kirsher 
2404adfc5217SJeff Kirsher 	err = register_netdev(dev);
2405adfc5217SJeff Kirsher 	if (err) {
2406adfc5217SJeff Kirsher 		dev_err(sdev->dev, "Cannot register net device, aborting\n");
2407adfc5217SJeff Kirsher 		goto err_out_powerdown;
2408adfc5217SJeff Kirsher 	}
2409adfc5217SJeff Kirsher 
2410adfc5217SJeff Kirsher 	netif_carrier_off(dev);
2411adfc5217SJeff Kirsher 
2412adfc5217SJeff Kirsher 	ssb_set_drvdata(sdev, dev);
2413adfc5217SJeff Kirsher 
2414adfc5217SJeff Kirsher 	/* Chip reset provides power to the b44 MAC & PCI cores, which
2415adfc5217SJeff Kirsher 	 * is necessary for MAC register access.
2416adfc5217SJeff Kirsher 	 */
2417adfc5217SJeff Kirsher 	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2418adfc5217SJeff Kirsher 
2419adfc5217SJeff Kirsher 	/* do a phy reset to test if there is an active phy */
2420656a7c2bSHauke Mehrtens 	err = b44_phy_reset(bp);
2421656a7c2bSHauke Mehrtens 	if (err < 0) {
2422656a7c2bSHauke Mehrtens 		dev_err(sdev->dev, "phy reset failed\n");
2423656a7c2bSHauke Mehrtens 		goto err_out_unregister_netdev;
2424656a7c2bSHauke Mehrtens 	}
2425adfc5217SJeff Kirsher 
242686f4ea63SHauke Mehrtens 	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
242786f4ea63SHauke Mehrtens 		err = b44_register_phy_one(bp);
242886f4ea63SHauke Mehrtens 		if (err) {
242986f4ea63SHauke Mehrtens 			dev_err(sdev->dev, "Cannot register PHY, aborting\n");
243086f4ea63SHauke Mehrtens 			goto err_out_unregister_netdev;
243186f4ea63SHauke Mehrtens 		}
243286f4ea63SHauke Mehrtens 	}
243386f4ea63SHauke Mehrtens 
24345580373fSAndrey Skvortsov 	device_set_wakeup_capable(sdev->dev, true);
2435adfc5217SJeff Kirsher 	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
2436adfc5217SJeff Kirsher 
2437adfc5217SJeff Kirsher 	return 0;
2438adfc5217SJeff Kirsher 
243986f4ea63SHauke Mehrtens err_out_unregister_netdev:
244086f4ea63SHauke Mehrtens 	unregister_netdev(dev);
2441adfc5217SJeff Kirsher err_out_powerdown:
2442adfc5217SJeff Kirsher 	ssb_bus_may_powerdown(sdev->bus);
2443adfc5217SJeff Kirsher 
2444adfc5217SJeff Kirsher err_out_free_dev:
24451489bdeeSHauke Mehrtens 	netif_napi_del(&bp->napi);
2446adfc5217SJeff Kirsher 	free_netdev(dev);
2447adfc5217SJeff Kirsher 
2448adfc5217SJeff Kirsher out:
2449adfc5217SJeff Kirsher 	return err;
2450adfc5217SJeff Kirsher }
2451adfc5217SJeff Kirsher 
245223971887SBill Pemberton static void b44_remove_one(struct ssb_device *sdev)
2453adfc5217SJeff Kirsher {
2454adfc5217SJeff Kirsher 	struct net_device *dev = ssb_get_drvdata(sdev);
245586f4ea63SHauke Mehrtens 	struct b44 *bp = netdev_priv(dev);
2456adfc5217SJeff Kirsher 
2457adfc5217SJeff Kirsher 	unregister_netdev(dev);
245886f4ea63SHauke Mehrtens 	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
245986f4ea63SHauke Mehrtens 		b44_unregister_phy_one(bp);
2460adfc5217SJeff Kirsher 	ssb_device_disable(sdev, 0);
2461adfc5217SJeff Kirsher 	ssb_bus_may_powerdown(sdev->bus);
24621489bdeeSHauke Mehrtens 	netif_napi_del(&bp->napi);
2463adfc5217SJeff Kirsher 	free_netdev(dev);
2464adfc5217SJeff Kirsher 	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2465adfc5217SJeff Kirsher 	ssb_set_drvdata(sdev, NULL);
2466adfc5217SJeff Kirsher }
2467adfc5217SJeff Kirsher 
2468adfc5217SJeff Kirsher static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
2469adfc5217SJeff Kirsher {
2470adfc5217SJeff Kirsher 	struct net_device *dev = ssb_get_drvdata(sdev);
2471adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
2472adfc5217SJeff Kirsher 
2473adfc5217SJeff Kirsher 	if (!netif_running(dev))
2474adfc5217SJeff Kirsher 		return 0;
2475adfc5217SJeff Kirsher 
2476adfc5217SJeff Kirsher 	del_timer_sync(&bp->timer);
2477adfc5217SJeff Kirsher 
2478adfc5217SJeff Kirsher 	spin_lock_irq(&bp->lock);
2479adfc5217SJeff Kirsher 
2480adfc5217SJeff Kirsher 	b44_halt(bp);
2481adfc5217SJeff Kirsher 	netif_carrier_off(bp->dev);
2482adfc5217SJeff Kirsher 	netif_device_detach(bp->dev);
2483adfc5217SJeff Kirsher 	b44_free_rings(bp);
2484adfc5217SJeff Kirsher 
2485adfc5217SJeff Kirsher 	spin_unlock_irq(&bp->lock);
2486adfc5217SJeff Kirsher 
2487adfc5217SJeff Kirsher 	free_irq(dev->irq, dev);
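	/*
	 * If Wake-on-LAN was enabled through b44_set_wol(), give the core a
	 * partial reset and let b44_setup_wol() reprogram it so it can still
	 * match magic packets once the device is put into D3hot below.
	 */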
2488adfc5217SJeff Kirsher 	if (bp->flags & B44_FLAG_WOL_ENABLE) {
2489adfc5217SJeff Kirsher 		b44_init_hw(bp, B44_PARTIAL_RESET);
2490adfc5217SJeff Kirsher 		b44_setup_wol(bp);
2491adfc5217SJeff Kirsher 	}
2492adfc5217SJeff Kirsher 
2493adfc5217SJeff Kirsher 	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2494adfc5217SJeff Kirsher 	return 0;
2495adfc5217SJeff Kirsher }
2496adfc5217SJeff Kirsher 
2497adfc5217SJeff Kirsher static int b44_resume(struct ssb_device *sdev)
2498adfc5217SJeff Kirsher {
2499adfc5217SJeff Kirsher 	struct net_device *dev = ssb_get_drvdata(sdev);
2500adfc5217SJeff Kirsher 	struct b44 *bp = netdev_priv(dev);
2501adfc5217SJeff Kirsher 	int rc = 0;
2502adfc5217SJeff Kirsher 
2503adfc5217SJeff Kirsher 	rc = ssb_bus_powerup(sdev->bus, 0);
2504adfc5217SJeff Kirsher 	if (rc) {
2505adfc5217SJeff Kirsher 		dev_err(sdev->dev,
2506adfc5217SJeff Kirsher 			"Failed to powerup the bus\n");
2507adfc5217SJeff Kirsher 		return rc;
2508adfc5217SJeff Kirsher 	}
2509adfc5217SJeff Kirsher 
2510adfc5217SJeff Kirsher 	if (!netif_running(dev))
2511adfc5217SJeff Kirsher 		return 0;
2512adfc5217SJeff Kirsher 
2513adfc5217SJeff Kirsher 	spin_lock_irq(&bp->lock);
2514adfc5217SJeff Kirsher 	b44_init_rings(bp);
2515adfc5217SJeff Kirsher 	b44_init_hw(bp, B44_FULL_RESET);
2516adfc5217SJeff Kirsher 	spin_unlock_irq(&bp->lock);
2517adfc5217SJeff Kirsher 
2518adfc5217SJeff Kirsher 	/*
2519adfc5217SJeff Kirsher 	 * As a shared interrupt, the handler can be called immediately. To be
2520adfc5217SJeff Kirsher 	 * able to check the interrupt status the hardware must already be
2521adfc5217SJeff Kirsher 	 * powered back on (b44_init_hw).
2522adfc5217SJeff Kirsher 	 */
2523adfc5217SJeff Kirsher 	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2524adfc5217SJeff Kirsher 	if (rc) {
2525adfc5217SJeff Kirsher 		netdev_err(dev, "request_irq failed\n");
2526adfc5217SJeff Kirsher 		spin_lock_irq(&bp->lock);
2527adfc5217SJeff Kirsher 		b44_halt(bp);
2528adfc5217SJeff Kirsher 		b44_free_rings(bp);
2529adfc5217SJeff Kirsher 		spin_unlock_irq(&bp->lock);
2530adfc5217SJeff Kirsher 		return rc;
2531adfc5217SJeff Kirsher 	}
2532adfc5217SJeff Kirsher 
2533adfc5217SJeff Kirsher 	netif_device_attach(bp->dev);
2534adfc5217SJeff Kirsher 
2535adfc5217SJeff Kirsher 	b44_enable_ints(bp);
2536adfc5217SJeff Kirsher 	netif_wake_queue(dev);
2537adfc5217SJeff Kirsher 
2538adfc5217SJeff Kirsher 	mod_timer(&bp->timer, jiffies + 1);
2539adfc5217SJeff Kirsher 
2540adfc5217SJeff Kirsher 	return 0;
2541adfc5217SJeff Kirsher }
2542adfc5217SJeff Kirsher 
2543adfc5217SJeff Kirsher static struct ssb_driver b44_ssb_driver = {
2544adfc5217SJeff Kirsher 	.name		= DRV_MODULE_NAME,
2545adfc5217SJeff Kirsher 	.id_table	= b44_ssb_tbl,
2546adfc5217SJeff Kirsher 	.probe		= b44_init_one,
254723971887SBill Pemberton 	.remove		= b44_remove_one,
2548adfc5217SJeff Kirsher 	.suspend	= b44_suspend,
2549adfc5217SJeff Kirsher 	.resume		= b44_resume,
2550adfc5217SJeff Kirsher };
2551adfc5217SJeff Kirsher 
2552adfc5217SJeff Kirsher static inline int __init b44_pci_init(void)
2553adfc5217SJeff Kirsher {
2554adfc5217SJeff Kirsher 	int err = 0;
2555adfc5217SJeff Kirsher #ifdef CONFIG_B44_PCI
2556adfc5217SJeff Kirsher 	err = ssb_pcihost_register(&b44_pci_driver);
2557adfc5217SJeff Kirsher #endif
2558adfc5217SJeff Kirsher 	return err;
2559adfc5217SJeff Kirsher }
2560adfc5217SJeff Kirsher 
256164f0a836SNikola Pajkovsky static inline void b44_pci_exit(void)
2562adfc5217SJeff Kirsher {
2563adfc5217SJeff Kirsher #ifdef CONFIG_B44_PCI
2564adfc5217SJeff Kirsher 	ssb_pcihost_unregister(&b44_pci_driver);
2565adfc5217SJeff Kirsher #endif
2566adfc5217SJeff Kirsher }
2567adfc5217SJeff Kirsher 
2568adfc5217SJeff Kirsher static int __init b44_init(void)
2569adfc5217SJeff Kirsher {
2570adfc5217SJeff Kirsher 	unsigned int dma_desc_align_size = dma_get_cache_alignment();
2571adfc5217SJeff Kirsher 	int err;
2572adfc5217SJeff Kirsher 
2573adfc5217SJeff Kirsher 	/* Set up parameters for syncing RX/TX DMA descriptors */
2574adfc5217SJeff Kirsher 	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
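	/*
	 * Example with illustrative values: a 32-byte cache line and the
	 * 8-byte struct dma_desc (two __le32 words) yield a sync size of 32,
	 * so every descriptor sync covers at least one full cache line.
	 */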
2575adfc5217SJeff Kirsher 
2576adfc5217SJeff Kirsher 	err = b44_pci_init();
2577adfc5217SJeff Kirsher 	if (err)
2578adfc5217SJeff Kirsher 		return err;
2579adfc5217SJeff Kirsher 	err = ssb_driver_register(&b44_ssb_driver);
2580adfc5217SJeff Kirsher 	if (err)
2581adfc5217SJeff Kirsher 		b44_pci_exit();
2582adfc5217SJeff Kirsher 	return err;
2583adfc5217SJeff Kirsher }
2584adfc5217SJeff Kirsher 
2585adfc5217SJeff Kirsher static void __exit b44_cleanup(void)
2586adfc5217SJeff Kirsher {
2587adfc5217SJeff Kirsher 	ssb_driver_unregister(&b44_ssb_driver);
2588adfc5217SJeff Kirsher 	b44_pci_exit();
2589adfc5217SJeff Kirsher }
2590adfc5217SJeff Kirsher 
2591adfc5217SJeff Kirsher module_init(b44_init);
2592adfc5217SJeff Kirsher module_exit(b44_cleanup);
2593adfc5217SJeff Kirsher 