// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2006, 2007 Eugene Konev
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/atomic.h>

#include <asm/mach-ar7/ar7.h>

MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cpmac");

static int debug_level = 8;
static int dumb_switch;

/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
module_param(debug_level, int, 0444);
module_param(dumb_switch, int, 0444);

MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");

#define CPMAC_VERSION "0.5.2"
/* frame size + 802.1q tag + FCS size */
#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
#define CPMAC_QUEUES 8

/* Ethernet registers */
#define CPMAC_TX_CONTROL 0x0004
#define CPMAC_TX_TEARDOWN 0x0008
#define CPMAC_RX_CONTROL 0x0014
#define CPMAC_RX_TEARDOWN 0x0018
#define CPMAC_MBP 0x0100
#define MBP_RXPASSCRC 0x40000000
#define MBP_RXQOS 0x20000000
#define MBP_RXNOCHAIN 0x10000000
#define MBP_RXCMF 0x01000000
#define MBP_RXSHORT 0x00800000
#define MBP_RXCEF 0x00400000
#define MBP_RXPROMISC 0x00200000
#define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16)
#define MBP_RXBCAST 0x00002000
#define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8)
#define MBP_RXMCAST 0x00000020
#define MBP_MCASTCHAN(channel) ((channel) & 0x7)
#define CPMAC_UNICAST_ENABLE 0x0104
#define CPMAC_UNICAST_CLEAR 0x0108
#define CPMAC_MAX_LENGTH 0x010c
#define CPMAC_BUFFER_OFFSET 0x0110
#define CPMAC_MAC_CONTROL 0x0160
#define MAC_TXPTYPE 0x00000200
#define MAC_TXPACE 0x00000040
#define MAC_MII 0x00000020
#define MAC_TXFLOW 0x00000010
#define MAC_RXFLOW 0x00000008
#define MAC_MTEST 0x00000004
#define MAC_LOOPBACK 0x00000002
#define MAC_FDX 0x00000001
#define CPMAC_MAC_STATUS 0x0164
#define MAC_STATUS_QOS 0x00000004
#define MAC_STATUS_RXFLOW 0x00000002
#define MAC_STATUS_TXFLOW 0x00000001
#define CPMAC_TX_INT_ENABLE 0x0178
#define CPMAC_TX_INT_CLEAR 0x017c
#define CPMAC_MAC_INT_VECTOR 0x0180
#define MAC_INT_STATUS 0x00080000
#define MAC_INT_HOST 0x00040000
#define MAC_INT_RX 0x00020000
#define MAC_INT_TX 0x00010000
#define CPMAC_MAC_EOI_VECTOR 0x0184
#define CPMAC_RX_INT_ENABLE 0x0198
#define CPMAC_RX_INT_CLEAR 0x019c
#define CPMAC_MAC_INT_ENABLE 0x01a8
#define CPMAC_MAC_INT_CLEAR 0x01ac
#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
#define CPMAC_MAC_ADDR_MID 0x01d0
#define CPMAC_MAC_ADDR_HI 0x01d4
#define CPMAC_MAC_HASH_LO 0x01d8
#define CPMAC_MAC_HASH_HI 0x01dc
#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4)
#define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4)
#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4)
#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4)
#define CPMAC_REG_END 0x0680

/* Rx/Tx statistics
 * TODO: use some of them to fill stats in cpmac_stats()
 */
#define CPMAC_STATS_RX_GOOD 0x0200
#define CPMAC_STATS_RX_BCAST 0x0204
#define CPMAC_STATS_RX_MCAST 0x0208
#define CPMAC_STATS_RX_PAUSE 0x020c
#define CPMAC_STATS_RX_CRC 0x0210
#define CPMAC_STATS_RX_ALIGN 0x0214
#define CPMAC_STATS_RX_OVER 0x0218
#define CPMAC_STATS_RX_JABBER 0x021c
#define CPMAC_STATS_RX_UNDER 0x0220
#define CPMAC_STATS_RX_FRAG 0x0224
#define CPMAC_STATS_RX_FILTER 0x0228
#define CPMAC_STATS_RX_QOSFILTER 0x022c
#define CPMAC_STATS_RX_OCTETS 0x0230

#define CPMAC_STATS_TX_GOOD 0x0234
#define CPMAC_STATS_TX_BCAST 0x0238
#define CPMAC_STATS_TX_MCAST 0x023c
#define CPMAC_STATS_TX_PAUSE 0x0240
#define CPMAC_STATS_TX_DEFER 0x0244
#define CPMAC_STATS_TX_COLLISION 0x0248
#define CPMAC_STATS_TX_SINGLECOLL 0x024c
#define CPMAC_STATS_TX_MULTICOLL 0x0250
#define CPMAC_STATS_TX_EXCESSCOLL 0x0254
#define CPMAC_STATS_TX_LATECOLL 0x0258
#define CPMAC_STATS_TX_UNDERRUN 0x025c
#define CPMAC_STATS_TX_CARRIERSENSE 0x0260
#define CPMAC_STATS_TX_OCTETS 0x0264

#define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg)))
#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \
                                     (reg)))

/* MDIO bus */
#define CPMAC_MDIO_VERSION 0x0000
#define CPMAC_MDIO_CONTROL 0x0004
#define MDIOC_IDLE 0x80000000
#define MDIOC_ENABLE 0x40000000
#define MDIOC_PREAMBLE 0x00100000
#define MDIOC_FAULT 0x00080000
#define MDIOC_FAULTDETECT 0x00040000
#define MDIOC_INTTEST 0x00020000
#define MDIOC_CLKDIV(div) ((div) & 0xff)
#define CPMAC_MDIO_ALIVE 0x0008
#define CPMAC_MDIO_LINK 0x000c
#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8)
#define MDIO_BUSY 0x80000000
#define MDIO_WRITE 0x40000000
#define MDIO_REG(reg) (((reg) & 0x1f) << 21)
#define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
#define MDIO_DATA(data) ((data) & 0xffff)
#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8)
#define PHYSEL_LINKSEL 0x00000040
#define PHYSEL_LINKINT 0x00000020

struct cpmac_desc {
        u32 hw_next;
        u32 hw_data;
        u16 buflen;
        u16 bufflags;
        u16 datalen;
        u16 dataflags;
#define CPMAC_SOP 0x8000
#define CPMAC_EOP 0x4000
#define CPMAC_OWN 0x2000
#define CPMAC_EOQ 0x1000
        struct sk_buff *skb;
        struct cpmac_desc *next;
        struct cpmac_desc *prev;
        dma_addr_t mapping;
        dma_addr_t data_mapping;
};

struct cpmac_priv {
        spinlock_t lock;
        spinlock_t rx_lock;
        struct cpmac_desc *rx_head;
        int ring_size;
        struct cpmac_desc *desc_ring;
        dma_addr_t dma_ring;
        void __iomem *regs;
        struct mii_bus *mii_bus;
        char phy_name[MII_BUS_ID_SIZE + 3];
        int oldlink, oldspeed, oldduplex;
        u32 msg_enable;
        struct net_device *dev;
        struct work_struct reset_work;
        struct platform_device *pdev;
        struct napi_struct napi;
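        /* Non-zero while a hardware reset is scheduled or in progress;
         * cpmac_start_xmit() returns NETDEV_TX_BUSY until it drops back
         * to zero.
         */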
        atomic_t reset_pending;
};

static irqreturn_t cpmac_irq(int, void *);
static void cpmac_hw_start(struct net_device *dev);
static void cpmac_hw_stop(struct net_device *dev);
static int cpmac_stop(struct net_device *dev);
static int cpmac_open(struct net_device *dev);

static void cpmac_dump_regs(struct net_device *dev)
{
        int i;
        struct cpmac_priv *priv = netdev_priv(dev);

        for (i = 0; i < CPMAC_REG_END; i += 4) {
                if (i % 16 == 0) {
                        if (i)
                                printk("\n");
                        printk("%s: reg[%p]:", dev->name, priv->regs + i);
                }
                printk(" %08x", cpmac_read(priv->regs, i));
        }
        printk("\n");
}

static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
{
        int i;

        printk("%s: desc[%p]:", dev->name, desc);
        for (i = 0; i < sizeof(*desc) / 4; i++)
                printk(" %08x", ((u32 *)desc)[i]);
        printk("\n");
}

static void cpmac_dump_all_desc(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        struct cpmac_desc *dump = priv->rx_head;

        do {
                cpmac_dump_desc(dev, dump);
                dump = dump->next;
        } while (dump != priv->rx_head);
}

static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
{
        int i;

        printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
        for (i = 0; i < skb->len; i++) {
                if (i % 16 == 0) {
                        if (i)
                                printk("\n");
                        printk("%s: data[%p]:", dev->name, skb->data + i);
                }
                printk(" %02x", ((u8 *)skb->data)[i]);
        }
        printk("\n");
}

static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
        u32 val;

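        /* Accesses are serialized through the channel 0 ACCESS register:
         * wait for BUSY to clear, post the request with BUSY set, then
         * poll until the hardware clears BUSY again; the read data sits
         * in the low 16 bits.
         */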
        while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
                cpu_relax();
        cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
                    MDIO_PHY(phy_id));
        while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
                cpu_relax();

        return MDIO_DATA(val);
}

static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
                            int reg, u16 val)
{
        while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
                cpu_relax();
        cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
                    MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));

        return 0;
}

static int cpmac_mdio_reset(struct mii_bus *bus)
{
        struct clk *cpmac_clk;

        cpmac_clk = clk_get(&bus->dev, "cpmac");
        if (IS_ERR(cpmac_clk)) {
                pr_err("unable to get cpmac clock\n");
                return -1;
        }
        ar7_device_reset(AR7_RESET_BIT_MDIO);
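        /* Enable the MDIO state machine; the divider is derived from the
         * cpmac clock rate so the MDIO bus runs at roughly 2.2 MHz.
         */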
        cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
                    MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));

        return 0;
}

static struct mii_bus *cpmac_mii;

static void cpmac_set_multicast_list(struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        u8 tmp;
        u32 mbp, bit, hash[2] = { 0, };
        struct cpmac_priv *priv = netdev_priv(dev);

        mbp = cpmac_read(priv->regs, CPMAC_MBP);
        if (dev->flags & IFF_PROMISC) {
                cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
                            MBP_RXPROMISC);
        } else {
                cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
                if (dev->flags & IFF_ALLMULTI) {
                        /* enable all multicast mode */
                        cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
                        cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
                } else {
                        /* cpmac uses some strange mac address hashing
                         * (not crc32)
                         */
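                        /* Each address is folded down to a 6-bit value
                         * that selects one bit in the 64-bit
                         * HASH_LO/HASH_HI table.
                         */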
                        netdev_for_each_mc_addr(ha, dev) {
                                bit = 0;
                                tmp = ha->addr[0];
                                bit ^= (tmp >> 2) ^ (tmp << 4);
                                tmp = ha->addr[1];
                                bit ^= (tmp >> 4) ^ (tmp << 2);
                                tmp = ha->addr[2];
                                bit ^= (tmp >> 6) ^ tmp;
                                tmp = ha->addr[3];
                                bit ^= (tmp >> 2) ^ (tmp << 4);
                                tmp = ha->addr[4];
                                bit ^= (tmp >> 4) ^ (tmp << 2);
                                tmp = ha->addr[5];
                                bit ^= (tmp >> 6) ^ tmp;
                                bit &= 0x3f;
                                hash[bit / 32] |= 1 << (bit % 32);
                        }

                        cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
                        cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
                }
        }
}

static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
                                    struct cpmac_desc *desc)
{
        struct sk_buff *skb, *result = NULL;

        if (unlikely(netif_msg_hw(priv)))
                cpmac_dump_desc(priv->dev, desc);
        cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
        if (unlikely(!desc->datalen)) {
                if (netif_msg_rx_err(priv) && net_ratelimit())
                        netdev_warn(priv->dev, "rx: spurious interrupt\n");

                return NULL;
        }

        skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
        if (likely(skb)) {
                skb_put(desc->skb, desc->datalen);
                desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
                skb_checksum_none_assert(desc->skb);
                priv->dev->stats.rx_packets++;
                priv->dev->stats.rx_bytes += desc->datalen;
                result = desc->skb;
                dma_unmap_single(&priv->dev->dev, desc->data_mapping,
                                 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
                desc->skb = skb;
                desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
                                                    CPMAC_SKB_SIZE,
                                                    DMA_FROM_DEVICE);
                desc->hw_data = (u32)desc->data_mapping;
                if (unlikely(netif_msg_pktdata(priv))) {
                        netdev_dbg(priv->dev, "received packet:\n");
                        cpmac_dump_skb(priv->dev, result);
                }
        } else {
                if (netif_msg_rx_err(priv) && net_ratelimit())
                        netdev_warn(priv->dev,
                                    "low on skbs, dropping packet\n");

                priv->dev->stats.rx_dropped++;
        }

        desc->buflen = CPMAC_SKB_SIZE;
        desc->dataflags = CPMAC_OWN;

        return result;
}

static int cpmac_poll(struct napi_struct *napi, int budget)
{
        struct sk_buff *skb;
        struct cpmac_desc *desc, *restart;
        struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
        int received = 0, processed = 0;

        spin_lock(&priv->rx_lock);
        if (unlikely(!priv->rx_head)) {
                if (netif_msg_rx_err(priv) && net_ratelimit())
                        netdev_warn(priv->dev, "rx: polling, but no queue\n");

                spin_unlock(&priv->rx_lock);
                napi_complete(napi);
                return 0;
        }

        desc = priv->rx_head;
        restart = NULL;
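        /* Walk the ring until we find a descriptor still owned by the
         * hardware or the NAPI budget is exhausted.
         */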
        while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
                processed++;

                if ((desc->dataflags & CPMAC_EOQ) != 0) {
                        /* The last update to eoq->hw_next didn't happen
                         * soon enough, and the receiver stopped here.
                         * Remember this descriptor so we can restart
                         * the receiver after freeing some space.
                         */
                        if (unlikely(restart)) {
                                if (netif_msg_rx_err(priv))
                                        netdev_err(priv->dev, "poll found a"
                                                   " duplicate EOQ: %p and %p\n",
                                                   restart, desc);
                                goto fatal_error;
                        }

                        restart = desc->next;
                }

                skb = cpmac_rx_one(priv, desc);
                if (likely(skb)) {
                        netif_receive_skb(skb);
                        received++;
                }
                desc = desc->next;
        }

        if (desc != priv->rx_head) {
                /* We freed some buffers, but not the whole ring,
                 * add what we did free to the rx list
                 */
                desc->prev->hw_next = (u32)0;
                priv->rx_head->prev->hw_next = priv->rx_head->mapping;
        }

        /* Optimization: If we did not actually process an EOQ (perhaps because
         * of quota limits), check to see if the tail of the queue has EOQ set.
         * We should immediately restart in that case so that the receiver can
         * restart and run in parallel with more packet processing.
         * This lets us handle slightly larger bursts before running
         * out of ring space (assuming dev->weight < ring_size)
         */

        if (!restart &&
            (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
                    == CPMAC_EOQ &&
            (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
                /* reset EOQ so the poll loop (above) doesn't try to
                 * restart this when it eventually gets to this descriptor.
                 */
                priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
                restart = priv->rx_head;
        }

        if (restart) {
                priv->dev->stats.rx_errors++;
                priv->dev->stats.rx_fifo_errors++;
                if (netif_msg_rx_err(priv) && net_ratelimit())
                        netdev_warn(priv->dev, "rx dma ring overrun\n");

                if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
                        if (netif_msg_drv(priv))
                                netdev_err(priv->dev, "cpmac_poll is trying "
                                           "to restart rx from a descriptor "
                                           "that's not free: %p\n", restart);
                        goto fatal_error;
                }

                cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
        }

        priv->rx_head = desc;
        spin_unlock(&priv->rx_lock);
        if (unlikely(netif_msg_rx_status(priv)))
                netdev_dbg(priv->dev, "poll processed %d packets\n", received);

        if (processed == 0) {
                /* we ran out of packets to read,
                 * revert to interrupt-driven mode
                 */
                napi_complete(napi);
                cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
                return 0;
        }

        return 1;

fatal_error:
        /* Something went horribly wrong.
         * Reset hardware to try to recover rather than wedging.
         */
        if (netif_msg_drv(priv)) {
                netdev_err(priv->dev, "cpmac_poll is confused. "
                           "Resetting hardware\n");
                cpmac_dump_all_desc(priv->dev);
                netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
                           cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
                           cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
        }

        spin_unlock(&priv->rx_lock);
        napi_complete(napi);
        netif_tx_stop_all_queues(priv->dev);
        napi_disable(&priv->napi);

        atomic_inc(&priv->reset_pending);
        cpmac_hw_stop(priv->dev);
        if (!schedule_work(&priv->reset_work))
                atomic_dec(&priv->reset_pending);

        return 0;

}

static netdev_tx_t cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        int queue;
        unsigned int len;
        struct cpmac_desc *desc;
        struct cpmac_priv *priv = netdev_priv(dev);

        if (unlikely(atomic_read(&priv->reset_pending)))
                return NETDEV_TX_BUSY;

        if (unlikely(skb_padto(skb, ETH_ZLEN)))
                return NETDEV_TX_OK;

        len = max_t(unsigned int, skb->len, ETH_ZLEN);
        queue = skb_get_queue_mapping(skb);
        netif_stop_subqueue(dev, queue);

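        /* Each TX queue owns exactly one descriptor; the subqueue stays
         * stopped until cpmac_end_xmit() reclaims it.
         */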
        desc = &priv->desc_ring[queue];
        if (unlikely(desc->dataflags & CPMAC_OWN)) {
                if (netif_msg_tx_err(priv) && net_ratelimit())
                        netdev_warn(dev, "tx dma ring full\n");

                return NETDEV_TX_BUSY;
        }

        spin_lock(&priv->lock);
        spin_unlock(&priv->lock);
        desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
        desc->skb = skb;
        desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
                                            DMA_TO_DEVICE);
        desc->hw_data = (u32)desc->data_mapping;
        desc->datalen = len;
        desc->buflen = len;
        if (unlikely(netif_msg_tx_queued(priv)))
                netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len);
        if (unlikely(netif_msg_hw(priv)))
                cpmac_dump_desc(dev, desc);
        if (unlikely(netif_msg_pktdata(priv)))
                cpmac_dump_skb(dev, skb);
        cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

        return NETDEV_TX_OK;
}

static void cpmac_end_xmit(struct net_device *dev, int queue)
{
        struct cpmac_desc *desc;
        struct cpmac_priv *priv = netdev_priv(dev);

        desc = &priv->desc_ring[queue];
        cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
        if (likely(desc->skb)) {
                spin_lock(&priv->lock);
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += desc->skb->len;
                spin_unlock(&priv->lock);
                dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
                                 DMA_TO_DEVICE);

                if (unlikely(netif_msg_tx_done(priv)))
                        netdev_dbg(dev, "sent 0x%p, len=%d\n",
                                   desc->skb, desc->skb->len);

                dev_consume_skb_irq(desc->skb);
                desc->skb = NULL;
                if (__netif_subqueue_stopped(dev, queue))
                        netif_wake_subqueue(dev, queue);
        } else {
                if (netif_msg_tx_err(priv) && net_ratelimit())
                        netdev_warn(dev, "end_xmit: spurious interrupt\n");
                if (__netif_subqueue_stopped(dev, queue))
                        netif_wake_subqueue(dev, queue);
        }
}

static void cpmac_hw_stop(struct net_device *dev)
{
        int i;
        struct cpmac_priv *priv = netdev_priv(dev);
        struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

        ar7_device_reset(pdata->reset_bit);
        cpmac_write(priv->regs, CPMAC_RX_CONTROL,
                    cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
        cpmac_write(priv->regs, CPMAC_TX_CONTROL,
                    cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
        for (i = 0; i < 8; i++) {
                cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
                cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
        }
        cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
                    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
}

static void cpmac_hw_start(struct net_device *dev)
{
        int i;
        struct cpmac_priv *priv = netdev_priv(dev);
        struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

        ar7_device_reset(pdata->reset_bit);
        for (i = 0; i < 8; i++) {
                cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
                cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
        }
        cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);

        cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
                    MBP_RXMCAST);
        cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
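        /* Station address: byte 5 goes into every channel's ADDR_LO,
         * byte 4 into ADDR_MID, and bytes 0-3 are packed into ADDR_HI.
         */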
        for (i = 0; i < 8; i++)
                cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
        cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
        cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
                    (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
                    (dev->dev_addr[3] << 24));
        cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
        cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
        cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
        cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
        cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
        cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);

        cpmac_write(priv->regs, CPMAC_RX_CONTROL,
                    cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
        cpmac_write(priv->regs, CPMAC_TX_CONTROL,
                    cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
        cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
                    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
                    MAC_FDX);
}

static void cpmac_clear_rx(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        struct cpmac_desc *desc;
        int i;

        if (unlikely(!priv->rx_head))
                return;
        desc = priv->rx_head;
        for (i = 0; i < priv->ring_size; i++) {
                if ((desc->dataflags & CPMAC_OWN) == 0) {
                        if (netif_msg_rx_err(priv) && net_ratelimit())
                                netdev_warn(dev, "packet dropped\n");
                        if (unlikely(netif_msg_hw(priv)))
                                cpmac_dump_desc(dev, desc);
                        desc->dataflags = CPMAC_OWN;
                        dev->stats.rx_dropped++;
                }
                desc->hw_next = desc->next->mapping;
                desc = desc->next;
        }
        priv->rx_head->prev->hw_next = 0;
}

static void cpmac_clear_tx(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        int i;

        if (unlikely(!priv->desc_ring))
                return;
        for (i = 0; i < CPMAC_QUEUES; i++) {
                priv->desc_ring[i].dataflags = 0;
                if (priv->desc_ring[i].skb) {
                        dev_kfree_skb_any(priv->desc_ring[i].skb);
                        priv->desc_ring[i].skb = NULL;
                }
        }
}

static void cpmac_hw_error(struct work_struct *work)
{
        struct cpmac_priv *priv =
                container_of(work, struct cpmac_priv, reset_work);

        spin_lock(&priv->rx_lock);
        cpmac_clear_rx(priv->dev);
        spin_unlock(&priv->rx_lock);
        cpmac_clear_tx(priv->dev);
        cpmac_hw_start(priv->dev);
        barrier();
        atomic_dec(&priv->reset_pending);

        netif_tx_wake_all_queues(priv->dev);
        cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}

static void cpmac_check_status(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
        int rx_channel = (macstatus >> 8) & 7;
        int rx_code = (macstatus >> 12) & 15;
        int tx_channel = (macstatus >> 16) & 7;
        int tx_code = (macstatus >> 20) & 15;

        if (rx_code || tx_code) {
                if (netif_msg_drv(priv) && net_ratelimit()) {
                        /* Can't find any documentation on what these
                         * error codes actually are. So just log them and hope..
                         */
                        if (rx_code)
                                netdev_warn(dev, "host error %d on rx "
                                            "channel %d (macstatus %08x), resetting\n",
                                            rx_code, rx_channel, macstatus);
                        if (tx_code)
                                netdev_warn(dev, "host error %d on tx "
                                            "channel %d (macstatus %08x), resetting\n",
                                            tx_code, tx_channel, macstatus);
                }

                netif_tx_stop_all_queues(dev);
                cpmac_hw_stop(dev);
                if (schedule_work(&priv->reset_work))
                        atomic_inc(&priv->reset_pending);
                if (unlikely(netif_msg_hw(priv)))
                        cpmac_dump_regs(dev);
        }
        cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
}

static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct cpmac_priv *priv;
        int queue;
        u32 status;

        priv = netdev_priv(dev);

        status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

        if (unlikely(netif_msg_intr(priv)))
                netdev_dbg(dev, "interrupt status: 0x%08x\n", status);

        if (status & MAC_INT_TX)
                cpmac_end_xmit(dev, (status & 7));

        if (status & MAC_INT_RX) {
                queue = (status >> 8) & 7;
                if (napi_schedule_prep(&priv->napi)) {
                        cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
                        __napi_schedule(&priv->napi);
                }
        }

        cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);

        if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
                cpmac_check_status(dev);

        return IRQ_HANDLED;
}

static void cpmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        spin_lock(&priv->lock);
        dev->stats.tx_errors++;
        spin_unlock(&priv->lock);
        if (netif_msg_tx_err(priv) && net_ratelimit())
                netdev_warn(dev, "transmit timeout\n");

        atomic_inc(&priv->reset_pending);
        barrier();
        cpmac_clear_tx(dev);
        barrier();
        atomic_dec(&priv->reset_pending);

        netif_tx_wake_all_queues(priv->dev);
}

static void cpmac_get_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *ring,
                                struct kernel_ethtool_ringparam *kernel_ring,
                                struct netlink_ext_ack *extack)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        ring->rx_max_pending = 1024;
        ring->rx_mini_max_pending = 1;
        ring->rx_jumbo_max_pending = 1;
        ring->tx_max_pending = 1;

        ring->rx_pending = priv->ring_size;
        ring->rx_mini_pending = 1;
        ring->rx_jumbo_pending = 1;
        ring->tx_pending = 1;
}

static int cpmac_set_ringparam(struct net_device *dev,
                               struct ethtool_ringparam *ring,
                               struct kernel_ethtool_ringparam *kernel_ring,
                               struct netlink_ext_ack *extack)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        if (netif_running(dev))
                return -EBUSY;
        priv->ring_size = ring->rx_pending;

        return 0;
}

static void cpmac_get_drvinfo(struct net_device *dev,
                              struct ethtool_drvinfo *info)
{
        strscpy(info->driver, "cpmac", sizeof(info->driver));
        strscpy(info->version, CPMAC_VERSION, sizeof(info->version));
        snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
}

static const struct ethtool_ops cpmac_ethtool_ops = {
        .get_drvinfo = cpmac_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_ringparam = cpmac_get_ringparam,
        .set_ringparam = cpmac_set_ringparam,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static void cpmac_adjust_link(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        int new_state = 0;

        spin_lock(&priv->lock);
        if (dev->phydev->link) {
                netif_tx_start_all_queues(dev);
                if (dev->phydev->duplex != priv->oldduplex) {
                        new_state = 1;
                        priv->oldduplex = dev->phydev->duplex;
                }

                if (dev->phydev->speed != priv->oldspeed) {
                        new_state = 1;
                        priv->oldspeed = dev->phydev->speed;
                }

                if (!priv->oldlink) {
                        new_state = 1;
                        priv->oldlink = 1;
                }
        } else if (priv->oldlink) {
                new_state = 1;
                priv->oldlink = 0;
                priv->oldspeed = 0;
                priv->oldduplex = -1;
        }

        if (new_state && netif_msg_link(priv) && net_ratelimit())
                phy_print_status(dev->phydev);

        spin_unlock(&priv->lock);
}

static int cpmac_open(struct net_device *dev)
{
        int i, size, res;
        struct cpmac_priv *priv = netdev_priv(dev);
        struct resource *mem;
        struct cpmac_desc *desc;
        struct sk_buff *skb;

        mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
        if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
                if (netif_msg_drv(priv))
                        netdev_err(dev, "failed to request registers\n");

                res = -ENXIO;
                goto fail_reserve;
        }

        priv->regs = ioremap(mem->start, resource_size(mem));
        if (!priv->regs) {
                if (netif_msg_drv(priv))
                        netdev_err(dev, "failed to remap registers\n");

                res = -ENXIO;
                goto fail_remap;
        }

        size = priv->ring_size + CPMAC_QUEUES;
        priv->desc_ring = dma_alloc_coherent(&dev->dev,
                                             sizeof(struct cpmac_desc) * size,
                                             &priv->dma_ring,
                                             GFP_KERNEL);
        if (!priv->desc_ring) {
                res = -ENOMEM;
                goto fail_alloc;
        }

        for (i = 0; i < size; i++)
                priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;

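        /* The first CPMAC_QUEUES descriptors serve as the per-queue TX
         * descriptors; the remaining ring_size descriptors form the
         * circular RX ring.
         */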
942b544dbacSJeff Kirsher priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
943b544dbacSJeff Kirsher for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
944b544dbacSJeff Kirsher skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
945b544dbacSJeff Kirsher if (unlikely(!skb)) {
946b544dbacSJeff Kirsher res = -ENOMEM;
947b544dbacSJeff Kirsher goto fail_desc;
948b544dbacSJeff Kirsher }
949b544dbacSJeff Kirsher desc->skb = skb;
950b544dbacSJeff Kirsher desc->data_mapping = dma_map_single(&dev->dev, skb->data,
951b544dbacSJeff Kirsher CPMAC_SKB_SIZE,
952b544dbacSJeff Kirsher DMA_FROM_DEVICE);
953b544dbacSJeff Kirsher desc->hw_data = (u32)desc->data_mapping;
954b544dbacSJeff Kirsher desc->buflen = CPMAC_SKB_SIZE;
955b544dbacSJeff Kirsher desc->dataflags = CPMAC_OWN;
956b544dbacSJeff Kirsher desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
957b544dbacSJeff Kirsher desc->next->prev = desc;
958b544dbacSJeff Kirsher desc->hw_next = (u32)desc->next->mapping;
959b544dbacSJeff Kirsher }
960b544dbacSJeff Kirsher
961b544dbacSJeff Kirsher priv->rx_head->prev->hw_next = (u32)0;
962b544dbacSJeff Kirsher
963b544dbacSJeff Kirsher res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
964b544dbacSJeff Kirsher if (res) {
965b544dbacSJeff Kirsher if (netif_msg_drv(priv))
966f160a2d0SVarka Bhadram netdev_err(dev, "failed to obtain irq\n");
967f160a2d0SVarka Bhadram
968b544dbacSJeff Kirsher goto fail_irq;
969b544dbacSJeff Kirsher }
970b544dbacSJeff Kirsher
971b544dbacSJeff Kirsher atomic_set(&priv->reset_pending, 0);
972b544dbacSJeff Kirsher INIT_WORK(&priv->reset_work, cpmac_hw_error);
973b544dbacSJeff Kirsher cpmac_hw_start(dev);
974b544dbacSJeff Kirsher
975b544dbacSJeff Kirsher napi_enable(&priv->napi);
976b401a9bcSPhilippe Reynes phy_start(dev->phydev);
977b544dbacSJeff Kirsher
978b544dbacSJeff Kirsher return 0;
979b544dbacSJeff Kirsher
980b544dbacSJeff Kirsher fail_irq:
981b544dbacSJeff Kirsher fail_desc:
982b544dbacSJeff Kirsher for (i = 0; i < priv->ring_size; i++) {
983b544dbacSJeff Kirsher if (priv->rx_head[i].skb) {
984b544dbacSJeff Kirsher dma_unmap_single(&dev->dev,
985b544dbacSJeff Kirsher priv->rx_head[i].data_mapping,
986b544dbacSJeff Kirsher CPMAC_SKB_SIZE,
987b544dbacSJeff Kirsher DMA_FROM_DEVICE);
988b544dbacSJeff Kirsher kfree_skb(priv->rx_head[i].skb);
989b544dbacSJeff Kirsher }
990b544dbacSJeff Kirsher }
991731e6f00SChristophe Jaillet dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
992731e6f00SChristophe Jaillet priv->desc_ring, priv->dma_ring);
993731e6f00SChristophe Jaillet
994b544dbacSJeff Kirsher fail_alloc:
995b544dbacSJeff Kirsher iounmap(priv->regs);
996b544dbacSJeff Kirsher
997b544dbacSJeff Kirsher fail_remap:
998b544dbacSJeff Kirsher release_mem_region(mem->start, resource_size(mem));
999b544dbacSJeff Kirsher
1000b544dbacSJeff Kirsher fail_reserve:
1001b544dbacSJeff Kirsher return res;
1002b544dbacSJeff Kirsher }
1003b544dbacSJeff Kirsher
cpmac_stop(struct net_device * dev)1004b544dbacSJeff Kirsher static int cpmac_stop(struct net_device *dev)
1005b544dbacSJeff Kirsher {
1006b544dbacSJeff Kirsher int i;
1007b544dbacSJeff Kirsher struct cpmac_priv *priv = netdev_priv(dev);
1008b544dbacSJeff Kirsher struct resource *mem;
1009b544dbacSJeff Kirsher
1010b544dbacSJeff Kirsher netif_tx_stop_all_queues(dev);
1011b544dbacSJeff Kirsher
1012b544dbacSJeff Kirsher cancel_work_sync(&priv->reset_work);
1013b544dbacSJeff Kirsher napi_disable(&priv->napi);
1014b401a9bcSPhilippe Reynes phy_stop(dev->phydev);
1015b544dbacSJeff Kirsher
1016b544dbacSJeff Kirsher cpmac_hw_stop(dev);
1017b544dbacSJeff Kirsher
1018b544dbacSJeff Kirsher for (i = 0; i < 8; i++)
1019b544dbacSJeff Kirsher cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
1020b544dbacSJeff Kirsher cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
1021b544dbacSJeff Kirsher cpmac_write(priv->regs, CPMAC_MBP, 0);
1022b544dbacSJeff Kirsher
1023b544dbacSJeff Kirsher free_irq(dev->irq, dev);
1024b544dbacSJeff Kirsher iounmap(priv->regs);
1025b544dbacSJeff Kirsher mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
1026b544dbacSJeff Kirsher release_mem_region(mem->start, resource_size(mem));
1027b544dbacSJeff Kirsher priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
1028b544dbacSJeff Kirsher for (i = 0; i < priv->ring_size; i++) {
1029b544dbacSJeff Kirsher if (priv->rx_head[i].skb) {
1030b544dbacSJeff Kirsher dma_unmap_single(&dev->dev,
1031b544dbacSJeff Kirsher priv->rx_head[i].data_mapping,
1032b544dbacSJeff Kirsher CPMAC_SKB_SIZE,
1033b544dbacSJeff Kirsher DMA_FROM_DEVICE);
1034b544dbacSJeff Kirsher kfree_skb(priv->rx_head[i].skb);
1035b544dbacSJeff Kirsher }
1036b544dbacSJeff Kirsher }
1037b544dbacSJeff Kirsher
1038b544dbacSJeff Kirsher dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
1039b544dbacSJeff Kirsher (CPMAC_QUEUES + priv->ring_size),
1040b544dbacSJeff Kirsher priv->desc_ring, priv->dma_ring);
104155064efdSVarka Bhadram
1042b544dbacSJeff Kirsher return 0;
1043b544dbacSJeff Kirsher }
1044b544dbacSJeff Kirsher
1045b544dbacSJeff Kirsher static const struct net_device_ops cpmac_netdev_ops = {
1046b544dbacSJeff Kirsher .ndo_open = cpmac_open,
1047b544dbacSJeff Kirsher .ndo_stop = cpmac_stop,
1048b544dbacSJeff Kirsher .ndo_start_xmit = cpmac_start_xmit,
1049b544dbacSJeff Kirsher .ndo_tx_timeout = cpmac_tx_timeout,
1050afc4b13dSJiri Pirko .ndo_set_rx_mode = cpmac_set_multicast_list,
1051a7605370SArnd Bergmann .ndo_eth_ioctl = phy_do_ioctl_running,
1052b544dbacSJeff Kirsher .ndo_validate_addr = eth_validate_addr,
1053b544dbacSJeff Kirsher .ndo_set_mac_address = eth_mac_addr,
1054b544dbacSJeff Kirsher };
1055b544dbacSJeff Kirsher
1056b544dbacSJeff Kirsher static int external_switch;
1057b544dbacSJeff Kirsher
cpmac_probe(struct platform_device * pdev)1058f57ae66eSBill Pemberton static int cpmac_probe(struct platform_device *pdev)
1059b544dbacSJeff Kirsher {
1060b544dbacSJeff Kirsher int rc, phy_id;
1061b544dbacSJeff Kirsher char mdio_bus_id[MII_BUS_ID_SIZE];
1062b544dbacSJeff Kirsher struct resource *mem;
1063b544dbacSJeff Kirsher struct cpmac_priv *priv;
1064b544dbacSJeff Kirsher struct net_device *dev;
1065b544dbacSJeff Kirsher struct plat_cpmac_data *pdata;
1066b401a9bcSPhilippe Reynes struct phy_device *phydev = NULL;
1067b544dbacSJeff Kirsher
1068a0ea2ac8SJingoo Han pdata = dev_get_platdata(&pdev->dev);
1069b544dbacSJeff Kirsher
1070b544dbacSJeff Kirsher if (external_switch || dumb_switch) {
1071a19c5d68SFlorian Fainelli strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
1072b544dbacSJeff Kirsher phy_id = pdev->id;
1073b544dbacSJeff Kirsher } else {
1074b544dbacSJeff Kirsher for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1075b544dbacSJeff Kirsher if (!(pdata->phy_mask & (1 << phy_id)))
1076b544dbacSJeff Kirsher continue;
10773c6396d6SGuenter Roeck if (!mdiobus_get_phy(cpmac_mii, phy_id))
1078b544dbacSJeff Kirsher continue;
1079b544dbacSJeff Kirsher strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
1080b544dbacSJeff Kirsher break;
1081b544dbacSJeff Kirsher }
1082b544dbacSJeff Kirsher }
1083b544dbacSJeff Kirsher
1084b544dbacSJeff Kirsher if (phy_id == PHY_MAX_ADDR) {
1085b544dbacSJeff Kirsher dev_err(&pdev->dev, "no PHY present, falling back to switch on MDIO bus 0\n");
1087a19c5d68SFlorian Fainelli strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
1088b544dbacSJeff Kirsher phy_id = pdev->id;
1089b544dbacSJeff Kirsher }
10909951e048SRickard Strandqvist mdio_bus_id[sizeof(mdio_bus_id) - 1] = '\0';
1091b544dbacSJeff Kirsher
1092b544dbacSJeff Kirsher dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
109341de8d4cSJoe Perches if (!dev)
1094b544dbacSJeff Kirsher return -ENOMEM;
1095b544dbacSJeff Kirsher
10965579f28cSFlorian Fainelli SET_NETDEV_DEV(dev, &pdev->dev);
1097b544dbacSJeff Kirsher platform_set_drvdata(pdev, dev);
1098b544dbacSJeff Kirsher priv = netdev_priv(dev);
1099b544dbacSJeff Kirsher
1100b544dbacSJeff Kirsher priv->pdev = pdev;
1101b544dbacSJeff Kirsher mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
1102b544dbacSJeff Kirsher if (!mem) {
1103b544dbacSJeff Kirsher rc = -ENODEV;
110409714275SWei Yongjun goto fail;
1105b544dbacSJeff Kirsher }
1106b544dbacSJeff Kirsher
1107b544dbacSJeff Kirsher dev->irq = platform_get_irq_byname(pdev, "irq");
1108b544dbacSJeff Kirsher
1109b544dbacSJeff Kirsher dev->netdev_ops = &cpmac_netdev_ops;
1110b544dbacSJeff Kirsher dev->ethtool_ops = &cpmac_ethtool_ops;
1111b544dbacSJeff Kirsher
1112b48b89f9SJakub Kicinski netif_napi_add(dev, &priv->napi, cpmac_poll);
1113b544dbacSJeff Kirsher
1114b544dbacSJeff Kirsher spin_lock_init(&priv->lock);
1115b544dbacSJeff Kirsher spin_lock_init(&priv->rx_lock);
1116b544dbacSJeff Kirsher priv->dev = dev;
1117b544dbacSJeff Kirsher priv->ring_size = 64;
1118b544dbacSJeff Kirsher priv->msg_enable = netif_msg_init(debug_level, 0xff);
1119c51e5062SJakub Kicinski eth_hw_addr_set(dev, pdata->dev_addr);
1120b544dbacSJeff Kirsher
1121b544dbacSJeff Kirsher snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
1122b544dbacSJeff Kirsher mdio_bus_id, phy_id);
1123b544dbacSJeff Kirsher
1124b401a9bcSPhilippe Reynes phydev = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
1125b544dbacSJeff Kirsher PHY_INTERFACE_MODE_MII);
1126b544dbacSJeff Kirsher
1127b401a9bcSPhilippe Reynes if (IS_ERR(phydev)) {
1128b544dbacSJeff Kirsher if (netif_msg_drv(priv))
1129f160a2d0SVarka Bhadram dev_err(&pdev->dev, "Could not attach to PHY\n");
1130f160a2d0SVarka Bhadram
1131b401a9bcSPhilippe Reynes rc = PTR_ERR(phydev);
113209714275SWei Yongjun goto fail;
1133b544dbacSJeff Kirsher }
1134b544dbacSJeff Kirsher
1135b544dbacSJeff Kirsher rc = register_netdev(dev);
1136b544dbacSJeff Kirsher if (rc) {
1137f160a2d0SVarka Bhadram dev_err(&pdev->dev, "Could not register net device\n");
1138b544dbacSJeff Kirsher goto fail;
1139b544dbacSJeff Kirsher }
1140b544dbacSJeff Kirsher
1141b544dbacSJeff Kirsher if (netif_msg_probe(priv)) {
1142f160a2d0SVarka Bhadram dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, "
1143f160a2d0SVarka Bhadram "mac: %pM\n", (void *)mem->start, dev->irq,
1144b544dbacSJeff Kirsher priv->phy_name, dev->dev_addr);
1145b544dbacSJeff Kirsher }
114655064efdSVarka Bhadram
1147b544dbacSJeff Kirsher return 0;
1148b544dbacSJeff Kirsher
1149b544dbacSJeff Kirsher fail:
1150b544dbacSJeff Kirsher free_netdev(dev);
1151b544dbacSJeff Kirsher return rc;
1152b544dbacSJeff Kirsher }
1153b544dbacSJeff Kirsher
1154f57ae66eSBill Pemberton static int cpmac_remove(struct platform_device *pdev)
1155b544dbacSJeff Kirsher {
1156b544dbacSJeff Kirsher struct net_device *dev = platform_get_drvdata(pdev);
115759329d8bSVarka Bhadram
1158b544dbacSJeff Kirsher unregister_netdev(dev);
1159b544dbacSJeff Kirsher free_netdev(dev);
116055064efdSVarka Bhadram
1161b544dbacSJeff Kirsher return 0;
1162b544dbacSJeff Kirsher }
1163b544dbacSJeff Kirsher
1164b544dbacSJeff Kirsher static struct platform_driver cpmac_driver = {
116596a8d3c1SVarka Bhadram .driver = {
116696a8d3c1SVarka Bhadram .name = "cpmac",
116796a8d3c1SVarka Bhadram },
1168b544dbacSJeff Kirsher .probe = cpmac_probe,
1169f57ae66eSBill Pemberton .remove = cpmac_remove,
1170b544dbacSJeff Kirsher };
1171b544dbacSJeff Kirsher
1172*510bbf82Sruanjinjie int __init cpmac_init(void)
1173b544dbacSJeff Kirsher {
1174b544dbacSJeff Kirsher u32 mask;
1175b544dbacSJeff Kirsher int i, res;
1176b544dbacSJeff Kirsher
1177b544dbacSJeff Kirsher cpmac_mii = mdiobus_alloc();
1178b544dbacSJeff Kirsher if (cpmac_mii == NULL)
1179b544dbacSJeff Kirsher return -ENOMEM;
1180b544dbacSJeff Kirsher
1181b544dbacSJeff Kirsher cpmac_mii->name = "cpmac-mii";
1182b544dbacSJeff Kirsher cpmac_mii->read = cpmac_mdio_read;
1183b544dbacSJeff Kirsher cpmac_mii->write = cpmac_mdio_write;
1184b544dbacSJeff Kirsher cpmac_mii->reset = cpmac_mdio_reset;
1185b544dbacSJeff Kirsher
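/* Map the AR7 MDIO controller register window used by the shared
 * cpmac MII bus.
 */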
1186b544dbacSJeff Kirsher cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);
1187b544dbacSJeff Kirsher
1188b544dbacSJeff Kirsher if (!cpmac_mii->priv) {
1189f160a2d0SVarka Bhadram pr_err("Can't ioremap mdio registers\n");
1190b544dbacSJeff Kirsher res = -ENXIO;
1191b544dbacSJeff Kirsher goto fail_alloc;
1192b544dbacSJeff Kirsher }
1193b544dbacSJeff Kirsher
1194d43e6fb4SArnd Bergmann /* FIXME: unhardcode gpio&reset bits */
1195b544dbacSJeff Kirsher ar7_gpio_disable(26);
1196b544dbacSJeff Kirsher ar7_gpio_disable(27);
1197b544dbacSJeff Kirsher ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
1198b544dbacSJeff Kirsher ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
1199b544dbacSJeff Kirsher ar7_device_reset(AR7_RESET_BIT_EPHY);
1200b544dbacSJeff Kirsher
1201b544dbacSJeff Kirsher cpmac_mii->reset(cpmac_mii);
1202b544dbacSJeff Kirsher
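/* Poll the MDIO ALIVE register for up to ~3 seconds (300 x 10 ms)
 * until at least one PHY responds.
 */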
1203b544dbacSJeff Kirsher for (i = 0; i < 300; i++) {
1204b544dbacSJeff Kirsher mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
1205b544dbacSJeff Kirsher if (mask)
1206b544dbacSJeff Kirsher break;
1207b544dbacSJeff Kirsher else
1208b544dbacSJeff Kirsher msleep(10);
1209b544dbacSJeff Kirsher }
1210b544dbacSJeff Kirsher
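/* Drop bit 31, then check the result: more than one responding address
 * means the MAC sits behind a switch, so flag it and ignore the mask.
 */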
1211b544dbacSJeff Kirsher mask &= 0x7fffffff;
1212b544dbacSJeff Kirsher if (mask & (mask - 1)) {
1213b544dbacSJeff Kirsher external_switch = 1;
1214b544dbacSJeff Kirsher mask = 0;
1215b544dbacSJeff Kirsher }
1216b544dbacSJeff Kirsher
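/* phy_mask bits that are set tell the MDIO core which addresses to
 * skip: probe only the PHY found alive above (if any), plus address 31.
 */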
1217b544dbacSJeff Kirsher cpmac_mii->phy_mask = ~(mask | 0x80000000);
1218d1733f07SFlorian Fainelli snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1");
1219b544dbacSJeff Kirsher
1220b544dbacSJeff Kirsher res = mdiobus_register(cpmac_mii);
1221b544dbacSJeff Kirsher if (res)
1222b544dbacSJeff Kirsher goto fail_mii;
1223b544dbacSJeff Kirsher
1224b544dbacSJeff Kirsher res = platform_driver_register(&cpmac_driver);
1225b544dbacSJeff Kirsher if (res)
1226b544dbacSJeff Kirsher goto fail_cpmac;
1227b544dbacSJeff Kirsher
1228b544dbacSJeff Kirsher return 0;
1229b544dbacSJeff Kirsher
1230b544dbacSJeff Kirsher fail_cpmac:
1231b544dbacSJeff Kirsher mdiobus_unregister(cpmac_mii);
1232b544dbacSJeff Kirsher
1233b544dbacSJeff Kirsher fail_mii:
1234b544dbacSJeff Kirsher iounmap(cpmac_mii->priv);
1235b544dbacSJeff Kirsher
1236b544dbacSJeff Kirsher fail_alloc:
1237b544dbacSJeff Kirsher mdiobus_free(cpmac_mii);
1238b544dbacSJeff Kirsher
1239b544dbacSJeff Kirsher return res;
1240b544dbacSJeff Kirsher }
1241b544dbacSJeff Kirsher
1242*510bbf82Sruanjinjie void __exit cpmac_exit(void)
1243b544dbacSJeff Kirsher {
1244b544dbacSJeff Kirsher platform_driver_unregister(&cpmac_driver);
1245b544dbacSJeff Kirsher mdiobus_unregister(cpmac_mii);
1246b544dbacSJeff Kirsher iounmap(cpmac_mii->priv);
1247b544dbacSJeff Kirsher mdiobus_free(cpmac_mii);
1248b544dbacSJeff Kirsher }
1249b544dbacSJeff Kirsher
1250b544dbacSJeff Kirsher module_init(cpmac_init);
1251b544dbacSJeff Kirsher module_exit(cpmac_exit);