1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2ae7668d0SJeff Kirsher /*
3ae7668d0SJeff Kirsher *
4ae7668d0SJeff Kirsher * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
5ae7668d0SJeff Kirsher */
6ae7668d0SJeff Kirsher
7ae7668d0SJeff Kirsher #include <linux/kernel.h>
8ae7668d0SJeff Kirsher #include <linux/slab.h>
9ae7668d0SJeff Kirsher #include <linux/errno.h>
10ae7668d0SJeff Kirsher #include <linux/types.h>
11ae7668d0SJeff Kirsher #include <linux/interrupt.h>
12ae7668d0SJeff Kirsher #include <linux/uaccess.h>
13ae7668d0SJeff Kirsher #include <linux/in.h>
14ae7668d0SJeff Kirsher #include <linux/netdevice.h>
15ae7668d0SJeff Kirsher #include <linux/etherdevice.h>
16ae7668d0SJeff Kirsher #include <linux/phy.h>
17ae7668d0SJeff Kirsher #include <linux/ip.h>
18ae7668d0SJeff Kirsher #include <linux/tcp.h>
19ae7668d0SJeff Kirsher #include <linux/skbuff.h>
20ae7668d0SJeff Kirsher #include <linux/mm.h>
21ae7668d0SJeff Kirsher #include <linux/platform_device.h>
22ae7668d0SJeff Kirsher #include <linux/ethtool.h>
23ae7668d0SJeff Kirsher #include <linux/init.h>
24ae7668d0SJeff Kirsher #include <linux/delay.h>
25ae7668d0SJeff Kirsher #include <linux/io.h>
26a32fd63dSJohn Crispin #include <linux/dma-mapping.h>
27a32fd63dSJohn Crispin #include <linux/module.h>
28e97b21e9SRandy Dunlap #include <linux/property.h>
29ae7668d0SJeff Kirsher
30ae7668d0SJeff Kirsher #include <asm/checksum.h>
31ae7668d0SJeff Kirsher
32ae7668d0SJeff Kirsher #include <lantiq_soc.h>
33ae7668d0SJeff Kirsher #include <xway_dma.h>
34ae7668d0SJeff Kirsher #include <lantiq_platform.h>
35ae7668d0SJeff Kirsher
36ae7668d0SJeff Kirsher #define LTQ_ETOP_MDIO 0x11804
37ae7668d0SJeff Kirsher #define MDIO_REQUEST 0x80000000
38ae7668d0SJeff Kirsher #define MDIO_READ 0x40000000
39ae7668d0SJeff Kirsher #define MDIO_ADDR_MASK 0x1f
40ae7668d0SJeff Kirsher #define MDIO_ADDR_OFFSET 0x15
41ae7668d0SJeff Kirsher #define MDIO_REG_MASK 0x1f
42ae7668d0SJeff Kirsher #define MDIO_REG_OFFSET 0x10
43ae7668d0SJeff Kirsher #define MDIO_VAL_MASK 0xffff
44ae7668d0SJeff Kirsher
45ae7668d0SJeff Kirsher #define PPE32_CGEN 0x800
46ae7668d0SJeff Kirsher #define LQ_PPE32_ENET_MAC_CFG 0x1840
47ae7668d0SJeff Kirsher
48ae7668d0SJeff Kirsher #define LTQ_ETOP_ENETS0 0x11850
49ae7668d0SJeff Kirsher #define LTQ_ETOP_MAC_DA0 0x1186C
50ae7668d0SJeff Kirsher #define LTQ_ETOP_MAC_DA1 0x11870
51ae7668d0SJeff Kirsher #define LTQ_ETOP_CFG 0x16020
52ae7668d0SJeff Kirsher #define LTQ_ETOP_IGPLEN 0x16080
53ae7668d0SJeff Kirsher
54ae7668d0SJeff Kirsher #define MAX_DMA_CHAN 0x8
55ae7668d0SJeff Kirsher #define MAX_DMA_CRC_LEN 0x4
56ae7668d0SJeff Kirsher #define MAX_DMA_DATA_LEN 0x600
57ae7668d0SJeff Kirsher
58ae7668d0SJeff Kirsher #define ETOP_FTCU BIT(28)
59ae7668d0SJeff Kirsher #define ETOP_MII_MASK 0xf
60ae7668d0SJeff Kirsher #define ETOP_MII_NORMAL 0xd
61ae7668d0SJeff Kirsher #define ETOP_MII_REVERSE 0xe
62ae7668d0SJeff Kirsher #define ETOP_PLEN_UNDER 0x40
63ae7668d0SJeff Kirsher #define ETOP_CGEN 0x800
64ae7668d0SJeff Kirsher
65ae7668d0SJeff Kirsher /* use 2 static channels for TX/RX */
66ae7668d0SJeff Kirsher #define LTQ_ETOP_TX_CHANNEL 1
67ae7668d0SJeff Kirsher #define LTQ_ETOP_RX_CHANNEL 6
68b1cb12a2SAleksander Jan Bajkowski #define IS_TX(x) ((x) == LTQ_ETOP_TX_CHANNEL)
69b1cb12a2SAleksander Jan Bajkowski #define IS_RX(x) ((x) == LTQ_ETOP_RX_CHANNEL)
70ae7668d0SJeff Kirsher
71ae7668d0SJeff Kirsher #define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x))
72ae7668d0SJeff Kirsher #define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y))
73ae7668d0SJeff Kirsher #define ltq_etop_w32_mask(x, y, z) \
74ae7668d0SJeff Kirsher ltq_w32_mask(x, y, ltq_etop_membase + (z))
75ae7668d0SJeff Kirsher
76ae7668d0SJeff Kirsher #define DRV_VERSION "1.0"
77ae7668d0SJeff Kirsher
78ae7668d0SJeff Kirsher static void __iomem *ltq_etop_membase;
79ae7668d0SJeff Kirsher
/* Per-DMA-channel state; one instance per TX or RX channel. */
struct ltq_etop_chan {
	int idx;			/* channel number; mirrors dma.nr */
	int tx_free;			/* next TX descriptor to reclaim (TX channels only) */
	struct net_device *netdev;	/* owning net device */
	struct napi_struct napi;	/* NAPI context driven by this channel's IRQ */
	struct ltq_dma_channel dma;	/* low-level DMA channel / descriptor ring */
	struct sk_buff *skb[LTQ_DESC_NUM];	/* skb backing each ring descriptor */
};
88ae7668d0SJeff Kirsher
/* Driver-private state, embedded in the net_device. */
struct ltq_etop_priv {
	struct net_device *netdev;
	struct platform_device *pdev;
	struct ltq_eth_data *pldata;	/* platform data: MII mode, MAC address */
	struct resource *res;		/* MMIO register resource */

	struct mii_bus *mii_bus;	/* MDIO bus used to reach the PHY */

	struct ltq_etop_chan ch[MAX_DMA_CHAN];	/* only TX(1)/RX(6) slots are used */
	int tx_free[MAX_DMA_CHAN >> 1];

	int tx_burst_len;		/* DMA burst lengths, in words */
	int rx_burst_len;

	spinlock_t lock;		/* guards descriptor rings and HW registers */
};
105ae7668d0SJeff Kirsher
106ae7668d0SJeff Kirsher static int
ltq_etop_alloc_skb(struct ltq_etop_chan * ch)107ae7668d0SJeff Kirsher ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
108ae7668d0SJeff Kirsher {
10974e0deb8SChristoph Hellwig struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
11074e0deb8SChristoph Hellwig
111c056b734SPradeep A Dalvi ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
112ae7668d0SJeff Kirsher if (!ch->skb[ch->dma.desc])
113ae7668d0SJeff Kirsher return -ENOMEM;
1147a6653adSAleksander Jan Bajkowski ch->dma.desc_base[ch->dma.desc].addr =
1157a6653adSAleksander Jan Bajkowski dma_map_single(&priv->pdev->dev, ch->skb[ch->dma.desc]->data,
1167a6653adSAleksander Jan Bajkowski MAX_DMA_DATA_LEN, DMA_FROM_DEVICE);
117ae7668d0SJeff Kirsher ch->dma.desc_base[ch->dma.desc].addr =
118ae7668d0SJeff Kirsher CPHYSADDR(ch->skb[ch->dma.desc]->data);
119ae7668d0SJeff Kirsher ch->dma.desc_base[ch->dma.desc].ctl =
120ae7668d0SJeff Kirsher LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
121ae7668d0SJeff Kirsher MAX_DMA_DATA_LEN;
122ae7668d0SJeff Kirsher skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
123ae7668d0SJeff Kirsher return 0;
124ae7668d0SJeff Kirsher }
125ae7668d0SJeff Kirsher
/*
 * Hand one completed RX descriptor's skb up the network stack and
 * refill the descriptor with a fresh buffer. Must only be called for a
 * descriptor that hardware has marked complete.
 */
static void
ltq_etop_hw_receive(struct ltq_etop_chan *ch)
{
	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	/* hardware length includes the trailing CRC; strip it */
	int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	/* refill before advancing; on OOM the channel is shut down */
	if (ltq_etop_alloc_skb(ch)) {
		netdev_err(ch->netdev,
			   "failed to allocate new rx buffer, stopping DMA\n");
		ltq_dma_close(&ch->dma);
	}
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	spin_unlock_irqrestore(&priv->lock, flags);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, ch->netdev);
	netif_receive_skb(skb);
}
149ae7668d0SJeff Kirsher
/*
 * NAPI RX poll: consume up to @budget completed descriptors and, once
 * the ring is drained below budget, complete NAPI and re-arm the
 * channel interrupt. Returns the number of packets processed.
 */
static int
ltq_etop_poll_rx(struct napi_struct *napi, int budget)
{
	struct ltq_etop_chan *ch = container_of(napi,
						struct ltq_etop_chan, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		/* descriptor is ours only when complete (C) and not HW-owned */
		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
			break;
		ltq_etop_hw_receive(ch);
		work_done++;
	}
	if (work_done < budget) {
		napi_complete_done(&ch->napi, work_done);
		ltq_dma_ack_irq(&ch->dma);
	}
	return work_done;
}
171ae7668d0SJeff Kirsher
/*
 * NAPI TX poll: reclaim all completed TX descriptors, free their skbs,
 * wake the TX queue if it was stopped, then re-arm the interrupt.
 * NOTE(review): @budget is ignored and 1 is always returned; the whole
 * ring is reclaimed in one pass — confirm this is intentional.
 */
static int
ltq_etop_poll_tx(struct napi_struct *napi, int budget)
{
	struct ltq_etop_chan *ch =
		container_of(napi, struct ltq_etop_chan, napi);
	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
	struct netdev_queue *txq =
		netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	/* a descriptor is reclaimable when complete (C) and no longer HW-owned */
	while ((ch->dma.desc_base[ch->tx_free].ctl &
		(LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
		dev_kfree_skb_any(ch->skb[ch->tx_free]);
		ch->skb[ch->tx_free] = NULL;
		memset(&ch->dma.desc_base[ch->tx_free], 0,
		       sizeof(struct ltq_dma_desc));
		ch->tx_free++;
		ch->tx_free %= LTQ_DESC_NUM;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (netif_tx_queue_stopped(txq))
		netif_tx_start_queue(txq);
	napi_complete(&ch->napi);
	ltq_dma_ack_irq(&ch->dma);
	return 1;
}
200ae7668d0SJeff Kirsher
201ae7668d0SJeff Kirsher static irqreturn_t
ltq_etop_dma_irq(int irq,void * _priv)202ae7668d0SJeff Kirsher ltq_etop_dma_irq(int irq, void *_priv)
203ae7668d0SJeff Kirsher {
204ae7668d0SJeff Kirsher struct ltq_etop_priv *priv = _priv;
205ae7668d0SJeff Kirsher int ch = irq - LTQ_DMA_CH0_INT;
206ae7668d0SJeff Kirsher
207ae7668d0SJeff Kirsher napi_schedule(&priv->ch[ch].napi);
208ae7668d0SJeff Kirsher return IRQ_HANDLED;
209ae7668d0SJeff Kirsher }
210ae7668d0SJeff Kirsher
/*
 * Release a channel's DMA descriptors and IRQ; for the RX channel also
 * free every skb still attached to the ring.
 */
static void
ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	ltq_dma_free(&ch->dma);
	/* irq == 0 means the channel was never fully initialized */
	if (ch->dma.irq)
		free_irq(ch->dma.irq, priv);
	if (IS_RX(ch->idx)) {
		struct ltq_dma_channel *dma = &ch->dma;

		for (dma->desc = 0; dma->desc < LTQ_DESC_NUM; dma->desc++)
			dev_kfree_skb_any(ch->skb[ch->dma.desc]);
	}
}
226ae7668d0SJeff Kirsher
227ae7668d0SJeff Kirsher static void
ltq_etop_hw_exit(struct net_device * dev)228ae7668d0SJeff Kirsher ltq_etop_hw_exit(struct net_device *dev)
229ae7668d0SJeff Kirsher {
230ae7668d0SJeff Kirsher struct ltq_etop_priv *priv = netdev_priv(dev);
231ae7668d0SJeff Kirsher int i;
232ae7668d0SJeff Kirsher
233ae7668d0SJeff Kirsher ltq_pmu_disable(PMU_PPE);
234ae7668d0SJeff Kirsher for (i = 0; i < MAX_DMA_CHAN; i++)
235ae7668d0SJeff Kirsher if (IS_TX(i) || IS_RX(i))
236ae7668d0SJeff Kirsher ltq_etop_free_channel(dev, &priv->ch[i]);
237ae7668d0SJeff Kirsher }
238ae7668d0SJeff Kirsher
/*
 * Bring up the ETOP core: power on the PPE, program the MII mode,
 * enable hardware CRC generation, initialize the DMA port and set up
 * one TX and one RX channel (descriptor rings, RX buffers, IRQs).
 *
 * Returns 0 on success or a negative errno. On failure, already
 * acquired resources are released by the caller via ltq_etop_hw_exit().
 */
static int
ltq_etop_hw_init(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;
	int err;

	ltq_pmu_enable(PMU_PPE);

	switch (priv->pldata->mii_mode) {
	case PHY_INTERFACE_MODE_RMII:
		ltq_etop_w32_mask(ETOP_MII_MASK, ETOP_MII_REVERSE,
				  LTQ_ETOP_CFG);
		break;

	case PHY_INTERFACE_MODE_MII:
		ltq_etop_w32_mask(ETOP_MII_MASK, ETOP_MII_NORMAL,
				  LTQ_ETOP_CFG);
		break;

	default:
		netdev_err(dev, "unknown mii mode %d\n",
			   priv->pldata->mii_mode);
		return -ENOTSUPP;
	}

	/* enable crc generation */
	ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);

	ltq_dma_init_port(DMA_PORT_ETOP, priv->tx_burst_len, priv->rx_burst_len);

	for (i = 0; i < MAX_DMA_CHAN; i++) {
		int irq = LTQ_DMA_CH0_INT + i;
		struct ltq_etop_chan *ch = &priv->ch[i];

		ch->dma.nr = i;
		ch->idx = ch->dma.nr;
		ch->dma.dev = &priv->pdev->dev;

		if (IS_TX(i)) {
			ltq_dma_alloc_tx(&ch->dma);
			err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
			if (err) {
				netdev_err(dev,
					   "Unable to get Tx DMA IRQ %d\n",
					   irq);
				return err;
			}
		} else if (IS_RX(i)) {
			ltq_dma_alloc_rx(&ch->dma);
			/* pre-fill every RX descriptor with a fresh skb */
			for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
			     ch->dma.desc++)
				if (ltq_etop_alloc_skb(ch))
					return -ENOMEM;
			ch->dma.desc = 0;
			err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
			if (err) {
				netdev_err(dev,
					   "Unable to get Rx DMA IRQ %d\n",
					   irq);
				return err;
			}
		}
		ch->dma.irq = irq;
	}
	return 0;
}
306ae7668d0SJeff Kirsher
307ae7668d0SJeff Kirsher static void
ltq_etop_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)308ae7668d0SJeff Kirsher ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
309ae7668d0SJeff Kirsher {
3107b1cd6a6SAleksander Jan Bajkowski strscpy(info->driver, "Lantiq ETOP", sizeof(info->driver));
3117b1cd6a6SAleksander Jan Bajkowski strscpy(info->bus_info, "internal", sizeof(info->bus_info));
3127b1cd6a6SAleksander Jan Bajkowski strscpy(info->version, DRV_VERSION, sizeof(info->version));
313ae7668d0SJeff Kirsher }
314ae7668d0SJeff Kirsher
/* ethtool operations; link settings are delegated to phylib helpers. */
static const struct ethtool_ops ltq_etop_ethtool_ops = {
	.get_drvinfo = ltq_etop_get_drvinfo,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
321ae7668d0SJeff Kirsher
/*
 * MDIO bus write: post @phy_data to register @phy_reg of PHY @phy_addr
 * once the previous MDIO transaction has finished. Always returns 0.
 * NOTE(review): the busy-wait has no timeout; a hung MDIO block would
 * spin forever — confirm this is acceptable on this SoC.
 */
static int
ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
{
	u32 val = MDIO_REQUEST |
		((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
		((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) |
		phy_data;

	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
		;
	ltq_etop_w32(val, LTQ_ETOP_MDIO);
	return 0;
}
335ae7668d0SJeff Kirsher
/*
 * MDIO bus read: issue a read of register @phy_reg on PHY @phy_addr and
 * return the 16-bit result. Busy-waits before issuing the request and
 * again for its completion.
 * NOTE(review): both wait loops are unbounded — see ltq_etop_mdio_wr().
 */
static int
ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	u32 val = MDIO_REQUEST | MDIO_READ |
		((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
		((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET);

	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
		;
	ltq_etop_w32(val, LTQ_ETOP_MDIO);
	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
		;
	val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK;
	return val;
}
351ae7668d0SJeff Kirsher
/* PHY link-change callback; the hardware needs no reconfiguration. */
static void
ltq_etop_mdio_link(struct net_device *dev)
{
	/* nothing to do */
}
357ae7668d0SJeff Kirsher
/*
 * Find the first PHY on our MDIO bus and connect it to @dev, capping
 * its advertised speed at 100 Mbit/s.
 * Returns 0 on success, -ENODEV if no PHY exists, or the phy_connect()
 * error.
 */
static int
ltq_etop_mdio_probe(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;

	phydev = phy_find_first(priv->mii_bus);

	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(dev, phydev_name(phydev),
			     &ltq_etop_mdio_link, priv->pldata->mii_mode);

	if (IS_ERR(phydev)) {
		netdev_err(dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	phy_set_max_speed(phydev, SPEED_100);

	phy_attached_info(phydev);

	return 0;
}
385ae7668d0SJeff Kirsher
/*
 * Allocate and register the MDIO bus, then attach the PHY.
 * On failure the goto chain unwinds in reverse acquisition order.
 * Returns 0 on success or a negative errno.
 */
static int
ltq_etop_mdio_init(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int err;

	priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus) {
		netdev_err(dev, "failed to allocate mii bus\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->mii_bus->priv = dev;
	priv->mii_bus->read = ltq_etop_mdio_rd;
	priv->mii_bus->write = ltq_etop_mdio_wr;
	priv->mii_bus->name = "ltq_mii";
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 priv->pdev->name, priv->pdev->id);
	if (mdiobus_register(priv->mii_bus)) {
		err = -ENXIO;
		goto err_out_free_mdiobus;
	}

	if (ltq_etop_mdio_probe(dev)) {
		err = -ENXIO;
		goto err_out_unregister_bus;
	}
	return 0;

err_out_unregister_bus:
	mdiobus_unregister(priv->mii_bus);
err_out_free_mdiobus:
	mdiobus_free(priv->mii_bus);
err_out:
	return err;
}
423ae7668d0SJeff Kirsher
/*
 * Undo ltq_etop_mdio_init(): disconnect the PHY first, then unregister
 * and free the MDIO bus (order matters).
 */
static void
ltq_etop_mdio_cleanup(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	phy_disconnect(dev->phydev);
	mdiobus_unregister(priv->mii_bus);
	mdiobus_free(priv->mii_bus);
}
433ae7668d0SJeff Kirsher
/*
 * ndo_open: open and enable both DMA channels with their NAPI contexts,
 * start the PHY, then wake all TX queues. Always returns 0.
 */
static int
ltq_etop_open(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < MAX_DMA_CHAN; i++) {
		struct ltq_etop_chan *ch = &priv->ch[i];

		/* only the dedicated TX and RX channels are wired up */
		if (!IS_TX(i) && (!IS_RX(i)))
			continue;
		ltq_dma_open(&ch->dma);
		ltq_dma_enable_irq(&ch->dma);
		napi_enable(&ch->napi);
	}
	phy_start(dev->phydev);
	netif_tx_start_all_queues(dev);
	return 0;
}
453ae7668d0SJeff Kirsher
454ae7668d0SJeff Kirsher static int
ltq_etop_stop(struct net_device * dev)455ae7668d0SJeff Kirsher ltq_etop_stop(struct net_device *dev)
456ae7668d0SJeff Kirsher {
457ae7668d0SJeff Kirsher struct ltq_etop_priv *priv = netdev_priv(dev);
458ae7668d0SJeff Kirsher int i;
459ae7668d0SJeff Kirsher
460ae7668d0SJeff Kirsher netif_tx_stop_all_queues(dev);
461d1e3a356SPhilippe Reynes phy_stop(dev->phydev);
462ae7668d0SJeff Kirsher for (i = 0; i < MAX_DMA_CHAN; i++) {
463ae7668d0SJeff Kirsher struct ltq_etop_chan *ch = &priv->ch[i];
464ae7668d0SJeff Kirsher
465ae7668d0SJeff Kirsher if (!IS_RX(i) && !IS_TX(i))
466ae7668d0SJeff Kirsher continue;
467ae7668d0SJeff Kirsher napi_disable(&ch->napi);
468ae7668d0SJeff Kirsher ltq_dma_close(&ch->dma);
469ae7668d0SJeff Kirsher }
470ae7668d0SJeff Kirsher return 0;
471ae7668d0SJeff Kirsher }
472ae7668d0SJeff Kirsher
/*
 * ndo_start_xmit: hand one skb to the TX DMA channel.
 * Short frames are padded to ETH_ZLEN, the buffer is DMA-mapped and
 * described by a single descriptor; the queue is stopped when the next
 * descriptor is still owned by hardware.
 */
static netdev_tx_t
ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
{
	int queue = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
	struct ltq_etop_priv *priv = netdev_priv(dev);
	/* odd channel slots are TX channels (queue 0 -> channel 1) */
	struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	int len;
	unsigned long flags;
	u32 byte_offset;

	/* skb_put_padto() frees the skb on failure, so report TX_OK */
	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;
	len = skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		netdev_err(dev, "tx ring full\n");
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	/* dma needs to start on a burst length value aligned address */
	byte_offset = CPHYSADDR(skb->data) % (priv->tx_burst_len * 4);
	ch->skb[ch->dma.desc] = skb;

	netif_trans_update(dev);

	spin_lock_irqsave(&priv->lock, flags);
	/* NOTE(review): mapping result is not checked with
	 * dma_mapping_error() — confirm it cannot fail on this platform */
	desc->addr = ((unsigned int)dma_map_single(&priv->pdev->dev, skb->data, len,
						   DMA_TO_DEVICE)) - byte_offset;
	/* Make sure the address is written before we give it to HW */
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	spin_unlock_irqrestore(&priv->lock, flags);

	/* stop early if the next descriptor is still owned by hardware */
	if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
		netif_tx_stop_queue(txq);

	return NETDEV_TX_OK;
}
517ae7668d0SJeff Kirsher
518ae7668d0SJeff Kirsher static int
ltq_etop_change_mtu(struct net_device * dev,int new_mtu)519ae7668d0SJeff Kirsher ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
520ae7668d0SJeff Kirsher {
521ae7668d0SJeff Kirsher struct ltq_etop_priv *priv = netdev_priv(dev);
522ae7668d0SJeff Kirsher unsigned long flags;
523ae7668d0SJeff Kirsher
524a52ad514SJarod Wilson dev->mtu = new_mtu;
525a52ad514SJarod Wilson
526ae7668d0SJeff Kirsher spin_lock_irqsave(&priv->lock, flags);
527a52ad514SJarod Wilson ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, LTQ_ETOP_IGPLEN);
528ae7668d0SJeff Kirsher spin_unlock_irqrestore(&priv->lock, flags);
529a52ad514SJarod Wilson
530a52ad514SJarod Wilson return 0;
531ae7668d0SJeff Kirsher }
532ae7668d0SJeff Kirsher
533ae7668d0SJeff Kirsher static int
ltq_etop_set_mac_address(struct net_device * dev,void * p)534ae7668d0SJeff Kirsher ltq_etop_set_mac_address(struct net_device *dev, void *p)
535ae7668d0SJeff Kirsher {
536ae7668d0SJeff Kirsher int ret = eth_mac_addr(dev, p);
537ae7668d0SJeff Kirsher
538ae7668d0SJeff Kirsher if (!ret) {
539ae7668d0SJeff Kirsher struct ltq_etop_priv *priv = netdev_priv(dev);
540ae7668d0SJeff Kirsher unsigned long flags;
541ae7668d0SJeff Kirsher
542ae7668d0SJeff Kirsher /* store the mac for the unicast filter */
543ae7668d0SJeff Kirsher spin_lock_irqsave(&priv->lock, flags);
544ae7668d0SJeff Kirsher ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0);
545ae7668d0SJeff Kirsher ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16,
546ae7668d0SJeff Kirsher LTQ_ETOP_MAC_DA1);
547ae7668d0SJeff Kirsher spin_unlock_irqrestore(&priv->lock, flags);
548ae7668d0SJeff Kirsher }
549ae7668d0SJeff Kirsher return ret;
550ae7668d0SJeff Kirsher }
551ae7668d0SJeff Kirsher
552ae7668d0SJeff Kirsher static void
ltq_etop_set_multicast_list(struct net_device * dev)553ae7668d0SJeff Kirsher ltq_etop_set_multicast_list(struct net_device *dev)
554ae7668d0SJeff Kirsher {
555ae7668d0SJeff Kirsher struct ltq_etop_priv *priv = netdev_priv(dev);
556ae7668d0SJeff Kirsher unsigned long flags;
557ae7668d0SJeff Kirsher
558ae7668d0SJeff Kirsher /* ensure that the unicast filter is not enabled in promiscious mode */
559ae7668d0SJeff Kirsher spin_lock_irqsave(&priv->lock, flags);
560ae7668d0SJeff Kirsher if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI))
561ae7668d0SJeff Kirsher ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0);
562ae7668d0SJeff Kirsher else
563ae7668d0SJeff Kirsher ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0);
564ae7668d0SJeff Kirsher spin_unlock_irqrestore(&priv->lock, flags);
565ae7668d0SJeff Kirsher }
566ae7668d0SJeff Kirsher
/*
 * ndo_init: one-time device initialization — bring up the hardware, set
 * the default MTU, program the MAC address (random if the platform one
 * is invalid), configure the RX filter and register the MDIO bus.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the err_netdev path calls unregister_netdev() and
 * free_netdev() although ndo_init runs before registration completes —
 * verify this cleanup is correct for this call context.
 */
static int
ltq_etop_init(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	struct sockaddr mac;
	int err;
	bool random_mac = false;

	dev->watchdog_timeo = 10 * HZ;
	err = ltq_etop_hw_init(dev);
	if (err)
		goto err_hw;
	ltq_etop_change_mtu(dev, 1500);

	memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
	if (!is_valid_ether_addr(mac.sa_data)) {
		pr_warn("etop: invalid MAC, using random\n");
		eth_random_addr(mac.sa_data);
		random_mac = true;
	}

	err = ltq_etop_set_mac_address(dev, &mac);
	if (err)
		goto err_netdev;

	/* Set addr_assign_type here, ltq_etop_set_mac_address would reset it. */
	if (random_mac)
		dev->addr_assign_type = NET_ADDR_RANDOM;

	ltq_etop_set_multicast_list(dev);
	err = ltq_etop_mdio_init(dev);
	if (err)
		goto err_netdev;
	return 0;

err_netdev:
	unregister_netdev(dev);
	free_netdev(dev);
err_hw:
	ltq_etop_hw_exit(dev);
	return err;
}
609ae7668d0SJeff Kirsher
610ae7668d0SJeff Kirsher static void
ltq_etop_tx_timeout(struct net_device * dev,unsigned int txqueue)6110290bd29SMichael S. Tsirkin ltq_etop_tx_timeout(struct net_device *dev, unsigned int txqueue)
612ae7668d0SJeff Kirsher {
613ae7668d0SJeff Kirsher int err;
614ae7668d0SJeff Kirsher
615ae7668d0SJeff Kirsher ltq_etop_hw_exit(dev);
616ae7668d0SJeff Kirsher err = ltq_etop_hw_init(dev);
617ae7668d0SJeff Kirsher if (err)
618ae7668d0SJeff Kirsher goto err_hw;
619860e9538SFlorian Westphal netif_trans_update(dev);
620ae7668d0SJeff Kirsher netif_wake_queue(dev);
621ae7668d0SJeff Kirsher return;
622ae7668d0SJeff Kirsher
623ae7668d0SJeff Kirsher err_hw:
624ae7668d0SJeff Kirsher ltq_etop_hw_exit(dev);
625ae7668d0SJeff Kirsher netdev_err(dev, "failed to restart etop after TX timeout\n");
626ae7668d0SJeff Kirsher }
627ae7668d0SJeff Kirsher
/* net_device operations; ioctls are forwarded to the PHY via phylib. */
static const struct net_device_ops ltq_eth_netdev_ops = {
	.ndo_open = ltq_etop_open,
	.ndo_stop = ltq_etop_stop,
	.ndo_start_xmit = ltq_etop_tx,
	.ndo_change_mtu = ltq_etop_change_mtu,
	.ndo_eth_ioctl = phy_do_ioctl,
	.ndo_set_mac_address = ltq_etop_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = ltq_etop_set_multicast_list,
	.ndo_select_queue = dev_pick_tx_zero,
	.ndo_init = ltq_etop_init,
	.ndo_tx_timeout = ltq_etop_tx_timeout,
};
641ae7668d0SJeff Kirsher
642ae7668d0SJeff Kirsher static int __init
ltq_etop_probe(struct platform_device * pdev)643ae7668d0SJeff Kirsher ltq_etop_probe(struct platform_device *pdev)
644ae7668d0SJeff Kirsher {
645ae7668d0SJeff Kirsher struct net_device *dev;
646ae7668d0SJeff Kirsher struct ltq_etop_priv *priv;
647ae7668d0SJeff Kirsher struct resource *res;
648ae7668d0SJeff Kirsher int err;
649ae7668d0SJeff Kirsher int i;
650ae7668d0SJeff Kirsher
651ae7668d0SJeff Kirsher res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
652ae7668d0SJeff Kirsher if (!res) {
653ae7668d0SJeff Kirsher dev_err(&pdev->dev, "failed to get etop resource\n");
654ae7668d0SJeff Kirsher err = -ENOENT;
655ae7668d0SJeff Kirsher goto err_out;
656ae7668d0SJeff Kirsher }
657ae7668d0SJeff Kirsher
658ae7668d0SJeff Kirsher res = devm_request_mem_region(&pdev->dev, res->start,
659ae7668d0SJeff Kirsher resource_size(res), dev_name(&pdev->dev));
660ae7668d0SJeff Kirsher if (!res) {
661ae7668d0SJeff Kirsher dev_err(&pdev->dev, "failed to request etop resource\n");
662ae7668d0SJeff Kirsher err = -EBUSY;
663ae7668d0SJeff Kirsher goto err_out;
664ae7668d0SJeff Kirsher }
665ae7668d0SJeff Kirsher
6667a6653adSAleksander Jan Bajkowski ltq_etop_membase = devm_ioremap(&pdev->dev, res->start,
6677a6653adSAleksander Jan Bajkowski resource_size(res));
668ae7668d0SJeff Kirsher if (!ltq_etop_membase) {
669ae7668d0SJeff Kirsher dev_err(&pdev->dev, "failed to remap etop engine %d\n",
670ae7668d0SJeff Kirsher pdev->id);
671ae7668d0SJeff Kirsher err = -ENOMEM;
672ae7668d0SJeff Kirsher goto err_out;
673ae7668d0SJeff Kirsher }
674ae7668d0SJeff Kirsher
675ae7668d0SJeff Kirsher dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
67641de8d4cSJoe Perches if (!dev) {
67741de8d4cSJoe Perches err = -ENOMEM;
67841de8d4cSJoe Perches goto err_out;
67941de8d4cSJoe Perches }
680ae7668d0SJeff Kirsher strcpy(dev->name, "eth%d");
681ae7668d0SJeff Kirsher dev->netdev_ops = <q_eth_netdev_ops;
682ae7668d0SJeff Kirsher dev->ethtool_ops = <q_etop_ethtool_ops;
683ae7668d0SJeff Kirsher priv = netdev_priv(dev);
684ae7668d0SJeff Kirsher priv->res = res;
685d1b86507SFlorian Fainelli priv->pdev = pdev;
686ae7668d0SJeff Kirsher priv->pldata = dev_get_platdata(&pdev->dev);
687ae7668d0SJeff Kirsher priv->netdev = dev;
688ae7668d0SJeff Kirsher spin_lock_init(&priv->lock);
6899cecb138SFlorian Fainelli SET_NETDEV_DEV(dev, &pdev->dev);
690ae7668d0SJeff Kirsher
69114d4e308SAleksander Jan Bajkowski err = device_property_read_u32(&pdev->dev, "lantiq,tx-burst-length", &priv->tx_burst_len);
69214d4e308SAleksander Jan Bajkowski if (err < 0) {
69314d4e308SAleksander Jan Bajkowski dev_err(&pdev->dev, "unable to read tx-burst-length property\n");
6942680ce7fSYang Yingliang goto err_free;
69514d4e308SAleksander Jan Bajkowski }
69614d4e308SAleksander Jan Bajkowski
69714d4e308SAleksander Jan Bajkowski err = device_property_read_u32(&pdev->dev, "lantiq,rx-burst-length", &priv->rx_burst_len);
69814d4e308SAleksander Jan Bajkowski if (err < 0) {
69914d4e308SAleksander Jan Bajkowski dev_err(&pdev->dev, "unable to read rx-burst-length property\n");
7002680ce7fSYang Yingliang goto err_free;
70114d4e308SAleksander Jan Bajkowski }
70214d4e308SAleksander Jan Bajkowski
703ae7668d0SJeff Kirsher for (i = 0; i < MAX_DMA_CHAN; i++) {
704ae7668d0SJeff Kirsher if (IS_TX(i))
705b707b89fSJakub Kicinski netif_napi_add_weight(dev, &priv->ch[i].napi,
706ae7668d0SJeff Kirsher ltq_etop_poll_tx, 8);
707ae7668d0SJeff Kirsher else if (IS_RX(i))
708b707b89fSJakub Kicinski netif_napi_add_weight(dev, &priv->ch[i].napi,
709ae7668d0SJeff Kirsher ltq_etop_poll_rx, 32);
710ae7668d0SJeff Kirsher priv->ch[i].netdev = dev;
711ae7668d0SJeff Kirsher }
712ae7668d0SJeff Kirsher
713ae7668d0SJeff Kirsher err = register_netdev(dev);
714ae7668d0SJeff Kirsher if (err)
715ae7668d0SJeff Kirsher goto err_free;
716ae7668d0SJeff Kirsher
717ae7668d0SJeff Kirsher platform_set_drvdata(pdev, dev);
718ae7668d0SJeff Kirsher return 0;
719ae7668d0SJeff Kirsher
720ae7668d0SJeff Kirsher err_free:
721cb0e51d8SWei Yongjun free_netdev(dev);
722ae7668d0SJeff Kirsher err_out:
723ae7668d0SJeff Kirsher return err;
724ae7668d0SJeff Kirsher }
725ae7668d0SJeff Kirsher
726a0a4efedSBill Pemberton static int
ltq_etop_remove(struct platform_device * pdev)727ae7668d0SJeff Kirsher ltq_etop_remove(struct platform_device *pdev)
728ae7668d0SJeff Kirsher {
729ae7668d0SJeff Kirsher struct net_device *dev = platform_get_drvdata(pdev);
730ae7668d0SJeff Kirsher
731ae7668d0SJeff Kirsher if (dev) {
732ae7668d0SJeff Kirsher netif_tx_stop_all_queues(dev);
733ae7668d0SJeff Kirsher ltq_etop_hw_exit(dev);
734ae7668d0SJeff Kirsher ltq_etop_mdio_cleanup(dev);
735ae7668d0SJeff Kirsher unregister_netdev(dev);
736ae7668d0SJeff Kirsher }
737ae7668d0SJeff Kirsher return 0;
738ae7668d0SJeff Kirsher }
739ae7668d0SJeff Kirsher
740ae7668d0SJeff Kirsher static struct platform_driver ltq_mii_driver = {
741a0a4efedSBill Pemberton .remove = ltq_etop_remove,
742ae7668d0SJeff Kirsher .driver = {
743ae7668d0SJeff Kirsher .name = "ltq_etop",
744ae7668d0SJeff Kirsher },
745ae7668d0SJeff Kirsher };
746ae7668d0SJeff Kirsher
747e97b21e9SRandy Dunlap static int __init
init_ltq_etop(void)748ae7668d0SJeff Kirsher init_ltq_etop(void)
749ae7668d0SJeff Kirsher {
750ae7668d0SJeff Kirsher int ret = platform_driver_probe(<q_mii_driver, ltq_etop_probe);
751ae7668d0SJeff Kirsher
752ae7668d0SJeff Kirsher if (ret)
753772301b6SMasanari Iida pr_err("ltq_etop: Error registering platform driver!");
754ae7668d0SJeff Kirsher return ret;
755ae7668d0SJeff Kirsher }
756ae7668d0SJeff Kirsher
757ae7668d0SJeff Kirsher static void __exit
exit_ltq_etop(void)758ae7668d0SJeff Kirsher exit_ltq_etop(void)
759ae7668d0SJeff Kirsher {
760ae7668d0SJeff Kirsher platform_driver_unregister(<q_mii_driver);
761ae7668d0SJeff Kirsher }
762ae7668d0SJeff Kirsher
763ae7668d0SJeff Kirsher module_init(init_ltq_etop);
764ae7668d0SJeff Kirsher module_exit(exit_ltq_etop);
765ae7668d0SJeff Kirsher
766ae7668d0SJeff Kirsher MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
767ae7668d0SJeff Kirsher MODULE_DESCRIPTION("Lantiq SoC ETOP");
768ae7668d0SJeff Kirsher MODULE_LICENSE("GPL");
769