/* MOXA ART Ethernet (RTL8201CP) driver.
 *
 * Copyright (C) 2013 Jonas Jensen
 *
 * Jonas Jensen <jonas.jensen@gmail.com>
 *
 * Based on code from
 * Moxa Technology Co., Ltd. <www.moxa.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>

#include "moxart_ether.h"

static inline void moxart_emac_write(struct net_device *ndev,
				     unsigned int reg, unsigned long value)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	writel(value, priv->base + reg);
}

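/* Program the current dev_addr into the MAC: the two high-order octets go
 * into REG_MAC_MS_ADDRESS, the remaining four into the following word.
 */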
static void moxart_update_mac_address(struct net_device *ndev)
{
	moxart_emac_write(ndev, REG_MAC_MS_ADDRESS,
			  ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])));
	moxart_emac_write(ndev, REG_MAC_MS_ADDRESS + 4,
			  ((ndev->dev_addr[2] << 24) |
			   (ndev->dev_addr[3] << 16) |
			   (ndev->dev_addr[4] << 8) |
			   (ndev->dev_addr[5])));
}

static int moxart_set_mac_address(struct net_device *ndev, void *addr)
{
	struct sockaddr *address = addr;

	if (!is_valid_ether_addr(address->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
	moxart_update_mac_address(ndev);

	return 0;
}

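/* Release all DMA resources: unmap the RX buffers, free the coherent
 * descriptor rings and the kmalloc'ed TX/RX buffer areas.
 */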
static void moxart_mac_free_memory(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < RX_DESC_NUM; i++)
		dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
				 priv->rx_buf_size, DMA_FROM_DEVICE);

	if (priv->tx_desc_base)
		dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM,
				  priv->tx_desc_base, priv->tx_base);

	if (priv->rx_desc_base)
		dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM,
				  priv->rx_desc_base, priv->rx_base);

	kfree(priv->tx_buf_base);
	kfree(priv->rx_buf_base);
}

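/* Soft-reset the MAC, wait for the controller to clear SW_RST, mask all
 * interrupts and load the default MACCR value that moxart_mac_enable()
 * will program.
 */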
static void moxart_mac_reset(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	writel(SW_RST, priv->base + REG_MAC_CTRL);
	while (readl(priv->base + REG_MAC_CTRL) & SW_RST)
		mdelay(10);

	writel(0, priv->base + REG_INTERRUPT_MASK);

	priv->reg_maccr = RX_BROADPKT | FULLDUP | CRC_APD | RX_FTL;
}

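/* Bring the controller up: program the interrupt/poll timers and DMA burst
 * length (the magic values appear to come from the original vendor driver),
 * unmask the RX/TX completion interrupts and enable the RX/TX engines and
 * their DMA channels.
 */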
static void moxart_mac_enable(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	writel(0x00001010, priv->base + REG_INT_TIMER_CTRL);
	writel(0x00000001, priv->base + REG_APOLL_TIMER_CTRL);
	writel(0x00000390, priv->base + REG_DMA_BLEN_CTRL);

	priv->reg_imr |= (RPKT_FINISH_M | XPKT_FINISH_M);
	writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);

	priv->reg_maccr |= (RCV_EN | XMT_EN | RDMA_EN | XDMA_EN);
	writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
}

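/* Initialize both descriptor rings: clear every descriptor, mark the last
 * entry of each ring with the END flag, hand all RX descriptors (and their
 * freshly mapped buffers) to the DMA engine, and tell the controller where
 * the rings live.
 */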
static void moxart_mac_setup_desc_ring(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	void __iomem *desc;
	int i;

	for (i = 0; i < TX_DESC_NUM; i++) {
		desc = priv->tx_desc_base + i * TX_REG_DESC_SIZE;
		memset(desc, 0, TX_REG_DESC_SIZE);

		priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
	}
	writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);

	priv->tx_head = 0;
	priv->tx_tail = 0;

	for (i = 0; i < RX_DESC_NUM; i++) {
		desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
		memset(desc, 0, RX_REG_DESC_SIZE);
		writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
		writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
		       desc + RX_REG_OFFSET_DESC1);

		priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
		priv->rx_mapping[i] = dma_map_single(&ndev->dev,
						     priv->rx_buf[i],
						     priv->rx_buf_size,
						     DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
			netdev_err(ndev, "DMA mapping error\n");

		writel(priv->rx_mapping[i],
		       desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
		writel((unsigned long)priv->rx_buf[i],
		       desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
	}
	writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);

	priv->rx_head = 0;

	/* reset the MAC controller TX/RX descriptor base address */
	writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS);
	writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS);
}

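/* ndo_open: enable NAPI, reset and reprogram the controller, rebuild the
 * descriptor rings and start the transmit queue.
 */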
static int moxart_mac_open(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	if (!is_valid_ether_addr(ndev->dev_addr))
		return -EADDRNOTAVAIL;

	napi_enable(&priv->napi);

	moxart_mac_reset(ndev);
	moxart_update_mac_address(ndev);
	moxart_mac_setup_desc_ring(ndev);
	moxart_mac_enable(ndev);
	netif_start_queue(ndev);

	netdev_dbg(ndev, "%s: IMR=0x%x, MACCR=0x%x\n",
		   __func__, readl(priv->base + REG_INTERRUPT_MASK),
		   readl(priv->base + REG_MAC_CTRL));

	return 0;
}

static int moxart_mac_stop(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	napi_disable(&priv->napi);

	netif_stop_queue(ndev);

	/* disable all interrupts */
	writel(0, priv->base + REG_INTERRUPT_MASK);

	/* disable all functions */
	writel(0, priv->base + REG_MAC_CTRL);

	return 0;
}

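/* NAPI poll: walk the RX ring until the hardware still owns a descriptor or
 * the budget is exhausted. Completed buffers are wrapped in an skb via
 * build_skb() and passed up through GRO; every descriptor, including the
 * error and allocation-failure cases, is handed back to the DMA engine.
 */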
static int moxart_rx_poll(struct napi_struct *napi, int budget)
{
	struct moxart_mac_priv_t *priv = container_of(napi,
						      struct moxart_mac_priv_t,
						      napi);
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb;
	void __iomem *desc;
	unsigned int desc0, len;
	int rx_head = priv->rx_head;
	int rx = 0;

	while (rx < budget) {
		desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
		desc0 = readl(desc + RX_REG_OFFSET_DESC0);

		if (desc0 & RX_DESC0_DMA_OWN)
			break;

		if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL |
			     RX_DESC0_RUNT | RX_DESC0_ODD_NB)) {
			net_dbg_ratelimited("packet error\n");
			priv->stats.rx_dropped++;
			priv->stats.rx_errors++;
			goto rx_next;
		}

		len = desc0 & RX_DESC0_FRAME_LEN_MASK;

		if (len > RX_BUF_SIZE)
			len = RX_BUF_SIZE;

		skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size);
		if (unlikely(!skb)) {
			net_dbg_ratelimited("build_skb failed\n");
			priv->stats.rx_dropped++;
			priv->stats.rx_errors++;
			goto rx_next;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&priv->napi, skb);
		rx++;

		ndev->last_rx = jiffies;
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += len;
		if (desc0 & RX_DESC0_MULTICAST)
			priv->stats.multicast++;

rx_next:
		/* hand the descriptor back to the DMA engine and advance */
		writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);

		rx_head = RX_NEXT(rx_head);
		priv->rx_head = rx_head;
	}

	if (rx < budget) {
		napi_gro_flush(napi, false);
		__napi_complete(napi);
	}

	priv->reg_imr |= RPKT_FINISH_M;
	writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);

	return rx;
}

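/* Reclaim TX descriptors between tx_tail and tx_head once the controller
 * signals transmit completion: unmap each buffer, account the packet and
 * free the skb. Called from the interrupt handler, hence dev_kfree_skb_irq().
 */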
static void moxart_tx_finished(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	unsigned tx_head = priv->tx_head;
	unsigned tx_tail = priv->tx_tail;

	while (tx_tail != tx_head) {
		dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
				 priv->tx_len[tx_tail], DMA_TO_DEVICE);

		priv->stats.tx_packets++;
		priv->stats.tx_bytes += priv->tx_skb[tx_tail]->len;

		dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;

		tx_tail = TX_NEXT(tx_tail);
	}
	priv->tx_tail = tx_tail;
}

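/* Interrupt handler: reclaim finished TX descriptors directly, and on RX
 * completion mask the RX interrupt and defer ring processing to NAPI.
 */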
static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	unsigned int ists = readl(priv->base + REG_INTERRUPT_STATUS);

	if (ists & XPKT_OK_INT_STS)
		moxart_tx_finished(ndev);

	if (ists & RPKT_FINISH) {
		if (napi_schedule_prep(&priv->napi)) {
			priv->reg_imr &= ~RPKT_FINISH_M;
			writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	return IRQ_HANDLED;
}

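/* ndo_start_xmit: the skb data is DMA-mapped in place, its addresses and
 * length are written into the next free TX descriptor, ownership is flipped
 * to the hardware and a poll-demand write kicks the TX DMA engine. Returns
 * NETDEV_TX_BUSY when no descriptor is free.
 */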
static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	void __iomem *desc;
	unsigned int len;
	unsigned int tx_head = priv->tx_head;
	u32 txdes1;
	int ret = NETDEV_TX_BUSY;

	desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);

	spin_lock_irq(&priv->txlock);
	if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
		net_dbg_ratelimited("no TX space for packet\n");
		priv->stats.tx_dropped++;
		goto out_unlock;
	}

	len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;

	priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
						   len, DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
		netdev_err(ndev, "DMA mapping error\n");
		goto out_unlock;
	}

	priv->tx_len[tx_head] = len;
	priv->tx_skb[tx_head] = skb;

	writel(priv->tx_mapping[tx_head],
	       desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
	writel((unsigned long)skb->data,
	       desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);

	if (skb->len < ETH_ZLEN) {
		memset(&skb->data[skb->len],
		       0, ETH_ZLEN - skb->len);
		len = ETH_ZLEN;
	}

	txdes1 = readl(desc + TX_REG_OFFSET_DESC1);
	txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS;
	txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
	txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK);
	writel(txdes1, desc + TX_REG_OFFSET_DESC1);
	writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);

	/* start to send packet */
	writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);

	priv->tx_head = TX_NEXT(tx_head);

	ndev->trans_start = jiffies;
	ret = NETDEV_TX_OK;
out_unlock:
	spin_unlock_irq(&priv->txlock);

	return ret;
}

static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	return &priv->stats;
}

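/* Program the 64-bit multicast hash filter: hash each multicast address with
 * CRC-32, take the top six bits of the result as the bit index, and set the
 * corresponding bit in hash table register 0 or 1.
 */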
static void moxart_mac_setmulticast(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int crc_val;

	netdev_for_each_mc_addr(ha, ndev) {
		crc_val = crc32_le(~0, ha->addr, ETH_ALEN);
		crc_val = (crc_val >> 26) & 0x3f;
		if (crc_val >= 32) {
			writel(readl(priv->base + REG_MCAST_HASH_TABLE1) |
			       (1UL << (crc_val - 32)),
			       priv->base + REG_MCAST_HASH_TABLE1);
		} else {
			writel(readl(priv->base + REG_MCAST_HASH_TABLE0) |
			       (1UL << crc_val),
			       priv->base + REG_MCAST_HASH_TABLE0);
		}
	}
}

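/* ndo_set_rx_mode: translate the interface flags into MACCR bits
 * (promiscuous, all-multicast, hash-filtered multicast) and rewrite the
 * control register.
 */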
static void moxart_mac_set_rx_mode(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	spin_lock_irq(&priv->txlock);

	(ndev->flags & IFF_PROMISC) ? (priv->reg_maccr |= RCV_ALL) :
				      (priv->reg_maccr &= ~RCV_ALL);

	(ndev->flags & IFF_ALLMULTI) ? (priv->reg_maccr |= RX_MULTIPKT) :
				       (priv->reg_maccr &= ~RX_MULTIPKT);

	if ((ndev->flags & IFF_MULTICAST) && netdev_mc_count(ndev)) {
		priv->reg_maccr |= HT_MULTI_EN;
		moxart_mac_setmulticast(ndev);
	} else {
		priv->reg_maccr &= ~HT_MULTI_EN;
	}

	writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);

	spin_unlock_irq(&priv->txlock);
}

static const struct net_device_ops moxart_netdev_ops = {
	.ndo_open		= moxart_mac_open,
	.ndo_stop		= moxart_mac_stop,
	.ndo_start_xmit		= moxart_mac_start_xmit,
	.ndo_get_stats		= moxart_mac_get_stats,
	.ndo_set_rx_mode	= moxart_mac_set_rx_mode,
	.ndo_set_mac_address	= moxart_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

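/* Platform probe: allocate the net_device, obtain the interrupt from the
 * device tree, map the register window, allocate the descriptor rings and
 * packet buffers, request the interrupt and register the netdev.
 */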
static int moxart_mac_probe(struct platform_device *pdev)
{
	struct device *p_dev = &pdev->dev;
	struct device_node *node = p_dev->of_node;
	struct net_device *ndev;
	struct moxart_mac_priv_t *priv;
	struct resource *res;
	unsigned int irq;
	int ret;

	ndev = alloc_etherdev(sizeof(struct moxart_mac_priv_t));
	if (!ndev)
		return -ENOMEM;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		netdev_err(ndev, "irq_of_parse_and_map failed\n");
		ret = -EINVAL;
		goto irq_map_fail;
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(p_dev, res);
	if (IS_ERR(priv->base)) {
		dev_err(p_dev, "devm_ioremap_resource failed\n");
		ret = PTR_ERR(priv->base);
		goto init_fail;
	}
	ndev->base_addr = res->start;

	spin_lock_init(&priv->txlock);

	priv->tx_buf_size = TX_BUF_SIZE;
	priv->rx_buf_size = RX_BUF_SIZE +
			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
						TX_DESC_NUM, &priv->tx_base,
						GFP_DMA | GFP_KERNEL);
	if (priv->tx_desc_base == NULL) {
		ret = -ENOMEM;
		goto init_fail;
	}

	priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
						RX_DESC_NUM, &priv->rx_base,
						GFP_DMA | GFP_KERNEL);
	if (priv->rx_desc_base == NULL) {
		ret = -ENOMEM;
		goto init_fail;
	}

	priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
				    GFP_ATOMIC);
	if (!priv->tx_buf_base) {
		ret = -ENOMEM;
		goto init_fail;
	}

	priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
				    GFP_ATOMIC);
	if (!priv->rx_buf_base) {
		ret = -ENOMEM;
		goto init_fail;
	}

	platform_set_drvdata(pdev, ndev);

	ret = devm_request_irq(p_dev, irq, moxart_mac_interrupt, 0,
			       pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto init_fail;
	}

	ether_setup(ndev);
	ndev->netdev_ops = &moxart_netdev_ops;
	netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM);
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->irq = irq;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	ret = register_netdev(ndev);
	if (ret)
		goto init_fail;

	netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
		   __func__, ndev->irq, ndev->dev_addr);

	return 0;

init_fail:
	netdev_err(ndev, "init failed\n");
	moxart_mac_free_memory(ndev);
irq_map_fail:
	free_netdev(ndev);
	return ret;
}

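/* Platform remove: tear down in reverse order of probe. The IRQ was requested
 * with devm_request_irq(), so it is released with devm_free_irq() before the
 * net_device goes away.
 */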
static int moxart_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_netdev(ndev);
	devm_free_irq(&pdev->dev, ndev->irq, ndev);
	moxart_mac_free_memory(ndev);
	free_netdev(ndev);

	return 0;
}

static const struct of_device_id moxart_mac_match[] = {
	{ .compatible = "moxa,moxart-mac" },
	{ }
};

static struct platform_driver moxart_mac_driver = {
	.probe	= moxart_mac_probe,
	.remove	= moxart_remove,
	.driver	= {
		.name		= "moxart-ethernet",
		.owner		= THIS_MODULE,
		.of_match_table	= moxart_mac_match,
	},
};
module_platform_driver(moxart_mac_driver);

MODULE_DESCRIPTION("MOXART RTL8201CP Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");