/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"

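/* Pre-fill the 16-byte buffer pool descriptors: the slot index lands in the
 * USERINFO field, FPQNUM carries the pool's destination ring number and
 * STASH is a cache-stashing hint for the hardware.
 */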
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

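/* Allocate and DMA-map nbuf receive skbs and post them to the buffer pool
 * ring; the final write to the command register tells the ring manager how
 * many new buffers were added.
 */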
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	iowrite32(nbuf, buf_pool->cmd);
	buf_pool->tail = tail;

	return 0;
}

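/* Destination ring number as encoded for the hardware: ring manager in the
 * upper bits, ring number in the low ten bits.
 */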
static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	return ((u16)pdata->rm << 10) | ring->num;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

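/* Number of messages currently pending in a ring, read from its ring state
 * command registers.
 */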
static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN);

	return num_msgs >> NUMMSGSINQ_POS;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = xgene_enet_ring_len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	iowrite32(-len, buf_pool->cmd);
	buf_pool->tail = tail;
}

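/* RX interrupt: mask the line and defer all descriptor processing to NAPI. */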
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

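/* Reclaim one transmitted packet: unmap its buffer, report any LERR status
 * from the descriptor and free the skb saved by xgene_enet_setup_tx_desc().
 */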
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	u16 skb_index;
	u8 status;
	int ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)),
			 DMA_TO_DEVICE);

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}

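/* Build the 64-bit work message for a TX descriptor: Ethernet/IP/L4 header
 * lengths plus checksum-offload and protocol hints for TCP/UDP over IPv4.
 */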
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8 l3hlen, l4hlen = 0;
	u8 csum_enable = 0;
	u8 proto = 0;
	u8 ethhdr;
	u64 hopinfo;

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	ethhdr = xgene_enet_hdr_len(skb->data);
	hopinfo = SET_VAL(TCPHDR, l4hlen) |
		  SET_VAL(IPHDR, l3hlen) |
		  SET_VAL(ETHHDR, ethhdr) |
		  SET_VAL(EC, csum_enable) |
		  SET_VAL(IS, proto) |
		  SET_BIT(IC) |
		  SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return hopinfo;
}

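/* Fill a TX descriptor for a linear skb: map the data, record the buffer
 * address and length, attach the work message and stash the skb so it can
 * be freed on completion.
 */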
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	u16 tail = tx_ring->tail;
	u64 hopinfo;

	raw_desc = &tx_ring->raw_desc[tail];
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m0 = cpu_to_le64(tail);
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, skb->len) |
				   SET_BIT(COHERENT));
	hopinfo = xgene_enet_work_msg(skb);
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);
	tx_ring->cp_ring->cp_skb[tail] = skb;

	return 0;
}

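/* ndo_start_xmit: apply back-pressure when the TX or completion queue runs
 * too full, otherwise queue one descriptor and ring the doorbell.
 */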
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
	u32 tx_level, cq_level;

	tx_level = xgene_enet_ring_len(tx_ring);
	cq_level = xgene_enet_ring_len(cp_ring);
	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
		     cq_level > pdata->cp_qcnt_hi)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (xgene_enet_setup_tx_desc(tx_ring, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	iowrite32(1, tx_ring->cmd);
	skb_tx_timestamp(skb);
	tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);

	pdata->stats.tx_packets++;
	pdata->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

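/* Handle one received frame: unmap and validate it, strip the trailing CRC
 * (the MAC leaves it in place), then hand the skb to GRO. The buffer pool
 * is topped up once every NUM_BUFPOOL frames.
 */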
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		pdata->stats.rx_dropped++;
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen -= 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	pdata->stats.rx_packets++;
	pdata->stats.rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

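/* Drain up to budget descriptors from a ring, dispatching each to RX or TX
 * completion handling, then advance the head, return the consumed slots to
 * the ring manager and wake the TX queue if it was flow-controlled.
 */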
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, count = 0;

	do {
		raw_desc = &ring->raw_desc[head];
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		if (is_rx_desc(raw_desc))
			ret = xgene_enet_rx_frame(ring, raw_desc);
		else
			ret = xgene_enet_tx_completion(ring, raw_desc);
		xgene_enet_mark_desc_slot_empty(raw_desc);

		head = (head + 1) & slots;
		count++;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		iowrite32(-count, ring->cmd);
		ring->head = head;

		if (netif_queue_stopped(ring->ndev)) {
			if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low)
				netif_wake_queue(ring->ndev);
		}
	}

	/* Return the number of descriptors processed so the NAPI poll in
	 * xgene_enet_napi() can compare it against its budget.
	 */
	return count;
}

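/* NAPI poll: if fewer than budget descriptors were processed, the ring ran
 * dry, so complete NAPI and unmask the RX interrupt.
 */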
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	xgene_gmac_reset(pdata);
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int ret;

	ret = devm_request_irq(dev, pdata->rx_ring->irq, xgene_enet_rx_irq,
			       IRQF_SHARED, ndev->name, pdata->rx_ring);
	if (ret) {
		netdev_err(ndev, "rx%d interrupt request failed\n",
			   pdata->rx_ring->irq);
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	xgene_gmac_tx_enable(pdata);
	xgene_gmac_rx_enable(pdata);

	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;
	napi_enable(&pdata->rx_ring->napi);

	if (pdata->phy_dev)
		phy_start(pdata->phy_dev);

	netif_start_queue(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	netif_stop_queue(ndev);

	if (pdata->phy_dev)
		phy_stop(pdata->phy_dev);

	napi_disable(&pdata->rx_ring->napi);
	xgene_enet_free_irq(ndev);
	xgene_enet_process_ring(pdata->rx_ring, -1);

	xgene_gmac_tx_disable(pdata);
	xgene_gmac_rx_disable(pdata);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	xgene_enet_clear_ring(ring);
	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;

	if (pdata->tx_ring) {
		xgene_enet_delete_ring(pdata->tx_ring);
		pdata->tx_ring = NULL;
	}

	if (pdata->rx_ring) {
		buf_pool = pdata->rx_ring->buf_pool;
		xgene_enet_delete_bufpool(buf_pool);
		xgene_enet_delete_ring(buf_pool);
		xgene_enet_delete_ring(pdata->rx_ring);
		pdata->rx_ring = NULL;
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);

	if (ring->desc_addr) {
		xgene_enet_clear_ring(ring);
		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;

	ring = pdata->tx_ring;
	if (ring) {
		if (ring->cp_ring && ring->cp_ring->cp_skb)
			devm_kfree(dev, ring->cp_ring->cp_skb);
		xgene_enet_free_desc_ring(ring);
	}

	ring = pdata->rx_ring;
	if (ring) {
		if (ring->buf_pool) {
			if (ring->buf_pool->rx_skb)
				devm_kfree(dev, ring->buf_pool->rx_skb);
			xgene_enet_free_desc_ring(ring->buf_pool);
		}
		xgene_enet_free_desc_ring(ring);
	}
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	pdata->rm = RM3;
	ring = xgene_enet_setup_ring(ring);
	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

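/* Create the RX descriptor ring, its buffer pool and the TX ring. TX
 * completions are delivered to the RX ring, which doubles as the completion
 * ring, and the queue-depth thresholds used for TX flow control are derived
 * here.
 */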
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	u8 cpu_bufnum = 0, eth_bufnum = 0;
	u8 bp_bufnum = 0x20;
	u16 ring_id, ring_num = 0;
	int ret;

	/* allocate rx descriptor ring */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
	rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!rx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	/* allocate buffer pool for receiving packets */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++);
	buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
					       RING_CFGSIZE_2KB, ring_id);
	if (!buf_pool) {
		ret = -ENOMEM;
		goto err;
	}

	rx_ring->nbufpool = NUM_BUFPOOL;
	rx_ring->buf_pool = buf_pool;
	rx_ring->irq = pdata->rx_irq;
	buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
					sizeof(struct sk_buff *), GFP_KERNEL);
	if (!buf_pool->rx_skb) {
		ret = -ENOMEM;
		goto err;
	}

	buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
	rx_ring->buf_pool = buf_pool;
	pdata->rx_ring = rx_ring;

	/* allocate tx descriptor ring */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++);
	tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!tx_ring) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring = tx_ring;

	cp_ring = pdata->rx_ring;
	cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
				       sizeof(struct sk_buff *), GFP_KERNEL);
	if (!cp_ring->cp_skb) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring->cp_ring = cp_ring;
	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

	pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
	pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
	pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;

	stats->rx_errors += stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors;
	memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));

	return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	xgene_gmac_set_mac_addr(pdata);

	return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

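/* Map the ENET port, ring CSR and ring command regions, and pick up the RX
 * IRQ, MAC address, PHY mode and clock from the platform/DT description.
 */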
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	const char *mac;
	int ret;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr");
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->base_addr)) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return PTR_ERR(pdata->base_addr);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr");
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->ring_csr_addr)) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return PTR_ERR(pdata->ring_csr_addr);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd");
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->ring_cmd_addr)) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return PTR_ERR(pdata->ring_cmd_addr);
	}

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		dev_err(dev, "Unable to get ENET Rx IRQ\n");
		ret = ret ? : -ENXIO;
		return ret;
	}
	pdata->rx_irq = ret;

	mac = of_get_mac_address(dev->of_node);
	if (mac)
		memcpy(ndev->dev_addr, mac, ndev->addr_len);
	else
		eth_hw_addr_random(ndev);
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Incorrect phy-connection-type in DTS\n");
		return -EINVAL;
	}

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		dev_err(&pdev->dev, "can't get clock\n");
		return PTR_ERR(pdata->clk);
	}

	base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
	pdata->mcx_stats_addr = base_addr + BLOCK_ETH_STATS_OFFSET;
	pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

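/* Bring up the datapath: create the descriptor rings, seed and fill the RX
 * buffer pool and program classifier (CLE) bypass so received frames are
 * steered to the RX ring.
 */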
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int ret;

	xgene_gmac_tx_disable(pdata);
	xgene_gmac_rx_disable(pdata);

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	buf_pool = pdata->rx_ring->buf_pool;
	xgene_enet_init_bufpool(buf_pool);
	ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
	if (ret) {
		xgene_enet_delete_desc_rings(pdata);
		return ret;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
	xgene_enet_cle_bypass(pdata, dst_ring_num, buf_pool->id);

	return ret;
}

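/* Probe: allocate the net_device, map resources, reset and initialise the
 * MAC, register the netdev, configure the DMA mask, set up rings and NAPI
 * and finally configure the MDIO bus.
 */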
static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	struct napi_struct *napi;
	int ret;

	ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO;

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_reset(pdata);
	xgene_gmac_init(pdata, SPEED_1000);

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	napi = &pdata->rx_ring->napi;
	netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
	ret = xgene_enet_mdio_config(pdata);

	return ret;
err:
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	xgene_gmac_rx_disable(pdata);
	xgene_gmac_tx_disable(pdata);

	netif_napi_del(&pdata->rx_ring->napi);
	xgene_enet_mdio_remove(pdata);
	xgene_enet_delete_desc_rings(pdata);
	unregister_netdev(ndev);
	xgene_gport_shutdown(pdata);
	free_netdev(ndev);

	return 0;
}

static struct of_device_id xgene_enet_match[] = {
	{.compatible = "apm,xgene-enet",},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_match);

static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = xgene_enet_match,
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");