/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"

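/* Pre-fill each 16-byte free-pool descriptor: the slot index occupies the
 * low (USERINFO) bits of m0 so a completed buffer can be matched back to
 * rx_skb[], and FPQNUM carries the pool's destination ring number.
 */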
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

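	/* Advance the hardware's view of the pool by the number of buffers
	 * just added; buf_pool->cmd points at the ring's INC/DEC command
	 * register.
	 */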
	iowrite32(nbuf, buf_pool->cmd);
	buf_pool->tail = tail;

	return 0;
}

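/* Queue number used by hardware to address this ring: the ring manager id
 * sits in the upper bits, the ring number in the lower ten.
 */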
static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	return ((u16)pdata->rm << 10) | ring->num;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

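/* Number of messages currently pending in the queue, read from the ring
 * state registers.
 */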
static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN);

	return num_msgs >> NUMMSGSINQ_POS;
}

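/* Walk the pool backwards from the tail, free every skb that is still
 * outstanding and hand the consumed slots back by writing a negative count
 * to the command register.
 */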
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = xgene_enet_ring_len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	iowrite32(-len, buf_pool->cmd);
	buf_pool->tail = tail;
}

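/* Rx interrupt handler: leave the line disabled while NAPI drains the ring;
 * it is re-enabled from xgene_enet_napi() once polling completes.
 */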
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	u16 skb_index;
	u8 status;
	int ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)),
			 DMA_TO_DEVICE);

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}

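/* Build the work-message bits (stored in descriptor word m3) for a transmit
 * frame: L3/L4 header lengths are encoded in 32-bit words, EC enables
 * hardware checksum insertion and IS selects the protocol, provided the
 * stack requested IP checksum offload.
 */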
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8 l3hlen, l4hlen = 0;
	u8 csum_enable = 0;
	u8 proto = 0;
	u8 ethhdr;
	u64 hopinfo;

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	ethhdr = xgene_enet_hdr_len(skb->data);
	hopinfo = SET_VAL(TCPHDR, l4hlen) |
		  SET_VAL(IPHDR, l3hlen) |
		  SET_VAL(ETHHDR, ethhdr) |
		  SET_VAL(EC, csum_enable) |
		  SET_VAL(IS, proto) |
		  SET_BIT(IC) |
		  SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return hopinfo;
}

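/* Fill one transmit descriptor: map the linear skb data for DMA, point the
 * work message at the completion (HENQNUM) ring and remember the skb so the
 * completion handler can free it.
 */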
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	u16 tail = tx_ring->tail;
	u64 hopinfo;

	raw_desc = &tx_ring->raw_desc[tail];
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m0 = cpu_to_le64(tail);
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, skb->len) |
				   SET_BIT(COHERENT));
	hopinfo = xgene_enet_work_msg(skb);
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);
	tx_ring->cp_ring->cp_skb[tail] = skb;

	return 0;
}

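/* Transmit path: back-pressure the stack when either the tx ring or the
 * completion ring crosses its high watermark; the queue is woken again from
 * xgene_enet_process_ring() once the completion ring drains below
 * cp_qcnt_low.
 */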
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
	u32 tx_level, cq_level;

	tx_level = xgene_enet_ring_len(tx_ring);
	cq_level = xgene_enet_ring_len(cp_ring);
	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
		     cq_level > pdata->cp_qcnt_hi)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (xgene_enet_setup_tx_desc(tx_ring, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	iowrite32(1, tx_ring->cmd);
	skb_tx_timestamp(skb);
	tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);

	pdata->stats.tx_packets++;
	pdata->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;
}

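/* Rx checksum offload: the MAC has already validated the TCP/UDP checksum
 * of non-fragmented IP packets, so tell the stack not to verify it again.
 */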
static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		pdata->stats.rx_dropped++;
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen -= 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	pdata->stats.rx_packets++;
	pdata->stats.rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

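/* Rx frames and tx completions share the same ring; a descriptor that
 * carries a free-pool queue number (FPQNUM) is a received frame, anything
 * else is a transmit completion.
 */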
static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, count = 0;

	do {
		raw_desc = &ring->raw_desc[head];
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		if (is_rx_desc(raw_desc))
			ret = xgene_enet_rx_frame(ring, raw_desc);
		else
			ret = xgene_enet_tx_completion(ring, raw_desc);
		xgene_enet_mark_desc_slot_empty(raw_desc);

		head = (head + 1) & slots;
		count++;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		iowrite32(-count, ring->cmd);
		ring->head = head;

		if (netif_queue_stopped(ring->ndev)) {
			if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low)
				netif_wake_queue(ring->ndev);
		}
	}

	return count;
}

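/* NAPI poll: if fewer descriptors than the budget were processed the ring
 * is empty, so complete NAPI and re-enable the rx interrupt that was turned
 * off in xgene_enet_rx_irq().
 */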
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	xgene_gmac_reset(pdata);
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int ret;

	ret = devm_request_irq(dev, pdata->rx_ring->irq, xgene_enet_rx_irq,
			       IRQF_SHARED, ndev->name, pdata->rx_ring);
	if (ret) {
		netdev_err(ndev, "rx%d interrupt request failed\n",
			   pdata->rx_ring->irq);
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	xgene_gmac_tx_enable(pdata);
	xgene_gmac_rx_enable(pdata);

	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;
	napi_enable(&pdata->rx_ring->napi);

	if (pdata->phy_dev)
		phy_start(pdata->phy_dev);

	netif_start_queue(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	netif_stop_queue(ndev);

	if (pdata->phy_dev)
		phy_stop(pdata->phy_dev);

	napi_disable(&pdata->rx_ring->napi);
	xgene_enet_free_irq(ndev);
	xgene_enet_process_ring(pdata->rx_ring, -1);

	xgene_gmac_tx_disable(pdata);
	xgene_gmac_rx_disable(pdata);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	xgene_enet_clear_ring(ring);
	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;

	if (pdata->tx_ring) {
		xgene_enet_delete_ring(pdata->tx_ring);
		pdata->tx_ring = NULL;
	}

	if (pdata->rx_ring) {
		buf_pool = pdata->rx_ring->buf_pool;
		xgene_enet_delete_bufpool(buf_pool);
		xgene_enet_delete_ring(buf_pool);
		xgene_enet_delete_ring(pdata->rx_ring);
		pdata->rx_ring = NULL;
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);

	if (ring->desc_addr) {
		xgene_enet_clear_ring(ring);
		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;

	ring = pdata->tx_ring;
	if (ring && ring->cp_ring && ring->cp_ring->cp_skb)
		devm_kfree(dev, ring->cp_ring->cp_skb);
	xgene_enet_free_desc_ring(ring);

	ring = pdata->rx_ring;
	if (ring) {
		/* rx_ring may not have been created yet on the error path */
		if (ring->buf_pool && ring->buf_pool->rx_skb)
			devm_kfree(dev, ring->buf_pool->rx_skb);
		xgene_enet_free_desc_ring(ring->buf_pool);
		xgene_enet_free_desc_ring(ring);
	}
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	pdata->rm = RM3;
	ring = xgene_enet_setup_ring(ring);
	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

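/* Ring ids encode the ring owner (CPU or Ethernet block) in the upper bits
 * and the buffer number in the low six bits.
 */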
static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

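/* Set up the queue topology used by this driver: a CPU-owned rx/completion
 * ring fed by an Ethernet-owned 2KB buffer pool, plus an Ethernet-owned tx
 * ring.  The rx ring doubles as the tx completion ring, and the flow
 * control watermarks are derived from the ring sizes.
 */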
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	u8 cpu_bufnum = 0, eth_bufnum = 0;
	u8 bp_bufnum = 0x20;
	u16 ring_id, ring_num = 0;
	int ret;

	/* allocate rx descriptor ring */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
	rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!rx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	/* allocate buffer pool for receiving packets */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++);
	buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
					       RING_CFGSIZE_2KB, ring_id);
	if (!buf_pool) {
		ret = -ENOMEM;
		goto err;
	}

	rx_ring->nbufpool = NUM_BUFPOOL;
	rx_ring->buf_pool = buf_pool;
	rx_ring->irq = pdata->rx_irq;
	buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
					sizeof(struct sk_buff *), GFP_KERNEL);
	if (!buf_pool->rx_skb) {
		ret = -ENOMEM;
		goto err;
	}

	buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
	rx_ring->buf_pool = buf_pool;
	pdata->rx_ring = rx_ring;

	/* allocate tx descriptor ring */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++);
	tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!tx_ring) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring = tx_ring;

	cp_ring = pdata->rx_ring;
	cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
				       sizeof(struct sk_buff *), GFP_KERNEL);
	if (!cp_ring->cp_skb) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring->cp_ring = cp_ring;
	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

	pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
	pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
	pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;

	stats->rx_errors += stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors;
	memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));

	return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	xgene_gmac_set_mac_addr(pdata);

	return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	const char *mac;
	int ret;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr");
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->base_addr)) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return PTR_ERR(pdata->base_addr);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr");
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->ring_csr_addr)) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return PTR_ERR(pdata->ring_csr_addr);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd");
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->ring_cmd_addr)) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return PTR_ERR(pdata->ring_cmd_addr);
	}

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		dev_err(dev, "Unable to get ENET Rx IRQ\n");
		ret = ret ? : -ENXIO;
		return ret;
	}
	pdata->rx_irq = ret;

	mac = of_get_mac_address(dev->of_node);
	if (mac)
		memcpy(ndev->dev_addr, mac, ndev->addr_len);
	else
		eth_hw_addr_random(ndev);
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Incorrect phy-connection-type in DTS\n");
		return -EINVAL;
	}

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		dev_err(&pdev->dev, "can't get clock\n");
		return PTR_ERR(pdata->clk);
	}

	base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
	pdata->mcx_stats_addr = base_addr + BLOCK_ETH_STATS_OFFSET;
	pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int ret;

	xgene_gmac_tx_disable(pdata);
	xgene_gmac_rx_disable(pdata);

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	buf_pool = pdata->rx_ring->buf_pool;
	xgene_enet_init_bufpool(buf_pool);
	ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
	if (ret) {
		xgene_enet_delete_desc_rings(pdata);
		return ret;
	}

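	/* Bypass the classifier engine so that all received frames are
	 * steered directly to this rx ring and its buffer pool.
	 */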
	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
	xgene_enet_cle_bypass(pdata, dst_ring_num, buf_pool->id);

	return ret;
}

static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	struct napi_struct *napi;
	int ret;

	ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO;

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_reset(pdata);
	xgene_gmac_init(pdata, SPEED_1000);

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	napi = &pdata->rx_ring->napi;
	netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
	ret = xgene_enet_mdio_config(pdata);

	return ret;
err:
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	xgene_gmac_rx_disable(pdata);
	xgene_gmac_tx_disable(pdata);

	netif_napi_del(&pdata->rx_ring->napi);
	xgene_enet_mdio_remove(pdata);
	xgene_enet_delete_desc_rings(pdata);
	unregister_netdev(ndev);
	xgene_gport_shutdown(pdata);
	free_netdev(ndev);

	return 0;
}

static struct of_device_id xgene_enet_match[] = {
	{.compatible = "apm,xgene-enet",},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_match);

static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = xgene_enet_match,
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");