1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Applied Micro X-Gene SoC Ethernet Driver
3  *
4  * Copyright (c) 2014, Applied Micro Circuits Corporation
5  * Authors: Iyappan Subramanian <isubramanian@apm.com>
6  *	    Ravi Patel <rapatel@apm.com>
7  *	    Keyur Chudgar <kchudgar@apm.com>
8  */
9 
10 #include <linux/gpio.h>
11 #include "xgene_enet_main.h"
12 #include "xgene_enet_hw.h"
13 #include "xgene_enet_sgmac.h"
14 #include "xgene_enet_xgmac.h"
15 
16 #define RES_ENET_CSR	0
17 #define RES_RING_CSR	1
18 #define RES_RING_CMD	2
19 
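/* Seed every 16-byte descriptor in the buffer pool with its slot index,
 * the pool's destination ring number and the STASH value.
 */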
20 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
21 {
22 	struct xgene_enet_raw_desc16 *raw_desc;
23 	int i;
24 
25 	if (!buf_pool)
26 		return;
27 
28 	for (i = 0; i < buf_pool->slots; i++) {
29 		raw_desc = &buf_pool->raw_desc16[i];
30 
		/* Hardware expects the descriptor in little-endian format */
32 		raw_desc->m0 = cpu_to_le64(i |
33 				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
34 				SET_VAL(STASH, 3));
35 	}
36 }
37 
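/* Decode the BUFDATALEN field of a descriptor into a byte count,
 * handling the 2K, 4K and 16K buffer-length encodings.
 */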
38 static u16 xgene_enet_get_data_len(u64 bufdatalen)
39 {
40 	u16 hw_len, mask;
41 
42 	hw_len = GET_VAL(BUFDATALEN, bufdatalen);
43 
44 	if (unlikely(hw_len == 0x7800)) {
45 		return 0;
46 	} else if (!(hw_len & BIT(14))) {
47 		mask = GENMASK(13, 0);
48 		return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
49 	} else if (!(hw_len & GENMASK(13, 12))) {
50 		mask = GENMASK(11, 0);
51 		return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
52 	} else {
53 		mask = GENMASK(11, 0);
54 		return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;
55 	}
56 }
57 
58 static u16 xgene_enet_set_data_len(u32 size)
59 {
60 	u16 hw_len;
61 
	hw_len = (size == SIZE_4K) ? BIT(14) : 0;
63 
64 	return hw_len;
65 }
66 
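/* Allocate @nbuf pages, DMA-map them and post them to the page pool
 * ring used for jumbo-frame fragments.
 */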
67 static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool,
68 				      u32 nbuf)
69 {
70 	struct xgene_enet_raw_desc16 *raw_desc;
71 	struct xgene_enet_pdata *pdata;
72 	struct net_device *ndev;
73 	dma_addr_t dma_addr;
74 	struct device *dev;
75 	struct page *page;
76 	u32 slots, tail;
77 	u16 hw_len;
78 	int i;
79 
80 	if (unlikely(!buf_pool))
81 		return 0;
82 
83 	ndev = buf_pool->ndev;
84 	pdata = netdev_priv(ndev);
85 	dev = ndev_to_dev(ndev);
86 	slots = buf_pool->slots - 1;
87 	tail = buf_pool->tail;
88 
89 	for (i = 0; i < nbuf; i++) {
90 		raw_desc = &buf_pool->raw_desc16[tail];
91 
92 		page = dev_alloc_page();
93 		if (unlikely(!page))
94 			return -ENOMEM;
95 
96 		dma_addr = dma_map_page(dev, page, 0,
97 					PAGE_SIZE, DMA_FROM_DEVICE);
98 		if (unlikely(dma_mapping_error(dev, dma_addr))) {
99 			put_page(page);
100 			return -ENOMEM;
101 		}
102 
103 		hw_len = xgene_enet_set_data_len(PAGE_SIZE);
104 		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
105 					   SET_VAL(BUFDATALEN, hw_len) |
106 					   SET_BIT(COHERENT));
107 
108 		buf_pool->frag_page[tail] = page;
109 		tail = (tail + 1) & slots;
110 	}
111 
112 	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
113 	buf_pool->tail = tail;
114 
115 	return 0;
116 }
117 
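/* Allocate @nbuf skbs, DMA-map their data and post them to the RX
 * buffer pool ring.
 */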
118 static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
119 				     u32 nbuf)
120 {
121 	struct sk_buff *skb;
122 	struct xgene_enet_raw_desc16 *raw_desc;
123 	struct xgene_enet_pdata *pdata;
124 	struct net_device *ndev;
125 	struct device *dev;
126 	dma_addr_t dma_addr;
127 	u32 tail = buf_pool->tail;
128 	u32 slots = buf_pool->slots - 1;
129 	u16 bufdatalen, len;
130 	int i;
131 
132 	ndev = buf_pool->ndev;
133 	dev = ndev_to_dev(buf_pool->ndev);
134 	pdata = netdev_priv(ndev);
135 
136 	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
137 	len = XGENE_ENET_STD_MTU;
138 
139 	for (i = 0; i < nbuf; i++) {
140 		raw_desc = &buf_pool->raw_desc16[tail];
141 
142 		skb = netdev_alloc_skb_ip_align(ndev, len);
143 		if (unlikely(!skb))
144 			return -ENOMEM;
145 
146 		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
147 		if (dma_mapping_error(dev, dma_addr)) {
148 			netdev_err(ndev, "DMA mapping error\n");
149 			dev_kfree_skb_any(skb);
150 			return -EINVAL;
151 		}
152 
153 		buf_pool->rx_skb[tail] = skb;
154 
155 		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
156 					   SET_VAL(BUFDATALEN, bufdatalen) |
157 					   SET_BIT(COHERENT));
158 		tail = (tail + 1) & slots;
159 	}
160 
161 	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
162 	buf_pool->tail = tail;
163 
164 	return 0;
165 }
166 
167 static u8 xgene_enet_hdr_len(const void *data)
168 {
169 	const struct ethhdr *eth = data;
170 
171 	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
172 }
173 
174 static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
175 {
176 	struct device *dev = ndev_to_dev(buf_pool->ndev);
177 	struct xgene_enet_raw_desc16 *raw_desc;
178 	dma_addr_t dma_addr;
179 	int i;
180 
181 	/* Free up the buffers held by hardware */
182 	for (i = 0; i < buf_pool->slots; i++) {
183 		if (buf_pool->rx_skb[i]) {
184 			dev_kfree_skb_any(buf_pool->rx_skb[i]);
185 
186 			raw_desc = &buf_pool->raw_desc16[i];
187 			dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
188 			dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
189 					 DMA_FROM_DEVICE);
190 		}
191 	}
192 }
193 
194 static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool)
195 {
196 	struct device *dev = ndev_to_dev(buf_pool->ndev);
197 	dma_addr_t dma_addr;
198 	struct page *page;
199 	int i;
200 
201 	/* Free up the buffers held by hardware */
202 	for (i = 0; i < buf_pool->slots; i++) {
203 		page = buf_pool->frag_page[i];
204 		if (page) {
205 			dma_addr = buf_pool->frag_dma_addr[i];
206 			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
207 				       DMA_FROM_DEVICE);
208 			put_page(page);
209 		}
210 	}
211 }
212 
213 static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
214 {
215 	struct xgene_enet_desc_ring *rx_ring = data;
216 
217 	if (napi_schedule_prep(&rx_ring->napi)) {
218 		disable_irq_nosync(irq);
219 		__napi_schedule(&rx_ring->napi);
220 	}
221 
222 	return IRQ_HANDLED;
223 }
224 
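/* Reclaim a transmitted skb: unmap its head and fragments, release any
 * TSO MSS slot reference, record errors and free the skb.
 */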
225 static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
226 				    struct xgene_enet_raw_desc *raw_desc)
227 {
228 	struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
229 	struct sk_buff *skb;
230 	struct device *dev;
231 	skb_frag_t *frag;
232 	dma_addr_t *frag_dma_addr;
233 	u16 skb_index;
234 	u8 mss_index;
235 	u8 status;
236 	int i;
237 
238 	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
239 	skb = cp_ring->cp_skb[skb_index];
240 	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
241 
242 	dev = ndev_to_dev(cp_ring->ndev);
243 	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
244 			 skb_headlen(skb),
245 			 DMA_TO_DEVICE);
246 
247 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
248 		frag = &skb_shinfo(skb)->frags[i];
249 		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
250 			       DMA_TO_DEVICE);
251 	}
252 
253 	if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
254 		mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
255 		spin_lock(&pdata->mss_lock);
256 		pdata->mss_refcnt[mss_index]--;
257 		spin_unlock(&pdata->mss_lock);
258 	}
259 
	/* Check for a transmit error reported by the hardware */
261 	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
262 	if (unlikely(status > 2)) {
263 		cp_ring->tx_dropped++;
264 		cp_ring->tx_errors++;
265 	}
266 
267 	if (likely(skb)) {
268 		dev_kfree_skb_any(skb);
269 	} else {
270 		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
271 	}
272 
273 	return 0;
274 }
275 
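/* Return the index of an MSS register programmed with @mss, reusing a
 * matching slot or claiming a free one; -EBUSY if all slots are in use.
 */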
276 static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
277 {
278 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
279 	int mss_index = -EBUSY;
280 	int i;
281 
282 	spin_lock(&pdata->mss_lock);
283 
284 	/* Reuse the slot if MSS matches */
285 	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
286 		if (pdata->mss[i] == mss) {
287 			pdata->mss_refcnt[i]++;
288 			mss_index = i;
289 		}
290 	}
291 
292 	/* Overwrite the slot with ref_count = 0 */
293 	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
294 		if (!pdata->mss_refcnt[i]) {
295 			pdata->mss_refcnt[i]++;
296 			pdata->mac_ops->set_mss(pdata, mss, i);
297 			pdata->mss[i] = mss;
298 			mss_index = i;
299 		}
300 	}
301 
302 	spin_unlock(&pdata->mss_lock);
303 
304 	return mss_index;
305 }
306 
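/* Build the hardware work message (hopinfo) for @skb: header lengths,
 * checksum-offload enables and, for TSO, the MSS register index.
 */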
307 static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
308 {
309 	struct net_device *ndev = skb->dev;
310 	struct iphdr *iph;
311 	u8 l3hlen = 0, l4hlen = 0;
312 	u8 ethhdr, proto = 0, csum_enable = 0;
313 	u32 hdr_len, mss = 0;
314 	u32 i, len, nr_frags;
315 	int mss_index;
316 
317 	ethhdr = xgene_enet_hdr_len(skb->data);
318 
319 	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
320 	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
321 		goto out;
322 
323 	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
324 		goto out;
325 
326 	iph = ip_hdr(skb);
327 	if (unlikely(ip_is_fragment(iph)))
328 		goto out;
329 
330 	if (likely(iph->protocol == IPPROTO_TCP)) {
331 		l4hlen = tcp_hdrlen(skb) >> 2;
332 		csum_enable = 1;
333 		proto = TSO_IPPROTO_TCP;
334 		if (ndev->features & NETIF_F_TSO) {
335 			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
336 			mss = skb_shinfo(skb)->gso_size;
337 
338 			if (skb_is_nonlinear(skb)) {
339 				len = skb_headlen(skb);
340 				nr_frags = skb_shinfo(skb)->nr_frags;
341 
342 				for (i = 0; i < 2 && i < nr_frags; i++)
343 					len += skb_shinfo(skb)->frags[i].size;
344 
				/* HW requires the header to fit within the
				 * first three buffers
				 */
346 				if (unlikely(hdr_len > len)) {
347 					if (skb_linearize(skb))
348 						return 0;
349 				}
350 			}
351 
352 			if (!mss || ((skb->len - hdr_len) <= mss))
353 				goto out;
354 
355 			mss_index = xgene_enet_setup_mss(ndev, mss);
356 			if (unlikely(mss_index < 0))
357 				return -EBUSY;
358 
359 			*hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
360 		}
361 	} else if (iph->protocol == IPPROTO_UDP) {
362 		l4hlen = UDP_HDR_SIZE;
363 		csum_enable = 1;
364 	}
365 out:
366 	l3hlen = ip_hdrlen(skb) >> 2;
367 	*hopinfo |= SET_VAL(TCPHDR, l4hlen) |
368 		    SET_VAL(IPHDR, l3hlen) |
369 		    SET_VAL(ETHHDR, ethhdr) |
370 		    SET_VAL(EC, csum_enable) |
371 		    SET_VAL(IS, proto) |
372 		    SET_BIT(IC) |
373 		    SET_BIT(TYPE_ETH_WORK_MESSAGE);
374 
375 	return 0;
376 }
377 
378 static u16 xgene_enet_encode_len(u16 len)
379 {
380 	return (len == BUFLEN_16K) ? 0 : len;
381 }
382 
383 static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
384 {
385 	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
386 				    SET_VAL(BUFDATALEN, len));
387 }
388 
389 static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
390 {
391 	__le64 *exp_bufs;
392 
393 	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
394 	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
395 	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
396 
397 	return exp_bufs;
398 }
399 
400 static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
401 {
402 	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
403 }
404 
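/* Fill TX descriptors for @skb, using an expanded descriptor and, when
 * needed, the extra (LL) buffer list for scatter/gather frames; returns
 * the number of descriptors consumed or a negative error code.
 */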
405 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
406 				    struct sk_buff *skb)
407 {
408 	struct device *dev = ndev_to_dev(tx_ring->ndev);
409 	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
410 	struct xgene_enet_raw_desc *raw_desc;
411 	__le64 *exp_desc = NULL, *exp_bufs = NULL;
412 	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
413 	skb_frag_t *frag;
414 	u16 tail = tx_ring->tail;
415 	u64 hopinfo = 0;
416 	u32 len, hw_len;
417 	u8 ll = 0, nv = 0, idx = 0;
418 	bool split = false;
419 	u32 size, offset, ell_bytes = 0;
420 	u32 i, fidx, nr_frags, count = 1;
421 	int ret;
422 
423 	raw_desc = &tx_ring->raw_desc[tail];
424 	tail = (tail + 1) & (tx_ring->slots - 1);
425 	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
426 
427 	ret = xgene_enet_work_msg(skb, &hopinfo);
428 	if (ret)
429 		return ret;
430 
431 	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
432 				   hopinfo);
433 
434 	len = skb_headlen(skb);
435 	hw_len = xgene_enet_encode_len(len);
436 
437 	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
438 	if (dma_mapping_error(dev, dma_addr)) {
439 		netdev_err(tx_ring->ndev, "DMA mapping error\n");
440 		return -EINVAL;
441 	}
442 
	/* Hardware expects the descriptor in little-endian format */
444 	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
445 				   SET_VAL(BUFDATALEN, hw_len) |
446 				   SET_BIT(COHERENT));
447 
448 	if (!skb_is_nonlinear(skb))
449 		goto out;
450 
451 	/* scatter gather */
452 	nv = 1;
453 	exp_desc = (void *)&tx_ring->raw_desc[tail];
454 	tail = (tail + 1) & (tx_ring->slots - 1);
455 	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
456 
457 	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4; i++)
459 		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
460 
461 	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
462 
463 	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
464 		if (!split) {
465 			frag = &skb_shinfo(skb)->frags[fidx];
466 			size = skb_frag_size(frag);
467 			offset = 0;
468 
469 			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
470 						     DMA_TO_DEVICE);
471 			if (dma_mapping_error(dev, pbuf_addr))
472 				return -EINVAL;
473 
474 			frag_dma_addr[fidx] = pbuf_addr;
475 			fidx++;
476 
477 			if (size > BUFLEN_16K)
478 				split = true;
479 		}
480 
481 		if (size > BUFLEN_16K) {
482 			len = BUFLEN_16K;
483 			size -= BUFLEN_16K;
484 		} else {
485 			len = size;
486 			split = false;
487 		}
488 
489 		dma_addr = pbuf_addr + offset;
490 		hw_len = xgene_enet_encode_len(len);
491 
492 		switch (i) {
493 		case 0:
494 		case 1:
495 		case 2:
496 			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
497 			break;
498 		case 3:
499 			if (split || (fidx != nr_frags)) {
500 				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
501 				xgene_set_addr_len(exp_bufs, idx, dma_addr,
502 						   hw_len);
503 				idx++;
504 				ell_bytes += len;
505 			} else {
506 				xgene_set_addr_len(exp_desc, i, dma_addr,
507 						   hw_len);
508 			}
509 			break;
510 		default:
511 			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
512 			idx++;
513 			ell_bytes += len;
514 			break;
515 		}
516 
517 		if (split)
518 			offset += BUFLEN_16K;
519 	}
520 	count++;
521 
522 	if (idx) {
523 		ll = 1;
524 		dma_addr = dma_map_single(dev, exp_bufs,
525 					  sizeof(u64) * MAX_EXP_BUFFS,
526 					  DMA_TO_DEVICE);
527 		if (dma_mapping_error(dev, dma_addr)) {
528 			dev_kfree_skb_any(skb);
529 			return -EINVAL;
530 		}
531 		i = ell_bytes >> LL_BYTES_LSB_LEN;
532 		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
533 					  SET_VAL(LL_BYTES_MSB, i) |
534 					  SET_VAL(LL_LEN, idx));
535 		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
536 	}
537 
538 out:
539 	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
540 				   SET_VAL(USERINFO, tx_ring->tail));
541 	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
542 	pdata->tx_level[tx_ring->cp_ring->index] += count;
543 	tx_ring->tail = tail;
544 
545 	return count;
546 }
547 
548 static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
549 					 struct net_device *ndev)
550 {
551 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
552 	struct xgene_enet_desc_ring *tx_ring;
553 	int index = skb->queue_mapping;
554 	u32 tx_level = pdata->tx_level[index];
555 	int count;
556 
557 	tx_ring = pdata->tx_ring[index];
558 	if (tx_level < pdata->txc_level[index])
559 		tx_level += ((typeof(pdata->tx_level[index]))~0U);
560 
561 	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
562 		netif_stop_subqueue(ndev, index);
563 		return NETDEV_TX_BUSY;
564 	}
565 
566 	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
567 		return NETDEV_TX_OK;
568 
569 	count = xgene_enet_setup_tx_desc(tx_ring, skb);
570 	if (count == -EBUSY)
571 		return NETDEV_TX_BUSY;
572 
573 	if (count <= 0) {
574 		dev_kfree_skb_any(skb);
575 		return NETDEV_TX_OK;
576 	}
577 
578 	skb_tx_timestamp(skb);
579 
580 	tx_ring->tx_packets++;
581 	tx_ring->tx_bytes += skb->len;
582 
583 	pdata->ring_ops->wr_cmd(tx_ring, count);
584 	return NETDEV_TX_OK;
585 }
586 
587 static void xgene_enet_rx_csum(struct sk_buff *skb)
588 {
589 	struct net_device *ndev = skb->dev;
590 	struct iphdr *iph = ip_hdr(skb);
591 
592 	if (!(ndev->features & NETIF_F_RXCSUM))
593 		return;
594 
595 	if (skb->protocol != htons(ETH_P_IP))
596 		return;
597 
598 	if (ip_is_fragment(iph))
599 		return;
600 
601 	if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
602 		return;
603 
604 	skb->ip_summed = CHECKSUM_UNNECESSARY;
605 }
606 
607 static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
608 				     struct xgene_enet_raw_desc *raw_desc,
609 				     struct xgene_enet_raw_desc *exp_desc)
610 {
611 	__le64 *desc = (void *)exp_desc;
612 	dma_addr_t dma_addr;
613 	struct device *dev;
614 	struct page *page;
615 	u16 slots, head;
616 	u32 frag_size;
617 	int i;
618 
619 	if (!buf_pool || !raw_desc || !exp_desc ||
620 	    (!GET_VAL(NV, le64_to_cpu(raw_desc->m0))))
621 		return;
622 
623 	dev = ndev_to_dev(buf_pool->ndev);
624 	slots = buf_pool->slots - 1;
625 	head = buf_pool->head;
626 
627 	for (i = 0; i < 4; i++) {
628 		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
629 		if (!frag_size)
630 			break;
631 
632 		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
633 		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
634 
635 		page = buf_pool->frag_page[head];
636 		put_page(page);
637 
638 		buf_pool->frag_page[head] = NULL;
639 		head = (head + 1) & slots;
640 	}
641 	buf_pool->head = head;
642 }
643 
644 /* Errata 10GE_10 and ENET_15 - Fix duplicated HW statistic counters */
645 static bool xgene_enet_errata_10GE_10(struct sk_buff *skb, u32 len, u8 status)
646 {
647 	if (status == INGRESS_CRC &&
648 	    len >= (ETHER_STD_PACKET + 1) &&
649 	    len <= (ETHER_STD_PACKET + 4) &&
650 	    skb->protocol == htons(ETH_P_8021Q))
651 		return true;
652 
653 	return false;
654 }
655 
656 /* Errata 10GE_8 and ENET_11 - allow packet with length <=64B */
657 static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
658 {
659 	if (status == INGRESS_PKT_LEN && len == ETHER_MIN_PACKET) {
660 		if (ntohs(eth_hdr(skb)->h_proto) < 46)
661 			return true;
662 	}
663 
664 	return false;
665 }
666 
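/* Process one received frame: unmap its buffers, attach page-pool
 * fragments for jumbo frames, apply the errata checks, validate the
 * checksum, hand the skb to GRO and refill the pools when needed.
 */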
667 static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
668 			       struct xgene_enet_raw_desc *raw_desc,
669 			       struct xgene_enet_raw_desc *exp_desc)
670 {
671 	struct xgene_enet_desc_ring *buf_pool, *page_pool;
672 	u32 datalen, frag_size, skb_index;
673 	struct xgene_enet_pdata *pdata;
674 	struct net_device *ndev;
675 	dma_addr_t dma_addr;
676 	struct sk_buff *skb;
677 	struct device *dev;
678 	struct page *page;
679 	u16 slots, head;
680 	int i, ret = 0;
681 	__le64 *desc;
682 	u8 status;
683 	bool nv;
684 
685 	ndev = rx_ring->ndev;
686 	pdata = netdev_priv(ndev);
687 	dev = ndev_to_dev(rx_ring->ndev);
688 	buf_pool = rx_ring->buf_pool;
689 	page_pool = rx_ring->page_pool;
690 
691 	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
692 			 XGENE_ENET_STD_MTU, DMA_FROM_DEVICE);
693 	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
694 	skb = buf_pool->rx_skb[skb_index];
695 	buf_pool->rx_skb[skb_index] = NULL;
696 
697 	datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
698 	skb_put(skb, datalen);
699 	prefetch(skb->data - NET_IP_ALIGN);
700 	skb->protocol = eth_type_trans(skb, ndev);
701 
	/* Check for receive errors reported by the hardware */
703 	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
704 		  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
705 	if (unlikely(status)) {
706 		if (xgene_enet_errata_10GE_8(skb, datalen, status)) {
707 			pdata->false_rflr++;
708 		} else if (xgene_enet_errata_10GE_10(skb, datalen, status)) {
709 			pdata->vlan_rjbr++;
710 		} else {
711 			dev_kfree_skb_any(skb);
712 			xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
713 			xgene_enet_parse_error(rx_ring, status);
714 			rx_ring->rx_dropped++;
715 			goto out;
716 		}
717 	}
718 
719 	nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
720 	if (!nv) {
721 		/* strip off CRC as HW isn't doing this */
722 		datalen -= 4;
723 		goto skip_jumbo;
724 	}
725 
726 	slots = page_pool->slots - 1;
727 	head = page_pool->head;
728 	desc = (void *)exp_desc;
729 
730 	for (i = 0; i < 4; i++) {
731 		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
732 		if (!frag_size)
733 			break;
734 
735 		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
736 		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
737 
738 		page = page_pool->frag_page[head];
739 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
740 				frag_size, PAGE_SIZE);
741 
742 		datalen += frag_size;
743 
744 		page_pool->frag_page[head] = NULL;
745 		head = (head + 1) & slots;
746 	}
747 
748 	page_pool->head = head;
749 	rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;
750 
751 skip_jumbo:
752 	skb_checksum_none_assert(skb);
753 	xgene_enet_rx_csum(skb);
754 
755 	rx_ring->rx_packets++;
756 	rx_ring->rx_bytes += datalen;
757 	napi_gro_receive(&rx_ring->napi, skb);
758 
759 out:
760 	if (rx_ring->npagepool <= 0) {
761 		ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);
762 		rx_ring->npagepool = NUM_NXTBUFPOOL;
763 		if (ret)
764 			return ret;
765 	}
766 
767 	if (--rx_ring->nbufpool == 0) {
768 		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
769 		rx_ring->nbufpool = NUM_BUFPOOL;
770 	}
771 
772 	return ret;
773 }
774 
775 static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
776 {
777 	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
778 }
779 
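/* Poll a ring and dispatch RX frames and TX completions until the
 * budget is exhausted or an empty slot is found; returns the number of
 * work messages processed.
 */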
780 static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
781 				   int budget)
782 {
783 	struct net_device *ndev = ring->ndev;
784 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
785 	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
786 	u16 head = ring->head;
787 	u16 slots = ring->slots - 1;
788 	int ret, desc_count, count = 0, processed = 0;
789 	bool is_completion;
790 
791 	do {
792 		raw_desc = &ring->raw_desc[head];
793 		desc_count = 0;
794 		is_completion = false;
795 		exp_desc = NULL;
796 		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
797 			break;
798 
799 		/* read fpqnum field after dataaddr field */
800 		dma_rmb();
801 		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
802 			head = (head + 1) & slots;
803 			exp_desc = &ring->raw_desc[head];
804 
805 			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
806 				head = (head - 1) & slots;
807 				break;
808 			}
809 			dma_rmb();
810 			count++;
811 			desc_count++;
812 		}
813 		if (is_rx_desc(raw_desc)) {
814 			ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
815 		} else {
816 			ret = xgene_enet_tx_completion(ring, raw_desc);
817 			is_completion = true;
818 		}
819 		xgene_enet_mark_desc_slot_empty(raw_desc);
820 		if (exp_desc)
821 			xgene_enet_mark_desc_slot_empty(exp_desc);
822 
823 		head = (head + 1) & slots;
824 		count++;
825 		desc_count++;
826 		processed++;
827 		if (is_completion)
828 			pdata->txc_level[ring->index] += desc_count;
829 
830 		if (ret)
831 			break;
832 	} while (--budget);
833 
834 	if (likely(count)) {
835 		pdata->ring_ops->wr_cmd(ring, -count);
836 		ring->head = head;
837 
838 		if (__netif_subqueue_stopped(ndev, ring->index))
839 			netif_start_subqueue(ndev, ring->index);
840 	}
841 
842 	return processed;
843 }
844 
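/* NAPI poll handler; re-enables the ring IRQ when polling completes
 * under budget.
 */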
845 static int xgene_enet_napi(struct napi_struct *napi, const int budget)
846 {
847 	struct xgene_enet_desc_ring *ring;
848 	int processed;
849 
850 	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
851 	processed = xgene_enet_process_ring(ring, budget);
852 
853 	if (processed != budget) {
854 		napi_complete_done(napi, processed);
855 		enable_irq(ring->irq);
856 	}
857 
858 	return processed;
859 }
860 
861 static void xgene_enet_timeout(struct net_device *ndev)
862 {
863 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
864 	struct netdev_queue *txq;
865 	int i;
866 
867 	pdata->mac_ops->reset(pdata);
868 
869 	for (i = 0; i < pdata->txq_cnt; i++) {
870 		txq = netdev_get_tx_queue(ndev, i);
871 		txq->trans_start = jiffies;
872 		netif_tx_start_queue(txq);
873 	}
874 }
875 
876 static void xgene_enet_set_irq_name(struct net_device *ndev)
877 {
878 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
879 	struct xgene_enet_desc_ring *ring;
880 	int i;
881 
882 	for (i = 0; i < pdata->rxq_cnt; i++) {
883 		ring = pdata->rx_ring[i];
884 		if (!pdata->cq_cnt) {
885 			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
886 				 ndev->name);
887 		} else {
888 			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
889 				 ndev->name, i);
890 		}
891 	}
892 
893 	for (i = 0; i < pdata->cq_cnt; i++) {
894 		ring = pdata->tx_ring[i]->cp_ring;
895 		snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
896 			 ndev->name, i);
897 	}
898 }
899 
900 static int xgene_enet_register_irq(struct net_device *ndev)
901 {
902 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
903 	struct device *dev = ndev_to_dev(ndev);
904 	struct xgene_enet_desc_ring *ring;
905 	int ret = 0, i;
906 
907 	xgene_enet_set_irq_name(ndev);
908 	for (i = 0; i < pdata->rxq_cnt; i++) {
909 		ring = pdata->rx_ring[i];
910 		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
911 		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
912 				       0, ring->irq_name, ring);
913 		if (ret) {
914 			netdev_err(ndev, "Failed to request irq %s\n",
915 				   ring->irq_name);
916 		}
917 	}
918 
919 	for (i = 0; i < pdata->cq_cnt; i++) {
920 		ring = pdata->tx_ring[i]->cp_ring;
921 		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
922 		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
923 				       0, ring->irq_name, ring);
924 		if (ret) {
925 			netdev_err(ndev, "Failed to request irq %s\n",
926 				   ring->irq_name);
927 		}
928 	}
929 
930 	return ret;
931 }
932 
933 static void xgene_enet_free_irq(struct net_device *ndev)
934 {
935 	struct xgene_enet_pdata *pdata;
936 	struct xgene_enet_desc_ring *ring;
937 	struct device *dev;
938 	int i;
939 
940 	pdata = netdev_priv(ndev);
941 	dev = ndev_to_dev(ndev);
942 
943 	for (i = 0; i < pdata->rxq_cnt; i++) {
944 		ring = pdata->rx_ring[i];
945 		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
946 		devm_free_irq(dev, ring->irq, ring);
947 	}
948 
949 	for (i = 0; i < pdata->cq_cnt; i++) {
950 		ring = pdata->tx_ring[i]->cp_ring;
951 		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
952 		devm_free_irq(dev, ring->irq, ring);
953 	}
954 }
955 
956 static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
957 {
958 	struct napi_struct *napi;
959 	int i;
960 
961 	for (i = 0; i < pdata->rxq_cnt; i++) {
962 		napi = &pdata->rx_ring[i]->napi;
963 		napi_enable(napi);
964 	}
965 
966 	for (i = 0; i < pdata->cq_cnt; i++) {
967 		napi = &pdata->tx_ring[i]->cp_ring->napi;
968 		napi_enable(napi);
969 	}
970 }
971 
972 static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
973 {
974 	struct napi_struct *napi;
975 	int i;
976 
977 	for (i = 0; i < pdata->rxq_cnt; i++) {
978 		napi = &pdata->rx_ring[i]->napi;
979 		napi_disable(napi);
980 	}
981 
982 	for (i = 0; i < pdata->cq_cnt; i++) {
983 		napi = &pdata->tx_ring[i]->cp_ring->napi;
984 		napi_disable(napi);
985 	}
986 }
987 
988 static int xgene_enet_open(struct net_device *ndev)
989 {
990 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
991 	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
992 	int ret;
993 
994 	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
995 	if (ret)
996 		return ret;
997 
998 	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
999 	if (ret)
1000 		return ret;
1001 
1002 	xgene_enet_napi_enable(pdata);
1003 	ret = xgene_enet_register_irq(ndev);
1004 	if (ret)
1005 		return ret;
1006 
1007 	if (ndev->phydev) {
1008 		phy_start(ndev->phydev);
1009 	} else {
1010 		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
1011 		netif_carrier_off(ndev);
1012 	}
1013 
1014 	mac_ops->tx_enable(pdata);
1015 	mac_ops->rx_enable(pdata);
1016 	netif_tx_start_all_queues(ndev);
1017 
1018 	return ret;
1019 }
1020 
1021 static int xgene_enet_close(struct net_device *ndev)
1022 {
1023 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1024 	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
1025 	int i;
1026 
1027 	netif_tx_stop_all_queues(ndev);
1028 	mac_ops->tx_disable(pdata);
1029 	mac_ops->rx_disable(pdata);
1030 
1031 	if (ndev->phydev)
1032 		phy_stop(ndev->phydev);
1033 	else
1034 		cancel_delayed_work_sync(&pdata->link_work);
1035 
1036 	xgene_enet_free_irq(ndev);
1037 	xgene_enet_napi_disable(pdata);
1038 	for (i = 0; i < pdata->rxq_cnt; i++)
1039 		xgene_enet_process_ring(pdata->rx_ring[i], -1);
1040 
1041 	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
1044 {
1045 	struct xgene_enet_pdata *pdata;
1046 	struct device *dev;
1047 
1048 	pdata = netdev_priv(ring->ndev);
1049 	dev = ndev_to_dev(ring->ndev);
1050 
1051 	pdata->ring_ops->clear(ring);
1052 	dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
1053 }
1054 
1055 static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
1056 {
1057 	struct xgene_enet_desc_ring *buf_pool, *page_pool;
1058 	struct xgene_enet_desc_ring *ring;
1059 	int i;
1060 
1061 	for (i = 0; i < pdata->txq_cnt; i++) {
1062 		ring = pdata->tx_ring[i];
1063 		if (ring) {
1064 			xgene_enet_delete_ring(ring);
1065 			pdata->port_ops->clear(pdata, ring);
1066 			if (pdata->cq_cnt)
1067 				xgene_enet_delete_ring(ring->cp_ring);
1068 			pdata->tx_ring[i] = NULL;
		}
	}
1072 
1073 	for (i = 0; i < pdata->rxq_cnt; i++) {
1074 		ring = pdata->rx_ring[i];
1075 		if (ring) {
1076 			page_pool = ring->page_pool;
1077 			if (page_pool) {
1078 				xgene_enet_delete_pagepool(page_pool);
1079 				xgene_enet_delete_ring(page_pool);
1080 				pdata->port_ops->clear(pdata, page_pool);
1081 			}
1082 
1083 			buf_pool = ring->buf_pool;
1084 			xgene_enet_delete_bufpool(buf_pool);
1085 			xgene_enet_delete_ring(buf_pool);
1086 			pdata->port_ops->clear(pdata, buf_pool);
1087 
1088 			xgene_enet_delete_ring(ring);
1089 			pdata->rx_ring[i] = NULL;
		}
	}
1093 }
1094 
1095 static int xgene_enet_get_ring_size(struct device *dev,
1096 				    enum xgene_enet_ring_cfgsize cfgsize)
1097 {
1098 	int size = -EINVAL;
1099 
1100 	switch (cfgsize) {
1101 	case RING_CFGSIZE_512B:
1102 		size = 0x200;
1103 		break;
1104 	case RING_CFGSIZE_2KB:
1105 		size = 0x800;
1106 		break;
1107 	case RING_CFGSIZE_16KB:
1108 		size = 0x4000;
1109 		break;
1110 	case RING_CFGSIZE_64KB:
1111 		size = 0x10000;
1112 		break;
1113 	case RING_CFGSIZE_512KB:
1114 		size = 0x80000;
1115 		break;
1116 	default:
1117 		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
1118 		break;
1119 	}
1120 
1121 	return size;
1122 }
1123 
1124 static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
1125 {
1126 	struct xgene_enet_pdata *pdata;
1127 	struct device *dev;
1128 
1129 	if (!ring)
1130 		return;
1131 
1132 	dev = ndev_to_dev(ring->ndev);
1133 	pdata = netdev_priv(ring->ndev);
1134 
1135 	if (ring->desc_addr) {
1136 		pdata->ring_ops->clear(ring);
1137 		dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
1138 	}
1139 	devm_kfree(dev, ring);
1140 }
1141 
1142 static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
1143 {
1144 	struct xgene_enet_desc_ring *page_pool;
1145 	struct device *dev = &pdata->pdev->dev;
1146 	struct xgene_enet_desc_ring *ring;
1147 	void *p;
1148 	int i;
1149 
1150 	for (i = 0; i < pdata->txq_cnt; i++) {
1151 		ring = pdata->tx_ring[i];
1152 		if (ring) {
1153 			if (ring->cp_ring && ring->cp_ring->cp_skb)
1154 				devm_kfree(dev, ring->cp_ring->cp_skb);
1155 
1156 			if (ring->cp_ring && pdata->cq_cnt)
1157 				xgene_enet_free_desc_ring(ring->cp_ring);
1158 
1159 			xgene_enet_free_desc_ring(ring);
		}
	}
1163 
1164 	for (i = 0; i < pdata->rxq_cnt; i++) {
1165 		ring = pdata->rx_ring[i];
1166 		if (ring) {
1167 			if (ring->buf_pool) {
1168 				if (ring->buf_pool->rx_skb)
1169 					devm_kfree(dev, ring->buf_pool->rx_skb);
1170 
1171 				xgene_enet_free_desc_ring(ring->buf_pool);
1172 			}
1173 
1174 			page_pool = ring->page_pool;
1175 			if (page_pool) {
1176 				p = page_pool->frag_page;
1177 				if (p)
1178 					devm_kfree(dev, p);
1179 
1180 				p = page_pool->frag_dma_addr;
1181 				if (p)
1182 					devm_kfree(dev, p);
1183 			}
1184 
1185 			xgene_enet_free_desc_ring(ring);
1186 		}
1187 	}
1188 }
1189 
1190 static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
1191 				 struct xgene_enet_desc_ring *ring)
1192 {
1193 	if ((pdata->enet_id == XGENE_ENET2) &&
1194 	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
1195 		return true;
1196 	}
1197 
1198 	return false;
1199 }
1200 
1201 static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
1202 					      struct xgene_enet_desc_ring *ring)
1203 {
1204 	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
1205 
1206 	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
1207 }
1208 
1209 static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
1210 			struct net_device *ndev, u32 ring_num,
1211 			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
1212 {
1213 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1214 	struct device *dev = ndev_to_dev(ndev);
1215 	struct xgene_enet_desc_ring *ring;
1216 	void *irq_mbox_addr;
1217 	int size;
1218 
1219 	size = xgene_enet_get_ring_size(dev, cfgsize);
1220 	if (size < 0)
1221 		return NULL;
1222 
1223 	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
1224 			    GFP_KERNEL);
1225 	if (!ring)
1226 		return NULL;
1227 
1228 	ring->ndev = ndev;
1229 	ring->num = ring_num;
1230 	ring->cfgsize = cfgsize;
1231 	ring->id = ring_id;
1232 
1233 	ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
1234 					      GFP_KERNEL | __GFP_ZERO);
1235 	if (!ring->desc_addr) {
1236 		devm_kfree(dev, ring);
1237 		return NULL;
1238 	}
1239 	ring->size = size;
1240 
1241 	if (is_irq_mbox_required(pdata, ring)) {
1242 		irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
1243 						    &ring->irq_mbox_dma,
1244 						    GFP_KERNEL | __GFP_ZERO);
1245 		if (!irq_mbox_addr) {
1246 			dmam_free_coherent(dev, size, ring->desc_addr,
1247 					   ring->dma);
1248 			devm_kfree(dev, ring);
1249 			return NULL;
1250 		}
1251 		ring->irq_mbox_addr = irq_mbox_addr;
1252 	}
1253 
1254 	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
1255 	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
1256 	ring = pdata->ring_ops->setup(ring);
1257 	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
1258 		   ring->num, ring->size, ring->id, ring->slots);
1259 
1260 	return ring;
1261 }
1262 
1263 static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
1264 {
1265 	return (owner << 6) | (bufnum & GENMASK(5, 0));
1266 }
1267 
1268 static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
1269 {
1270 	enum xgene_ring_owner owner;
1271 
1272 	if (p->enet_id == XGENE_ENET1) {
1273 		switch (p->phy_mode) {
1274 		case PHY_INTERFACE_MODE_SGMII:
1275 			owner = RING_OWNER_ETH0;
1276 			break;
1277 		default:
1278 			owner = (!p->port_id) ? RING_OWNER_ETH0 :
1279 						RING_OWNER_ETH1;
1280 			break;
1281 		}
1282 	} else {
1283 		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
1284 	}
1285 
1286 	return owner;
1287 }
1288 
1289 static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
1290 {
1291 	struct device *dev = &pdata->pdev->dev;
1292 	u32 cpu_bufnum;
1293 	int ret;
1294 
1295 	ret = device_property_read_u32(dev, "channel", &cpu_bufnum);
1296 
1297 	return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
1298 }
1299 
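/* Allocate and set up the RX, buffer-pool, page-pool, TX and TX
 * completion descriptor rings for this port.
 */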
1300 static int xgene_enet_create_desc_rings(struct net_device *ndev)
1301 {
1302 	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
1303 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1304 	struct xgene_enet_desc_ring *page_pool = NULL;
1305 	struct xgene_enet_desc_ring *buf_pool = NULL;
1306 	struct device *dev = ndev_to_dev(ndev);
1307 	u8 eth_bufnum = pdata->eth_bufnum;
1308 	u8 bp_bufnum = pdata->bp_bufnum;
1309 	u16 ring_num = pdata->ring_num;
1310 	enum xgene_ring_owner owner;
1311 	dma_addr_t dma_exp_bufs;
1312 	u16 ring_id, slots;
1313 	__le64 *exp_bufs;
1314 	int i, ret, size;
1315 	u8 cpu_bufnum;
1316 
1317 	cpu_bufnum = xgene_start_cpu_bufnum(pdata);
1318 
1319 	for (i = 0; i < pdata->rxq_cnt; i++) {
1320 		/* allocate rx descriptor ring */
1321 		owner = xgene_derive_ring_owner(pdata);
1322 		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
1323 		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1324 						      RING_CFGSIZE_16KB,
1325 						      ring_id);
1326 		if (!rx_ring) {
1327 			ret = -ENOMEM;
1328 			goto err;
1329 		}
1330 
1331 		/* allocate buffer pool for receiving packets */
1332 		owner = xgene_derive_ring_owner(pdata);
1333 		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
1334 		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
1335 						       RING_CFGSIZE_16KB,
1336 						       ring_id);
1337 		if (!buf_pool) {
1338 			ret = -ENOMEM;
1339 			goto err;
1340 		}
1341 
1342 		rx_ring->nbufpool = NUM_BUFPOOL;
1343 		rx_ring->npagepool = NUM_NXTBUFPOOL;
1344 		rx_ring->irq = pdata->irqs[i];
1345 		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
1346 						sizeof(struct sk_buff *),
1347 						GFP_KERNEL);
1348 		if (!buf_pool->rx_skb) {
1349 			ret = -ENOMEM;
1350 			goto err;
1351 		}
1352 
1353 		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
1354 		rx_ring->buf_pool = buf_pool;
1355 		pdata->rx_ring[i] = rx_ring;
1356 
		if ((pdata->enet_id == XGENE_ENET1 && pdata->rxq_cnt > 4) ||
		    (pdata->enet_id == XGENE_ENET2 && pdata->rxq_cnt > 16)) {
1359 			break;
1360 		}
1361 
1362 		/* allocate next buffer pool for jumbo packets */
1363 		owner = xgene_derive_ring_owner(pdata);
1364 		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
1365 		page_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
1366 							RING_CFGSIZE_16KB,
1367 							ring_id);
1368 		if (!page_pool) {
1369 			ret = -ENOMEM;
1370 			goto err;
1371 		}
1372 
1373 		slots = page_pool->slots;
1374 		page_pool->frag_page = devm_kcalloc(dev, slots,
1375 						    sizeof(struct page *),
1376 						    GFP_KERNEL);
1377 		if (!page_pool->frag_page) {
1378 			ret = -ENOMEM;
1379 			goto err;
1380 		}
1381 
1382 		page_pool->frag_dma_addr = devm_kcalloc(dev, slots,
1383 							sizeof(dma_addr_t),
1384 							GFP_KERNEL);
1385 		if (!page_pool->frag_dma_addr) {
1386 			ret = -ENOMEM;
1387 			goto err;
1388 		}
1389 
1390 		page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);
1391 		rx_ring->page_pool = page_pool;
1392 	}
1393 
1394 	for (i = 0; i < pdata->txq_cnt; i++) {
1395 		/* allocate tx descriptor ring */
1396 		owner = xgene_derive_ring_owner(pdata);
1397 		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
1398 		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1399 						      RING_CFGSIZE_16KB,
1400 						      ring_id);
1401 		if (!tx_ring) {
1402 			ret = -ENOMEM;
1403 			goto err;
1404 		}
1405 
1406 		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
1407 		exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
1408 					       GFP_KERNEL | __GFP_ZERO);
1409 		if (!exp_bufs) {
1410 			ret = -ENOMEM;
1411 			goto err;
1412 		}
1413 		tx_ring->exp_bufs = exp_bufs;
1414 
1415 		pdata->tx_ring[i] = tx_ring;
1416 
1417 		if (!pdata->cq_cnt) {
1418 			cp_ring = pdata->rx_ring[i];
1419 		} else {
1420 			/* allocate tx completion descriptor ring */
1421 			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
1422 							 cpu_bufnum++);
1423 			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1424 							      RING_CFGSIZE_16KB,
1425 							      ring_id);
1426 			if (!cp_ring) {
1427 				ret = -ENOMEM;
1428 				goto err;
1429 			}
1430 
1431 			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
1432 			cp_ring->index = i;
1433 		}
1434 
1435 		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
1436 					       sizeof(struct sk_buff *),
1437 					       GFP_KERNEL);
1438 		if (!cp_ring->cp_skb) {
1439 			ret = -ENOMEM;
1440 			goto err;
1441 		}
1442 
1443 		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
1444 		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
1445 						      size, GFP_KERNEL);
1446 		if (!cp_ring->frag_dma_addr) {
1447 			devm_kfree(dev, cp_ring->cp_skb);
1448 			ret = -ENOMEM;
1449 			goto err;
1450 		}
1451 
1452 		tx_ring->cp_ring = cp_ring;
1453 		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
1454 	}
1455 
1456 	if (pdata->ring_ops->coalesce)
1457 		pdata->ring_ops->coalesce(pdata->tx_ring[0]);
1458 	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
1459 
1460 	return 0;
1461 
1462 err:
1463 	xgene_enet_free_desc_rings(pdata);
1464 	return ret;
1465 }
1466 
1467 static void xgene_enet_get_stats64(
1468 			struct net_device *ndev,
1469 			struct rtnl_link_stats64 *stats)
1470 {
1471 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1472 	struct xgene_enet_desc_ring *ring;
1473 	int i;
1474 
1475 	for (i = 0; i < pdata->txq_cnt; i++) {
1476 		ring = pdata->tx_ring[i];
1477 		if (ring) {
1478 			stats->tx_packets += ring->tx_packets;
1479 			stats->tx_bytes += ring->tx_bytes;
1480 			stats->tx_dropped += ring->tx_dropped;
1481 			stats->tx_errors += ring->tx_errors;
1482 		}
1483 	}
1484 
1485 	for (i = 0; i < pdata->rxq_cnt; i++) {
1486 		ring = pdata->rx_ring[i];
1487 		if (ring) {
1488 			stats->rx_packets += ring->rx_packets;
1489 			stats->rx_bytes += ring->rx_bytes;
1490 			stats->rx_dropped += ring->rx_dropped;
1491 			stats->rx_errors += ring->rx_errors +
1492 				ring->rx_length_errors +
1493 				ring->rx_crc_errors +
1494 				ring->rx_frame_errors +
1495 				ring->rx_fifo_errors;
1496 			stats->rx_length_errors += ring->rx_length_errors;
1497 			stats->rx_crc_errors += ring->rx_crc_errors;
1498 			stats->rx_frame_errors += ring->rx_frame_errors;
1499 			stats->rx_fifo_errors += ring->rx_fifo_errors;
1500 		}
1501 	}
1502 }
1503 
1504 static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
1505 {
1506 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1507 	int ret;
1508 
1509 	ret = eth_mac_addr(ndev, addr);
1510 	if (ret)
1511 		return ret;
1512 	pdata->mac_ops->set_mac_addr(pdata);
1513 
1514 	return ret;
1515 }
1516 
1517 static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
1518 {
1519 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1520 	int frame_size;
1521 
1522 	if (!netif_running(ndev))
1523 		return 0;
1524 
1525 	frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;
1526 
1527 	xgene_enet_close(ndev);
1528 	ndev->mtu = new_mtu;
1529 	pdata->mac_ops->set_framesize(pdata, frame_size);
1530 	xgene_enet_open(ndev);
1531 
1532 	return 0;
1533 }
1534 
1535 static const struct net_device_ops xgene_ndev_ops = {
1536 	.ndo_open = xgene_enet_open,
1537 	.ndo_stop = xgene_enet_close,
1538 	.ndo_start_xmit = xgene_enet_start_xmit,
1539 	.ndo_tx_timeout = xgene_enet_timeout,
1540 	.ndo_get_stats64 = xgene_enet_get_stats64,
1541 	.ndo_change_mtu = xgene_change_mtu,
1542 	.ndo_set_mac_address = xgene_enet_set_mac_address,
1543 };
1544 
1545 #ifdef CONFIG_ACPI
1546 static void xgene_get_port_id_acpi(struct device *dev,
1547 				  struct xgene_enet_pdata *pdata)
1548 {
1549 	acpi_status status;
1550 	u64 temp;
1551 
1552 	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
1553 	if (ACPI_FAILURE(status)) {
1554 		pdata->port_id = 0;
1555 	} else {
1556 		pdata->port_id = temp;
1557 	}
1558 
1559 	return;
1560 }
1561 #endif
1562 
1563 static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
1564 {
1565 	u32 id = 0;
1566 
1567 	of_property_read_u32(dev->of_node, "port-id", &id);
1568 
1569 	pdata->port_id = id & BIT(0);
1570 
1571 	return;
1572 }
1573 
1574 static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
1575 {
1576 	struct device *dev = &pdata->pdev->dev;
1577 	int delay, ret;
1578 
1579 	ret = device_property_read_u32(dev, "tx-delay", &delay);
1580 	if (ret) {
1581 		pdata->tx_delay = 4;
1582 		return 0;
1583 	}
1584 
1585 	if (delay < 0 || delay > 7) {
1586 		dev_err(dev, "Invalid tx-delay specified\n");
1587 		return -EINVAL;
1588 	}
1589 
1590 	pdata->tx_delay = delay;
1591 
1592 	return 0;
1593 }
1594 
1595 static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
1596 {
1597 	struct device *dev = &pdata->pdev->dev;
1598 	int delay, ret;
1599 
1600 	ret = device_property_read_u32(dev, "rx-delay", &delay);
1601 	if (ret) {
1602 		pdata->rx_delay = 2;
1603 		return 0;
1604 	}
1605 
1606 	if (delay < 0 || delay > 7) {
1607 		dev_err(dev, "Invalid rx-delay specified\n");
1608 		return -EINVAL;
1609 	}
1610 
1611 	pdata->rx_delay = delay;
1612 
1613 	return 0;
1614 }
1615 
1616 static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
1617 {
1618 	struct platform_device *pdev = pdata->pdev;
1619 	struct device *dev = &pdev->dev;
1620 	int i, ret, max_irqs;
1621 
1622 	if (phy_interface_mode_is_rgmii(pdata->phy_mode))
1623 		max_irqs = 1;
1624 	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
1625 		max_irqs = 2;
1626 	else
1627 		max_irqs = XGENE_MAX_ENET_IRQ;
1628 
1629 	for (i = 0; i < max_irqs; i++) {
1630 		ret = platform_get_irq(pdev, i);
1631 		if (ret <= 0) {
1632 			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1633 				max_irqs = i;
1634 				pdata->rxq_cnt = max_irqs / 2;
1635 				pdata->txq_cnt = max_irqs / 2;
1636 				pdata->cq_cnt = max_irqs / 2;
1637 				break;
1638 			}
1639 			dev_err(dev, "Unable to get ENET IRQ\n");
1640 			ret = ret ? : -ENXIO;
1641 			return ret;
1642 		}
1643 		pdata->irqs[i] = ret;
1644 	}
1645 
1646 	return 0;
1647 }
1648 
1649 static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
1650 {
1651 	int ret;
1652 
1653 	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
1654 		return;
1655 
1656 	if (!IS_ENABLED(CONFIG_MDIO_XGENE))
1657 		return;
1658 
1659 	ret = xgene_enet_phy_connect(pdata->ndev);
1660 	if (!ret)
1661 		pdata->mdio_driver = true;
1662 }
1663 
1664 static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
1665 {
1666 	struct device *dev = &pdata->pdev->dev;
1667 
1668 	pdata->sfp_gpio_en = false;
1669 	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
1670 	    (!device_property_present(dev, "sfp-gpios") &&
1671 	     !device_property_present(dev, "rxlos-gpios")))
1672 		return;
1673 
1674 	pdata->sfp_gpio_en = true;
1675 	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
1676 	if (IS_ERR(pdata->sfp_rdy))
1677 		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
1678 }
1679 
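/* Map the ENET, ring CSR and ring command regions and read the port id,
 * MAC address, PHY mode, delays, IRQs, GPIOs and clock from DT/ACPI.
 */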
1680 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1681 {
1682 	struct platform_device *pdev;
1683 	struct net_device *ndev;
1684 	struct device *dev;
1685 	struct resource *res;
1686 	void __iomem *base_addr;
1687 	u32 offset;
1688 	int ret = 0;
1689 
1690 	pdev = pdata->pdev;
1691 	dev = &pdev->dev;
1692 	ndev = pdata->ndev;
1693 
1694 	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
1695 	if (!res) {
1696 		dev_err(dev, "Resource enet_csr not defined\n");
1697 		return -ENODEV;
1698 	}
1699 	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
1700 	if (!pdata->base_addr) {
1701 		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
1702 		return -ENOMEM;
1703 	}
1704 
1705 	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
1706 	if (!res) {
1707 		dev_err(dev, "Resource ring_csr not defined\n");
1708 		return -ENODEV;
1709 	}
1710 	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
1711 							resource_size(res));
1712 	if (!pdata->ring_csr_addr) {
1713 		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
1714 		return -ENOMEM;
1715 	}
1716 
1717 	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
1718 	if (!res) {
1719 		dev_err(dev, "Resource ring_cmd not defined\n");
1720 		return -ENODEV;
1721 	}
1722 	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
1723 							resource_size(res));
1724 	if (!pdata->ring_cmd_addr) {
1725 		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
1726 		return -ENOMEM;
1727 	}
1728 
1729 	if (dev->of_node)
1730 		xgene_get_port_id_dt(dev, pdata);
1731 #ifdef CONFIG_ACPI
1732 	else
1733 		xgene_get_port_id_acpi(dev, pdata);
1734 #endif
1735 
1736 	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
1737 		eth_hw_addr_random(ndev);
1738 
1739 	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
1740 
1741 	pdata->phy_mode = device_get_phy_mode(dev);
1742 	if (pdata->phy_mode < 0) {
1743 		dev_err(dev, "Unable to get phy-connection-type\n");
1744 		return pdata->phy_mode;
1745 	}
1746 	if (!phy_interface_mode_is_rgmii(pdata->phy_mode) &&
1747 	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
1748 	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
1749 		dev_err(dev, "Incorrect phy-connection-type specified\n");
1750 		return -ENODEV;
1751 	}
1752 
1753 	ret = xgene_get_tx_delay(pdata);
1754 	if (ret)
1755 		return ret;
1756 
1757 	ret = xgene_get_rx_delay(pdata);
1758 	if (ret)
1759 		return ret;
1760 
1761 	ret = xgene_enet_get_irqs(pdata);
1762 	if (ret)
1763 		return ret;
1764 
1765 	xgene_enet_gpiod_get(pdata);
1766 
1767 	pdata->clk = devm_clk_get(&pdev->dev, NULL);
1768 	if (IS_ERR(pdata->clk)) {
1769 		if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
			/* Abort if the clock is defined but couldn't be
			 * retrieved. Always abort if the clock is missing on
			 * a DT system as the driver can't cope with this case.
			 */
1774 			if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
1775 				return PTR_ERR(pdata->clk);
1776 			/* Firmware may have set up the clock already. */
1777 			dev_info(dev, "clocks have been setup already\n");
1778 		}
1779 	}
1780 
1781 	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
1782 		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
1783 	else
1784 		base_addr = pdata->base_addr;
1785 	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
1786 	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
1787 	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
1788 	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
1789 	if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
1790 	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
1791 		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
1792 		pdata->mcx_stats_addr =
1793 			pdata->base_addr + BLOCK_ETH_STATS_OFFSET;
1794 		offset = (pdata->enet_id == XGENE_ENET1) ?
1795 			  BLOCK_ETH_MAC_CSR_OFFSET :
1796 			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
1797 		pdata->mcx_mac_csr_addr = base_addr + offset;
1798 	} else {
1799 		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
1800 		pdata->mcx_stats_addr = base_addr + BLOCK_AXG_STATS_OFFSET;
1801 		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
1802 		pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
1803 	}
1804 	pdata->rx_buff_cnt = NUM_PKT_BUF;
1805 
1806 	return 0;
1807 }
1808 
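/* Reset the port, create the descriptor rings, fill the buffer pools
 * and configure either the classifier tree (XGMII) or CLE bypass.
 */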
1809 static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
1810 {
1811 	struct xgene_enet_cle *enet_cle = &pdata->cle;
1812 	struct xgene_enet_desc_ring *page_pool;
1813 	struct net_device *ndev = pdata->ndev;
1814 	struct xgene_enet_desc_ring *buf_pool;
1815 	u16 dst_ring_num, ring_id;
1816 	int i, ret;
1817 	u32 count;
1818 
1819 	ret = pdata->port_ops->reset(pdata);
1820 	if (ret)
1821 		return ret;
1822 
1823 	ret = xgene_enet_create_desc_rings(ndev);
1824 	if (ret) {
1825 		netdev_err(ndev, "Error in ring configuration\n");
1826 		return ret;
1827 	}
1828 
1829 	/* setup buffer pool */
1830 	for (i = 0; i < pdata->rxq_cnt; i++) {
1831 		buf_pool = pdata->rx_ring[i]->buf_pool;
1832 		xgene_enet_init_bufpool(buf_pool);
1833 		page_pool = pdata->rx_ring[i]->page_pool;
1834 		xgene_enet_init_bufpool(page_pool);
1835 
1836 		count = pdata->rx_buff_cnt;
1837 		ret = xgene_enet_refill_bufpool(buf_pool, count);
1838 		if (ret)
1839 			goto err;
1840 
1841 		ret = xgene_enet_refill_pagepool(page_pool, count);
1842 		if (ret)
			goto err;
	}
1846 
1847 	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
1848 	buf_pool = pdata->rx_ring[0]->buf_pool;
1849 	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		/* Initialize and enable the preclassifier tree */
1851 		enet_cle->max_nodes = 512;
1852 		enet_cle->max_dbptrs = 1024;
1853 		enet_cle->parsers = 3;
1854 		enet_cle->active_parser = PARSER_ALL;
1855 		enet_cle->ptree.start_node = 0;
1856 		enet_cle->ptree.start_dbptr = 0;
1857 		enet_cle->jump_bytes = 8;
1858 		ret = pdata->cle_ops->cle_init(pdata);
1859 		if (ret) {
1860 			netdev_err(ndev, "Preclass Tree init error\n");
		}
	} else {
1864 	} else {
1865 		dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
1866 		buf_pool = pdata->rx_ring[0]->buf_pool;
1867 		page_pool = pdata->rx_ring[0]->page_pool;
1868 		ring_id = (page_pool) ? page_pool->id : 0;
1869 		pdata->port_ops->cle_bypass(pdata, dst_ring_num,
1870 					    buf_pool->id, ring_id);
1871 	}
1872 
1873 	ndev->max_mtu = XGENE_ENET_MAX_MTU;
1874 	pdata->phy_speed = SPEED_UNKNOWN;
1875 	pdata->mac_ops->init(pdata);
1876 
1877 	return ret;
1878 
1879 err:
1880 	xgene_enet_delete_desc_rings(pdata);
1881 	return ret;
1882 }
1883 
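/* Select the MAC, port and ring operations plus the ring and buffer
 * numbering based on the PHY mode, ENET generation and port id.
 */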
1884 static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
1885 {
1886 	switch (pdata->phy_mode) {
1887 	case PHY_INTERFACE_MODE_RGMII:
1888 	case PHY_INTERFACE_MODE_RGMII_ID:
1889 	case PHY_INTERFACE_MODE_RGMII_RXID:
1890 	case PHY_INTERFACE_MODE_RGMII_TXID:
1891 		pdata->mac_ops = &xgene_gmac_ops;
1892 		pdata->port_ops = &xgene_gport_ops;
1893 		pdata->rm = RM3;
1894 		pdata->rxq_cnt = 1;
1895 		pdata->txq_cnt = 1;
1896 		pdata->cq_cnt = 0;
1897 		break;
1898 	case PHY_INTERFACE_MODE_SGMII:
1899 		pdata->mac_ops = &xgene_sgmac_ops;
1900 		pdata->port_ops = &xgene_sgport_ops;
1901 		pdata->rm = RM1;
1902 		pdata->rxq_cnt = 1;
1903 		pdata->txq_cnt = 1;
1904 		pdata->cq_cnt = 1;
1905 		break;
1906 	default:
1907 		pdata->mac_ops = &xgene_xgmac_ops;
1908 		pdata->port_ops = &xgene_xgport_ops;
1909 		pdata->cle_ops = &xgene_cle3in_ops;
1910 		pdata->rm = RM0;
1911 		if (!pdata->rxq_cnt) {
1912 			pdata->rxq_cnt = XGENE_NUM_RX_RING;
1913 			pdata->txq_cnt = XGENE_NUM_TX_RING;
1914 			pdata->cq_cnt = XGENE_NUM_TXC_RING;
1915 		}
1916 		break;
1917 	}
1918 
1919 	if (pdata->enet_id == XGENE_ENET1) {
1920 		switch (pdata->port_id) {
1921 		case 0:
1922 			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1923 				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1924 				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1925 				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1926 				pdata->ring_num = START_RING_NUM_0;
1927 			} else {
1928 				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
1929 				pdata->eth_bufnum = START_ETH_BUFNUM_0;
1930 				pdata->bp_bufnum = START_BP_BUFNUM_0;
1931 				pdata->ring_num = START_RING_NUM_0;
1932 			}
1933 			break;
1934 		case 1:
1935 			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1936 				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
1937 				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
1938 				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
1939 				pdata->ring_num = XG_START_RING_NUM_1;
1940 			} else {
1941 				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
1942 				pdata->eth_bufnum = START_ETH_BUFNUM_1;
1943 				pdata->bp_bufnum = START_BP_BUFNUM_1;
1944 				pdata->ring_num = START_RING_NUM_1;
1945 			}
1946 			break;
1947 		default:
1948 			break;
1949 		}
1950 		pdata->ring_ops = &xgene_ring1_ops;
1951 	} else {
1952 		switch (pdata->port_id) {
1953 		case 0:
1954 			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1955 			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1956 			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1957 			pdata->ring_num = X2_START_RING_NUM_0;
1958 			break;
1959 		case 1:
1960 			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
1961 			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
1962 			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
1963 			pdata->ring_num = X2_START_RING_NUM_1;
1964 			break;
1965 		default:
1966 			break;
1967 		}
1968 		pdata->rm = RM0;
1969 		pdata->ring_ops = &xgene_ring2_ops;
1970 	}
1971 }
1972 
1973 static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
1974 {
1975 	struct napi_struct *napi;
1976 	int i;
1977 
1978 	for (i = 0; i < pdata->rxq_cnt; i++) {
1979 		napi = &pdata->rx_ring[i]->napi;
1980 		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1981 			       NAPI_POLL_WEIGHT);
1982 	}
1983 
1984 	for (i = 0; i < pdata->cq_cnt; i++) {
1985 		napi = &pdata->tx_ring[i]->cp_ring->napi;
1986 		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1987 			       NAPI_POLL_WEIGHT);
1988 	}
1989 }
1990 
1991 #ifdef CONFIG_ACPI
1992 static const struct acpi_device_id xgene_enet_acpi_match[] = {
1993 	{ "APMC0D05", XGENE_ENET1},
1994 	{ "APMC0D30", XGENE_ENET1},
1995 	{ "APMC0D31", XGENE_ENET1},
1996 	{ "APMC0D3F", XGENE_ENET1},
1997 	{ "APMC0D26", XGENE_ENET2},
1998 	{ "APMC0D25", XGENE_ENET2},
1999 	{ }
2000 };
2001 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
2002 #endif
2003 
2004 static const struct of_device_id xgene_enet_of_match[] = {
2005 	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
2006 	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
2007 	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
2008 	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
2009 	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
2010 	{},
2011 };
2012 
2013 MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
2014 
2015 static int xgene_enet_probe(struct platform_device *pdev)
2016 {
2017 	struct net_device *ndev;
2018 	struct xgene_enet_pdata *pdata;
2019 	struct device *dev = &pdev->dev;
2020 	void (*link_state)(struct work_struct *);
2021 	const struct of_device_id *of_id;
2022 	int ret;
2023 
2024 	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
2025 				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
2026 	if (!ndev)
2027 		return -ENOMEM;
2028 
2029 	pdata = netdev_priv(ndev);
2030 
2031 	pdata->pdev = pdev;
2032 	pdata->ndev = ndev;
2033 	SET_NETDEV_DEV(ndev, dev);
2034 	platform_set_drvdata(pdev, pdata);
2035 	ndev->netdev_ops = &xgene_ndev_ops;
2036 	xgene_enet_set_ethtool_ops(ndev);
2037 	ndev->features |= NETIF_F_IP_CSUM |
2038 			  NETIF_F_GSO |
2039 			  NETIF_F_GRO |
2040 			  NETIF_F_SG;
2041 
2042 	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
2043 	if (of_id) {
2044 		pdata->enet_id = (enum xgene_enet_id)of_id->data;
2045 	}
2046 #ifdef CONFIG_ACPI
2047 	else {
2048 		const struct acpi_device_id *acpi_id;
2049 
2050 		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
2051 		if (acpi_id)
2052 			pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
2053 	}
2054 #endif
2055 	if (!pdata->enet_id) {
2056 		ret = -ENODEV;
2057 		goto err;
2058 	}
2059 
2060 	ret = xgene_enet_get_resources(pdata);
2061 	if (ret)
2062 		goto err;
2063 
2064 	xgene_enet_setup_ops(pdata);
2065 	spin_lock_init(&pdata->mac_lock);
2066 
2067 	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
2068 		ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
2069 		spin_lock_init(&pdata->mss_lock);
2070 	}
2071 	ndev->hw_features = ndev->features;
2072 
2073 	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
2074 	if (ret) {
2075 		netdev_err(ndev, "No usable DMA configuration\n");
2076 		goto err;
2077 	}
2078 
2079 	xgene_enet_check_phy_handle(pdata);
2080 
2081 	ret = xgene_enet_init_hw(pdata);
2082 	if (ret)
2083 		goto err2;
2084 
2085 	link_state = pdata->mac_ops->link_state;
2086 	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
2087 		INIT_DELAYED_WORK(&pdata->link_work, link_state);
2088 	} else if (!pdata->mdio_driver) {
2089 		if (phy_interface_mode_is_rgmii(pdata->phy_mode))
2090 			ret = xgene_enet_mdio_config(pdata);
2091 		else
2092 			INIT_DELAYED_WORK(&pdata->link_work, link_state);
2093 
2094 		if (ret)
2095 			goto err1;
2096 	}
2097 
2098 	spin_lock_init(&pdata->stats_lock);
2099 	ret = xgene_extd_stats_init(pdata);
2100 	if (ret)
2101 		goto err1;
2102 
2103 	xgene_enet_napi_add(pdata);
2104 	ret = register_netdev(ndev);
2105 	if (ret) {
2106 		netdev_err(ndev, "Failed to register netdev\n");
2107 		goto err1;
2108 	}
2109 
2110 	return 0;
2111 
2112 err1:
2113 	/*
2114 	 * If necessary, free_netdev() will call netif_napi_del() and undo
2115 	 * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
2116 	 */
2117 
2118 	xgene_enet_delete_desc_rings(pdata);
2119 
2120 err2:
2121 	if (pdata->mdio_driver)
2122 		xgene_enet_phy_disconnect(pdata);
2123 	else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
2124 		xgene_enet_mdio_remove(pdata);
2125 err:
2126 	free_netdev(ndev);
2127 	return ret;
2128 }
2129 
2130 static int xgene_enet_remove(struct platform_device *pdev)
2131 {
2132 	struct xgene_enet_pdata *pdata;
2133 	struct net_device *ndev;
2134 
2135 	pdata = platform_get_drvdata(pdev);
2136 	ndev = pdata->ndev;
2137 
2138 	rtnl_lock();
2139 	if (netif_running(ndev))
2140 		dev_close(ndev);
2141 	rtnl_unlock();
2142 
2143 	if (pdata->mdio_driver)
2144 		xgene_enet_phy_disconnect(pdata);
2145 	else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
2146 		xgene_enet_mdio_remove(pdata);
2147 
2148 	unregister_netdev(ndev);
2149 	xgene_enet_delete_desc_rings(pdata);
2150 	pdata->port_ops->shutdown(pdata);
2151 	free_netdev(ndev);
2152 
2153 	return 0;
2154 }
2155 
2156 static void xgene_enet_shutdown(struct platform_device *pdev)
2157 {
2158 	struct xgene_enet_pdata *pdata;
2159 
2160 	pdata = platform_get_drvdata(pdev);
2161 	if (!pdata)
2162 		return;
2163 
2164 	if (!pdata->ndev)
2165 		return;
2166 
2167 	xgene_enet_remove(pdev);
2168 }
2169 
2170 static struct platform_driver xgene_enet_driver = {
2171 	.driver = {
2172 		   .name = "xgene-enet",
2173 		   .of_match_table = of_match_ptr(xgene_enet_of_match),
2174 		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
2175 	},
2176 	.probe = xgene_enet_probe,
2177 	.remove = xgene_enet_remove,
2178 	.shutdown = xgene_enet_shutdown,
2179 };
2180 
2181 module_platform_driver(xgene_enet_driver);
2182 
2183 MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
2184 MODULE_VERSION(XGENE_DRV_VERSION);
2185 MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
2186 MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
2187 MODULE_LICENSE("GPL");
2188