/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
 *
 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is dual-licensed; you may select either version 2 of
 * the GNU General Public License ("GPL") or BSD license ("BSD").
 *
 * This Synopsys DWC XLGMAC software driver and associated documentation
 * (hereinafter the "Software") is an unsupported proprietary work of
 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
 * Licensed Product under any End User Software License Agreement or
 * Agreement for Licensed Products with Synopsys or any supplement thereto.
 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
 * in the SOFTWARE may be the trademarks of their respective owners.
 */

#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"

static int xlgmac_one_poll(struct napi_struct *, int);
static int xlgmac_all_poll(struct napi_struct *, int);

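/* Ring bookkeeping: ring->cur and ring->dirty are free-running unsigned
 * counters that XLGMAC_GET_DESC_DATA() masks down to a ring index, so the
 * subtractions below remain correct even after the counters wrap around.
 */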
static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
{
	return ring->dma_desc_count - (ring->cur - ring->dirty);
}

static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
{
	return ring->cur - ring->dirty;
}

static int xlgmac_maybe_stop_tx_queue(
			struct xlgmac_channel *channel,
			struct xlgmac_ring *ring,
			unsigned int count)
{
	struct xlgmac_pdata *pdata = channel->pdata;

	if (count > xlgmac_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_ops.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

static void xlgmac_prep_vlan(struct sk_buff *skb,
			     struct xlgmac_pkt_info *pkt_info)
{
	if (skb_vlan_tag_present(skb))
		pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
}

static int xlgmac_prep_tso(struct sk_buff *skb,
			   struct xlgmac_pkt_info *pkt_info)
{
	int ret;

	if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	pkt_info->tcp_header_len = tcp_hdrlen(skb);
	pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
	pkt_info->mss = skb_shinfo(skb)->gso_size;

	XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
	XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
		  pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
	XLGMAC_PR("mss=%u\n", pkt_info->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
	pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;

	return 0;
}

static int xlgmac_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	return 1;
}

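/* Walk the skb once up front to fill in the preliminary packet info: the
 * attribute flags (TSO, checksum, VLAN) and the worst-case number of
 * descriptors the packet will consume, so the queue can be stopped before
 * any descriptor is written.
 */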
static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
			       struct xlgmac_ring *ring,
			       struct sk_buff *skb,
			       struct xlgmac_pkt_info *pkt_info)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	pkt_info->skb = skb;

	context_desc = 0;
	pkt_info->desc_count = 0;

	pkt_info->tx_packets = 1;
	pkt_info->tx_bytes = skb->len;

	if (xlgmac_is_tso(skb)) {
		/* TSO requires an extra descriptor if the MSS differs from
		 * the one currently programmed
		 */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			pkt_info->desc_count++;
		}

		/* TSO requires an extra descriptor for the TSO header */
		pkt_info->desc_count++;

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
					1);
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);
	}

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if the tag differs from
		 * the one currently programmed, but it can share the TSO
		 * context descriptor
		 */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			if (!context_desc) {
				context_desc = 1;
				pkt_info->desc_count++;
			}

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
					1);
	}

	for (len = skb_headlen(skb); len;) {
		pkt_info->desc_count++;
		len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len;) {
			pkt_info->desc_count++;
			len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
		}
	}
}

static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XLGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);

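	/* Round the buffer size up to the next XLGMAC_RX_BUF_ALIGN boundary.
	 * For example, assuming a 64-byte alignment, the default 1500-byte
	 * MTU yields 1500 + 14 + 4 + 4 = 1522 bytes, rounded up to 1536.
	 */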
	rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
		      ~(XLGMAC_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

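/* NAPI gating helpers: Rx/Tx completion interrupts are masked while a poll
 * is in progress and re-armed once it completes, selecting the interrupt
 * set according to which rings a channel actually has.
 */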
static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->enable_int(channel, int_id);
	}
}

static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->disable_int(channel, int_id);
	}
}

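/* Device-level interrupt handler. It serves all interrupts when the driver
 * runs with a single shared IRQ, and MAC/MTL (non-DMA-channel) events when
 * per-channel DMA interrupts are in use.
 */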
static irqreturn_t xlgmac_isr(int irq, void *data)
{
	unsigned int dma_isr, dma_ch_isr, mac_isr;
	struct xlgmac_pdata *pdata = data;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int i, ti, ri;

	hw_ops = &pdata->hw_ops;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts, so in polling mode it is enough to check that this
	 * register is non-zero
	 */
	dma_isr = readl(pdata->mac_regs + DMA_ISR);
	if (!dma_isr)
		return IRQ_HANDLED;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel_head + i;

		dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even when
		 * per-channel DMA interrupts are in use. Only schedule the
		 * private data napi structure if per-channel interrupts are
		 * not enabled.
		 */
		ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
					 DMA_CH_SR_TI_LEN);
		ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
					 DMA_CH_SR_RI_LEN);
		if (!pdata->per_channel_irq && (ti || ri)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xlgmac_disable_rx_tx_ints(pdata);

				pdata->stats.napi_poll_isr++;
				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		}

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS,
					DMA_CH_SR_TPS_LEN))
			pdata->stats.tx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS,
					DMA_CH_SR_RPS_LEN))
			pdata->stats.rx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS,
					DMA_CH_SR_TBU_LEN))
			pdata->stats.tx_buffer_unavailable++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
					DMA_CH_SR_RBU_LEN))
			pdata->stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
					DMA_CH_SR_FBE_LEN)) {
			pdata->stats.fatal_bus_error++;
			schedule_work(&pdata->restart_work);
		}

		/* Clear all interrupt signals */
		writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
	}

	if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
				DMA_ISR_MACIS_LEN)) {
		mac_isr = readl(pdata->mac_regs + MAC_ISR);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
					MAC_ISR_MMCTXIS_LEN))
			hw_ops->tx_mmc_int(pdata);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
					MAC_ISR_MMCRXIS_LEN))
			hw_ops->rx_mmc_int(pdata);
	}

	return IRQ_HANDLED;
}

static irqreturn_t xlgmac_dma_isr(int irq, void *data)
{
	struct xlgmac_channel *channel = data;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	return IRQ_HANDLED;
}

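/* Tx coalescing timer callback: presumably armed from the descriptor-write
 * path when Tx interrupts are coalesced (see the hardware ops); it schedules
 * NAPI so completed descriptors are still cleaned up in a timely fashion.
 */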
static void xlgmac_tx_timer(unsigned long data)
{
	struct xlgmac_channel *channel = (struct xlgmac_channel *)data;
	struct xlgmac_pdata *pdata = channel->pdata;
	struct napi_struct *napi;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq_nosync(channel->dma_irq);
		else
			xlgmac_disable_rx_tx_ints(pdata);

		pdata->stats.napi_poll_txtimer++;
		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;
}

static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		setup_timer(&channel->tx_timer, xlgmac_tx_timer,
			    (unsigned long)channel);
	}
}

static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}

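/* The add/del flags control whether the NAPI instances are registered with
 * (or removed from) the stack in addition to being enabled or disabled.
 */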
static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xlgmac_one_poll,
					       NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xlgmac_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}

static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
			       IRQF_SHARED, netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xlgmac_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

static void xlgmac_free_irqs(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}

static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}

static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}

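/* Bring-up order matters: program the hardware, enable NAPI and request
 * the IRQs, and only then enable the DMA/MAC Tx and Rx paths and the
 * netdev queues.
 */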
static int xlgmac_start(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	int ret;

	hw_ops->init(pdata);
	xlgmac_napi_enable(pdata, 1);

	ret = xlgmac_request_irqs(pdata);
	if (ret)
		goto err_napi;

	hw_ops->enable_tx(pdata);
	hw_ops->enable_rx(pdata);
	netif_tx_start_all_queues(netdev);

	return 0;

err_napi:
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	return ret;
}

static void xlgmac_stop(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	struct netdev_queue *txq;
	unsigned int i;

	netif_tx_stop_all_queues(netdev);
	xlgmac_stop_timers(pdata);
	hw_ops->disable_tx(pdata);
	hw_ops->disable_rx(pdata);
	xlgmac_free_irqs(pdata);
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}
}

static void xlgmac_restart_dev(struct xlgmac_pdata *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xlgmac_stop(pdata);

	xlgmac_free_tx_data(pdata);
	xlgmac_free_rx_data(pdata);

	xlgmac_start(pdata);
}

static void xlgmac_restart(struct work_struct *work)
{
	struct xlgmac_pdata *pdata = container_of(work,
						   struct xlgmac_pdata,
						   restart_work);

	rtnl_lock();

	xlgmac_restart_dev(pdata);

	rtnl_unlock();
}

static int xlgmac_open(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;
	int ret;

	desc_ops = &pdata->desc_ops;

	/* TODO: Initialize the phy */

	/* Calculate the Rx buffer size before allocating rings */
	ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		return ret;
	pdata->rx_buf_size = ret;

	/* Allocate the channels and rings */
	ret = desc_ops->alloc_channles_and_rings(pdata);
	if (ret)
		return ret;

	INIT_WORK(&pdata->restart_work, xlgmac_restart);
	xlgmac_init_timers(pdata);

	ret = xlgmac_start(pdata);
	if (ret)
		goto err_channels_and_rings;

	return 0;

err_channels_and_rings:
	desc_ops->free_channels_and_rings(pdata);

	return ret;
}

static int xlgmac_close(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;

	desc_ops = &pdata->desc_ops;

	/* Stop the device */
	xlgmac_stop(pdata);

	/* Free the channels and rings */
	desc_ops->free_channels_and_rings(pdata);

	return 0;
}

static void xlgmac_tx_timeout(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}

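/* Main transmit path: build the preliminary packet info, make sure enough
 * descriptors are available, perform TSO/VLAN preparation, map the skb for
 * DMA and hand the descriptors to the hardware.
 */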
static int xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_pkt_info *tx_pkt_info;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	struct xlgmac_ring *ring;
	int ret;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	XLGMAC_PR("skb->len = %d\n", skb->len);

	channel = pdata->channel_head + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	tx_pkt_info = &ring->pkt_info;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Prepare preliminary packet info for TX */
	memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
	xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);

	/* Check that there are enough descriptors available */
	ret = xlgmac_maybe_stop_tx_queue(channel, ring,
					 tx_pkt_info->desc_count);
	if (ret)
		return ret;

	ret = xlgmac_prep_tso(skb, tx_pkt_info);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		return ret;
	}
	xlgmac_prep_vlan(skb, tx_pkt_info);

	if (!desc_ops->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_ops->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xlgmac_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);

	return NETDEV_TX_OK;
}

static void xlgmac_get_stats64(struct net_device *netdev,
			       struct rtnl_link_stats64 *s)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_stats *pstats = &pdata->stats;

	pdata->hw_ops.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;
}

static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_ops->set_mac_address(pdata, netdev->dev_addr);

	return 0;
}

static int xlgmac_ioctl(struct net_device *netdev,
			struct ifreq *ifreq, int cmd)
{
	if (!netif_running(netdev))
		return -ENODEV;

	return 0;
}

static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	int ret;

	ret = xlgmac_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xlgmac_restart_dev(pdata);

	return 0;
}

static int xlgmac_vlan_rx_add_vid(struct net_device *netdev,
				  __be16 proto,
				  u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	set_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev,
				   __be16 proto,
				   u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	clear_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xlgmac_poll_controller(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			xlgmac_dma_isr(channel->dma_irq, channel);
	} else {
		disable_irq(pdata->dev_irq);
		xlgmac_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int xlgmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_ops->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_ops->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_ops->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_ops->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_ops->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_ops->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_ops->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_ops->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	return 0;
}

static void xlgmac_set_rx_mode(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	hw_ops->config_rx_mode(pdata);
}

static const struct net_device_ops xlgmac_netdev_ops = {
	.ndo_open		= xlgmac_open,
	.ndo_stop		= xlgmac_close,
	.ndo_start_xmit		= xlgmac_xmit,
	.ndo_tx_timeout		= xlgmac_tx_timeout,
	.ndo_get_stats64	= xlgmac_get_stats64,
	.ndo_change_mtu		= xlgmac_change_mtu,
	.ndo_set_mac_address	= xlgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= xlgmac_ioctl,
	.ndo_vlan_rx_add_vid	= xlgmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xlgmac_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xlgmac_poll_controller,
#endif
	.ndo_set_features	= xlgmac_set_features,
	.ndo_set_rx_mode	= xlgmac_set_rx_mode,
};

const struct net_device_ops *xlgmac_get_netdev_ops(void)
{
	return &xlgmac_netdev_ops;
}

static void xlgmac_rx_refresh(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	while (ring->dirty != ring->cur) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);

		/* Reset desc_data values */
		desc_ops->unmap_desc_data(pdata, desc_data);

		if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
			break;

		hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry
	 */
	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
}

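/* Receive buffers are split into a header part and a payload part. Copy the
 * (small) header portion into the skb linear area and, if more data remains,
 * attach the payload buffer as a page fragment so it is not copied.
 */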
static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata,
					 struct napi_struct *napi,
					 struct xlgmac_desc_data *desc_data,
					 unsigned int len)
{
	unsigned int copy_len;
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Start with the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
				      desc_data->rx.hdr.dma_off,
				      desc_data->rx.hdr.dma_len,
				      DMA_FROM_DEVICE);

	packet = page_address(desc_data->rx.hdr.pa.pages) +
		 desc_data->rx.hdr.pa.pages_offset;
	copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
	copy_len = min(desc_data->rx.hdr.dma_len, copy_len);
	skb_copy_to_linear_data(skb, packet, copy_len);
	skb_put(skb, copy_len);

	len -= copy_len;
	if (len) {
		/* Add the remaining data as a frag */
		dma_sync_single_range_for_cpu(pdata->dev,
					      desc_data->rx.buf.dma_base,
					      desc_data->rx.buf.dma_off,
					      desc_data->rx.buf.dma_len,
					      DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				desc_data->rx.buf.pa.pages,
				desc_data->rx.buf.pa.pages_offset,
				len, desc_data->rx.buf.dma_len);
		desc_data->rx.buf.pa.pages = NULL;
	}

	return skb;
}

static int xlgmac_tx_poll(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->tx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int tx_packets = 0, tx_bytes = 0;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int cur;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
		dma_desc = desc_data->dma_desc;

		if (!hw_ops->tx_complete(dma_desc))
			break;

		/* Make sure descriptor fields are read after reading
		 * the OWN bit
		 */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_ops->is_last_desc(dma_desc)) {
			tx_packets += desc_data->tx.packets;
			tx_bytes += desc_data->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_ops->unmap_desc_data(pdata, desc_data);
		hw_ops->tx_desc_reset(desc_data);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	XLGMAC_PR("processed=%d\n", processed);

	return processed;
}

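/* A received packet can span multiple descriptors and may be followed by a
 * context descriptor. If the budget runs out mid-packet, the partial skb and
 * error/length state are saved in the descriptor data and restored on the
 * next poll.
 */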
static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int len, dma_desc_len, max_len;
	unsigned int context_next, context;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_pkt_info *pkt_info;
	unsigned int incomplete, error;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int received = 0;
	struct napi_struct *napi;
	struct sk_buff *skb;
	int packet_count = 0;

	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	incomplete = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
	pkt_info = &ring->pkt_info;
	while (packet_count < budget) {
		/* On the first pass through the loop, restore any state
		 * saved by a previous poll that ran out of budget
		 */
		if (!received && desc_data->state_saved) {
			skb = desc_data->state.skb;
			error = desc_data->state.error;
			len = desc_data->state.len;
		} else {
			memset(pkt_info, 0, sizeof(*pkt_info));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);

		if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
			xlgmac_rx_refresh(channel);

		if (hw_ops->dev_read(channel))
			break;

		received++;
		ring->cur++;

		incomplete = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
		context_next = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
		context = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_LEN);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || pkt_info->errors) {
			if (pkt_info->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Length is cumulative, get this descriptor's length */
			dma_desc_len = desc_data->rx.len - len;
			len += dma_desc_len;

			if (dma_desc_len && !skb) {
				skb = xlgmac_create_skb(pdata, napi, desc_data,
							dma_desc_len);
				if (!skb)
					error = 1;
			} else if (dma_desc_len) {
				dma_sync_single_range_for_cpu(
						pdata->dev,
						desc_data->rx.buf.dma_base,
						desc_data->rx.buf.dma_off,
						desc_data->rx.buf.dma_len,
						DMA_FROM_DEVICE);

				skb_add_rx_frag(
					skb, skb_shinfo(skb)->nr_frags,
					desc_data->rx.buf.pa.pages,
					desc_data->rx.buf.pa.pages_offset,
					dma_desc_len,
					desc_data->rx.buf.dma_len);
				desc_data->rx.buf.pa.pages = NULL;
			}
		}

		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xlgmac_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       pkt_info->vlan_ctag);
			pdata->stats.rx_vlan_packets++;
		}

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
					RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
			skb_set_hash(skb, pkt_info->rss_hash,
				     pkt_info->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
		desc_data->state_saved = 1;
		desc_data->state.skb = skb;
		desc_data->state.len = len;
		desc_data->state.error = error;
	}

	XLGMAC_PR("packet_count = %d\n", packet_count);

	return packet_count;
}

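/* NAPI poll routine used with per-channel DMA interrupts; xlgmac_all_poll
 * below serves the single shared interrupt case and splits the budget
 * across all Rx rings.
 */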
static int xlgmac_one_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_channel *channel = container_of(napi,
						struct xlgmac_channel,
						napi);
	int processed = 0;

	XLGMAC_PR("budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xlgmac_tx_poll(channel);

	/* Process Rx ring next */
	processed = xlgmac_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		enable_irq(channel->dma_irq);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}

static int xlgmac_all_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_pdata *pdata = container_of(napi,
						   struct xlgmac_pdata,
						   napi);
	struct xlgmac_channel *channel;
	int processed, last_processed;
	int ring_budget;
	unsigned int i;

	XLGMAC_PR("budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xlgmac_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xlgmac_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		xlgmac_enable_rx_tx_ints(pdata);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}