1 /*
2  * AMD 10Gb Ethernet driver
3  *
4  * This file is available to you under your choice of the following two
5  * licenses:
6  *
7  * License 1: GPLv2
8  *
9  * Copyright (c) 2014 Advanced Micro Devices, Inc.
10  *
11  * This file is free software; you may copy, redistribute and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation, either version 2 of the License, or (at
14  * your option) any later version.
15  *
16  * This file is distributed in the hope that it will be useful, but
17  * WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
23  *
24  * This file incorporates work covered by the following copyright and
25  * permission notice:
26  *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
27  *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28  *     Inc. unless otherwise expressly agreed to in writing between Synopsys
29  *     and you.
30  *
31  *     The Software IS NOT an item of Licensed Software or Licensed Product
32  *     under any End User Software License Agreement or Agreement for Licensed
33  *     Product with Synopsys or any supplement thereto.  Permission is hereby
34  *     granted, free of charge, to any person obtaining a copy of this software
35  *     annotated with this license and the Software, to deal in the Software
36  *     without restriction, including without limitation the rights to use,
37  *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38  *     of the Software, and to permit persons to whom the Software is furnished
39  *     to do so, subject to the following conditions:
40  *
41  *     The above copyright notice and this permission notice shall be included
42  *     in all copies or substantial portions of the Software.
43  *
44  *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45  *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46  *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47  *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48  *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49  *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50  *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51  *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52  *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53  *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54  *     THE POSSIBILITY OF SUCH DAMAGE.
55  *
56  *
57  * License 2: Modified BSD
58  *
59  * Copyright (c) 2014 Advanced Micro Devices, Inc.
60  * All rights reserved.
61  *
62  * Redistribution and use in source and binary forms, with or without
63  * modification, are permitted provided that the following conditions are met:
64  *     * Redistributions of source code must retain the above copyright
65  *       notice, this list of conditions and the following disclaimer.
66  *     * Redistributions in binary form must reproduce the above copyright
67  *       notice, this list of conditions and the following disclaimer in the
68  *       documentation and/or other materials provided with the distribution.
69  *     * Neither the name of Advanced Micro Devices, Inc. nor the
70  *       names of its contributors may be used to endorse or promote products
71  *       derived from this software without specific prior written permission.
72  *
73  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76  * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83  *
84  * This file incorporates work covered by the following copyright and
85  * permission notice:
86  *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
87  *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88  *     Inc. unless otherwise expressly agreed to in writing between Synopsys
89  *     and you.
90  *
91  *     The Software IS NOT an item of Licensed Software or Licensed Product
92  *     under any End User Software License Agreement or Agreement for Licensed
93  *     Product with Synopsys or any supplement thereto.  Permission is hereby
94  *     granted, free of charge, to any person obtaining a copy of this software
95  *     annotated with this license and the Software, to deal in the Software
96  *     without restriction, including without limitation the rights to use,
97  *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98  *     of the Software, and to permit persons to whom the Software is furnished
99  *     to do so, subject to the following conditions:
100  *
101  *     The above copyright notice and this permission notice shall be included
102  *     in all copies or substantial portions of the Software.
103  *
104  *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105  *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106  *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107  *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108  *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109  *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110  *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111  *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112  *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113  *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114  *     THE POSSIBILITY OF SUCH DAMAGE.
115  */
116 
117 #include <linux/spinlock.h>
118 #include <linux/tcp.h>
119 #include <linux/if_vlan.h>
120 #include <net/busy_poll.h>
121 #include <linux/clk.h>
122 #include <linux/if_ether.h>
123 #include <linux/net_tstamp.h>
124 #include <linux/phy.h>
125 
126 #include "xgbe.h"
127 #include "xgbe-common.h"
128 
130 static int xgbe_poll(struct napi_struct *, int);
131 static void xgbe_set_rx_mode(struct net_device *);
132 
133 static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
134 {
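	/* cur and dirty are free-running indices; their difference is the
	 * number of descriptors currently in use
	 */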
135 	return (ring->rdesc_count - (ring->cur - ring->dirty));
136 }
137 
138 static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
139 {
140 	unsigned int rx_buf_size;
141 
142 	if (mtu > XGMAC_JUMBO_PACKET_MTU) {
143 		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
144 		return -EINVAL;
145 	}
146 
147 	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
148 	if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
149 		rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
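
	/* Round the buffer size up to the next XGBE_RX_BUF_ALIGN boundary */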
150 	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
151 		      ~(XGBE_RX_BUF_ALIGN - 1);
152 
153 	return rx_buf_size;
154 }
155 
156 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
157 {
158 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
159 	struct xgbe_channel *channel;
160 	enum xgbe_int int_id;
161 	unsigned int i;
162 
163 	channel = pdata->channel;
164 	for (i = 0; i < pdata->channel_count; i++, channel++) {
165 		if (channel->tx_ring && channel->rx_ring)
166 			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
167 		else if (channel->tx_ring)
168 			int_id = XGMAC_INT_DMA_CH_SR_TI;
169 		else if (channel->rx_ring)
170 			int_id = XGMAC_INT_DMA_CH_SR_RI;
171 		else
172 			continue;
173 
174 		hw_if->enable_int(channel, int_id);
175 	}
176 }
177 
178 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
179 {
180 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
181 	struct xgbe_channel *channel;
182 	enum xgbe_int int_id;
183 	unsigned int i;
184 
185 	channel = pdata->channel;
186 	for (i = 0; i < pdata->channel_count; i++, channel++) {
187 		if (channel->tx_ring && channel->rx_ring)
188 			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
189 		else if (channel->tx_ring)
190 			int_id = XGMAC_INT_DMA_CH_SR_TI;
191 		else if (channel->rx_ring)
192 			int_id = XGMAC_INT_DMA_CH_SR_RI;
193 		else
194 			continue;
195 
196 		hw_if->disable_int(channel, int_id);
197 	}
198 }
199 
200 static irqreturn_t xgbe_isr(int irq, void *data)
201 {
202 	struct xgbe_prv_data *pdata = data;
203 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
204 	struct xgbe_channel *channel;
205 	unsigned int dma_isr, dma_ch_isr;
206 	unsigned int mac_isr, mac_tssr;
207 	unsigned int i;
208 
209 	/* The DMA interrupt status register also reports MAC and MTL
210 	 * interrupts. So for polling mode, we just need to check whether
211 	 * this register is non-zero
212 	 */
213 	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
214 	if (!dma_isr)
215 		goto isr_done;
216 
217 	DBGPR("-->xgbe_isr\n");
218 
219 	DBGPR("  DMA_ISR = %08x\n", dma_isr);
220 	DBGPR("  DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
221 	DBGPR("  DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));
222 
223 	for (i = 0; i < pdata->channel_count; i++) {
224 		if (!(dma_isr & (1 << i)))
225 			continue;
226 
227 		channel = pdata->channel + i;
228 
229 		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
230 		DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
231 
232 		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
233 		    XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
234 			if (napi_schedule_prep(&pdata->napi)) {
235 				/* Disable Tx and Rx interrupts */
236 				xgbe_disable_rx_tx_ints(pdata);
237 
238 				/* Turn on polling */
239 				__napi_schedule(&pdata->napi);
240 			}
241 		}
242 
243 		/* Restart the device on a Fatal Bus Error */
244 		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
245 			schedule_work(&pdata->restart_work);
246 
247 		/* Clear all interrupt signals */
248 		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
249 	}
250 
251 	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
252 		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
253 
254 		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
255 			hw_if->tx_mmc_int(pdata);
256 
257 		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
258 			hw_if->rx_mmc_int(pdata);
259 
260 		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
261 			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);
262 
263 			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
264 				/* Read Tx Timestamp to clear interrupt */
265 				pdata->tx_tstamp =
266 					hw_if->get_tx_tstamp(pdata);
267 				schedule_work(&pdata->tx_tstamp_work);
268 			}
269 		}
270 	}
271 
272 	DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
273 
274 	DBGPR("<--xgbe_isr\n");
275 
276 isr_done:
277 	return IRQ_HANDLED;
278 }
279 
280 static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
281 {
282 	struct xgbe_channel *channel = container_of(timer,
283 						    struct xgbe_channel,
284 						    tx_timer);
285 	struct xgbe_ring *ring = channel->tx_ring;
286 	struct xgbe_prv_data *pdata = channel->pdata;
287 	unsigned long flags;
288 
289 	DBGPR("-->xgbe_tx_timer\n");
290 
291 	spin_lock_irqsave(&ring->lock, flags);
292 
293 	if (napi_schedule_prep(&pdata->napi)) {
294 		/* Disable Tx and Rx interrupts */
295 		xgbe_disable_rx_tx_ints(pdata);
296 
297 		/* Turn on polling */
298 		__napi_schedule(&pdata->napi);
299 	}
300 
301 	channel->tx_timer_active = 0;
302 
303 	spin_unlock_irqrestore(&ring->lock, flags);
304 
305 	DBGPR("<--xgbe_tx_timer\n");
306 
307 	return HRTIMER_NORESTART;
308 }
309 
310 static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
311 {
312 	struct xgbe_channel *channel;
313 	unsigned int i;
314 
315 	DBGPR("-->xgbe_init_tx_timers\n");
316 
317 	channel = pdata->channel;
318 	for (i = 0; i < pdata->channel_count; i++, channel++) {
319 		if (!channel->tx_ring)
320 			break;
321 
322 		DBGPR("  %s adding tx timer\n", channel->name);
323 		hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
324 			     HRTIMER_MODE_REL);
325 		channel->tx_timer.function = xgbe_tx_timer;
326 	}
327 
328 	DBGPR("<--xgbe_init_tx_timers\n");
329 }
330 
331 static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
332 {
333 	struct xgbe_channel *channel;
334 	unsigned int i;
335 
336 	DBGPR("-->xgbe_stop_tx_timers\n");
337 
338 	channel = pdata->channel;
339 	for (i = 0; i < pdata->channel_count; i++, channel++) {
340 		if (!channel->tx_ring)
341 			break;
342 
343 		DBGPR("  %s deleting tx timer\n", channel->name);
344 		channel->tx_timer_active = 0;
345 		hrtimer_cancel(&channel->tx_timer);
346 	}
347 
348 	DBGPR("<--xgbe_stop_tx_timers\n");
349 }
350 
351 void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
352 {
353 	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
354 	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
355 
356 	DBGPR("-->xgbe_get_all_hw_features\n");
357 
358 	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
359 	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
360 	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);
361 
362 	memset(hw_feat, 0, sizeof(*hw_feat));
363 
364 	/* Hardware feature register 0 */
365 	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
366 	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
367 	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
368 	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
369 	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
370 	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
371 	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
372 	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
373 	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
374 	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
375 	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
376 	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
377 					      ADDMACADRSEL);
378 	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
379 	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
380 
381 	/* Hardware feature register 1 */
382 	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
383 						RXFIFOSIZE);
384 	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
385 						TXFIFOSIZE);
386 	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
387 	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
388 	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
389 	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
390 	hw_feat->tc_cnt	       = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
391 	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
392 						  HASHTBLSZ);
393 	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
394 						  L3L4FNUM);
395 
396 	/* Hardware feature register 2 */
397 	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
398 	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
399 	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
400 	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
401 	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
402 	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
403 
404 	/* Translate the Hash Table size into the actual number of entries */
405 	switch (hw_feat->hash_table_size) {
406 	case 0:
407 		break;
408 	case 1:
409 		hw_feat->hash_table_size = 64;
410 		break;
411 	case 2:
412 		hw_feat->hash_table_size = 128;
413 		break;
414 	case 3:
415 		hw_feat->hash_table_size = 256;
416 		break;
417 	}
418 
419 	/* The Queue and Channel counts are zero-based, so increment them
420 	 * to get the actual number
421 	 */
422 	hw_feat->rx_q_cnt++;
423 	hw_feat->tx_q_cnt++;
424 	hw_feat->rx_ch_cnt++;
425 	hw_feat->tx_ch_cnt++;
426 
427 	DBGPR("<--xgbe_get_all_hw_features\n");
428 }
429 
430 static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
431 {
432 	if (add)
433 		netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
434 			       NAPI_POLL_WEIGHT);
435 	napi_enable(&pdata->napi);
436 }
437 
438 static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
439 {
440 	napi_disable(&pdata->napi);
441 
442 	if (del)
443 		netif_napi_del(&pdata->napi);
444 }
445 
446 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
447 {
448 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
449 
450 	DBGPR("-->xgbe_init_tx_coalesce\n");
451 
452 	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
453 	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;
454 
455 	hw_if->config_tx_coalesce(pdata);
456 
457 	DBGPR("<--xgbe_init_tx_coalesce\n");
458 }
459 
460 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
461 {
462 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
463 
464 	DBGPR("-->xgbe_init_rx_coalesce\n");
465 
466 	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
467 	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
468 
469 	hw_if->config_rx_coalesce(pdata);
470 
471 	DBGPR("<--xgbe_init_rx_coalesce\n");
472 }
473 
474 static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
475 {
476 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
477 	struct xgbe_channel *channel;
478 	struct xgbe_ring *ring;
479 	struct xgbe_ring_data *rdata;
480 	unsigned int i, j;
481 
482 	DBGPR("-->xgbe_free_tx_skbuff\n");
483 
484 	channel = pdata->channel;
485 	for (i = 0; i < pdata->channel_count; i++, channel++) {
486 		ring = channel->tx_ring;
487 		if (!ring)
488 			break;
489 
490 		for (j = 0; j < ring->rdesc_count; j++) {
491 			rdata = XGBE_GET_DESC_DATA(ring, j);
492 			desc_if->unmap_skb(pdata, rdata);
493 		}
494 	}
495 
496 	DBGPR("<--xgbe_free_tx_skbuff\n");
497 }
498 
499 static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
500 {
501 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
502 	struct xgbe_channel *channel;
503 	struct xgbe_ring *ring;
504 	struct xgbe_ring_data *rdata;
505 	unsigned int i, j;
506 
507 	DBGPR("-->xgbe_free_rx_skbuff\n");
508 
509 	channel = pdata->channel;
510 	for (i = 0; i < pdata->channel_count; i++, channel++) {
511 		ring = channel->rx_ring;
512 		if (!ring)
513 			break;
514 
515 		for (j = 0; j < ring->rdesc_count; j++) {
516 			rdata = XGBE_GET_DESC_DATA(ring, j);
517 			desc_if->unmap_skb(pdata, rdata);
518 		}
519 	}
520 
521 	DBGPR("<--xgbe_free_rx_skbuff\n");
522 }
523 
524 static void xgbe_adjust_link(struct net_device *netdev)
525 {
526 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
527 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
528 	struct phy_device *phydev = pdata->phydev;
529 	int new_state = 0;
530 
531 	if (!phydev)
532 		return;
533 
534 	if (phydev->link) {
535 		/* Flow control support */
536 		if (pdata->pause_autoneg) {
537 			if (phydev->pause || phydev->asym_pause) {
538 				pdata->tx_pause = 1;
539 				pdata->rx_pause = 1;
540 			} else {
541 				pdata->tx_pause = 0;
542 				pdata->rx_pause = 0;
543 			}
544 		}
545 
546 		if (pdata->tx_pause != pdata->phy_tx_pause) {
547 			hw_if->config_tx_flow_control(pdata);
548 			pdata->phy_tx_pause = pdata->tx_pause;
549 		}
550 
551 		if (pdata->rx_pause != pdata->phy_rx_pause) {
552 			hw_if->config_rx_flow_control(pdata);
553 			pdata->phy_rx_pause = pdata->rx_pause;
554 		}
555 
556 		/* Speed support */
557 		if (phydev->speed != pdata->phy_speed) {
558 			new_state = 1;
559 
560 			switch (phydev->speed) {
561 			case SPEED_10000:
562 				hw_if->set_xgmii_speed(pdata);
563 				break;
564 
565 			case SPEED_2500:
566 				hw_if->set_gmii_2500_speed(pdata);
567 				break;
568 
569 			case SPEED_1000:
570 				hw_if->set_gmii_speed(pdata);
571 				break;
572 			}
573 			pdata->phy_speed = phydev->speed;
574 		}
575 
576 		if (phydev->link != pdata->phy_link) {
577 			new_state = 1;
578 			pdata->phy_link = 1;
579 		}
580 	} else if (pdata->phy_link) {
581 		new_state = 1;
582 		pdata->phy_link = 0;
583 		pdata->phy_speed = SPEED_UNKNOWN;
584 	}
585 
586 	if (new_state)
587 		phy_print_status(phydev);
588 }
589 
590 static int xgbe_phy_init(struct xgbe_prv_data *pdata)
591 {
592 	struct net_device *netdev = pdata->netdev;
593 	struct phy_device *phydev = pdata->phydev;
594 	int ret;
595 
596 	pdata->phy_link = -1;
597 	pdata->phy_speed = SPEED_UNKNOWN;
598 	pdata->phy_tx_pause = pdata->tx_pause;
599 	pdata->phy_rx_pause = pdata->rx_pause;
600 
601 	ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
602 				 pdata->phy_mode);
603 	if (ret) {
604 		netdev_err(netdev, "phy_connect_direct failed\n");
605 		return ret;
606 	}
607 
608 	if (!phydev->drv || (phydev->drv->phy_id == 0)) {
609 		netdev_err(netdev, "phy_id not valid\n");
610 		ret = -ENODEV;
611 		goto err_phy_connect;
612 	}
613 	DBGPR("  phy_connect_direct succeeded for PHY %s, link=%d\n",
614 	      dev_name(&phydev->dev), phydev->link);
615 
616 	return 0;
617 
618 err_phy_connect:
619 	phy_disconnect(phydev);
620 
621 	return ret;
622 }
623 
624 static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
625 {
626 	if (!pdata->phydev)
627 		return;
628 
629 	phy_disconnect(pdata->phydev);
630 }
631 
632 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
633 {
634 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
635 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
636 	unsigned long flags;
637 
638 	DBGPR("-->xgbe_powerdown\n");
639 
640 	if (!netif_running(netdev) ||
641 	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
642 		netdev_alert(netdev, "Device is already powered down\n");
643 		DBGPR("<--xgbe_powerdown\n");
644 		return -EINVAL;
645 	}
646 
647 	phy_stop(pdata->phydev);
648 
649 	spin_lock_irqsave(&pdata->lock, flags);
650 
651 	if (caller == XGMAC_DRIVER_CONTEXT)
652 		netif_device_detach(netdev);
653 
654 	netif_tx_stop_all_queues(netdev);
655 	xgbe_napi_disable(pdata, 0);
656 
657 	/* Powerdown Tx/Rx */
658 	hw_if->powerdown_tx(pdata);
659 	hw_if->powerdown_rx(pdata);
660 
661 	pdata->power_down = 1;
662 
663 	spin_unlock_irqrestore(&pdata->lock, flags);
664 
665 	DBGPR("<--xgbe_powerdown\n");
666 
667 	return 0;
668 }
669 
670 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
671 {
672 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
673 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
674 	unsigned long flags;
675 
676 	DBGPR("-->xgbe_powerup\n");
677 
678 	if (!netif_running(netdev) ||
679 	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
680 		netdev_alert(netdev, "Device is already powered up\n");
681 		DBGPR("<--xgbe_powerup\n");
682 		return -EINVAL;
683 	}
684 
685 	spin_lock_irqsave(&pdata->lock, flags);
686 
687 	pdata->power_down = 0;
688 
689 	phy_start(pdata->phydev);
690 
691 	/* Enable Tx/Rx */
692 	hw_if->powerup_tx(pdata);
693 	hw_if->powerup_rx(pdata);
694 
695 	if (caller == XGMAC_DRIVER_CONTEXT)
696 		netif_device_attach(netdev);
697 
698 	xgbe_napi_enable(pdata, 0);
699 	netif_tx_start_all_queues(netdev);
700 
701 	spin_unlock_irqrestore(&pdata->lock, flags);
702 
703 	DBGPR("<--xgbe_powerup\n");
704 
705 	return 0;
706 }
707 
708 static int xgbe_start(struct xgbe_prv_data *pdata)
709 {
710 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
711 	struct net_device *netdev = pdata->netdev;
712 
713 	DBGPR("-->xgbe_start\n");
714 
715 	xgbe_set_rx_mode(netdev);
716 
717 	hw_if->init(pdata);
718 
719 	phy_start(pdata->phydev);
720 
721 	hw_if->enable_tx(pdata);
722 	hw_if->enable_rx(pdata);
723 
724 	xgbe_init_tx_timers(pdata);
725 
726 	xgbe_napi_enable(pdata, 1);
727 	netif_tx_start_all_queues(netdev);
728 
729 	DBGPR("<--xgbe_start\n");
730 
731 	return 0;
732 }
733 
734 static void xgbe_stop(struct xgbe_prv_data *pdata)
735 {
736 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
737 	struct net_device *netdev = pdata->netdev;
738 
739 	DBGPR("-->xgbe_stop\n");
740 
741 	phy_stop(pdata->phydev);
742 
743 	netif_tx_stop_all_queues(netdev);
744 	xgbe_napi_disable(pdata, 1);
745 
746 	xgbe_stop_tx_timers(pdata);
747 
748 	hw_if->disable_tx(pdata);
749 	hw_if->disable_rx(pdata);
750 
751 	DBGPR("<--xgbe_stop\n");
752 }
753 
754 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
755 {
756 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
757 
758 	DBGPR("-->xgbe_restart_dev\n");
759 
760 	/* If not running, "restart" will happen on open */
761 	if (!netif_running(pdata->netdev))
762 		return;
763 
764 	xgbe_stop(pdata);
765 	synchronize_irq(pdata->irq_number);
766 
767 	xgbe_free_tx_skbuff(pdata);
768 	xgbe_free_rx_skbuff(pdata);
769 
770 	/* Issue software reset to device if requested */
771 	if (reset)
772 		hw_if->exit(pdata);
773 
774 	xgbe_start(pdata);
775 
776 	DBGPR("<--xgbe_restart_dev\n");
777 }
778 
779 static void xgbe_restart(struct work_struct *work)
780 {
781 	struct xgbe_prv_data *pdata = container_of(work,
782 						   struct xgbe_prv_data,
783 						   restart_work);
784 
785 	rtnl_lock();
786 
787 	xgbe_restart_dev(pdata, 1);
788 
789 	rtnl_unlock();
790 }
791 
792 static void xgbe_tx_tstamp(struct work_struct *work)
793 {
794 	struct xgbe_prv_data *pdata = container_of(work,
795 						   struct xgbe_prv_data,
796 						   tx_tstamp_work);
797 	struct skb_shared_hwtstamps hwtstamps;
798 	u64 nsec;
799 	unsigned long flags;
800 
801 	if (pdata->tx_tstamp) {
802 		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
803 					    pdata->tx_tstamp);
804 
805 		memset(&hwtstamps, 0, sizeof(hwtstamps));
806 		hwtstamps.hwtstamp = ns_to_ktime(nsec);
807 		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
808 	}
809 
810 	dev_kfree_skb_any(pdata->tx_tstamp_skb);
811 
812 	spin_lock_irqsave(&pdata->tstamp_lock, flags);
813 	pdata->tx_tstamp_skb = NULL;
814 	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
815 }
816 
817 static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
818 				      struct ifreq *ifreq)
819 {
820 	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
821 			 sizeof(pdata->tstamp_config)))
822 		return -EFAULT;
823 
824 	return 0;
825 }
826 
827 static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
828 				      struct ifreq *ifreq)
829 {
830 	struct hwtstamp_config config;
831 	unsigned int mac_tscr;
832 
833 	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
834 		return -EFAULT;
835 
836 	if (config.flags)
837 		return -EINVAL;
838 
839 	mac_tscr = 0;
840 
841 	switch (config.tx_type) {
842 	case HWTSTAMP_TX_OFF:
843 		break;
844 
845 	case HWTSTAMP_TX_ON:
846 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
847 		break;
848 
849 	default:
850 		return -ERANGE;
851 	}
852 
853 	switch (config.rx_filter) {
854 	case HWTSTAMP_FILTER_NONE:
855 		break;
856 
857 	case HWTSTAMP_FILTER_ALL:
858 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
859 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
860 		break;
861 
862 	/* PTP v2, UDP, any kind of event packet */
863 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
864 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
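		/* Fall through - also apply the PTP v1 event settings */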
865 	/* PTP v1, UDP, any kind of event packet */
866 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
867 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
868 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
869 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
870 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
871 		break;
872 
873 	/* PTP v2, UDP, Sync packet */
874 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
875 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
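		/* Fall through - also apply the PTP v1 Sync settings */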
876 	/* PTP v1, UDP, Sync packet */
877 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
878 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
879 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
880 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
881 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
882 		break;
883 
884 	/* PTP v2, UDP, Delay_req packet */
885 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
886 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
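		/* Fall through - also apply the PTP v1 Delay_req settings */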
887 	/* PTP v1, UDP, Delay_req packet */
888 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
889 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
890 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
891 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
892 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
893 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
894 		break;
895 
896 	/* 802.1AS, Ethernet, any kind of event packet */
897 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
898 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
899 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
900 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
901 		break;
902 
903 	/* 802.1AS, Ethernet, Sync packet */
904 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
905 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
906 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
907 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
908 		break;
909 
910 	/* 802.1AS, Ethernet, Delay_req packet */
911 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
912 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
913 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
914 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
915 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
916 		break;
917 
918 	/* PTP v2/802.1AS, any layer, any kind of event packet */
919 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
920 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
921 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
922 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
923 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
924 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
925 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
926 		break;
927 
928 	/* PTP v2/802.1AS, any layer, Sync packet */
929 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
930 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
931 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
932 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
933 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
934 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
935 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
936 		break;
937 
938 	/* PTP v2/802.1AS, any layer, Delay_req packet */
939 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
940 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
941 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
942 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
943 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
944 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
945 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
946 		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
947 		break;
948 
949 	default:
950 		return -ERANGE;
951 	}
952 
953 	pdata->hw_if.config_tstamp(pdata, mac_tscr);
954 
955 	memcpy(&pdata->tstamp_config, &config, sizeof(config));
956 
957 	return 0;
958 }
959 
960 static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
961 				struct sk_buff *skb,
962 				struct xgbe_packet_data *packet)
963 {
964 	unsigned long flags;
965 
966 	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
967 		spin_lock_irqsave(&pdata->tstamp_lock, flags);
968 		if (pdata->tx_tstamp_skb) {
969 			/* Another timestamp in progress, ignore this one */
970 			XGMAC_SET_BITS(packet->attributes,
971 				       TX_PACKET_ATTRIBUTES, PTP, 0);
972 		} else {
973 			pdata->tx_tstamp_skb = skb_get(skb);
974 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
975 		}
976 		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
977 	}
978 
979 	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
980 		skb_tx_timestamp(skb);
981 }
982 
983 static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
984 {
985 	if (vlan_tx_tag_present(skb))
986 		packet->vlan_ctag = vlan_tx_tag_get(skb);
987 }
988 
989 static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
990 {
991 	int ret;
992 
993 	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
994 			    TSO_ENABLE))
995 		return 0;
996 
997 	ret = skb_cow_head(skb, 0);
998 	if (ret)
999 		return ret;
1000 
1001 	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1002 	packet->tcp_header_len = tcp_hdrlen(skb);
1003 	packet->tcp_payload_len = skb->len - packet->header_len;
1004 	packet->mss = skb_shinfo(skb)->gso_size;
1005 	DBGPR("  packet->header_len=%u\n", packet->header_len);
1006 	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
1007 	      packet->tcp_header_len, packet->tcp_payload_len);
1008 	DBGPR("  packet->mss=%u\n", packet->mss);
1009 
1010 	return 0;
1011 }
1012 
1013 static int xgbe_is_tso(struct sk_buff *skb)
1014 {
1015 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1016 		return 0;
1017 
1018 	if (!skb_is_gso(skb))
1019 		return 0;
1020 
1021 	DBGPR("  TSO packet to be processed\n");
1022 
1023 	return 1;
1024 }
1025 
1026 static void xgbe_packet_info(struct xgbe_prv_data *pdata,
1027 			     struct xgbe_ring *ring, struct sk_buff *skb,
1028 			     struct xgbe_packet_data *packet)
1029 {
1030 	struct skb_frag_struct *frag;
1031 	unsigned int context_desc;
1032 	unsigned int len;
1033 	unsigned int i;
1034 
1035 	context_desc = 0;
1036 	packet->rdesc_count = 0;
1037 
1038 	if (xgbe_is_tso(skb)) {
1039 		/* TSO requires an extra descriptor if mss is different */
1040 		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
1041 			context_desc = 1;
1042 			packet->rdesc_count++;
1043 		}
1044 
1045 		/* TSO requires an extra descriptor for the TSO header */
1046 		packet->rdesc_count++;
1047 
1048 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1049 			       TSO_ENABLE, 1);
1050 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1051 			       CSUM_ENABLE, 1);
1052 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1053 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1054 			       CSUM_ENABLE, 1);
	}
1055 
1056 	if (vlan_tx_tag_present(skb)) {
1057 		/* VLAN requires an extra descriptor if tag is different */
1058 		if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag) {
1059 			/* We can share with the TSO context descriptor */
1060 			if (!context_desc) {
1061 				context_desc = 1;
1062 				packet->rdesc_count++;
1063 			}
		}
1064 
1065 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1066 			       VLAN_CTAG, 1);
1067 	}
1068 
1069 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1070 	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
1071 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1072 			       PTP, 1);
1073 
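	/* One descriptor is needed per XGBE_TX_MAX_BUF_SIZE chunk of the
	 * linear data and of each fragment
	 */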
1074 	for (len = skb_headlen(skb); len;) {
1075 		packet->rdesc_count++;
1076 		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
1077 	}
1078 
1079 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1080 		frag = &skb_shinfo(skb)->frags[i];
1081 		for (len = skb_frag_size(frag); len; ) {
1082 			packet->rdesc_count++;
1083 			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
1084 		}
1085 	}
1086 }
1087 
1088 static int xgbe_open(struct net_device *netdev)
1089 {
1090 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1091 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1092 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1093 	int ret;
1094 
1095 	DBGPR("-->xgbe_open\n");
1096 
1097 	/* Initialize the phy */
1098 	ret = xgbe_phy_init(pdata);
1099 	if (ret)
1100 		return ret;
1101 
1102 	/* Enable the clocks */
1103 	ret = clk_prepare_enable(pdata->sysclk);
1104 	if (ret) {
1105 		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
1106 		goto err_phy_init;
1107 	}
1108 
1109 	ret = clk_prepare_enable(pdata->ptpclk);
1110 	if (ret) {
1111 		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
1112 		goto err_sysclk;
1113 	}
1114 
1115 	/* Calculate the Rx buffer size before allocating rings */
1116 	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
1117 	if (ret < 0)
1118 		goto err_ptpclk;
1119 	pdata->rx_buf_size = ret;
1120 
1121 	/* Allocate the ring descriptors and buffers */
1122 	ret = desc_if->alloc_ring_resources(pdata);
1123 	if (ret)
1124 		goto err_ptpclk;
1125 
1126 	/* Initialize the device restart and Tx timestamp work structs */
1127 	INIT_WORK(&pdata->restart_work, xgbe_restart);
1128 	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
1129 
1130 	/* Request interrupts */
1131 	ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
1132 			       netdev->name, pdata);
1133 	if (ret) {
1134 		netdev_alert(netdev, "error requesting irq %d\n",
1135 			     pdata->irq_number);
1136 		goto err_irq;
1137 	}
1138 	pdata->irq_number = netdev->irq;
1139 
1140 	ret = xgbe_start(pdata);
1141 	if (ret)
1142 		goto err_start;
1143 
1144 	DBGPR("<--xgbe_open\n");
1145 
1146 	return 0;
1147 
1148 err_start:
1149 	hw_if->exit(pdata);
1150 
1151 	devm_free_irq(pdata->dev, pdata->irq_number, pdata);
1152 	pdata->irq_number = 0;
1153 
1154 err_irq:
1155 	desc_if->free_ring_resources(pdata);
1156 
1157 err_ptpclk:
1158 	clk_disable_unprepare(pdata->ptpclk);
1159 
1160 err_sysclk:
1161 	clk_disable_unprepare(pdata->sysclk);
1162 
1163 err_phy_init:
1164 	xgbe_phy_exit(pdata);
1165 
1166 	return ret;
1167 }
1168 
1169 static int xgbe_close(struct net_device *netdev)
1170 {
1171 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1172 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1173 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1174 
1175 	DBGPR("-->xgbe_close\n");
1176 
1177 	/* Stop the device */
1178 	xgbe_stop(pdata);
1179 
1180 	/* Issue software reset to device */
1181 	hw_if->exit(pdata);
1182 
1183 	/* Free all the ring data */
1184 	desc_if->free_ring_resources(pdata);
1185 
1186 	/* Release the interrupt */
1187 	if (pdata->irq_number != 0) {
1188 		devm_free_irq(pdata->dev, pdata->irq_number, pdata);
1189 		pdata->irq_number = 0;
1190 	}
1191 
1192 	/* Disable the clocks */
1193 	clk_disable_unprepare(pdata->ptpclk);
1194 	clk_disable_unprepare(pdata->sysclk);
1195 
1196 	/* Release the phy */
1197 	xgbe_phy_exit(pdata);
1198 
1199 	DBGPR("<--xgbe_close\n");
1200 
1201 	return 0;
1202 }
1203 
1204 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
1205 {
1206 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1207 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1208 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1209 	struct xgbe_channel *channel;
1210 	struct xgbe_ring *ring;
1211 	struct xgbe_packet_data *packet;
1212 	unsigned long flags;
1213 	int ret;
1214 
1215 	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
1216 
1217 	channel = pdata->channel + skb->queue_mapping;
1218 	ring = channel->tx_ring;
1219 	packet = &ring->packet_data;
1220 
1221 	ret = NETDEV_TX_OK;
1222 
1223 	spin_lock_irqsave(&ring->lock, flags);
1224 
1225 	if (skb->len == 0) {
1226 		netdev_err(netdev, "empty skb received from stack\n");
1227 		dev_kfree_skb_any(skb);
1228 		goto tx_netdev_return;
1229 	}
1230 
1231 	/* Calculate preliminary packet info */
1232 	memset(packet, 0, sizeof(*packet));
1233 	xgbe_packet_info(pdata, ring, skb, packet);
1234 
1235 	/* Check that there are enough descriptors available */
1236 	if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
1237 		DBGPR("  Tx queue stopped, not enough descriptors available\n");
1238 		netif_stop_subqueue(netdev, channel->queue_index);
1239 		ring->tx.queue_stopped = 1;
1240 		ret = NETDEV_TX_BUSY;
1241 		goto tx_netdev_return;
1242 	}
1243 
1244 	ret = xgbe_prep_tso(skb, packet);
1245 	if (ret) {
1246 		netdev_err(netdev, "error processing TSO packet\n");
1247 		dev_kfree_skb_any(skb);
1248 		goto tx_netdev_return;
1249 	}
1250 	xgbe_prep_vlan(skb, packet);
1251 
1252 	if (!desc_if->map_tx_skb(channel, skb)) {
1253 		dev_kfree_skb_any(skb);
1254 		goto tx_netdev_return;
1255 	}
1256 
1257 	xgbe_prep_tx_tstamp(pdata, skb, packet);
1258 
1259 	/* Configure required descriptor fields for transmission */
1260 	hw_if->pre_xmit(channel);
1261 
1262 #ifdef XGMAC_ENABLE_TX_PKT_DUMP
1263 	xgbe_print_pkt(netdev, skb, true);
1264 #endif
1265 
1266 tx_netdev_return:
1267 	spin_unlock_irqrestore(&ring->lock, flags);
1268 
1269 	DBGPR("<--xgbe_xmit\n");
1270 
1271 	return ret;
1272 }
1273 
1274 static void xgbe_set_rx_mode(struct net_device *netdev)
1275 {
1276 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1277 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1278 	unsigned int pr_mode, am_mode;
1279 
1280 	DBGPR("-->xgbe_set_rx_mode\n");
1281 
1282 	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
1283 	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
1284 
1285 	hw_if->set_promiscuous_mode(pdata, pr_mode);
1286 	hw_if->set_all_multicast_mode(pdata, am_mode);
1287 
1288 	hw_if->add_mac_addresses(pdata);
1289 
1290 	DBGPR("<--xgbe_set_rx_mode\n");
1291 }
1292 
1293 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
1294 {
1295 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1296 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1297 	struct sockaddr *saddr = addr;
1298 
1299 	DBGPR("-->xgbe_set_mac_address\n");
1300 
1301 	if (!is_valid_ether_addr(saddr->sa_data))
1302 		return -EADDRNOTAVAIL;
1303 
1304 	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
1305 
1306 	hw_if->set_mac_address(pdata, netdev->dev_addr);
1307 
1308 	DBGPR("<--xgbe_set_mac_address\n");
1309 
1310 	return 0;
1311 }
1312 
1313 static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
1314 {
1315 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1316 	int ret;
1317 
1318 	switch (cmd) {
1319 	case SIOCGHWTSTAMP:
1320 		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
1321 		break;
1322 
1323 	case SIOCSHWTSTAMP:
1324 		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
1325 		break;
1326 
1327 	default:
1328 		ret = -EOPNOTSUPP;
1329 	}
1330 
1331 	return ret;
1332 }
1333 
1334 static int xgbe_change_mtu(struct net_device *netdev, int mtu)
1335 {
1336 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1337 	int ret;
1338 
1339 	DBGPR("-->xgbe_change_mtu\n");
1340 
1341 	ret = xgbe_calc_rx_buf_size(netdev, mtu);
1342 	if (ret < 0)
1343 		return ret;
1344 
1345 	pdata->rx_buf_size = ret;
1346 	netdev->mtu = mtu;
1347 
1348 	xgbe_restart_dev(pdata, 0);
1349 
1350 	DBGPR("<--xgbe_change_mtu\n");
1351 
1352 	return 0;
1353 }
1354 
1355 static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
1356 						  struct rtnl_link_stats64 *s)
1357 {
1358 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1359 	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
1360 
1361 	DBGPR("-->%s\n", __func__);
1362 
1363 	pdata->hw_if.read_mmc_stats(pdata);
1364 
1365 	s->rx_packets = pstats->rxframecount_gb;
1366 	s->rx_bytes = pstats->rxoctetcount_gb;
1367 	s->rx_errors = pstats->rxframecount_gb -
1368 		       pstats->rxbroadcastframes_g -
1369 		       pstats->rxmulticastframes_g -
1370 		       pstats->rxunicastframes_g;
1371 	s->multicast = pstats->rxmulticastframes_g;
1372 	s->rx_length_errors = pstats->rxlengtherror;
1373 	s->rx_crc_errors = pstats->rxcrcerror;
1374 	s->rx_fifo_errors = pstats->rxfifooverflow;
1375 
1376 	s->tx_packets = pstats->txframecount_gb;
1377 	s->tx_bytes = pstats->txoctetcount_gb;
1378 	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
1379 	s->tx_dropped = netdev->stats.tx_dropped;
1380 
1381 	DBGPR("<--%s\n", __func__);
1382 
1383 	return s;
1384 }
1385 
1386 static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1387 				u16 vid)
1388 {
1389 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1390 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1391 
1392 	DBGPR("-->%s\n", __func__);
1393 
1394 	set_bit(vid, pdata->active_vlans);
1395 	hw_if->update_vlan_hash_table(pdata);
1396 
1397 	DBGPR("<--%s\n", __func__);
1398 
1399 	return 0;
1400 }
1401 
1402 static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1403 				 u16 vid)
1404 {
1405 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1406 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1407 
1408 	DBGPR("-->%s\n", __func__);
1409 
1410 	clear_bit(vid, pdata->active_vlans);
1411 	hw_if->update_vlan_hash_table(pdata);
1412 
1413 	DBGPR("<--%s\n", __func__);
1414 
1415 	return 0;
1416 }
1417 
1418 #ifdef CONFIG_NET_POLL_CONTROLLER
1419 static void xgbe_poll_controller(struct net_device *netdev)
1420 {
1421 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1422 
1423 	DBGPR("-->xgbe_poll_controller\n");
1424 
1425 	disable_irq(pdata->irq_number);
1426 
1427 	xgbe_isr(pdata->irq_number, pdata);
1428 
1429 	enable_irq(pdata->irq_number);
1430 
1431 	DBGPR("<--xgbe_poll_controller\n");
1432 }
1433 #endif /* End CONFIG_NET_POLL_CONTROLLER */
1434 
1435 static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
1436 {
1437 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1438 	unsigned int offset, queue;
1439 	u8 i;
1440 
1441 	if (tc && (tc != pdata->hw_feat.tc_cnt))
1442 		return -EINVAL;
1443 
1444 	if (tc) {
1445 		netdev_set_num_tc(netdev, tc);
1446 		for (i = 0, queue = 0, offset = 0; i < tc; i++) {
1447 			while ((queue < pdata->tx_q_count) &&
1448 			       (pdata->q2tc_map[queue] == i))
1449 				queue++;
1450 
1451 			DBGPR("  TC%u using TXq%u-%u\n", i, offset, queue - 1);
1452 			netdev_set_tc_queue(netdev, i, queue - offset, offset);
1453 			offset = queue;
1454 		}
1455 	} else {
1456 		netdev_reset_tc(netdev);
1457 	}
1458 
1459 	return 0;
1460 }
1461 
1462 static int xgbe_set_features(struct net_device *netdev,
1463 			     netdev_features_t features)
1464 {
1465 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1466 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1467 	unsigned int rxcsum, rxvlan, rxvlan_filter;
1468 
1469 	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
1470 	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
1471 	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
1472 
1473 	if ((features & NETIF_F_RXCSUM) && !rxcsum)
1474 		hw_if->enable_rx_csum(pdata);
1475 	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
1476 		hw_if->disable_rx_csum(pdata);
1477 
1478 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
1479 		hw_if->enable_rx_vlan_stripping(pdata);
1480 	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
1481 		hw_if->disable_rx_vlan_stripping(pdata);
1482 
1483 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
1484 		hw_if->enable_rx_vlan_filtering(pdata);
1485 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
1486 		hw_if->disable_rx_vlan_filtering(pdata);
1487 
1488 	pdata->netdev_features = features;
1489 
1490 	DBGPR("<--xgbe_set_features\n");
1491 
1492 	return 0;
1493 }
1494 
1495 static const struct net_device_ops xgbe_netdev_ops = {
1496 	.ndo_open		= xgbe_open,
1497 	.ndo_stop		= xgbe_close,
1498 	.ndo_start_xmit		= xgbe_xmit,
1499 	.ndo_set_rx_mode	= xgbe_set_rx_mode,
1500 	.ndo_set_mac_address	= xgbe_set_mac_address,
1501 	.ndo_validate_addr	= eth_validate_addr,
1502 	.ndo_do_ioctl		= xgbe_ioctl,
1503 	.ndo_change_mtu		= xgbe_change_mtu,
1504 	.ndo_get_stats64	= xgbe_get_stats64,
1505 	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
1506 	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
1507 #ifdef CONFIG_NET_POLL_CONTROLLER
1508 	.ndo_poll_controller	= xgbe_poll_controller,
1509 #endif
1510 	.ndo_setup_tc		= xgbe_setup_tc,
1511 	.ndo_set_features	= xgbe_set_features,
1512 };
1513 
1514 struct net_device_ops *xgbe_get_netdev_ops(void)
1515 {
1516 	return (struct net_device_ops *)&xgbe_netdev_ops;
1517 }
1518 
1519 static void xgbe_rx_refresh(struct xgbe_channel *channel)
1520 {
1521 	struct xgbe_prv_data *pdata = channel->pdata;
1522 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1523 	struct xgbe_ring *ring = channel->rx_ring;
1524 	struct xgbe_ring_data *rdata;
1525 
1526 	desc_if->realloc_skb(channel);
1527 
1528 	/* Update the Rx Tail Pointer Register with the address of
1529 	 * the last cleaned entry
	 */
1530 	rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
1531 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
1532 			  lower_32_bits(rdata->rdesc_dma));
1533 }
1534 
1535 static int xgbe_tx_poll(struct xgbe_channel *channel)
1536 {
1537 	struct xgbe_prv_data *pdata = channel->pdata;
1538 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1539 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1540 	struct xgbe_ring *ring = channel->tx_ring;
1541 	struct xgbe_ring_data *rdata;
1542 	struct xgbe_ring_desc *rdesc;
1543 	struct net_device *netdev = pdata->netdev;
1544 	unsigned long flags;
1545 	int processed = 0;
1546 
1547 	DBGPR("-->xgbe_tx_poll\n");
1548 
1549 	/* Nothing to do if there isn't a Tx ring for this channel */
1550 	if (!ring)
1551 		return 0;
1552 
1553 	spin_lock_irqsave(&ring->lock, flags);
1554 
1555 	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
1556 	       (ring->dirty < ring->cur)) {
1557 		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
1558 		rdesc = rdata->rdesc;
1559 
1560 		if (!hw_if->tx_complete(rdesc))
1561 			break;
1562 
1563 #ifdef XGMAC_ENABLE_TX_DESC_DUMP
1564 		xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
1565 #endif
1566 
1567 		/* Free the SKB and reset the descriptor for re-use */
1568 		desc_if->unmap_skb(pdata, rdata);
1569 		hw_if->tx_desc_reset(rdata);
1570 
1571 		processed++;
1572 		ring->dirty++;
1573 	}
1574 
1575 	if ((ring->tx.queue_stopped == 1) &&
1576 	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
1577 		ring->tx.queue_stopped = 0;
1578 		netif_wake_subqueue(netdev, channel->queue_index);
1579 	}
1580 
1581 	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
1582 
1583 	spin_unlock_irqrestore(&ring->lock, flags);
1584 
1585 	return processed;
1586 }
1587 
1588 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
1589 {
1590 	struct xgbe_prv_data *pdata = channel->pdata;
1591 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1592 	struct xgbe_ring *ring = channel->rx_ring;
1593 	struct xgbe_ring_data *rdata;
1594 	struct xgbe_packet_data *packet;
1595 	struct net_device *netdev = pdata->netdev;
1596 	struct sk_buff *skb;
1597 	struct skb_shared_hwtstamps *hwtstamps;
1598 	unsigned int incomplete, error, context_next, context;
1599 	unsigned int len, put_len, max_len;
1600 	int received = 0;
1601 
1602 	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
1603 
1604 	/* Nothing to do if there isn't a Rx ring for this channel */
1605 	if (!ring)
1606 		return 0;
1607 
1608 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1609 	packet = &ring->packet_data;
1610 	while (received < budget) {
1611 		DBGPR("  cur = %d\n", ring->cur);
1612 
1613 		/* First time in the loop, see if we need to restore state */
1614 		if (!received && rdata->state_saved) {
1615 			incomplete = rdata->state.incomplete;
1616 			context_next = rdata->state.context_next;
1617 			skb = rdata->state.skb;
1618 			error = rdata->state.error;
1619 			len = rdata->state.len;
1620 		} else {
1621 			memset(packet, 0, sizeof(*packet));
1622 			incomplete = 0;
1623 			context_next = 0;
1624 			skb = NULL;
1625 			error = 0;
1626 			len = 0;
1627 		}
1628 
1629 read_again:
1630 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1631 
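		/* Re-arm Rx descriptors once more than an eighth of the
		 * ring has been used
		 */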
1632 		if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
1633 			xgbe_rx_refresh(channel);
1634 
1635 		if (hw_if->dev_read(channel))
1636 			break;
1637 
1638 		received++;
1639 		ring->cur++;
1640 		ring->dirty++;
1641 
1642 		dma_unmap_single(pdata->dev, rdata->skb_dma,
1643 				 rdata->skb_dma_len, DMA_FROM_DEVICE);
1644 		rdata->skb_dma = 0;
1645 
1646 		incomplete = XGMAC_GET_BITS(packet->attributes,
1647 					    RX_PACKET_ATTRIBUTES,
1648 					    INCOMPLETE);
1649 		context_next = XGMAC_GET_BITS(packet->attributes,
1650 					      RX_PACKET_ATTRIBUTES,
1651 					      CONTEXT_NEXT);
1652 		context = XGMAC_GET_BITS(packet->attributes,
1653 					 RX_PACKET_ATTRIBUTES,
1654 					 CONTEXT);
1655 
1656 		/* Earlier error, just drain the remaining data */
1657 		if ((incomplete || context_next) && error)
1658 			goto read_again;
1659 
1660 		if (error || packet->errors) {
1661 			if (packet->errors)
1662 				DBGPR("Error in received packet\n");
1663 			dev_kfree_skb(skb);
1664 			continue;
1665 		}
1666 
1667 		if (!context) {
1668 			put_len = rdata->len - len;
1669 			if (skb) {
1670 				if (pskb_expand_head(skb, 0, put_len,
1671 						     GFP_ATOMIC)) {
1672 					DBGPR("pskb_expand_head error\n");
1673 					if (incomplete) {
1674 						error = 1;
1675 						goto read_again;
1676 					}
1677 
1678 					dev_kfree_skb(skb);
1679 					continue;
1680 				}
1681 				memcpy(skb_tail_pointer(skb), rdata->skb->data,
1682 				       put_len);
1683 			} else {
1684 				skb = rdata->skb;
1685 				rdata->skb = NULL;
1686 			}
1687 			skb_put(skb, put_len);
1688 			len += put_len;
1689 		}
1690 
1691 		if (incomplete || context_next)
1692 			goto read_again;
1693 
1694 		/* Stray Context Descriptor? */
1695 		if (!skb)
1696 			continue;
1697 
1698 		/* Be sure we don't exceed the configured MTU */
1699 		max_len = netdev->mtu + ETH_HLEN;
1700 		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1701 		    (skb->protocol == htons(ETH_P_8021Q)))
1702 			max_len += VLAN_HLEN;
1703 
1704 		if (skb->len > max_len) {
1705 			DBGPR("packet length exceeds configured MTU\n");
1706 			dev_kfree_skb(skb);
1707 			continue;
1708 		}
1709 
1710 #ifdef XGMAC_ENABLE_RX_PKT_DUMP
1711 		xgbe_print_pkt(netdev, skb, false);
1712 #endif
1713 
1714 		skb_checksum_none_assert(skb);
1715 		if (XGMAC_GET_BITS(packet->attributes,
1716 				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
1717 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1718 
1719 		if (XGMAC_GET_BITS(packet->attributes,
1720 				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
1721 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1722 					       packet->vlan_ctag);
1723 
1724 		if (XGMAC_GET_BITS(packet->attributes,
1725 				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
1726 			u64 nsec;
1727 
1728 			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
1729 						    packet->rx_tstamp);
1730 			hwtstamps = skb_hwtstamps(skb);
1731 			hwtstamps->hwtstamp = ns_to_ktime(nsec);
1732 		}
1733 
1734 		skb->dev = netdev;
1735 		skb->protocol = eth_type_trans(skb, netdev);
1736 		skb_record_rx_queue(skb, channel->queue_index);
1737 		skb_mark_napi_id(skb, &pdata->napi);
1738 
1739 		netdev->last_rx = jiffies;
1740 		napi_gro_receive(&pdata->napi, skb);
1741 	}
1742 
1743 	/* Check if we need to save state before leaving */
1744 	if (received && (incomplete || context_next)) {
1745 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1746 		rdata->state_saved = 1;
1747 		rdata->state.incomplete = incomplete;
1748 		rdata->state.context_next = context_next;
1749 		rdata->state.skb = skb;
1750 		rdata->state.len = len;
1751 		rdata->state.error = error;
1752 	}
1753 
1754 	DBGPR("<--xgbe_rx_poll: received = %d\n", received);
1755 
1756 	return received;
1757 }
1758 
1759 static int xgbe_poll(struct napi_struct *napi, int budget)
1760 {
1761 	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
1762 						   napi);
1763 	struct xgbe_channel *channel;
1764 	int ring_budget;
1765 	int processed, last_processed;
1766 	unsigned int i;
1767 
1768 	DBGPR("-->xgbe_poll: budget=%d\n", budget);
1769 
1770 	processed = 0;
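	/* Split the budget evenly across the Rx rings */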
1771 	ring_budget = budget / pdata->rx_ring_count;
1772 	do {
1773 		last_processed = processed;
1774 
1775 		channel = pdata->channel;
1776 		for (i = 0; i < pdata->channel_count; i++, channel++) {
1777 			/* Cleanup Tx ring first */
1778 			xgbe_tx_poll(channel);
1779 
1780 			/* Process Rx ring next */
1781 			if (ring_budget > (budget - processed))
1782 				ring_budget = budget - processed;
1783 			processed += xgbe_rx_poll(channel, ring_budget);
1784 		}
1785 	} while ((processed < budget) && (processed != last_processed));
1786 
1787 	/* If we processed everything, we are done */
1788 	if (processed < budget) {
1789 		/* Turn off polling */
1790 		napi_complete(napi);
1791 
1792 		/* Enable Tx and Rx interrupts */
1793 		xgbe_enable_rx_tx_ints(pdata);
1794 	}
1795 
1796 	DBGPR("<--xgbe_poll: received = %d\n", processed);
1797 
1798 	return processed;
1799 }
1800 
1801 void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
1802 		       unsigned int count, unsigned int flag)
1803 {
1804 	struct xgbe_ring_data *rdata;
1805 	struct xgbe_ring_desc *rdesc;
1806 
1807 	while (count--) {
1808 		rdata = XGBE_GET_DESC_DATA(ring, idx);
1809 		rdesc = rdata->rdesc;
1810 		DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
1811 		      (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
1812 		      le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
1813 		      le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
1814 		idx++;
1815 	}
1816 }
1817 
1818 void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
1819 		       unsigned int idx)
1820 {
1821 	DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
1822 	      le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
1823 	      le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
1824 }
1825 
1826 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
1827 {
1828 	struct ethhdr *eth = (struct ethhdr *)skb->data;
1829 	unsigned char *buf = skb->data;
1830 	unsigned char buffer[128];
1831 	unsigned int i, j;
1832 
1833 	netdev_alert(netdev, "\n************** SKB dump ****************\n");
1834 
1835 	netdev_alert(netdev, "%s packet of %d bytes\n",
1836 		     (tx_rx ? "TX" : "RX"), skb->len);
1837 
1838 	netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
1839 	netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
1840 	netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
1841 
1842 	for (i = 0, j = 0; i < skb->len;) {
1843 		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
1844 			      buf[i++]);
1845 
1846 		if ((i % 32) == 0) {
1847 			netdev_alert(netdev, "  0x%04x: %s\n", i - 32, buffer);
1848 			j = 0;
1849 		} else if ((i % 16) == 0) {
1850 			buffer[j++] = ' ';
1851 			buffer[j++] = ' ';
1852 		} else if ((i % 4) == 0) {
1853 			buffer[j++] = ' ';
1854 		}
1855 	}
1856 	if (i % 32)
1857 		netdev_alert(netdev, "  0x%04x: %s\n", i - (i % 32), buffer);
1858 
1859 	netdev_alert(netdev, "\n************** SKB dump ****************\n");
1860 }
1861