/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>

#include "xgbe.h"
#include "xgbe-common.h"


static int xgbe_poll(struct napi_struct *, int);
static void xgbe_set_rx_mode(struct net_device *);

static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}
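
/* Note (editorial): ring->cur and ring->dirty appear to be free-running
 * unsigned indices (XGBE_GET_DESC_DATA masks an index down to the ring
 * size), so the unsigned subtraction above should remain correct even
 * after the counters wrap around.
 */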

static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
		rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}
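
/* Worked example (assuming XGBE_RX_MIN_BUF_SIZE is 1522 and
 * XGBE_RX_BUF_ALIGN is 64, per their definitions in xgbe.h): a standard
 * MTU of 1500 gives 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN)
 * = 1522 bytes, which the alignment step above rounds up to a 1536-byte
 * Rx buffer.
 */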

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	enum xgbe_int int_id;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_if->enable_int(channel, int_id);
	}
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	enum xgbe_int int_id;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_if->disable_int(channel, int_id);
	}
}

static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_tssr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	DBGPR("-->xgbe_isr\n");

	DBGPR("  DMA_ISR = %08x\n", dma_isr);
	DBGPR("  DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
	DBGPR("  DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel + i;

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		    XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule(&pdata->napi);
			}
		}

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear all interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				schedule_work(&pdata->tx_tstamp_work);
			}
		}
	}

	DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));

	DBGPR("<--xgbe_isr\n");

isr_done:
	return IRQ_HANDLED;
}

static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
{
	struct xgbe_channel *channel = container_of(timer,
						    struct xgbe_channel,
						    tx_timer);
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned long flags;

	DBGPR("-->xgbe_tx_timer\n");

	spin_lock_irqsave(&ring->lock, flags);

	if (napi_schedule_prep(&pdata->napi)) {
		/* Disable Tx and Rx interrupts */
		xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(&pdata->napi);
	}

	channel->tx_timer_active = 0;

	spin_unlock_irqrestore(&ring->lock, flags);

	DBGPR("<--xgbe_tx_timer\n");

	return HRTIMER_NORESTART;
}

static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_init_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR("  %s adding tx timer\n", channel->name);
		hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		channel->tx_timer.function = xgbe_tx_timer;
	}

	DBGPR("<--xgbe_init_tx_timers\n");
}

static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_stop_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR("  %s deleting tx timer\n", channel->name);
		channel->tx_timer_active = 0;
		hrtimer_cancel(&channel->tx_timer);
	}

	DBGPR("<--xgbe_stop_tx_timers\n");
}

void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	DBGPR("-->xgbe_get_all_hw_features\n");

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->tc_cnt	       = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* The Queue and Channel counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;

	DBGPR("<--xgbe_get_all_hw_features\n");
}

static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	if (add)
		netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
			       NAPI_POLL_WEIGHT);
	napi_enable(&pdata->napi);
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
	napi_disable(&pdata->napi);

	if (del)
		netif_napi_del(&pdata->napi);
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_skbuff\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_skb(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_skbuff\n");
}

static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_skbuff\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_skb(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_skbuff\n");
}

static void xgbe_adjust_link(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct phy_device *phydev = pdata->phydev;
	int new_state = 0;

	if (phydev == NULL)
		return;

	if (phydev->link) {
		/* Flow control support */
		if (pdata->pause_autoneg) {
			if (phydev->pause || phydev->asym_pause) {
				pdata->tx_pause = 1;
				pdata->rx_pause = 1;
			} else {
				pdata->tx_pause = 0;
				pdata->rx_pause = 0;
			}
		}

		if (pdata->tx_pause != pdata->phy_tx_pause) {
			hw_if->config_tx_flow_control(pdata);
			pdata->phy_tx_pause = pdata->tx_pause;
		}

		if (pdata->rx_pause != pdata->phy_rx_pause) {
			hw_if->config_rx_flow_control(pdata);
			pdata->phy_rx_pause = pdata->rx_pause;
		}

		/* Speed support */
		if (phydev->speed != pdata->phy_speed) {
			new_state = 1;

			switch (phydev->speed) {
			case SPEED_10000:
				hw_if->set_xgmii_speed(pdata);
				break;

			case SPEED_2500:
				hw_if->set_gmii_2500_speed(pdata);
				break;

			case SPEED_1000:
				hw_if->set_gmii_speed(pdata);
				break;
			}
			pdata->phy_speed = phydev->speed;
		}

		if (phydev->link != pdata->phy_link) {
			new_state = 1;
			pdata->phy_link = 1;
		}
	} else if (pdata->phy_link) {
		new_state = 1;
		pdata->phy_link = 0;
		pdata->phy_speed = SPEED_UNKNOWN;
	}

	if (new_state)
		phy_print_status(phydev);
}

static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct phy_device *phydev = pdata->phydev;
	int ret;

	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->phy_tx_pause = pdata->tx_pause;
	pdata->phy_rx_pause = pdata->rx_pause;

	ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
				 pdata->phy_mode);
	if (ret) {
		netdev_err(netdev, "phy_connect_direct failed\n");
		return ret;
	}

	if (!phydev->drv || (phydev->drv->phy_id == 0)) {
		netdev_err(netdev, "phy_id not valid\n");
		ret = -ENODEV;
		goto err_phy_connect;
	}
	DBGPR("  phy_connect_direct succeeded for PHY %s, link=%d\n",
	      dev_name(&phydev->dev), phydev->link);

	return 0;

err_phy_connect:
	phy_disconnect(phydev);

	return ret;
}

static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
{
	if (!pdata->phydev)
		return;

	phy_disconnect(pdata->phydev);
}

int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	phy_stop(pdata->phydev);

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);
	xgbe_napi_disable(pdata, 0);

	/* Powerdown Tx/Rx */
	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	phy_start(pdata->phydev);

	/* Enable Tx/Rx */
	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	xgbe_napi_enable(pdata, 0);
	netif_tx_start_all_queues(netdev);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}

static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct net_device *netdev = pdata->netdev;

	DBGPR("-->xgbe_start\n");

	xgbe_set_rx_mode(netdev);

	hw_if->init(pdata);

	phy_start(pdata->phydev);

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	xgbe_init_tx_timers(pdata);

	xgbe_napi_enable(pdata, 1);
	netif_tx_start_all_queues(netdev);

	DBGPR("<--xgbe_start\n");

	return 0;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct net_device *netdev = pdata->netdev;

	DBGPR("-->xgbe_stop\n");

	phy_stop(pdata->phydev);

	netif_tx_stop_all_queues(netdev);
	xgbe_napi_disable(pdata, 1);

	xgbe_stop_tx_timers(pdata);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	DBGPR("<--xgbe_stop\n");
}

static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_restart_dev\n");

	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);
	synchronize_irq(pdata->irq_number);

	xgbe_free_tx_skbuff(pdata);
	xgbe_free_rx_skbuff(pdata);

	/* Issue software reset to device if requested */
	if (reset)
		hw_if->exit(pdata);

	xgbe_start(pdata);

	DBGPR("<--xgbe_restart_dev\n");
}

static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata, 1);

	rtnl_unlock();
}

static void xgbe_tx_tstamp(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   tx_tstamp_work);
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;
	unsigned long flags;

	if (pdata->tx_tstamp) {
		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
					    pdata->tx_tstamp);

		memset(&hwtstamps, 0, sizeof(hwtstamps));
		hwtstamps.hwtstamp = ns_to_ktime(nsec);
		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
	}

	dev_kfree_skb_any(pdata->tx_tstamp_skb);

	spin_lock_irqsave(&pdata->tstamp_lock, flags);
	pdata->tx_tstamp_skb = NULL;
	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}

static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
			 sizeof(pdata->tstamp_config)))
		return -EFAULT;

	return 0;
}

static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	struct hwtstamp_config config;
	unsigned int mac_tscr;

	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	mac_tscr = 0;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;

	case HWTSTAMP_TX_ON:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;

	case HWTSTAMP_FILTER_ALL:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.1AS, Ethernet, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.1AS, Ethernet, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.1AS, Ethernet, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.1AS, any layer, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.1AS, any layer, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.1AS, any layer, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	pdata->hw_if.config_tstamp(pdata, mac_tscr);

	memcpy(&pdata->tstamp_config, &config, sizeof(config));

	return 0;
}

static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
				struct sk_buff *skb,
				struct xgbe_packet_data *packet)
{
	unsigned long flags;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
		spin_lock_irqsave(&pdata->tstamp_lock, flags);
		if (pdata->tx_tstamp_skb) {
			/* Another timestamp in progress, ignore this one */
			XGMAC_SET_BITS(packet->attributes,
				       TX_PACKET_ATTRIBUTES, PTP, 0);
		} else {
			pdata->tx_tstamp_skb = skb_get(skb);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
	}

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
		skb_tx_timestamp(skb);
}

static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (vlan_tx_tag_present(skb))
		packet->vlan_ctag = vlan_tx_tag_get(skb);
}

static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	packet->tcp_header_len = tcp_hdrlen(skb);
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;
	DBGPR("  packet->header_len=%u\n", packet->header_len);
	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR("  packet->mss=%u\n", packet->mss);

	return 0;
}

static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR("  TSO packet to be processed\n");

	return 1;
}

static void xgbe_packet_info(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	context_desc = 0;
	packet->rdesc_count = 0;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (vlan_tx_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       PTP, 1);

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}
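
/* Rough example of the descriptor accounting above (assuming
 * XGBE_TX_MAX_BUF_SIZE is larger than the buffers involved): a non-TSO,
 * non-VLAN skb with a 200-byte linear area and two 1500-byte fragments
 * needs no context descriptor and one descriptor per buffer, so
 * packet->rdesc_count ends up as 3.
 */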

static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_open\n");

	/* Initialize the phy */
	ret = xgbe_phy_init(pdata);
	if (ret)
		return ret;

	/* Enable the clocks */
	ret = clk_prepare_enable(pdata->sysclk);
	if (ret) {
		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
		goto err_phy_init;
	}

	ret = clk_prepare_enable(pdata->ptpclk);
	if (ret) {
		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
		goto err_sysclk;
	}

	/* Calculate the Rx buffer size before allocating rings */
	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		goto err_ptpclk;
	pdata->rx_buf_size = ret;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_ptpclk;

	/* Initialize the device restart and Tx timestamp work structs */
	INIT_WORK(&pdata->restart_work, xgbe_restart);
	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);

	/* Request interrupts */
	ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
			       netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     netdev->irq);
		goto err_irq;
	}
	pdata->irq_number = netdev->irq;

	ret = xgbe_start(pdata);
	if (ret)
		goto err_start;

	DBGPR("<--xgbe_open\n");

	return 0;

err_start:
	hw_if->exit(pdata);

	devm_free_irq(pdata->dev, pdata->irq_number, pdata);
	pdata->irq_number = 0;

err_irq:
	desc_if->free_ring_resources(pdata);

err_ptpclk:
	clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
	clk_disable_unprepare(pdata->sysclk);

err_phy_init:
	xgbe_phy_exit(pdata);

	return ret;
}

static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	DBGPR("-->xgbe_close\n");

	/* Stop the device */
	xgbe_stop(pdata);

	/* Issue software reset to device */
	hw_if->exit(pdata);

	/* Free all the ring data */
	desc_if->free_ring_resources(pdata);

	/* Release the interrupt */
	if (pdata->irq_number != 0) {
		devm_free_irq(pdata->dev, pdata->irq_number, pdata);
		pdata->irq_number = 0;
	}

	/* Disable the clocks */
	clk_disable_unprepare(pdata->ptpclk);
	clk_disable_unprepare(pdata->sysclk);

	/* Release the phy */
	xgbe_phy_exit(pdata);

	DBGPR("<--xgbe_close\n");

	return 0;
}

static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	unsigned long flags;
	int ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel + skb->queue_mapping;
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	spin_lock_irqsave(&ring->lock, flags);

	if (skb->len == 0) {
		netdev_err(netdev, "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(pdata, ring, skb, packet);

	/* Check that there are enough descriptors available */
	if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
		DBGPR("  Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;
		ret = NETDEV_TX_BUSY;
		goto tx_netdev_return;
	}

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netdev_err(netdev, "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	xgbe_prep_tx_tstamp(pdata, skb, packet);

	/* Configure required descriptor fields for transmission */
	hw_if->pre_xmit(channel);

#ifdef XGMAC_ENABLE_TX_PKT_DUMP
	xgbe_print_pkt(netdev, skb, true);
#endif

tx_netdev_return:
	spin_unlock_irqrestore(&ring->lock, flags);

	DBGPR("<--xgbe_xmit\n");

	return ret;
}

static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int pr_mode, am_mode;

	DBGPR("-->xgbe_set_rx_mode\n");

	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

	hw_if->set_promiscuous_mode(pdata, pr_mode);
	hw_if->set_all_multicast_mode(pdata, am_mode);

	hw_if->add_mac_addresses(pdata);

	DBGPR("<--xgbe_set_rx_mode\n");
}

static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}

static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
		break;

	case SIOCSHWTSTAMP:
		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata, 0);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}

static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
						  struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);

	return s;
}

static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	DBGPR("-->xgbe_poll_controller\n");

	disable_irq(pdata->irq_number);

	xgbe_isr(pdata->irq_number, pdata);

	enable_irq(pdata->irq_number);

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */

static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	unsigned int offset, queue;
	u8 i;

	if (tc && (tc != pdata->hw_feat.tc_cnt))
		return -EINVAL;

	if (tc) {
		netdev_set_num_tc(netdev, tc);
		for (i = 0, queue = 0, offset = 0; i < tc; i++) {
			while ((queue < pdata->tx_q_count) &&
			       (pdata->q2tc_map[queue] == i))
				queue++;

			DBGPR("  TC%u using TXq%u-%u\n", i, offset, queue - 1);
			netdev_set_tc_queue(netdev, i, queue - offset, offset);
			offset = queue;
		}
	} else {
		netdev_reset_tc(netdev);
	}

	return 0;
}

static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int rxcsum, rxvlan, rxvlan_filter;

	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_if->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_if->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}

static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open		= xgbe_open,
	.ndo_stop		= xgbe_close,
	.ndo_start_xmit		= xgbe_xmit,
	.ndo_set_rx_mode	= xgbe_set_rx_mode,
	.ndo_set_mac_address	= xgbe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= xgbe_ioctl,
	.ndo_change_mtu		= xgbe_change_mtu,
	.ndo_get_stats64	= xgbe_get_stats64,
	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xgbe_poll_controller,
#endif
	.ndo_setup_tc		= xgbe_setup_tc,
	.ndo_set_features	= xgbe_set_features,
};

struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return (struct net_device_ops *)&xgbe_netdev_ops;
}

static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	desc_if->realloc_skb(channel);

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry */
	rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));
}

static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	unsigned long flags;
	int processed = 0;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	spin_lock_irqsave(&ring->lock, flags);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty < ring->cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

#ifdef XGMAC_ENABLE_TX_DESC_DUMP
		xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
#endif

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_skb(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_wake_subqueue(netdev, channel->queue_index);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	spin_unlock_irqrestore(&ring->lock, flags);

	return processed;
}

static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int incomplete, error, context_next, context;
	unsigned int len, put_len, max_len;
	int received = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (received < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* First time in loop see if we need to restore state */
		if (!received && rdata->state_saved) {
			incomplete = rdata->state.incomplete;
			context_next = rdata->state.context_next;
			skb = rdata->state.skb;
			error = rdata->state.error;
			len = rdata->state.len;
		} else {
			memset(packet, 0, sizeof(*packet));
			incomplete = 0;
			context_next = 0;
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;
		ring->dirty++;

		dma_unmap_single(pdata->dev, rdata->skb_dma,
				 rdata->skb_dma_len, DMA_FROM_DEVICE);
		rdata->skb_dma = 0;

		incomplete = XGMAC_GET_BITS(packet->attributes,
					    RX_PACKET_ATTRIBUTES,
					    INCOMPLETE);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);
		context = XGMAC_GET_BITS(packet->attributes,
					 RX_PACKET_ATTRIBUTES,
					 CONTEXT);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				DBGPR("Error in received packet\n");
			dev_kfree_skb(skb);
			continue;
		}

		if (!context) {
			put_len = rdata->len - len;
			if (skb) {
				if (pskb_expand_head(skb, 0, put_len,
						     GFP_ATOMIC)) {
					DBGPR("pskb_expand_head error\n");
					if (incomplete) {
						error = 1;
						goto read_again;
					}

					dev_kfree_skb(skb);
					continue;
				}
				memcpy(skb_tail_pointer(skb), rdata->skb->data,
				       put_len);
			} else {
				skb = rdata->skb;
				rdata->skb = NULL;
			}
			skb_put(skb, put_len);
			len += put_len;
		}

		if (incomplete || context_next)
			goto read_again;

		/* Stray Context Descriptor? */
		if (!skb)
			continue;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			DBGPR("packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			continue;
		}

#ifdef XGMAC_ENABLE_RX_PKT_DUMP
		xgbe_print_pkt(netdev, skb, false);
#endif

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
			u64 nsec;

			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
			hwtstamps = skb_hwtstamps(skb);
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
		}

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);
		skb_mark_napi_id(skb, &pdata->napi);

		netdev->last_rx = jiffies;
		napi_gro_receive(&pdata->napi, skb);
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdata->state_saved = 1;
		rdata->state.incomplete = incomplete;
		rdata->state.context_next = context_next;
		rdata->state.skb = skb;
		rdata->state.len = len;
		rdata->state.error = error;
	}

	DBGPR("<--xgbe_rx_poll: received = %d\n", received);

	return received;
}

static int xgbe_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete(napi);

		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_poll: received = %d\n", processed);

	return processed;
}

void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
		       unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
		      (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
		      le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		      le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
		idx++;
	}
}

void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
		       unsigned int idx)
{
	DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
	      le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
	      le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
}

void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char *buf = skb->data;
	unsigned char buffer[128];
	unsigned int i, j;

	netdev_alert(netdev, "\n************** SKB dump ****************\n");

	netdev_alert(netdev, "%s packet of %d bytes\n",
		     (tx_rx ? "TX" : "RX"), skb->len);

	netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));

	for (i = 0, j = 0; i < skb->len;) {
		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
			      buf[i++]);

		if ((i % 32) == 0) {
			netdev_alert(netdev, "  0x%04x: %s\n", i - 32, buffer);
			j = 0;
		} else if ((i % 16) == 0) {
			buffer[j++] = ' ';
			buffer[j++] = ' ';
		} else if ((i % 4) == 0) {
			buffer[j++] = ' ';
		}
	}
	if (i % 32)
		netdev_alert(netdev, "  0x%04x: %s\n", i - (i % 32), buffer);

	netdev_alert(netdev, "\n************** SKB dump ****************\n");
}