1 /*
2  * AMD 10Gb Ethernet driver
3  *
4  * This file is available to you under your choice of the following two
5  * licenses:
6  *
7  * License 1: GPLv2
8  *
9  * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
10  *
11  * This file is free software; you may copy, redistribute and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation, either version 2 of the License, or (at
14  * your option) any later version.
15  *
16  * This file is distributed in the hope that it will be useful, but
17  * WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
23  *
24  * This file incorporates work covered by the following copyright and
25  * permission notice:
26  *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
27  *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28  *     Inc. unless otherwise expressly agreed to in writing between Synopsys
29  *     and you.
30  *
31  *     The Software IS NOT an item of Licensed Software or Licensed Product
32  *     under any End User Software License Agreement or Agreement for Licensed
33  *     Product with Synopsys or any supplement thereto.  Permission is hereby
34  *     granted, free of charge, to any person obtaining a copy of this software
35  *     annotated with this license and the Software, to deal in the Software
36  *     without restriction, including without limitation the rights to use,
37  *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38  *     of the Software, and to permit persons to whom the Software is furnished
39  *     to do so, subject to the following conditions:
40  *
41  *     The above copyright notice and this permission notice shall be included
42  *     in all copies or substantial portions of the Software.
43  *
44  *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45  *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46  *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47  *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48  *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49  *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50  *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51  *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52  *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53  *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54  *     THE POSSIBILITY OF SUCH DAMAGE.
55  *
56  *
57  * License 2: Modified BSD
58  *
59  * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
60  * All rights reserved.
61  *
62  * Redistribution and use in source and binary forms, with or without
63  * modification, are permitted provided that the following conditions are met:
64  *     * Redistributions of source code must retain the above copyright
65  *       notice, this list of conditions and the following disclaimer.
66  *     * Redistributions in binary form must reproduce the above copyright
67  *       notice, this list of conditions and the following disclaimer in the
68  *       documentation and/or other materials provided with the distribution.
69  *     * Neither the name of Advanced Micro Devices, Inc. nor the
70  *       names of its contributors may be used to endorse or promote products
71  *       derived from this software without specific prior written permission.
72  *
73  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76  * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83  *
84  * This file incorporates work covered by the following copyright and
85  * permission notice:
86  *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
87  *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88  *     Inc. unless otherwise expressly agreed to in writing between Synopsys
89  *     and you.
90  *
91  *     The Software IS NOT an item of Licensed Software or Licensed Product
92  *     under any End User Software License Agreement or Agreement for Licensed
93  *     Product with Synopsys or any supplement thereto.  Permission is hereby
94  *     granted, free of charge, to any person obtaining a copy of this software
95  *     annotated with this license and the Software, to deal in the Software
96  *     without restriction, including without limitation the rights to use,
97  *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98  *     of the Software, and to permit persons to whom the Software is furnished
99  *     to do so, subject to the following conditions:
100  *
101  *     The above copyright notice and this permission notice shall be included
102  *     in all copies or substantial portions of the Software.
103  *
104  *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105  *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106  *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107  *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108  *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109  *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110  *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111  *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112  *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113  *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114  *     THE POSSIBILITY OF SUCH DAMAGE.
115  */
116 
117 #include <linux/phy.h>
118 #include <linux/mdio.h>
119 #include <linux/clk.h>
120 #include <linux/bitrev.h>
121 #include <linux/crc32.h>
122 
123 #include "xgbe.h"
124 #include "xgbe-common.h"
125 
126 static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
127 {
128 	return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
129 }
130 
131 static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
132 				      unsigned int usec)
133 {
134 	unsigned long rate;
135 	unsigned int ret;
136 
137 	DBGPR("-->xgbe_usec_to_riwt\n");
138 
139 	rate = pdata->sysclk_rate;
140 
141 	/*
142 	 * Convert the input usec value to the watchdog timer value. Each
143 	 * watchdog timer value is equivalent to 256 clock cycles.
144 	 * Calculate the required value as:
145 	 *   ( usec * ( system_clock_mhz / 10^6 ) / 256
146 	 */
147 	ret = (usec * (rate / 1000000)) / 256;
148 
149 	DBGPR("<--xgbe_usec_to_riwt\n");
150 
151 	return ret;
152 }
153 
154 static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
155 				      unsigned int riwt)
156 {
157 	unsigned long rate;
158 	unsigned int ret;
159 
160 	DBGPR("-->xgbe_riwt_to_usec\n");
161 
162 	rate = pdata->sysclk_rate;
163 
164 	/*
165 	 * Convert the input watchdog timer value to the usec value. Each
166 	 * watchdog timer value is equivalent to 256 clock cycles.
167 	 * Calculate the required value as:
168 	 *   ( riwt * 256 ) / ( system_clock_mhz / 10^6 )
169 	 */
170 	ret = (riwt * 256) / (rate / 1000000);
171 
172 	DBGPR("<--xgbe_riwt_to_usec\n");
173 
174 	return ret;
175 }
176 
177 static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
178 {
179 	struct xgbe_channel *channel;
180 	unsigned int i;
181 
182 	channel = pdata->channel;
183 	for (i = 0; i < pdata->channel_count; i++, channel++)
184 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
185 				       pdata->pblx8);
186 
187 	return 0;
188 }
189 
190 static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
191 {
192 	return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
193 }
194 
195 static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
196 {
197 	struct xgbe_channel *channel;
198 	unsigned int i;
199 
200 	channel = pdata->channel;
201 	for (i = 0; i < pdata->channel_count; i++, channel++) {
202 		if (!channel->tx_ring)
203 			break;
204 
205 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
206 				       pdata->tx_pbl);
207 	}
208 
209 	return 0;
210 }
211 
212 static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
213 {
214 	return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
215 }
216 
217 static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
218 {
219 	struct xgbe_channel *channel;
220 	unsigned int i;
221 
222 	channel = pdata->channel;
223 	for (i = 0; i < pdata->channel_count; i++, channel++) {
224 		if (!channel->rx_ring)
225 			break;
226 
227 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
228 				       pdata->rx_pbl);
229 	}
230 
231 	return 0;
232 }
233 
234 static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
235 {
236 	struct xgbe_channel *channel;
237 	unsigned int i;
238 
239 	channel = pdata->channel;
240 	for (i = 0; i < pdata->channel_count; i++, channel++) {
241 		if (!channel->tx_ring)
242 			break;
243 
244 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
245 				       pdata->tx_osp_mode);
246 	}
247 
248 	return 0;
249 }
250 
251 static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
252 {
253 	unsigned int i;
254 
255 	for (i = 0; i < pdata->rx_q_count; i++)
256 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
257 
258 	return 0;
259 }
260 
261 static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
262 {
263 	unsigned int i;
264 
265 	for (i = 0; i < pdata->tx_q_count; i++)
266 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
267 
268 	return 0;
269 }
270 
271 static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
272 				    unsigned int val)
273 {
274 	unsigned int i;
275 
276 	for (i = 0; i < pdata->rx_q_count; i++)
277 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
278 
279 	return 0;
280 }
281 
282 static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
283 				    unsigned int val)
284 {
285 	unsigned int i;
286 
287 	for (i = 0; i < pdata->tx_q_count; i++)
288 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
289 
290 	return 0;
291 }
292 
293 static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
294 {
295 	struct xgbe_channel *channel;
296 	unsigned int i;
297 
298 	channel = pdata->channel;
299 	for (i = 0; i < pdata->channel_count; i++, channel++) {
300 		if (!channel->rx_ring)
301 			break;
302 
303 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
304 				       pdata->rx_riwt);
305 	}
306 
307 	return 0;
308 }
309 
310 static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
311 {
312 	return 0;
313 }
314 
315 static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
316 {
317 	struct xgbe_channel *channel;
318 	unsigned int i;
319 
320 	channel = pdata->channel;
321 	for (i = 0; i < pdata->channel_count; i++, channel++) {
322 		if (!channel->rx_ring)
323 			break;
324 
325 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
326 				       pdata->rx_buf_size);
327 	}
328 }
329 
330 static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
331 {
332 	struct xgbe_channel *channel;
333 	unsigned int i;
334 
335 	channel = pdata->channel;
336 	for (i = 0; i < pdata->channel_count; i++, channel++) {
337 		if (!channel->tx_ring)
338 			break;
339 
340 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
341 	}
342 }
343 
344 static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
345 {
346 	struct xgbe_channel *channel;
347 	unsigned int i;
348 
349 	channel = pdata->channel;
350 	for (i = 0; i < pdata->channel_count; i++, channel++) {
351 		if (!channel->rx_ring)
352 			break;
353 
354 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
355 	}
356 
357 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
358 }
359 
360 static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
361 			      unsigned int index, unsigned int val)
362 {
363 	unsigned int wait;
364 	int ret = 0;
365 
366 	mutex_lock(&pdata->rss_mutex);
367 
368 	if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
369 		ret = -EBUSY;
370 		goto unlock;
371 	}
372 
373 	XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
374 
375 	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
376 	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
377 	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
378 	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
379 
380 	wait = 1000;
381 	while (wait--) {
382 		if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
383 			goto unlock;
384 
385 		usleep_range(1000, 1500);
386 	}
387 
388 	ret = -EBUSY;
389 
390 unlock:
391 	mutex_unlock(&pdata->rss_mutex);
392 
393 	return ret;
394 }
395 
396 static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
397 {
398 	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
399 	unsigned int *key = (unsigned int *)&pdata->rss_key;
400 	int ret;
401 
402 	while (key_regs--) {
403 		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
404 					 key_regs, *key++);
405 		if (ret)
406 			return ret;
407 	}
408 
409 	return 0;
410 }
411 
412 static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
413 {
414 	unsigned int i;
415 	int ret;
416 
417 	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
418 		ret = xgbe_write_rss_reg(pdata,
419 					 XGBE_RSS_LOOKUP_TABLE_TYPE, i,
420 					 pdata->rss_table[i]);
421 		if (ret)
422 			return ret;
423 	}
424 
425 	return 0;
426 }
427 
428 static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
429 {
430 	memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
431 
432 	return xgbe_write_rss_hash_key(pdata);
433 }
434 
435 static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
436 				     const u32 *table)
437 {
438 	unsigned int i;
439 
440 	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
441 		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
442 
443 	return xgbe_write_rss_lookup_table(pdata);
444 }
445 
446 static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
447 {
448 	int ret;
449 
450 	if (!pdata->hw_feat.rss)
451 		return -EOPNOTSUPP;
452 
453 	/* Program the hash key */
454 	ret = xgbe_write_rss_hash_key(pdata);
455 	if (ret)
456 		return ret;
457 
458 	/* Program the lookup table */
459 	ret = xgbe_write_rss_lookup_table(pdata);
460 	if (ret)
461 		return ret;
462 
463 	/* Set the RSS options */
464 	XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
465 
466 	/* Enable RSS */
467 	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
468 
469 	return 0;
470 }
471 
472 static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
473 {
474 	if (!pdata->hw_feat.rss)
475 		return -EOPNOTSUPP;
476 
477 	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
478 
479 	return 0;
480 }
481 
482 static void xgbe_config_rss(struct xgbe_prv_data *pdata)
483 {
484 	int ret;
485 
486 	if (!pdata->hw_feat.rss)
487 		return;
488 
489 	if (pdata->netdev->features & NETIF_F_RXHASH)
490 		ret = xgbe_enable_rss(pdata);
491 	else
492 		ret = xgbe_disable_rss(pdata);
493 
494 	if (ret)
495 		netdev_err(pdata->netdev,
496 			   "error configuring RSS, RSS disabled\n");
497 }
498 
499 static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata,
500 			      unsigned int queue)
501 {
502 	unsigned int prio, tc;
503 
504 	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
505 		/* Does this queue handle the priority? */
506 		if (pdata->prio2q_map[prio] != queue)
507 			continue;
508 
509 		/* Get the Traffic Class for this priority */
510 		tc = pdata->ets->prio_tc[prio];
511 
512 		/* Check if PFC is enabled for this traffic class */
513 		if (pdata->pfc->pfc_en & (1 << tc))
514 			return true;
515 	}
516 
517 	return false;
518 }
519 
520 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
521 {
522 	unsigned int max_q_count, q_count;
523 	unsigned int reg, reg_val;
524 	unsigned int i;
525 
526 	/* Clear MTL flow control */
527 	for (i = 0; i < pdata->rx_q_count; i++)
528 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
529 
530 	/* Clear MAC flow control */
531 	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
532 	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
533 	reg = MAC_Q0TFCR;
534 	for (i = 0; i < q_count; i++) {
535 		reg_val = XGMAC_IOREAD(pdata, reg);
536 		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
537 		XGMAC_IOWRITE(pdata, reg, reg_val);
538 
539 		reg += MAC_QTFCR_INC;
540 	}
541 
542 	return 0;
543 }
544 
545 static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
546 {
547 	struct ieee_pfc *pfc = pdata->pfc;
548 	struct ieee_ets *ets = pdata->ets;
549 	unsigned int max_q_count, q_count;
550 	unsigned int reg, reg_val;
551 	unsigned int i;
552 
553 	/* Set MTL flow control */
554 	for (i = 0; i < pdata->rx_q_count; i++) {
555 		unsigned int ehfc = 0;
556 
557 		if (pdata->rx_rfd[i]) {
558 			/* Flow control thresholds are established */
559 			if (pfc && ets) {
560 				if (xgbe_is_pfc_queue(pdata, i))
561 					ehfc = 1;
562 			} else {
563 				ehfc = 1;
564 			}
565 		}
566 
567 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
568 
569 		netif_dbg(pdata, drv, pdata->netdev,
570 			  "flow control %s for RXq%u\n",
571 			  ehfc ? "enabled" : "disabled", i);
572 	}
573 
574 	/* Set MAC flow control */
575 	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
576 	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
577 	reg = MAC_Q0TFCR;
578 	for (i = 0; i < q_count; i++) {
579 		reg_val = XGMAC_IOREAD(pdata, reg);
580 
581 		/* Enable transmit flow control */
582 		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
583 		/* Set pause time */
584 		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
585 
586 		XGMAC_IOWRITE(pdata, reg, reg_val);
587 
588 		reg += MAC_QTFCR_INC;
589 	}
590 
591 	return 0;
592 }
593 
594 static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
595 {
596 	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
597 
598 	return 0;
599 }
600 
601 static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
602 {
603 	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
604 
605 	return 0;
606 }
607 
608 static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
609 {
610 	struct ieee_pfc *pfc = pdata->pfc;
611 
612 	if (pdata->tx_pause || (pfc && pfc->pfc_en))
613 		xgbe_enable_tx_flow_control(pdata);
614 	else
615 		xgbe_disable_tx_flow_control(pdata);
616 
617 	return 0;
618 }
619 
620 static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
621 {
622 	struct ieee_pfc *pfc = pdata->pfc;
623 
624 	if (pdata->rx_pause || (pfc && pfc->pfc_en))
625 		xgbe_enable_rx_flow_control(pdata);
626 	else
627 		xgbe_disable_rx_flow_control(pdata);
628 
629 	return 0;
630 }
631 
632 static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
633 {
634 	struct ieee_pfc *pfc = pdata->pfc;
635 
636 	xgbe_config_tx_flow_control(pdata);
637 	xgbe_config_rx_flow_control(pdata);
638 
639 	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
640 			   (pfc && pfc->pfc_en) ? 1 : 0);
641 }
642 
643 static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
644 {
645 	struct xgbe_channel *channel;
646 	unsigned int dma_ch_isr, dma_ch_ier;
647 	unsigned int i;
648 
649 	/* Set the interrupt mode if supported */
650 	if (pdata->channel_irq_mode)
651 		XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
652 				   pdata->channel_irq_mode);
653 
654 	channel = pdata->channel;
655 	for (i = 0; i < pdata->channel_count; i++, channel++) {
656 		/* Clear all the interrupts which are set */
657 		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
658 		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
659 
660 		/* Clear all interrupt enable bits */
661 		dma_ch_ier = 0;
662 
663 		/* Enable following interrupts
664 		 *   NIE  - Normal Interrupt Summary Enable
665 		 *   AIE  - Abnormal Interrupt Summary Enable
666 		 *   FBEE - Fatal Bus Error Enable
667 		 */
668 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
669 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
670 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
671 
672 		if (channel->tx_ring) {
673 			/* Enable the following Tx interrupts
674 			 *   TIE  - Transmit Interrupt Enable (unless using
675 			 *          per channel interrupts in edge triggered
676 			 *          mode)
677 			 */
678 			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
679 				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
680 		}
681 		if (channel->rx_ring) {
682 			/* Enable following Rx interrupts
683 			 *   RBUE - Receive Buffer Unavailable Enable
684 			 *   RIE  - Receive Interrupt Enable (unless using
685 			 *          per channel interrupts in edge triggered
686 			 *          mode)
687 			 */
688 			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
689 			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
690 				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
691 		}
692 
693 		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
694 	}
695 }
696 
697 static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
698 {
699 	unsigned int mtl_q_isr;
700 	unsigned int q_count, i;
701 
702 	q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
703 	for (i = 0; i < q_count; i++) {
704 		/* Clear all the interrupts which are set */
705 		mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
706 		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
707 
708 		/* No MTL interrupts to be enabled */
709 		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
710 	}
711 }
712 
713 static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
714 {
715 	unsigned int mac_ier = 0;
716 
717 	/* Enable Timestamp interrupt */
718 	XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);
719 
720 	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
721 
722 	/* Enable all counter interrupts */
723 	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
724 	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
725 
726 	/* Enable MDIO single command completion interrupt */
727 	XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
728 }
729 
730 static void xgbe_enable_ecc_interrupts(struct xgbe_prv_data *pdata)
731 {
732 	unsigned int ecc_isr, ecc_ier = 0;
733 
734 	if (!pdata->vdata->ecc_support)
735 		return;
736 
737 	/* Clear all the interrupts which are set */
738 	ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
739 	XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);
740 
741 	/* Enable ECC interrupts */
742 	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 1);
743 	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 1);
744 	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 1);
745 	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 1);
746 	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 1);
747 	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 1);
748 
749 	XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
750 }
751 
752 static void xgbe_disable_ecc_ded(struct xgbe_prv_data *pdata)
753 {
754 	unsigned int ecc_ier;
755 
756 	ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);
757 
758 	/* Disable ECC DED interrupts */
759 	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 0);
760 	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 0);
761 	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 0);
762 
763 	XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
764 }
765 
766 static void xgbe_disable_ecc_sec(struct xgbe_prv_data *pdata,
767 				 enum xgbe_ecc_sec sec)
768 {
769 	unsigned int ecc_ier;
770 
771 	ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);
772 
773 	/* Disable ECC SEC interrupt */
774 	switch (sec) {
775 	case XGBE_ECC_SEC_TX:
776 	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 0);
777 		break;
778 	case XGBE_ECC_SEC_RX:
779 	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 0);
780 		break;
781 	case XGBE_ECC_SEC_DESC:
782 	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 0);
783 		break;
784 	}
785 
786 	XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
787 }
788 
789 static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
790 {
791 	unsigned int ss;
792 
793 	switch (speed) {
794 	case SPEED_1000:
795 		ss = 0x03;
796 		break;
797 	case SPEED_2500:
798 		ss = 0x02;
799 		break;
800 	case SPEED_10000:
801 		ss = 0x00;
802 		break;
803 	default:
804 		return -EINVAL;
805 	}
806 
807 	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
808 		XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
809 
810 	return 0;
811 }
812 
813 static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
814 {
815 	/* Put the VLAN tag in the Rx descriptor */
816 	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
817 
818 	/* Don't check the VLAN type */
819 	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
820 
821 	/* Check only C-TAG (0x8100) packets */
822 	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
823 
824 	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
825 	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
826 
827 	/* Enable VLAN tag stripping */
828 	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
829 
830 	return 0;
831 }
832 
833 static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
834 {
835 	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
836 
837 	return 0;
838 }
839 
840 static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
841 {
842 	/* Enable VLAN filtering */
843 	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
844 
845 	/* Enable VLAN Hash Table filtering */
846 	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
847 
848 	/* Disable VLAN tag inverse matching */
849 	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
850 
851 	/* Only filter on the lower 12-bits of the VLAN tag */
852 	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
853 
854 	/* In order for the VLAN Hash Table filtering to be effective,
855 	 * the VLAN tag identifier in the VLAN Tag Register must not
856 	 * be zero.  Set the VLAN tag identifier to "1" to enable the
857 	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
858 	 * 1 will always pass filtering.
859 	 */
860 	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
861 
862 	return 0;
863 }
864 
865 static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
866 {
867 	/* Disable VLAN filtering */
868 	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
869 
870 	return 0;
871 }
872 
873 static u32 xgbe_vid_crc32_le(__le16 vid_le)
874 {
875 	u32 poly = 0xedb88320;	/* CRCPOLY_LE */
876 	u32 crc = ~0;
877 	u32 temp = 0;
878 	unsigned char *data = (unsigned char *)&vid_le;
879 	unsigned char data_byte = 0;
880 	int i, bits;
881 
882 	bits = get_bitmask_order(VLAN_VID_MASK);
883 	for (i = 0; i < bits; i++) {
884 		if ((i % 8) == 0)
885 			data_byte = data[i / 8];
886 
887 		temp = ((crc & 1) ^ data_byte) & 1;
888 		crc >>= 1;
889 		data_byte >>= 1;
890 
891 		if (temp)
892 			crc ^= poly;
893 	}
894 
895 	return crc;
896 }
897 
898 static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
899 {
900 	u32 crc;
901 	u16 vid;
902 	__le16 vid_le;
903 	u16 vlan_hash_table = 0;
904 
905 	/* Generate the VLAN Hash Table value */
906 	for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
907 		/* Get the CRC32 value of the VLAN ID */
908 		vid_le = cpu_to_le16(vid);
909 		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
910 
911 		vlan_hash_table |= (1 << crc);
912 	}
913 
914 	/* Set the VLAN Hash Table filtering register */
915 	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
916 
917 	return 0;
918 }
919 
920 static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
921 				     unsigned int enable)
922 {
923 	unsigned int val = enable ? 1 : 0;
924 
925 	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
926 		return 0;
927 
928 	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
929 		  enable ? "entering" : "leaving");
930 	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
931 
932 	/* Hardware will still perform VLAN filtering in promiscuous mode */
933 	if (enable) {
934 		xgbe_disable_rx_vlan_filtering(pdata);
935 	} else {
936 		if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
937 			xgbe_enable_rx_vlan_filtering(pdata);
938 	}
939 
940 	return 0;
941 }
942 
943 static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
944 				       unsigned int enable)
945 {
946 	unsigned int val = enable ? 1 : 0;
947 
948 	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
949 		return 0;
950 
951 	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
952 		  enable ? "entering" : "leaving");
953 	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
954 
955 	return 0;
956 }
957 
958 static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
959 			     struct netdev_hw_addr *ha, unsigned int *mac_reg)
960 {
961 	unsigned int mac_addr_hi, mac_addr_lo;
962 	u8 *mac_addr;
963 
964 	mac_addr_lo = 0;
965 	mac_addr_hi = 0;
966 
967 	if (ha) {
968 		mac_addr = (u8 *)&mac_addr_lo;
969 		mac_addr[0] = ha->addr[0];
970 		mac_addr[1] = ha->addr[1];
971 		mac_addr[2] = ha->addr[2];
972 		mac_addr[3] = ha->addr[3];
973 		mac_addr = (u8 *)&mac_addr_hi;
974 		mac_addr[0] = ha->addr[4];
975 		mac_addr[1] = ha->addr[5];
976 
977 		netif_dbg(pdata, drv, pdata->netdev,
978 			  "adding mac address %pM at %#x\n",
979 			  ha->addr, *mac_reg);
980 
981 		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
982 	}
983 
984 	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
985 	*mac_reg += MAC_MACA_INC;
986 	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
987 	*mac_reg += MAC_MACA_INC;
988 }
989 
990 static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
991 {
992 	struct net_device *netdev = pdata->netdev;
993 	struct netdev_hw_addr *ha;
994 	unsigned int mac_reg;
995 	unsigned int addn_macs;
996 
997 	mac_reg = MAC_MACA1HR;
998 	addn_macs = pdata->hw_feat.addn_mac;
999 
1000 	if (netdev_uc_count(netdev) > addn_macs) {
1001 		xgbe_set_promiscuous_mode(pdata, 1);
1002 	} else {
1003 		netdev_for_each_uc_addr(ha, netdev) {
1004 			xgbe_set_mac_reg(pdata, ha, &mac_reg);
1005 			addn_macs--;
1006 		}
1007 
1008 		if (netdev_mc_count(netdev) > addn_macs) {
1009 			xgbe_set_all_multicast_mode(pdata, 1);
1010 		} else {
1011 			netdev_for_each_mc_addr(ha, netdev) {
1012 				xgbe_set_mac_reg(pdata, ha, &mac_reg);
1013 				addn_macs--;
1014 			}
1015 		}
1016 	}
1017 
1018 	/* Clear remaining additional MAC address entries */
1019 	while (addn_macs--)
1020 		xgbe_set_mac_reg(pdata, NULL, &mac_reg);
1021 }
1022 
1023 static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
1024 {
1025 	struct net_device *netdev = pdata->netdev;
1026 	struct netdev_hw_addr *ha;
1027 	unsigned int hash_reg;
1028 	unsigned int hash_table_shift, hash_table_count;
1029 	u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
1030 	u32 crc;
1031 	unsigned int i;
1032 
1033 	hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
1034 	hash_table_count = pdata->hw_feat.hash_table_size / 32;
1035 	memset(hash_table, 0, sizeof(hash_table));
1036 
1037 	/* Build the MAC Hash Table register values */
1038 	netdev_for_each_uc_addr(ha, netdev) {
1039 		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
1040 		crc >>= hash_table_shift;
1041 		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
1042 	}
1043 
1044 	netdev_for_each_mc_addr(ha, netdev) {
1045 		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
1046 		crc >>= hash_table_shift;
1047 		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
1048 	}
1049 
1050 	/* Set the MAC Hash Table registers */
1051 	hash_reg = MAC_HTR0;
1052 	for (i = 0; i < hash_table_count; i++) {
1053 		XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
1054 		hash_reg += MAC_HTR_INC;
1055 	}
1056 }
1057 
1058 static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
1059 {
1060 	if (pdata->hw_feat.hash_table_size)
1061 		xgbe_set_mac_hash_table(pdata);
1062 	else
1063 		xgbe_set_mac_addn_addrs(pdata);
1064 
1065 	return 0;
1066 }
1067 
1068 static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
1069 {
1070 	unsigned int mac_addr_hi, mac_addr_lo;
1071 
1072 	mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
1073 	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
1074 		      (addr[1] <<  8) | (addr[0] <<  0);
1075 
1076 	XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
1077 	XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
1078 
1079 	return 0;
1080 }
1081 
1082 static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
1083 {
1084 	struct net_device *netdev = pdata->netdev;
1085 	unsigned int pr_mode, am_mode;
1086 
1087 	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
1088 	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
1089 
1090 	xgbe_set_promiscuous_mode(pdata, pr_mode);
1091 	xgbe_set_all_multicast_mode(pdata, am_mode);
1092 
1093 	xgbe_add_mac_addresses(pdata);
1094 
1095 	return 0;
1096 }
1097 
1098 static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
1099 {
1100 	unsigned int reg;
1101 
1102 	if (gpio > 15)
1103 		return -EINVAL;
1104 
1105 	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);
1106 
1107 	reg &= ~(1 << (gpio + 16));
1108 	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);
1109 
1110 	return 0;
1111 }
1112 
1113 static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
1114 {
1115 	unsigned int reg;
1116 
1117 	if (gpio > 15)
1118 		return -EINVAL;
1119 
1120 	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);
1121 
1122 	reg |= (1 << (gpio + 16));
1123 	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);
1124 
1125 	return 0;
1126 }
1127 
1128 static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
1129 				 int mmd_reg)
1130 {
1131 	unsigned long flags;
1132 	unsigned int mmd_address, index, offset;
1133 	int mmd_data;
1134 
1135 	if (mmd_reg & MII_ADDR_C45)
1136 		mmd_address = mmd_reg & ~MII_ADDR_C45;
1137 	else
1138 		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
1139 
1140 	/* The PCS registers are accessed using mmio. The underlying
1141 	 * management interface uses indirect addressing to access the MMD
1142 	 * register sets. This requires accessing of the PCS register in two
1143 	 * phases, an address phase and a data phase.
1144 	 *
1145 	 * The mmio interface is based on 16-bit offsets and values. All
1146 	 * register offsets must therefore be adjusted by left shifting the
1147 	 * offset 1 bit and reading 16 bits of data.
1148 	 */
1149 	mmd_address <<= 1;
1150 	index = mmd_address & ~pdata->xpcs_window_mask;
1151 	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
1152 
1153 	spin_lock_irqsave(&pdata->xpcs_lock, flags);
1154 	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
1155 	mmd_data = XPCS16_IOREAD(pdata, offset);
1156 	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1157 
1158 	return mmd_data;
1159 }
1160 
1161 static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
1162 				   int mmd_reg, int mmd_data)
1163 {
1164 	unsigned long flags;
1165 	unsigned int mmd_address, index, offset;
1166 
1167 	if (mmd_reg & MII_ADDR_C45)
1168 		mmd_address = mmd_reg & ~MII_ADDR_C45;
1169 	else
1170 		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
1171 
1172 	/* The PCS registers are accessed using mmio. The underlying
1173 	 * management interface uses indirect addressing to access the MMD
1174 	 * register sets. This requires accessing of the PCS register in two
1175 	 * phases, an address phase and a data phase.
1176 	 *
1177 	 * The mmio interface is based on 16-bit offsets and values. All
1178 	 * register offsets must therefore be adjusted by left shifting the
1179 	 * offset 1 bit and writing 16 bits of data.
1180 	 */
1181 	mmd_address <<= 1;
1182 	index = mmd_address & ~pdata->xpcs_window_mask;
1183 	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
1184 
1185 	spin_lock_irqsave(&pdata->xpcs_lock, flags);
1186 	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
1187 	XPCS16_IOWRITE(pdata, offset, mmd_data);
1188 	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1189 }
1190 
1191 static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
1192 				 int mmd_reg)
1193 {
1194 	unsigned long flags;
1195 	unsigned int mmd_address;
1196 	int mmd_data;
1197 
1198 	if (mmd_reg & MII_ADDR_C45)
1199 		mmd_address = mmd_reg & ~MII_ADDR_C45;
1200 	else
1201 		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
1202 
1203 	/* The PCS registers are accessed using mmio. The underlying APB3
1204 	 * management interface uses indirect addressing to access the MMD
1205 	 * register sets. This requires accessing of the PCS register in two
1206 	 * phases, an address phase and a data phase.
1207 	 *
1208 	 * The mmio interface is based on 32-bit offsets and values. All
1209 	 * register offsets must therefore be adjusted by left shifting the
1210 	 * offset 2 bits and reading 32 bits of data.
1211 	 */
1212 	spin_lock_irqsave(&pdata->xpcs_lock, flags);
1213 	XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
1214 	mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
1215 	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1216 
1217 	return mmd_data;
1218 }
1219 
1220 static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
1221 				   int mmd_reg, int mmd_data)
1222 {
1223 	unsigned int mmd_address;
1224 	unsigned long flags;
1225 
1226 	if (mmd_reg & MII_ADDR_C45)
1227 		mmd_address = mmd_reg & ~MII_ADDR_C45;
1228 	else
1229 		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
1230 
1231 	/* The PCS registers are accessed using mmio. The underlying APB3
1232 	 * management interface uses indirect addressing to access the MMD
1233 	 * register sets. This requires accessing of the PCS register in two
1234 	 * phases, an address phase and a data phase.
1235 	 *
1236 	 * The mmio interface is based on 32-bit offsets and values. All
1237 	 * register offsets must therefore be adjusted by left shifting the
1238 	 * offset 2 bits and writing 32 bits of data.
1239 	 */
1240 	spin_lock_irqsave(&pdata->xpcs_lock, flags);
1241 	XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
1242 	XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
1243 	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1244 }
1245 
1246 static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
1247 			      int mmd_reg)
1248 {
1249 	switch (pdata->vdata->xpcs_access) {
1250 	case XGBE_XPCS_ACCESS_V1:
1251 		return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg);
1252 
1253 	case XGBE_XPCS_ACCESS_V2:
1254 	default:
1255 		return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
1256 	}
1257 }
1258 
1259 static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
1260 				int mmd_reg, int mmd_data)
1261 {
1262 	switch (pdata->vdata->xpcs_access) {
1263 	case XGBE_XPCS_ACCESS_V1:
1264 		return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
1265 
1266 	case XGBE_XPCS_ACCESS_V2:
1267 	default:
1268 		return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
1269 	}
1270 }
1271 
1272 static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1273 				   int reg, u16 val)
1274 {
1275 	unsigned int mdio_sca, mdio_sccd;
1276 
1277 	reinit_completion(&pdata->mdio_complete);
1278 
1279 	mdio_sca = 0;
1280 	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1281 	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1282 	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1283 
1284 	mdio_sccd = 0;
1285 	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
1286 	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
1287 	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
1288 	XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
1289 
1290 	if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
1291 		netdev_err(pdata->netdev, "mdio write operation timed out\n");
1292 		return -ETIMEDOUT;
1293 	}
1294 
1295 	return 0;
1296 }
1297 
1298 static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1299 				  int reg)
1300 {
1301 	unsigned int mdio_sca, mdio_sccd;
1302 
1303 	reinit_completion(&pdata->mdio_complete);
1304 
1305 	mdio_sca = 0;
1306 	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1307 	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1308 	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1309 
1310 	mdio_sccd = 0;
1311 	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
1312 	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
1313 	XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
1314 
1315 	if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
1316 		netdev_err(pdata->netdev, "mdio read operation timed out\n");
1317 		return -ETIMEDOUT;
1318 	}
1319 
1320 	return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
1321 }
1322 
1323 static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
1324 				 enum xgbe_mdio_mode mode)
1325 {
1326 	unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);
1327 
1328 	switch (mode) {
1329 	case XGBE_MDIO_MODE_CL22:
1330 		if (port > XGMAC_MAX_C22_PORT)
1331 			return -EINVAL;
1332 		reg_val |= (1 << port);
1333 		break;
1334 	case XGBE_MDIO_MODE_CL45:
1335 		break;
1336 	default:
1337 		return -EINVAL;
1338 	}
1339 
1340 	XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);
1341 
1342 	return 0;
1343 }
1344 
1345 static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
1346 {
1347 	return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
1348 }
1349 
1350 static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
1351 {
1352 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
1353 
1354 	return 0;
1355 }
1356 
1357 static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
1358 {
1359 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
1360 
1361 	return 0;
1362 }
1363 
1364 static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
1365 {
1366 	struct xgbe_ring_desc *rdesc = rdata->rdesc;
1367 
1368 	/* Reset the Tx descriptor
1369 	 *   Set buffer 1 (lo) address to zero
1370 	 *   Set buffer 1 (hi) address to zero
1371 	 *   Reset all other control bits (IC, TTSE, B2L & B1L)
1372 	 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
1373 	 */
1374 	rdesc->desc0 = 0;
1375 	rdesc->desc1 = 0;
1376 	rdesc->desc2 = 0;
1377 	rdesc->desc3 = 0;
1378 
1379 	/* Make sure ownership is written to the descriptor */
1380 	dma_wmb();
1381 }
1382 
1383 static void xgbe_tx_desc_init(struct xgbe_channel *channel)
1384 {
1385 	struct xgbe_ring *ring = channel->tx_ring;
1386 	struct xgbe_ring_data *rdata;
1387 	int i;
1388 	int start_index = ring->cur;
1389 
1390 	DBGPR("-->tx_desc_init\n");
1391 
1392 	/* Initialze all descriptors */
1393 	for (i = 0; i < ring->rdesc_count; i++) {
1394 		rdata = XGBE_GET_DESC_DATA(ring, i);
1395 
1396 		/* Initialize Tx descriptor */
1397 		xgbe_tx_desc_reset(rdata);
1398 	}
1399 
1400 	/* Update the total number of Tx descriptors */
1401 	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
1402 
1403 	/* Update the starting address of descriptor ring */
1404 	rdata = XGBE_GET_DESC_DATA(ring, start_index);
1405 	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
1406 			  upper_32_bits(rdata->rdesc_dma));
1407 	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
1408 			  lower_32_bits(rdata->rdesc_dma));
1409 
1410 	DBGPR("<--tx_desc_init\n");
1411 }
1412 
1413 static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
1414 			       struct xgbe_ring_data *rdata, unsigned int index)
1415 {
1416 	struct xgbe_ring_desc *rdesc = rdata->rdesc;
1417 	unsigned int rx_usecs = pdata->rx_usecs;
1418 	unsigned int rx_frames = pdata->rx_frames;
1419 	unsigned int inte;
1420 	dma_addr_t hdr_dma, buf_dma;
1421 
1422 	if (!rx_usecs && !rx_frames) {
1423 		/* No coalescing, interrupt for every descriptor */
1424 		inte = 1;
1425 	} else {
1426 		/* Set interrupt based on Rx frame coalescing setting */
1427 		if (rx_frames && !((index + 1) % rx_frames))
1428 			inte = 1;
1429 		else
1430 			inte = 0;
1431 	}
1432 
1433 	/* Reset the Rx descriptor
1434 	 *   Set buffer 1 (lo) address to header dma address (lo)
1435 	 *   Set buffer 1 (hi) address to header dma address (hi)
1436 	 *   Set buffer 2 (lo) address to buffer dma address (lo)
1437 	 *   Set buffer 2 (hi) address to buffer dma address (hi) and
1438 	 *     set control bits OWN and INTE
1439 	 */
1440 	hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
1441 	buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
1442 	rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
1443 	rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
1444 	rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
1445 	rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
1446 
1447 	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
1448 
1449 	/* Since the Rx DMA engine is likely running, make sure everything
1450 	 * is written to the descriptor(s) before setting the OWN bit
1451 	 * for the descriptor
1452 	 */
1453 	dma_wmb();
1454 
1455 	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
1456 
1457 	/* Make sure ownership is written to the descriptor */
1458 	dma_wmb();
1459 }
1460 
1461 static void xgbe_rx_desc_init(struct xgbe_channel *channel)
1462 {
1463 	struct xgbe_prv_data *pdata = channel->pdata;
1464 	struct xgbe_ring *ring = channel->rx_ring;
1465 	struct xgbe_ring_data *rdata;
1466 	unsigned int start_index = ring->cur;
1467 	unsigned int i;
1468 
1469 	DBGPR("-->rx_desc_init\n");
1470 
1471 	/* Initialize all descriptors */
1472 	for (i = 0; i < ring->rdesc_count; i++) {
1473 		rdata = XGBE_GET_DESC_DATA(ring, i);
1474 
1475 		/* Initialize Rx descriptor */
1476 		xgbe_rx_desc_reset(pdata, rdata, i);
1477 	}
1478 
1479 	/* Update the total number of Rx descriptors */
1480 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
1481 
1482 	/* Update the starting address of descriptor ring */
1483 	rdata = XGBE_GET_DESC_DATA(ring, start_index);
1484 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
1485 			  upper_32_bits(rdata->rdesc_dma));
1486 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
1487 			  lower_32_bits(rdata->rdesc_dma));
1488 
1489 	/* Update the Rx Descriptor Tail Pointer */
1490 	rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
1491 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
1492 			  lower_32_bits(rdata->rdesc_dma));
1493 
1494 	DBGPR("<--rx_desc_init\n");
1495 }
1496 
1497 static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
1498 				      unsigned int addend)
1499 {
1500 	/* Set the addend register value and tell the device */
1501 	XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
1502 	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
1503 
1504 	/* Wait for addend update to complete */
1505 	while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
1506 		udelay(5);
1507 }
1508 
1509 static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
1510 				 unsigned int nsec)
1511 {
1512 	/* Set the time values and tell the device */
1513 	XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
1514 	XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
1515 	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
1516 
1517 	/* Wait for time update to complete */
1518 	while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
1519 		udelay(5);
1520 }
1521 
1522 static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
1523 {
1524 	u64 nsec;
1525 
1526 	nsec = XGMAC_IOREAD(pdata, MAC_STSR);
1527 	nsec *= NSEC_PER_SEC;
1528 	nsec += XGMAC_IOREAD(pdata, MAC_STNR);
1529 
1530 	return nsec;
1531 }
1532 
1533 static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
1534 {
1535 	unsigned int tx_snr, tx_ssr;
1536 	u64 nsec;
1537 
1538 	if (pdata->vdata->tx_tstamp_workaround) {
1539 		tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
1540 		tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
1541 	} else {
1542 		tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
1543 		tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
1544 	}
1545 
1546 	if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
1547 		return 0;
1548 
1549 	nsec = tx_ssr;
1550 	nsec *= NSEC_PER_SEC;
1551 	nsec += tx_snr;
1552 
1553 	return nsec;
1554 }
1555 
1556 static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
1557 			       struct xgbe_ring_desc *rdesc)
1558 {
1559 	u64 nsec;
1560 
1561 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
1562 	    !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
1563 		nsec = le32_to_cpu(rdesc->desc1);
1564 		nsec <<= 32;
1565 		nsec |= le32_to_cpu(rdesc->desc0);
1566 		if (nsec != 0xffffffffffffffffULL) {
1567 			packet->rx_tstamp = nsec;
1568 			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1569 				       RX_TSTAMP, 1);
1570 		}
1571 	}
1572 }
1573 
1574 static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
1575 			      unsigned int mac_tscr)
1576 {
1577 	/* Set one nano-second accuracy */
1578 	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
1579 
1580 	/* Set fine timestamp update */
1581 	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
1582 
1583 	/* Overwrite earlier timestamps */
1584 	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
1585 
1586 	XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
1587 
1588 	/* Exit if timestamping is not enabled */
1589 	if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
1590 		return 0;
1591 
1592 	/* Initialize time registers */
1593 	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
1594 	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
1595 	xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
1596 	xgbe_set_tstamp_time(pdata, 0, 0);
1597 
1598 	/* Initialize the timecounter */
1599 	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
1600 			 ktime_to_ns(ktime_get_real()));
1601 
1602 	return 0;
1603 }
1604 
1605 static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
1606 			       struct xgbe_ring *ring)
1607 {
1608 	struct xgbe_prv_data *pdata = channel->pdata;
1609 	struct xgbe_ring_data *rdata;
1610 
1611 	/* Make sure everything is written before the register write */
1612 	wmb();
1613 
1614 	/* Issue a poll command to Tx DMA by writing address
1615 	 * of next immediate free descriptor */
1616 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1617 	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
1618 			  lower_32_bits(rdata->rdesc_dma));
1619 
1620 	/* Start the Tx timer */
1621 	if (pdata->tx_usecs && !channel->tx_timer_active) {
1622 		channel->tx_timer_active = 1;
1623 		mod_timer(&channel->tx_timer,
1624 			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
1625 	}
1626 
1627 	ring->tx.xmit_more = 0;
1628 }
1629 
1630 static void xgbe_dev_xmit(struct xgbe_channel *channel)
1631 {
1632 	struct xgbe_prv_data *pdata = channel->pdata;
1633 	struct xgbe_ring *ring = channel->tx_ring;
1634 	struct xgbe_ring_data *rdata;
1635 	struct xgbe_ring_desc *rdesc;
1636 	struct xgbe_packet_data *packet = &ring->packet_data;
1637 	unsigned int csum, tso, vlan;
1638 	unsigned int tso_context, vlan_context;
1639 	unsigned int tx_set_ic;
1640 	int start_index = ring->cur;
1641 	int cur_index = ring->cur;
1642 	int i;
1643 
1644 	DBGPR("-->xgbe_dev_xmit\n");
1645 
1646 	csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1647 			      CSUM_ENABLE);
1648 	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1649 			     TSO_ENABLE);
1650 	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1651 			      VLAN_CTAG);
1652 
1653 	if (tso && (packet->mss != ring->tx.cur_mss))
1654 		tso_context = 1;
1655 	else
1656 		tso_context = 0;
1657 
1658 	if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
1659 		vlan_context = 1;
1660 	else
1661 		vlan_context = 0;
1662 
1663 	/* Determine if an interrupt should be generated for this Tx:
1664 	 *   Interrupt:
1665 	 *     - Tx frame count exceeds the frame count setting
1666 	 *     - Addition of Tx frame count to the frame count since the
1667 	 *       last interrupt was set exceeds the frame count setting
1668 	 *   No interrupt:
1669 	 *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
1670 	 *     - Addition of Tx frame count to the frame count since the
1671 	 *       last interrupt was set does not exceed the frame count setting
1672 	 */
1673 	ring->coalesce_count += packet->tx_packets;
1674 	if (!pdata->tx_frames)
1675 		tx_set_ic = 0;
1676 	else if (packet->tx_packets > pdata->tx_frames)
1677 		tx_set_ic = 1;
1678 	else if ((ring->coalesce_count % pdata->tx_frames) <
1679 		 packet->tx_packets)
1680 		tx_set_ic = 1;
1681 	else
1682 		tx_set_ic = 0;
1683 
1684 	rdata = XGBE_GET_DESC_DATA(ring, cur_index);
1685 	rdesc = rdata->rdesc;
1686 
1687 	/* Create a context descriptor if this is a TSO packet */
1688 	if (tso_context || vlan_context) {
1689 		if (tso_context) {
1690 			netif_dbg(pdata, tx_queued, pdata->netdev,
1691 				  "TSO context descriptor, mss=%u\n",
1692 				  packet->mss);
1693 
1694 			/* Set the MSS size */
1695 			XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
1696 					  MSS, packet->mss);
1697 
1698 			/* Mark it as a CONTEXT descriptor */
1699 			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
1700 					  CTXT, 1);
1701 
1702 			/* Indicate this descriptor contains the MSS */
1703 			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
1704 					  TCMSSV, 1);
1705 
1706 			ring->tx.cur_mss = packet->mss;
1707 		}
1708 
1709 		if (vlan_context) {
1710 			netif_dbg(pdata, tx_queued, pdata->netdev,
1711 				  "VLAN context descriptor, ctag=%u\n",
1712 				  packet->vlan_ctag);
1713 
1714 			/* Mark it as a CONTEXT descriptor */
1715 			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
1716 					  CTXT, 1);
1717 
1718 			/* Set the VLAN tag */
1719 			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
1720 					  VT, packet->vlan_ctag);
1721 
1722 			/* Indicate this descriptor contains the VLAN tag */
1723 			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
1724 					  VLTV, 1);
1725 
1726 			ring->tx.cur_vlan_ctag = packet->vlan_ctag;
1727 		}
1728 
1729 		cur_index++;
1730 		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
1731 		rdesc = rdata->rdesc;
1732 	}
1733 
1734 	/* Update buffer address (for TSO this is the header) */
1735 	rdesc->desc0 =  cpu_to_le32(lower_32_bits(rdata->skb_dma));
1736 	rdesc->desc1 =  cpu_to_le32(upper_32_bits(rdata->skb_dma));
1737 
1738 	/* Update the buffer length */
1739 	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
1740 			  rdata->skb_dma_len);
1741 
1742 	/* VLAN tag insertion check */
1743 	if (vlan)
1744 		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
1745 				  TX_NORMAL_DESC2_VLAN_INSERT);
1746 
1747 	/* Timestamp enablement check */
1748 	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
1749 		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);
1750 
1751 	/* Mark it as First Descriptor */
1752 	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
1753 
1754 	/* Mark it as a NORMAL descriptor */
1755 	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
1756 
1757 	/* Set OWN bit if not the first descriptor */
1758 	if (cur_index != start_index)
1759 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1760 
1761 	if (tso) {
1762 		/* Enable TSO */
1763 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
1764 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
1765 				  packet->tcp_payload_len);
1766 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
1767 				  packet->tcp_header_len / 4);
1768 
1769 		pdata->ext_stats.tx_tso_packets++;
1770 	} else {
1771 		/* Enable CRC and Pad Insertion */
1772 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
1773 
1774 		/* Enable HW CSUM */
1775 		if (csum)
1776 			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1777 					  CIC, 0x3);
1778 
1779 		/* Set the total length to be transmitted */
1780 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
1781 				  packet->length);
1782 	}
1783 
1784 	for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
1785 		cur_index++;
1786 		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
1787 		rdesc = rdata->rdesc;
1788 
1789 		/* Update buffer address */
1790 		rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
1791 		rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
1792 
1793 		/* Update the buffer length */
1794 		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
1795 				  rdata->skb_dma_len);
1796 
1797 		/* Set OWN bit */
1798 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1799 
1800 		/* Mark it as NORMAL descriptor */
1801 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
1802 
1803 		/* Enable HW CSUM */
1804 		if (csum)
1805 			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1806 					  CIC, 0x3);
1807 	}
1808 
1809 	/* Set LAST bit for the last descriptor */
1810 	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
1811 
1812 	/* Set IC bit based on Tx coalescing settings */
1813 	if (tx_set_ic)
1814 		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
1815 
1816 	/* Save the Tx info to report back during cleanup */
1817 	rdata->tx.packets = packet->tx_packets;
1818 	rdata->tx.bytes = packet->tx_bytes;
1819 
1820 	/* In case the Tx DMA engine is running, make sure everything
1821 	 * is written to the descriptor(s) before setting the OWN bit
1822 	 * for the first descriptor
1823 	 */
1824 	dma_wmb();
1825 
1826 	/* Set OWN bit for the first descriptor */
1827 	rdata = XGBE_GET_DESC_DATA(ring, start_index);
1828 	rdesc = rdata->rdesc;
1829 	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1830 
1831 	if (netif_msg_tx_queued(pdata))
1832 		xgbe_dump_tx_desc(pdata, ring, start_index,
1833 				  packet->rdesc_count, 1);
1834 
1835 	/* Make sure ownership is written to the descriptor */
1836 	smp_wmb();
1837 
1838 	ring->cur = cur_index + 1;
1839 	if (!packet->skb->xmit_more ||
1840 	    netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
1841 						   channel->queue_index)))
1842 		xgbe_tx_start_xmit(channel, ring);
1843 	else
1844 		ring->tx.xmit_more = 1;
1845 
1846 	DBGPR("  %s: descriptors %u to %u written\n",
1847 	      channel->name, start_index & (ring->rdesc_count - 1),
1848 	      (ring->cur - 1) & (ring->rdesc_count - 1));
1849 
1850 	DBGPR("<--xgbe_dev_xmit\n");
1851 }
1852 
1853 static int xgbe_dev_read(struct xgbe_channel *channel)
1854 {
1855 	struct xgbe_prv_data *pdata = channel->pdata;
1856 	struct xgbe_ring *ring = channel->rx_ring;
1857 	struct xgbe_ring_data *rdata;
1858 	struct xgbe_ring_desc *rdesc;
1859 	struct xgbe_packet_data *packet = &ring->packet_data;
1860 	struct net_device *netdev = pdata->netdev;
1861 	unsigned int err, etlt, l34t;
1862 
1863 	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
1864 
1865 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1866 	rdesc = rdata->rdesc;
1867 
1868 	/* Check for data availability */
1869 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
1870 		return 1;
1871 
1872 	/* Make sure descriptor fields are read after reading the OWN bit */
1873 	dma_rmb();
1874 
1875 	if (netif_msg_rx_status(pdata))
1876 		xgbe_dump_rx_desc(pdata, ring, ring->cur);
1877 
1878 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
1879 		/* Timestamp Context Descriptor */
1880 		xgbe_get_rx_tstamp(packet, rdesc);
1881 
1882 		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1883 			       CONTEXT, 1);
1884 		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1885 			       CONTEXT_NEXT, 0);
1886 		return 0;
1887 	}
1888 
1889 	/* Normal Descriptor, be sure Context Descriptor bit is off */
1890 	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
1891 
1892 	/* Indicate if a Context Descriptor is next */
1893 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
1894 		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1895 			       CONTEXT_NEXT, 1);
1896 
1897 	/* Get the header length */
1898 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
1899 		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1900 			       FIRST, 1);
1901 		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
1902 						      RX_NORMAL_DESC2, HL);
1903 		if (rdata->rx.hdr_len)
1904 			pdata->ext_stats.rx_split_header_packets++;
1905 	} else {
1906 		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1907 			       FIRST, 0);
1908 	}
1909 
1910 	/* Get the RSS hash */
1911 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
1912 		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1913 			       RSS_HASH, 1);
1914 
1915 		packet->rss_hash = le32_to_cpu(rdesc->desc1);
1916 
1917 		l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
1918 		switch (l34t) {
1919 		case RX_DESC3_L34T_IPV4_TCP:
1920 		case RX_DESC3_L34T_IPV4_UDP:
1921 		case RX_DESC3_L34T_IPV6_TCP:
1922 		case RX_DESC3_L34T_IPV6_UDP:
1923 			packet->rss_hash_type = PKT_HASH_TYPE_L4;
1924 			break;
1925 		default:
1926 			packet->rss_hash_type = PKT_HASH_TYPE_L3;
1927 		}
1928 	}
1929 
1930 	/* Not all the data has been transferred for this packet */
1931 	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
1932 		return 0;
1933 
1934 	/* This is the last of the data for this packet */
1935 	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1936 		       LAST, 1);
1937 
1938 	/* Get the packet length */
1939 	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
1940 
1941 	/* Set checksum done indicator as appropriate */
1942 	if (netdev->features & NETIF_F_RXCSUM)
1943 		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1944 			       CSUM_DONE, 1);
1945 
1946 	/* Check for errors (only valid in last descriptor) */
1947 	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
1948 	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
1949 	netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
1950 
1951 	if (!err || !etlt) {
1952 		/* No error if err is 0 or etlt is 0 */
1953 		if ((etlt == 0x09) &&
1954 		    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1955 			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1956 				       VLAN_CTAG, 1);
1957 			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
1958 							      RX_NORMAL_DESC0,
1959 							      OVT);
1960 			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
1961 				  packet->vlan_ctag);
1962 		}
1963 	} else {
1964 		if ((etlt == 0x05) || (etlt == 0x06))
1965 			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1966 				       CSUM_DONE, 0);
1967 		else
1968 			XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
1969 				       FRAME, 1);
1970 	}
1971 
1972 	DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
1973 	      ring->cur & (ring->rdesc_count - 1), ring->cur);
1974 
1975 	return 0;
1976 }
1977 
1978 static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
1979 {
1980 	/* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
1981 	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
1982 }
1983 
1984 static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
1985 {
1986 	/* Rx and Tx share LD bit, so check TDES3.LD bit */
1987 	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
1988 }
1989 
1990 static int xgbe_enable_int(struct xgbe_channel *channel,
1991 			   enum xgbe_int int_id)
1992 {
1993 	unsigned int dma_ch_ier;
1994 
1995 	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
1996 
1997 	switch (int_id) {
1998 	case XGMAC_INT_DMA_CH_SR_TI:
1999 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
2000 		break;
2001 	case XGMAC_INT_DMA_CH_SR_TPS:
2002 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
2003 		break;
2004 	case XGMAC_INT_DMA_CH_SR_TBU:
2005 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
2006 		break;
2007 	case XGMAC_INT_DMA_CH_SR_RI:
2008 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
2009 		break;
2010 	case XGMAC_INT_DMA_CH_SR_RBU:
2011 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
2012 		break;
2013 	case XGMAC_INT_DMA_CH_SR_RPS:
2014 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
2015 		break;
2016 	case XGMAC_INT_DMA_CH_SR_TI_RI:
2017 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
2018 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
2019 		break;
2020 	case XGMAC_INT_DMA_CH_SR_FBE:
2021 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
2022 		break;
2023 	case XGMAC_INT_DMA_ALL:
2024 		dma_ch_ier |= channel->saved_ier;
2025 		break;
2026 	default:
2027 		return -1;
2028 	}
2029 
2030 	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
2031 
2032 	return 0;
2033 }
2034 
2035 static int xgbe_disable_int(struct xgbe_channel *channel,
2036 			    enum xgbe_int int_id)
2037 {
2038 	unsigned int dma_ch_ier;
2039 
2040 	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
2041 
2042 	switch (int_id) {
2043 	case XGMAC_INT_DMA_CH_SR_TI:
2044 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
2045 		break;
2046 	case XGMAC_INT_DMA_CH_SR_TPS:
2047 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
2048 		break;
2049 	case XGMAC_INT_DMA_CH_SR_TBU:
2050 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
2051 		break;
2052 	case XGMAC_INT_DMA_CH_SR_RI:
2053 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
2054 		break;
2055 	case XGMAC_INT_DMA_CH_SR_RBU:
2056 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
2057 		break;
2058 	case XGMAC_INT_DMA_CH_SR_RPS:
2059 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
2060 		break;
2061 	case XGMAC_INT_DMA_CH_SR_TI_RI:
2062 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
2063 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
2064 		break;
2065 	case XGMAC_INT_DMA_CH_SR_FBE:
2066 		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
2067 		break;
2068 	case XGMAC_INT_DMA_ALL:
2069 		channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
2070 		dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
2071 		break;
2072 	default:
2073 		return -1;
2074 	}
2075 
2076 	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
2077 
2078 	return 0;
2079 }
2080 
2081 static int __xgbe_exit(struct xgbe_prv_data *pdata)
2082 {
2083 	unsigned int count = 2000;
2084 
2085 	DBGPR("-->xgbe_exit\n");
2086 
2087 	/* Issue a software reset */
2088 	XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
2089 	usleep_range(10, 15);
2090 
2091 	/* Poll Until Poll Condition */
2092 	while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
2093 		usleep_range(500, 600);
2094 
2095 	if (!count)
2096 		return -EBUSY;
2097 
2098 	DBGPR("<--xgbe_exit\n");
2099 
2100 	return 0;
2101 }
2102 
2103 static int xgbe_exit(struct xgbe_prv_data *pdata)
2104 {
2105 	int ret;
2106 
2107 	/* To guard against possible incorrectly generated interrupts,
2108 	 * issue the software reset twice.
2109 	 */
2110 	ret = __xgbe_exit(pdata);
2111 	if (ret)
2112 		return ret;
2113 
2114 	return __xgbe_exit(pdata);
2115 }
2116 
2117 static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
2118 {
2119 	unsigned int i, count;
2120 
2121 	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
2122 		return 0;
2123 
2124 	for (i = 0; i < pdata->tx_q_count; i++)
2125 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
2126 
2127 	/* Poll Until Poll Condition */
2128 	for (i = 0; i < pdata->tx_q_count; i++) {
2129 		count = 2000;
2130 		while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
2131 							MTL_Q_TQOMR, FTQ))
2132 			usleep_range(500, 600);
2133 
2134 		if (!count)
2135 			return -EBUSY;
2136 	}
2137 
2138 	return 0;
2139 }
2140 
2141 static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
2142 {
2143 	/* Set enhanced addressing mode */
2144 	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
2145 
2146 	/* Set the System Bus mode */
2147 	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
2148 	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
2149 }
2150 
2151 static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
2152 {
2153 	unsigned int arcache, awcache;
2154 
2155 	arcache = 0;
2156 	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
2157 	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
2158 	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
2159 	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
2160 	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
2161 	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
2162 	XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
2163 
2164 	awcache = 0;
2165 	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
2166 	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
2167 	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
2168 	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
2169 	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
2170 	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
2171 	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
2172 	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
2173 	XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
2174 }
2175 
2176 static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
2177 {
2178 	unsigned int i;
2179 
2180 	/* Set Tx to weighted round robin scheduling algorithm */
2181 	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
2182 
2183 	/* Set Tx traffic classes to use WRR algorithm with equal weights */
2184 	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
2185 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
2186 				       MTL_TSA_ETS);
2187 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
2188 	}
2189 
2190 	/* Set Rx to strict priority algorithm */
2191 	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
2192 }
2193 
2194 static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
2195 					      unsigned int queue,
2196 					      unsigned int q_fifo_size)
2197 {
2198 	unsigned int frame_fifo_size;
2199 	unsigned int rfa, rfd;
2200 
2201 	frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));
2202 
2203 	if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) {
2204 		/* PFC is active for this queue */
2205 		rfa = pdata->pfc_rfa;
2206 		rfd = rfa + frame_fifo_size;
2207 		if (rfd > XGMAC_FLOW_CONTROL_MAX)
2208 			rfd = XGMAC_FLOW_CONTROL_MAX;
2209 		if (rfa >= XGMAC_FLOW_CONTROL_MAX)
2210 			rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT;
2211 	} else {
2212 		/* This path deals with just maximum frame sizes which are
2213 		 * limited to a jumbo frame of 9,000 (plus headers, etc.)
2214 		 * so we can never exceed the maximum allowable RFA/RFD
2215 		 * values.
2216 		 */
2217 		if (q_fifo_size <= 2048) {
2218 			/* rx_rfd to zero to signal no flow control */
2219 			pdata->rx_rfa[queue] = 0;
2220 			pdata->rx_rfd[queue] = 0;
2221 			return;
2222 		}
2223 
2224 		if (q_fifo_size <= 4096) {
2225 			/* Between 2048 and 4096 */
2226 			pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
2227 			pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
2228 			return;
2229 		}
2230 
2231 		if (q_fifo_size <= frame_fifo_size) {
2232 			/* Between 4096 and max-frame */
2233 			pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
2234 			pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
2235 			return;
2236 		}
2237 
2238 		if (q_fifo_size <= (frame_fifo_size * 3)) {
2239 			/* Between max-frame and 3 max-frames,
2240 			 * trigger if we get just over a frame of data and
2241 			 * resume when we have just under half a frame left.
2242 			 */
2243 			rfa = q_fifo_size - frame_fifo_size;
2244 			rfd = rfa + (frame_fifo_size / 2);
2245 		} else {
2246 			/* Above 3 max-frames - trigger when just over
2247 			 * 2 frames of space available
2248 			 */
2249 			rfa = frame_fifo_size * 2;
2250 			rfa += XGMAC_FLOW_CONTROL_UNIT;
2251 			rfd = rfa + frame_fifo_size;
2252 		}
2253 	}
2254 
2255 	pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
2256 	pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
2257 }
2258 
2259 static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
2260 						  unsigned int *fifo)
2261 {
2262 	unsigned int q_fifo_size;
2263 	unsigned int i;
2264 
2265 	for (i = 0; i < pdata->rx_q_count; i++) {
2266 		q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;
2267 
2268 		xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
2269 	}
2270 }
2271 
2272 static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
2273 {
2274 	unsigned int i;
2275 
2276 	for (i = 0; i < pdata->rx_q_count; i++) {
2277 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
2278 				       pdata->rx_rfa[i]);
2279 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
2280 				       pdata->rx_rfd[i]);
2281 	}
2282 }
2283 
2284 static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
2285 {
2286 	/* The configured value may not be the actual amount of fifo RAM */
2287 	return min_t(unsigned int, pdata->tx_max_fifo_size,
2288 		     pdata->hw_feat.tx_fifo_size);
2289 }
2290 
2291 static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
2292 {
2293 	/* The configured value may not be the actual amount of fifo RAM */
2294 	return min_t(unsigned int, pdata->rx_max_fifo_size,
2295 		     pdata->hw_feat.rx_fifo_size);
2296 }
2297 
2298 static void xgbe_calculate_equal_fifo(unsigned int fifo_size,
2299 				      unsigned int queue_count,
2300 				      unsigned int *fifo)
2301 {
2302 	unsigned int q_fifo_size;
2303 	unsigned int p_fifo;
2304 	unsigned int i;
2305 
2306 	q_fifo_size = fifo_size / queue_count;
2307 
2308 	/* Calculate the fifo setting by dividing the queue's fifo size
2309 	 * by the fifo allocation increment (with 0 representing the
2310 	 * base allocation increment so decrement the result by 1).
2311 	 */
2312 	p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
2313 	if (p_fifo)
2314 		p_fifo--;
2315 
2316 	/* Distribute the fifo equally amongst the queues */
2317 	for (i = 0; i < queue_count; i++)
2318 		fifo[i] = p_fifo;
2319 }
2320 
2321 static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size,
2322 					   unsigned int queue_count,
2323 					   unsigned int *fifo)
2324 {
2325 	unsigned int i;
2326 
2327 	BUILD_BUG_ON_NOT_POWER_OF_2(XGMAC_FIFO_MIN_ALLOC);
2328 
2329 	if (queue_count <= IEEE_8021QAZ_MAX_TCS)
2330 		return fifo_size;
2331 
2332 	/* Rx queues 9 and up are for specialized packets,
2333 	 * such as PTP or DCB control packets, etc. and
2334 	 * don't require a large fifo
2335 	 */
2336 	for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
2337 		fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
2338 		fifo_size -= XGMAC_FIFO_MIN_ALLOC;
2339 	}
2340 
2341 	return fifo_size;
2342 }
2343 
2344 static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata)
2345 {
2346 	unsigned int delay;
2347 
2348 	/* If a delay has been provided, use that */
2349 	if (pdata->pfc->delay)
2350 		return pdata->pfc->delay / 8;
2351 
2352 	/* Allow for two maximum size frames */
2353 	delay = xgbe_get_max_frame(pdata);
2354 	delay += XGMAC_ETH_PREAMBLE;
2355 	delay *= 2;
2356 
2357 	/* Allow for PFC frame */
2358 	delay += XGMAC_PFC_DATA_LEN;
2359 	delay += ETH_HLEN + ETH_FCS_LEN;
2360 	delay += XGMAC_ETH_PREAMBLE;
2361 
2362 	/* Allow for miscellaneous delays (LPI exit, cable, etc.) */
2363 	delay += XGMAC_PFC_DELAYS;
2364 
2365 	return delay;
2366 }
2367 
2368 static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata)
2369 {
2370 	unsigned int count, prio_queues;
2371 	unsigned int i;
2372 
2373 	if (!pdata->pfc->pfc_en)
2374 		return 0;
2375 
2376 	count = 0;
2377 	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2378 	for (i = 0; i < prio_queues; i++) {
2379 		if (!xgbe_is_pfc_queue(pdata, i))
2380 			continue;
2381 
2382 		pdata->pfcq[i] = 1;
2383 		count++;
2384 	}
2385 
2386 	return count;
2387 }
2388 
2389 static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata,
2390 				    unsigned int fifo_size,
2391 				    unsigned int *fifo)
2392 {
2393 	unsigned int q_fifo_size, rem_fifo, addn_fifo;
2394 	unsigned int prio_queues;
2395 	unsigned int pfc_count;
2396 	unsigned int i;
2397 
2398 	q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata));
2399 	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2400 	pfc_count = xgbe_get_pfc_queues(pdata);
2401 
2402 	if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) {
2403 		/* No traffic classes with PFC enabled or can't do lossless */
2404 		xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
2405 		return;
2406 	}
2407 
2408 	/* Calculate how much fifo we have to play with */
2409 	rem_fifo = fifo_size - (q_fifo_size * prio_queues);
2410 
2411 	/* Calculate how much more than base fifo PFC needs, which also
2412 	 * becomes the threshold activation point (RFA)
2413 	 */
2414 	pdata->pfc_rfa = xgbe_get_pfc_delay(pdata);
2415 	pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa);
2416 
2417 	if (pdata->pfc_rfa > q_fifo_size) {
2418 		addn_fifo = pdata->pfc_rfa - q_fifo_size;
2419 		addn_fifo = XGMAC_FIFO_ALIGN(addn_fifo);
2420 	} else {
2421 		addn_fifo = 0;
2422 	}
2423 
2424 	/* Calculate DCB fifo settings:
2425 	 *   - distribute remaining fifo between the VLAN priority
2426 	 *     queues based on traffic class PFC enablement and overall
2427 	 *     priority (0 is lowest priority, so start at highest)
2428 	 */
2429 	i = prio_queues;
2430 	while (i > 0) {
2431 		i--;
2432 
2433 		fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1;
2434 
2435 		if (!pdata->pfcq[i] || !addn_fifo)
2436 			continue;
2437 
2438 		if (addn_fifo > rem_fifo) {
2439 			netdev_warn(pdata->netdev,
2440 				    "RXq%u cannot set needed fifo size\n", i);
2441 			if (!rem_fifo)
2442 				continue;
2443 
2444 			addn_fifo = rem_fifo;
2445 		}
2446 
2447 		fifo[i] += (addn_fifo / XGMAC_FIFO_UNIT);
2448 		rem_fifo -= addn_fifo;
2449 	}
2450 
2451 	if (rem_fifo) {
2452 		unsigned int inc_fifo = rem_fifo / prio_queues;
2453 
2454 		/* Distribute remaining fifo across queues */
2455 		for (i = 0; i < prio_queues; i++)
2456 			fifo[i] += (inc_fifo / XGMAC_FIFO_UNIT);
2457 	}
2458 }
2459 
2460 static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
2461 {
2462 	unsigned int fifo_size;
2463 	unsigned int fifo[XGBE_MAX_QUEUES];
2464 	unsigned int i;
2465 
2466 	fifo_size = xgbe_get_tx_fifo_size(pdata);
2467 
2468 	xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);
2469 
2470 	for (i = 0; i < pdata->tx_q_count; i++)
2471 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);
2472 
2473 	netif_info(pdata, drv, pdata->netdev,
2474 		   "%d Tx hardware queues, %d byte fifo per queue\n",
2475 		   pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
2476 }
2477 
2478 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
2479 {
2480 	unsigned int fifo_size;
2481 	unsigned int fifo[XGBE_MAX_QUEUES];
2482 	unsigned int prio_queues;
2483 	unsigned int i;
2484 
2485 	/* Clear any DCB related fifo/queue information */
2486 	memset(pdata->pfcq, 0, sizeof(pdata->pfcq));
2487 	pdata->pfc_rfa = 0;
2488 
2489 	fifo_size = xgbe_get_rx_fifo_size(pdata);
2490 	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2491 
2492 	/* Assign a minimum fifo to the non-VLAN priority queues */
2493 	fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);
2494 
2495 	if (pdata->pfc && pdata->ets)
2496 		xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo);
2497 	else
2498 		xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
2499 
2500 	for (i = 0; i < pdata->rx_q_count; i++)
2501 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);
2502 
2503 	xgbe_calculate_flow_control_threshold(pdata, fifo);
2504 	xgbe_config_flow_control_threshold(pdata);
2505 
2506 	if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) {
2507 		netif_info(pdata, drv, pdata->netdev,
2508 			   "%u Rx hardware queues\n", pdata->rx_q_count);
2509 		for (i = 0; i < pdata->rx_q_count; i++)
2510 			netif_info(pdata, drv, pdata->netdev,
2511 				   "RxQ%u, %u byte fifo queue\n", i,
2512 				   ((fifo[i] + 1) * XGMAC_FIFO_UNIT));
2513 	} else {
2514 		netif_info(pdata, drv, pdata->netdev,
2515 			   "%u Rx hardware queues, %u byte fifo per queue\n",
2516 			   pdata->rx_q_count,
2517 			   ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
2518 	}
2519 }
2520 
2521 static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
2522 {
2523 	unsigned int qptc, qptc_extra, queue;
2524 	unsigned int prio_queues;
2525 	unsigned int ppq, ppq_extra, prio;
2526 	unsigned int mask;
2527 	unsigned int i, j, reg, reg_val;
2528 
2529 	/* Map the MTL Tx Queues to Traffic Classes
2530 	 *   Note: Tx Queues >= Traffic Classes
2531 	 */
2532 	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
2533 	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
2534 
2535 	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
2536 		for (j = 0; j < qptc; j++) {
2537 			netif_dbg(pdata, drv, pdata->netdev,
2538 				  "TXq%u mapped to TC%u\n", queue, i);
2539 			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
2540 					       Q2TCMAP, i);
2541 			pdata->q2tc_map[queue++] = i;
2542 		}
2543 
2544 		if (i < qptc_extra) {
2545 			netif_dbg(pdata, drv, pdata->netdev,
2546 				  "TXq%u mapped to TC%u\n", queue, i);
2547 			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
2548 					       Q2TCMAP, i);
2549 			pdata->q2tc_map[queue++] = i;
2550 		}
2551 	}
2552 
2553 	/* Map the 8 VLAN priority values to available MTL Rx queues */
2554 	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2555 	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
2556 	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
2557 
2558 	reg = MAC_RQC2R;
2559 	reg_val = 0;
2560 	for (i = 0, prio = 0; i < prio_queues;) {
2561 		mask = 0;
2562 		for (j = 0; j < ppq; j++) {
2563 			netif_dbg(pdata, drv, pdata->netdev,
2564 				  "PRIO%u mapped to RXq%u\n", prio, i);
2565 			mask |= (1 << prio);
2566 			pdata->prio2q_map[prio++] = i;
2567 		}
2568 
2569 		if (i < ppq_extra) {
2570 			netif_dbg(pdata, drv, pdata->netdev,
2571 				  "PRIO%u mapped to RXq%u\n", prio, i);
2572 			mask |= (1 << prio);
2573 			pdata->prio2q_map[prio++] = i;
2574 		}
2575 
2576 		reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
2577 
2578 		if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
2579 			continue;
2580 
2581 		XGMAC_IOWRITE(pdata, reg, reg_val);
2582 		reg += MAC_RQC2_INC;
2583 		reg_val = 0;
2584 	}
2585 
2586 	/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
2587 	reg = MTL_RQDCM0R;
2588 	reg_val = 0;
2589 	for (i = 0; i < pdata->rx_q_count;) {
2590 		reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
2591 
2592 		if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
2593 			continue;
2594 
2595 		XGMAC_IOWRITE(pdata, reg, reg_val);
2596 
2597 		reg += MTL_RQDCM_INC;
2598 		reg_val = 0;
2599 	}
2600 }
2601 
2602 static void xgbe_config_tc(struct xgbe_prv_data *pdata)
2603 {
2604 	unsigned int offset, queue, prio;
2605 	u8 i;
2606 
2607 	netdev_reset_tc(pdata->netdev);
2608 	if (!pdata->num_tcs)
2609 		return;
2610 
2611 	netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
2612 
2613 	for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
2614 		while ((queue < pdata->tx_q_count) &&
2615 		       (pdata->q2tc_map[queue] == i))
2616 			queue++;
2617 
2618 		netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
2619 			  i, offset, queue - 1);
2620 		netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
2621 		offset = queue;
2622 	}
2623 
2624 	if (!pdata->ets)
2625 		return;
2626 
2627 	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
2628 		netdev_set_prio_tc_map(pdata->netdev, prio,
2629 				       pdata->ets->prio_tc[prio]);
2630 }
2631 
2632 static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
2633 {
2634 	struct ieee_ets *ets = pdata->ets;
2635 	unsigned int total_weight, min_weight, weight;
2636 	unsigned int mask, reg, reg_val;
2637 	unsigned int i, prio;
2638 
2639 	if (!ets)
2640 		return;
2641 
2642 	/* Set Tx to deficit weighted round robin scheduling algorithm (when
2643 	 * traffic class is using ETS algorithm)
2644 	 */
2645 	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
2646 
2647 	/* Set Traffic Class algorithms */
2648 	total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
2649 	min_weight = total_weight / 100;
2650 	if (!min_weight)
2651 		min_weight = 1;
2652 
2653 	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
2654 		/* Map the priorities to the traffic class */
2655 		mask = 0;
2656 		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
2657 			if (ets->prio_tc[prio] == i)
2658 				mask |= (1 << prio);
2659 		}
2660 		mask &= 0xff;
2661 
2662 		netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
2663 			  i, mask);
2664 		reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
2665 		reg_val = XGMAC_IOREAD(pdata, reg);
2666 
2667 		reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
2668 		reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));
2669 
2670 		XGMAC_IOWRITE(pdata, reg, reg_val);
2671 
2672 		/* Set the traffic class algorithm */
2673 		switch (ets->tc_tsa[i]) {
2674 		case IEEE_8021QAZ_TSA_STRICT:
2675 			netif_dbg(pdata, drv, pdata->netdev,
2676 				  "TC%u using SP\n", i);
2677 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
2678 					       MTL_TSA_SP);
2679 			break;
2680 		case IEEE_8021QAZ_TSA_ETS:
2681 			weight = total_weight * ets->tc_tx_bw[i] / 100;
2682 			weight = clamp(weight, min_weight, total_weight);
2683 
2684 			netif_dbg(pdata, drv, pdata->netdev,
2685 				  "TC%u using DWRR (weight %u)\n", i, weight);
2686 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
2687 					       MTL_TSA_ETS);
2688 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
2689 					       weight);
2690 			break;
2691 		}
2692 	}
2693 
2694 	xgbe_config_tc(pdata);
2695 }
2696 
2697 static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
2698 {
2699 	if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
2700 		/* Just stop the Tx queues while Rx fifo is changed */
2701 		netif_tx_stop_all_queues(pdata->netdev);
2702 
2703 		/* Suspend Rx so that fifo's can be adjusted */
2704 		pdata->hw_if.disable_rx(pdata);
2705 	}
2706 
2707 	xgbe_config_rx_fifo_size(pdata);
2708 	xgbe_config_flow_control(pdata);
2709 
2710 	if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
2711 		/* Resume Rx */
2712 		pdata->hw_if.enable_rx(pdata);
2713 
2714 		/* Resume Tx queues */
2715 		netif_tx_start_all_queues(pdata->netdev);
2716 	}
2717 }
2718 
2719 static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
2720 {
2721 	xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
2722 
2723 	/* Filtering is done using perfect filtering and hash filtering */
2724 	if (pdata->hw_feat.hash_table_size) {
2725 		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
2726 		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
2727 		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
2728 	}
2729 }
2730 
2731 static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
2732 {
2733 	unsigned int val;
2734 
2735 	val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
2736 
2737 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
2738 }
2739 
2740 static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
2741 {
2742 	xgbe_set_speed(pdata, pdata->phy_speed);
2743 }
2744 
2745 static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
2746 {
2747 	if (pdata->netdev->features & NETIF_F_RXCSUM)
2748 		xgbe_enable_rx_csum(pdata);
2749 	else
2750 		xgbe_disable_rx_csum(pdata);
2751 }
2752 
2753 static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
2754 {
2755 	/* Indicate that VLAN Tx CTAGs come from context descriptors */
2756 	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
2757 	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
2758 
2759 	/* Set the current VLAN Hash Table register value */
2760 	xgbe_update_vlan_hash_table(pdata);
2761 
2762 	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
2763 		xgbe_enable_rx_vlan_filtering(pdata);
2764 	else
2765 		xgbe_disable_rx_vlan_filtering(pdata);
2766 
2767 	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2768 		xgbe_enable_rx_vlan_stripping(pdata);
2769 	else
2770 		xgbe_disable_rx_vlan_stripping(pdata);
2771 }
2772 
2773 static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
2774 {
2775 	bool read_hi;
2776 	u64 val;
2777 
2778 	if (pdata->vdata->mmc_64bit) {
2779 		switch (reg_lo) {
2780 		/* These registers are always 32 bit */
2781 		case MMC_RXRUNTERROR:
2782 		case MMC_RXJABBERERROR:
2783 		case MMC_RXUNDERSIZE_G:
2784 		case MMC_RXOVERSIZE_G:
2785 		case MMC_RXWATCHDOGERROR:
2786 			read_hi = false;
2787 			break;
2788 
2789 		default:
2790 			read_hi = true;
2791 		}
2792 	} else {
2793 		switch (reg_lo) {
2794 		/* These registers are always 64 bit */
2795 		case MMC_TXOCTETCOUNT_GB_LO:
2796 		case MMC_TXOCTETCOUNT_G_LO:
2797 		case MMC_RXOCTETCOUNT_GB_LO:
2798 		case MMC_RXOCTETCOUNT_G_LO:
2799 			read_hi = true;
2800 			break;
2801 
2802 		default:
2803 			read_hi = false;
2804 		}
2805 	}
2806 
2807 	val = XGMAC_IOREAD(pdata, reg_lo);
2808 
2809 	if (read_hi)
2810 		val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
2811 
2812 	return val;
2813 }
2814 
2815 static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
2816 {
2817 	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
2818 	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
2819 
2820 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
2821 		stats->txoctetcount_gb +=
2822 			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
2823 
2824 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
2825 		stats->txframecount_gb +=
2826 			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
2827 
2828 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
2829 		stats->txbroadcastframes_g +=
2830 			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
2831 
2832 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
2833 		stats->txmulticastframes_g +=
2834 			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
2835 
2836 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
2837 		stats->tx64octets_gb +=
2838 			xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
2839 
2840 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
2841 		stats->tx65to127octets_gb +=
2842 			xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
2843 
2844 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
2845 		stats->tx128to255octets_gb +=
2846 			xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
2847 
2848 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
2849 		stats->tx256to511octets_gb +=
2850 			xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
2851 
2852 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
2853 		stats->tx512to1023octets_gb +=
2854 			xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
2855 
2856 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
2857 		stats->tx1024tomaxoctets_gb +=
2858 			xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
2859 
2860 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
2861 		stats->txunicastframes_gb +=
2862 			xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
2863 
2864 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
2865 		stats->txmulticastframes_gb +=
2866 			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
2867 
2868 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
2869 		stats->txbroadcastframes_g +=
2870 			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
2871 
2872 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
2873 		stats->txunderflowerror +=
2874 			xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
2875 
2876 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
2877 		stats->txoctetcount_g +=
2878 			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
2879 
2880 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
2881 		stats->txframecount_g +=
2882 			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
2883 
2884 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
2885 		stats->txpauseframes +=
2886 			xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
2887 
2888 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
2889 		stats->txvlanframes_g +=
2890 			xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
2891 }
2892 
2893 static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
2894 {
2895 	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
2896 	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
2897 
2898 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
2899 		stats->rxframecount_gb +=
2900 			xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
2901 
2902 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
2903 		stats->rxoctetcount_gb +=
2904 			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
2905 
2906 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
2907 		stats->rxoctetcount_g +=
2908 			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
2909 
2910 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
2911 		stats->rxbroadcastframes_g +=
2912 			xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
2913 
2914 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
2915 		stats->rxmulticastframes_g +=
2916 			xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
2917 
2918 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
2919 		stats->rxcrcerror +=
2920 			xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
2921 
2922 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
2923 		stats->rxrunterror +=
2924 			xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
2925 
2926 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
2927 		stats->rxjabbererror +=
2928 			xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
2929 
2930 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
2931 		stats->rxundersize_g +=
2932 			xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
2933 
2934 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
2935 		stats->rxoversize_g +=
2936 			xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
2937 
2938 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
2939 		stats->rx64octets_gb +=
2940 			xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
2941 
2942 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
2943 		stats->rx65to127octets_gb +=
2944 			xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
2945 
2946 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
2947 		stats->rx128to255octets_gb +=
2948 			xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
2949 
2950 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
2951 		stats->rx256to511octets_gb +=
2952 			xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
2953 
2954 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
2955 		stats->rx512to1023octets_gb +=
2956 			xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
2957 
2958 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
2959 		stats->rx1024tomaxoctets_gb +=
2960 			xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
2961 
2962 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
2963 		stats->rxunicastframes_g +=
2964 			xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
2965 
2966 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
2967 		stats->rxlengtherror +=
2968 			xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
2969 
2970 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
2971 		stats->rxoutofrangetype +=
2972 			xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
2973 
2974 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
2975 		stats->rxpauseframes +=
2976 			xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
2977 
2978 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
2979 		stats->rxfifooverflow +=
2980 			xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
2981 
2982 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
2983 		stats->rxvlanframes_gb +=
2984 			xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
2985 
2986 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
2987 		stats->rxwatchdogerror +=
2988 			xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
2989 }
2990 
2991 static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
2992 {
2993 	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
2994 
2995 	/* Freeze counters */
2996 	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
2997 
2998 	stats->txoctetcount_gb +=
2999 		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
3000 
3001 	stats->txframecount_gb +=
3002 		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
3003 
3004 	stats->txbroadcastframes_g +=
3005 		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
3006 
3007 	stats->txmulticastframes_g +=
3008 		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
3009 
3010 	stats->tx64octets_gb +=
3011 		xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
3012 
3013 	stats->tx65to127octets_gb +=
3014 		xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
3015 
3016 	stats->tx128to255octets_gb +=
3017 		xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
3018 
3019 	stats->tx256to511octets_gb +=
3020 		xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
3021 
3022 	stats->tx512to1023octets_gb +=
3023 		xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
3024 
3025 	stats->tx1024tomaxoctets_gb +=
3026 		xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
3027 
3028 	stats->txunicastframes_gb +=
3029 		xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
3030 
3031 	stats->txmulticastframes_gb +=
3032 		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
3033 
3034 	stats->txbroadcastframes_g +=
3035 		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
3036 
3037 	stats->txunderflowerror +=
3038 		xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
3039 
3040 	stats->txoctetcount_g +=
3041 		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
3042 
3043 	stats->txframecount_g +=
3044 		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
3045 
3046 	stats->txpauseframes +=
3047 		xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
3048 
3049 	stats->txvlanframes_g +=
3050 		xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
3051 
3052 	stats->rxframecount_gb +=
3053 		xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
3054 
3055 	stats->rxoctetcount_gb +=
3056 		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
3057 
3058 	stats->rxoctetcount_g +=
3059 		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
3060 
3061 	stats->rxbroadcastframes_g +=
3062 		xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
3063 
3064 	stats->rxmulticastframes_g +=
3065 		xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
3066 
3067 	stats->rxcrcerror +=
3068 		xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
3069 
3070 	stats->rxrunterror +=
3071 		xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
3072 
3073 	stats->rxjabbererror +=
3074 		xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
3075 
3076 	stats->rxundersize_g +=
3077 		xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
3078 
3079 	stats->rxoversize_g +=
3080 		xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
3081 
3082 	stats->rx64octets_gb +=
3083 		xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
3084 
3085 	stats->rx65to127octets_gb +=
3086 		xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
3087 
3088 	stats->rx128to255octets_gb +=
3089 		xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
3090 
3091 	stats->rx256to511octets_gb +=
3092 		xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
3093 
3094 	stats->rx512to1023octets_gb +=
3095 		xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
3096 
3097 	stats->rx1024tomaxoctets_gb +=
3098 		xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
3099 
3100 	stats->rxunicastframes_g +=
3101 		xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
3102 
3103 	stats->rxlengtherror +=
3104 		xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
3105 
3106 	stats->rxoutofrangetype +=
3107 		xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
3108 
3109 	stats->rxpauseframes +=
3110 		xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
3111 
3112 	stats->rxfifooverflow +=
3113 		xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
3114 
3115 	stats->rxvlanframes_gb +=
3116 		xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
3117 
3118 	stats->rxwatchdogerror +=
3119 		xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
3120 
3121 	/* Un-freeze counters */
3122 	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
3123 }
3124 
3125 static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
3126 {
3127 	/* Set counters to reset on read */
3128 	XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
3129 
3130 	/* Reset the counters */
3131 	XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
3132 }
3133 
3134 static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata,
3135 				     unsigned int queue)
3136 {
3137 	unsigned int tx_status;
3138 	unsigned long tx_timeout;
3139 
3140 	/* The Tx engine cannot be stopped if it is actively processing
3141 	 * packets. Wait for the Tx queue to empty the Tx fifo.  Don't
3142 	 * wait forever though...
3143 	 */
3144 	tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
3145 	while (time_before(jiffies, tx_timeout)) {
3146 		tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
3147 		if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
3148 		    (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
3149 			break;
3150 
3151 		usleep_range(500, 1000);
3152 	}
3153 
3154 	if (!time_before(jiffies, tx_timeout))
3155 		netdev_info(pdata->netdev,
3156 			    "timed out waiting for Tx queue %u to empty\n",
3157 			    queue);
3158 }
3159 
3160 static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
3161 				 unsigned int queue)
3162 {
3163 	unsigned int tx_dsr, tx_pos, tx_qidx;
3164 	unsigned int tx_status;
3165 	unsigned long tx_timeout;
3166 
3167 	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
3168 		return xgbe_txq_prepare_tx_stop(pdata, queue);
3169 
3170 	/* Calculate the status register to read and the position within */
3171 	if (queue < DMA_DSRX_FIRST_QUEUE) {
3172 		tx_dsr = DMA_DSR0;
3173 		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
3174 	} else {
3175 		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
3176 
3177 		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
3178 		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
3179 			 DMA_DSRX_TPS_START;
3180 	}
3181 
3182 	/* The Tx engine cannot be stopped if it is actively processing
3183 	 * descriptors. Wait for the Tx engine to enter the stopped or
3184 	 * suspended state.  Don't wait forever though...
3185 	 */
3186 	tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
3187 	while (time_before(jiffies, tx_timeout)) {
3188 		tx_status = XGMAC_IOREAD(pdata, tx_dsr);
3189 		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
3190 		if ((tx_status == DMA_TPS_STOPPED) ||
3191 		    (tx_status == DMA_TPS_SUSPENDED))
3192 			break;
3193 
3194 		usleep_range(500, 1000);
3195 	}
3196 
3197 	if (!time_before(jiffies, tx_timeout))
3198 		netdev_info(pdata->netdev,
3199 			    "timed out waiting for Tx DMA channel %u to stop\n",
3200 			    queue);
3201 }
3202 
3203 static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
3204 {
3205 	struct xgbe_channel *channel;
3206 	unsigned int i;
3207 
3208 	/* Enable each Tx DMA channel */
3209 	channel = pdata->channel;
3210 	for (i = 0; i < pdata->channel_count; i++, channel++) {
3211 		if (!channel->tx_ring)
3212 			break;
3213 
3214 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
3215 	}
3216 
3217 	/* Enable each Tx queue */
3218 	for (i = 0; i < pdata->tx_q_count; i++)
3219 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
3220 				       MTL_Q_ENABLED);
3221 
3222 	/* Enable MAC Tx */
3223 	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
3224 }
3225 
3226 static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
3227 {
3228 	struct xgbe_channel *channel;
3229 	unsigned int i;
3230 
3231 	/* Prepare for Tx DMA channel stop */
3232 	for (i = 0; i < pdata->tx_q_count; i++)
3233 		xgbe_prepare_tx_stop(pdata, i);
3234 
3235 	/* Disable MAC Tx */
3236 	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
3237 
3238 	/* Disable each Tx queue */
3239 	for (i = 0; i < pdata->tx_q_count; i++)
3240 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
3241 
3242 	/* Disable each Tx DMA channel */
3243 	channel = pdata->channel;
3244 	for (i = 0; i < pdata->channel_count; i++, channel++) {
3245 		if (!channel->tx_ring)
3246 			break;
3247 
3248 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
3249 	}
3250 }
3251 
3252 static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
3253 				 unsigned int queue)
3254 {
3255 	unsigned int rx_status;
3256 	unsigned long rx_timeout;
3257 
3258 	/* The Rx engine cannot be stopped if it is actively processing
3259 	 * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
3260 	 * wait forever though...
3261 	 */
3262 	rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
3263 	while (time_before(jiffies, rx_timeout)) {
3264 		rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
3265 		if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
3266 		    (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
3267 			break;
3268 
3269 		usleep_range(500, 1000);
3270 	}
3271 
3272 	if (!time_before(jiffies, rx_timeout))
3273 		netdev_info(pdata->netdev,
3274 			    "timed out waiting for Rx queue %u to empty\n",
3275 			    queue);
3276 }
3277 
3278 static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
3279 {
3280 	struct xgbe_channel *channel;
3281 	unsigned int reg_val, i;
3282 
3283 	/* Enable each Rx DMA channel */
3284 	channel = pdata->channel;
3285 	for (i = 0; i < pdata->channel_count; i++, channel++) {
3286 		if (!channel->rx_ring)
3287 			break;
3288 
3289 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
3290 	}
3291 
3292 	/* Enable each Rx queue */
3293 	reg_val = 0;
3294 	for (i = 0; i < pdata->rx_q_count; i++)
3295 		reg_val |= (0x02 << (i << 1));
3296 	XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
3297 
3298 	/* Enable MAC Rx */
3299 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
3300 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
3301 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
3302 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
3303 }
3304 
3305 static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
3306 {
3307 	struct xgbe_channel *channel;
3308 	unsigned int i;
3309 
3310 	/* Disable MAC Rx */
3311 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
3312 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
3313 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
3314 	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
3315 
3316 	/* Prepare for Rx DMA channel stop */
3317 	for (i = 0; i < pdata->rx_q_count; i++)
3318 		xgbe_prepare_rx_stop(pdata, i);
3319 
3320 	/* Disable each Rx queue */
3321 	XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
3322 
3323 	/* Disable each Rx DMA channel */
3324 	channel = pdata->channel;
3325 	for (i = 0; i < pdata->channel_count; i++, channel++) {
3326 		if (!channel->rx_ring)
3327 			break;
3328 
3329 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
3330 	}
3331 }
3332 
3333 static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
3334 {
3335 	struct xgbe_channel *channel;
3336 	unsigned int i;
3337 
3338 	/* Enable each Tx DMA channel */
3339 	channel = pdata->channel;
3340 	for (i = 0; i < pdata->channel_count; i++, channel++) {
3341 		if (!channel->tx_ring)
3342 			break;
3343 
3344 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
3345 	}
3346 
3347 	/* Enable MAC Tx */
3348 	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
3349 }
3350 
3351 static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
3352 {
3353 	struct xgbe_channel *channel;
3354 	unsigned int i;
3355 
3356 	/* Prepare for Tx DMA channel stop */
3357 	for (i = 0; i < pdata->tx_q_count; i++)
3358 		xgbe_prepare_tx_stop(pdata, i);
3359 
3360 	/* Disable MAC Tx */
3361 	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
3362 
3363 	/* Disable each Tx DMA channel */
3364 	channel = pdata->channel;
3365 	for (i = 0; i < pdata->channel_count; i++, channel++) {
3366 		if (!channel->tx_ring)
3367 			break;
3368 
3369 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
3370 	}
3371 }
3372 
3373 static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
3374 {
3375 	struct xgbe_channel *channel;
3376 	unsigned int i;
3377 
3378 	/* Enable each Rx DMA channel */
3379 	channel = pdata->channel;
3380 	for (i = 0; i < pdata->channel_count; i++, channel++) {
3381 		if (!channel->rx_ring)
3382 			break;
3383 
3384 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
3385 	}
3386 }
3387 
3388 static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
3389 {
3390 	struct xgbe_channel *channel;
3391 	unsigned int i;
3392 
3393 	/* Disable each Rx DMA channel */
3394 	channel = pdata->channel;
3395 	for (i = 0; i < pdata->channel_count; i++, channel++) {
3396 		if (!channel->rx_ring)
3397 			break;
3398 
3399 		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
3400 	}
3401 }
3402 
3403 static int xgbe_init(struct xgbe_prv_data *pdata)
3404 {
3405 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
3406 	int ret;
3407 
3408 	DBGPR("-->xgbe_init\n");
3409 
3410 	/* Flush Tx queues */
3411 	ret = xgbe_flush_tx_queues(pdata);
3412 	if (ret) {
3413 		netdev_err(pdata->netdev, "error flushing TX queues\n");
3414 		return ret;
3415 	}
3416 
3417 	/*
3418 	 * Initialize DMA related features
3419 	 */
3420 	xgbe_config_dma_bus(pdata);
3421 	xgbe_config_dma_cache(pdata);
3422 	xgbe_config_osp_mode(pdata);
3423 	xgbe_config_pblx8(pdata);
3424 	xgbe_config_tx_pbl_val(pdata);
3425 	xgbe_config_rx_pbl_val(pdata);
3426 	xgbe_config_rx_coalesce(pdata);
3427 	xgbe_config_tx_coalesce(pdata);
3428 	xgbe_config_rx_buffer_size(pdata);
3429 	xgbe_config_tso_mode(pdata);
3430 	xgbe_config_sph_mode(pdata);
3431 	xgbe_config_rss(pdata);
3432 	desc_if->wrapper_tx_desc_init(pdata);
3433 	desc_if->wrapper_rx_desc_init(pdata);
3434 	xgbe_enable_dma_interrupts(pdata);
3435 
3436 	/*
3437 	 * Initialize MTL related features
3438 	 */
3439 	xgbe_config_mtl_mode(pdata);
3440 	xgbe_config_queue_mapping(pdata);
3441 	xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
3442 	xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
3443 	xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
3444 	xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
3445 	xgbe_config_tx_fifo_size(pdata);
3446 	xgbe_config_rx_fifo_size(pdata);
3447 	/*TODO: Error Packet and undersized good Packet forwarding enable
3448 		(FEP and FUP)
3449 	 */
3450 	xgbe_config_dcb_tc(pdata);
3451 	xgbe_enable_mtl_interrupts(pdata);
3452 
3453 	/*
3454 	 * Initialize MAC related features
3455 	 */
3456 	xgbe_config_mac_address(pdata);
3457 	xgbe_config_rx_mode(pdata);
3458 	xgbe_config_jumbo_enable(pdata);
3459 	xgbe_config_flow_control(pdata);
3460 	xgbe_config_mac_speed(pdata);
3461 	xgbe_config_checksum_offload(pdata);
3462 	xgbe_config_vlan_support(pdata);
3463 	xgbe_config_mmc(pdata);
3464 	xgbe_enable_mac_interrupts(pdata);
3465 
3466 	/*
3467 	 * Initialize ECC related features
3468 	 */
3469 	xgbe_enable_ecc_interrupts(pdata);
3470 
3471 	DBGPR("<--xgbe_init\n");
3472 
3473 	return 0;
3474 }
3475 
3476 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
3477 {
3478 	DBGPR("-->xgbe_init_function_ptrs\n");
3479 
3480 	hw_if->tx_complete = xgbe_tx_complete;
3481 
3482 	hw_if->set_mac_address = xgbe_set_mac_address;
3483 	hw_if->config_rx_mode = xgbe_config_rx_mode;
3484 
3485 	hw_if->enable_rx_csum = xgbe_enable_rx_csum;
3486 	hw_if->disable_rx_csum = xgbe_disable_rx_csum;
3487 
3488 	hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
3489 	hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
3490 	hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
3491 	hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
3492 	hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
3493 
3494 	hw_if->read_mmd_regs = xgbe_read_mmd_regs;
3495 	hw_if->write_mmd_regs = xgbe_write_mmd_regs;
3496 
3497 	hw_if->set_speed = xgbe_set_speed;
3498 
3499 	hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
3500 	hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
3501 	hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;
3502 
3503 	hw_if->set_gpio = xgbe_set_gpio;
3504 	hw_if->clr_gpio = xgbe_clr_gpio;
3505 
3506 	hw_if->enable_tx = xgbe_enable_tx;
3507 	hw_if->disable_tx = xgbe_disable_tx;
3508 	hw_if->enable_rx = xgbe_enable_rx;
3509 	hw_if->disable_rx = xgbe_disable_rx;
3510 
3511 	hw_if->powerup_tx = xgbe_powerup_tx;
3512 	hw_if->powerdown_tx = xgbe_powerdown_tx;
3513 	hw_if->powerup_rx = xgbe_powerup_rx;
3514 	hw_if->powerdown_rx = xgbe_powerdown_rx;
3515 
3516 	hw_if->dev_xmit = xgbe_dev_xmit;
3517 	hw_if->dev_read = xgbe_dev_read;
3518 	hw_if->enable_int = xgbe_enable_int;
3519 	hw_if->disable_int = xgbe_disable_int;
3520 	hw_if->init = xgbe_init;
3521 	hw_if->exit = xgbe_exit;
3522 
3523 	/* Descriptor related Sequences have to be initialized here */
3524 	hw_if->tx_desc_init = xgbe_tx_desc_init;
3525 	hw_if->rx_desc_init = xgbe_rx_desc_init;
3526 	hw_if->tx_desc_reset = xgbe_tx_desc_reset;
3527 	hw_if->rx_desc_reset = xgbe_rx_desc_reset;
3528 	hw_if->is_last_desc = xgbe_is_last_desc;
3529 	hw_if->is_context_desc = xgbe_is_context_desc;
3530 	hw_if->tx_start_xmit = xgbe_tx_start_xmit;
3531 
3532 	/* For FLOW ctrl */
3533 	hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
3534 	hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
3535 
3536 	/* For RX coalescing */
3537 	hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
3538 	hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
3539 	hw_if->usec_to_riwt = xgbe_usec_to_riwt;
3540 	hw_if->riwt_to_usec = xgbe_riwt_to_usec;
3541 
3542 	/* For RX and TX threshold config */
3543 	hw_if->config_rx_threshold = xgbe_config_rx_threshold;
3544 	hw_if->config_tx_threshold = xgbe_config_tx_threshold;
3545 
3546 	/* For RX and TX Store and Forward Mode config */
3547 	hw_if->config_rsf_mode = xgbe_config_rsf_mode;
3548 	hw_if->config_tsf_mode = xgbe_config_tsf_mode;
3549 
3550 	/* For TX DMA Operating on Second Frame config */
3551 	hw_if->config_osp_mode = xgbe_config_osp_mode;
3552 
3553 	/* For RX and TX PBL config */
3554 	hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
3555 	hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
3556 	hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
3557 	hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
3558 	hw_if->config_pblx8 = xgbe_config_pblx8;
3559 
3560 	/* For MMC statistics support */
3561 	hw_if->tx_mmc_int = xgbe_tx_mmc_int;
3562 	hw_if->rx_mmc_int = xgbe_rx_mmc_int;
3563 	hw_if->read_mmc_stats = xgbe_read_mmc_stats;
3564 
3565 	/* For PTP config */
3566 	hw_if->config_tstamp = xgbe_config_tstamp;
3567 	hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
3568 	hw_if->set_tstamp_time = xgbe_set_tstamp_time;
3569 	hw_if->get_tstamp_time = xgbe_get_tstamp_time;
3570 	hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
3571 
3572 	/* For Data Center Bridging config */
3573 	hw_if->config_tc = xgbe_config_tc;
3574 	hw_if->config_dcb_tc = xgbe_config_dcb_tc;
3575 	hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
3576 
3577 	/* For Receive Side Scaling */
3578 	hw_if->enable_rss = xgbe_enable_rss;
3579 	hw_if->disable_rss = xgbe_disable_rss;
3580 	hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
3581 	hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
3582 
3583 	/* For ECC */
3584 	hw_if->disable_ecc_ded = xgbe_disable_ecc_ded;
3585 	hw_if->disable_ecc_sec = xgbe_disable_ecc_sec;
3586 
3587 	DBGPR("<--xgbe_init_function_ptrs\n");
3588 }
3589