1 // SPDX-License-Identifier: GPL-2.0-only
2 /* 10G controller driver for Samsung SoCs
3  *
4  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
5  *		http://www.samsung.com
6  *
7  * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
8  */
9 
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 
12 #include <linux/clk.h>
13 #include <linux/crc32.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/etherdevice.h>
16 #include <linux/ethtool.h>
17 #include <linux/if.h>
18 #include <linux/if_ether.h>
19 #include <linux/if_vlan.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ip.h>
23 #include <linux/kernel.h>
24 #include <linux/mii.h>
25 #include <linux/module.h>
26 #include <linux/net_tstamp.h>
27 #include <linux/netdevice.h>
28 #include <linux/phy.h>
29 #include <linux/platform_device.h>
30 #include <linux/prefetch.h>
31 #include <linux/skbuff.h>
32 #include <linux/slab.h>
33 #include <linux/tcp.h>
34 #include <linux/sxgbe_platform.h>
35 
36 #include "sxgbe_common.h"
37 #include "sxgbe_desc.h"
38 #include "sxgbe_dma.h"
39 #include "sxgbe_mtl.h"
40 #include "sxgbe_reg.h"
41 
42 #define SXGBE_ALIGN(x)	L1_CACHE_ALIGN(x)
43 #define JUMBO_LEN	9000
44 
45 /* Module parameters */
46 #define TX_TIMEO	5000
47 #define DMA_TX_SIZE	512
48 #define DMA_RX_SIZE	1024
49 #define TC_DEFAULT	64
50 #define DMA_BUFFER_SIZE	BUF_SIZE_2KiB
51 /* The default timer value as per the sxgbe specification 1 sec(1000 ms) */
52 #define SXGBE_DEFAULT_LPI_TIMER	1000
53 
54 static int debug = -1;
55 static int eee_timer = SXGBE_DEFAULT_LPI_TIMER;
56 
57 module_param(eee_timer, int, 0644);
58 
59 module_param(debug, int, 0644);
60 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
61 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
62 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
63 
64 static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id);
65 static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id);
66 static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id);
67 
68 #define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
69 
70 #define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
71 
72 /**
73  * sxgbe_verify_args - verify the driver parameters.
74  * Description: it verifies if some wrong parameter is passed to the driver.
75  * Note that wrong parameters are replaced with the default values.
76  */
77 static void sxgbe_verify_args(void)
78 {
79 	if (unlikely(eee_timer < 0))
80 		eee_timer = SXGBE_DEFAULT_LPI_TIMER;
81 }
82 
83 static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
84 {
85 	/* Check and enter in LPI mode */
86 	if (!priv->tx_path_in_lpi_mode)
87 		priv->hw->mac->set_eee_mode(priv->ioaddr);
88 }
89 
90 void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
91 {
92 	/* Exit and disable EEE in case of we are are in LPI state. */
93 	priv->hw->mac->reset_eee_mode(priv->ioaddr);
94 	del_timer_sync(&priv->eee_ctrl_timer);
95 	priv->tx_path_in_lpi_mode = false;
96 }
97 
98 /**
99  * sxgbe_eee_ctrl_timer
100  * @t: timer list containing a data
101  * Description:
102  *  If there is no data transfer and if we are not in LPI state,
103  *  then MAC Transmitter can be moved to LPI state.
104  */
105 static void sxgbe_eee_ctrl_timer(struct timer_list *t)
106 {
107 	struct sxgbe_priv_data *priv = from_timer(priv, t, eee_ctrl_timer);
108 
109 	sxgbe_enable_eee_mode(priv);
110 	mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
111 }
112 
113 /**
114  * sxgbe_eee_init
115  * @priv: private device pointer
116  * Description:
117  *  If the EEE support has been enabled while configuring the driver,
118  *  if the GMAC actually supports the EEE (from the HW cap reg) and the
119  *  phy can also manage EEE, so enable the LPI state and start the timer
120  *  to verify if the tx path can enter in LPI state.
121  */
122 bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
123 {
124 	struct net_device *ndev = priv->dev;
125 	bool ret = false;
126 
127 	/* MAC core supports the EEE feature. */
128 	if (priv->hw_cap.eee) {
129 		/* Check if the PHY supports EEE */
130 		if (phy_init_eee(ndev->phydev, 1))
131 			return false;
132 
133 		priv->eee_active = 1;
134 		timer_setup(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, 0);
135 		priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
136 		add_timer(&priv->eee_ctrl_timer);
137 
138 		priv->hw->mac->set_eee_timer(priv->ioaddr,
139 					     SXGBE_DEFAULT_LPI_TIMER,
140 					     priv->tx_lpi_timer);
141 
142 		pr_info("Energy-Efficient Ethernet initialized\n");
143 
144 		ret = true;
145 	}
146 
147 	return ret;
148 }
149 
150 static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
151 {
152 	struct net_device *ndev = priv->dev;
153 
154 	/* When the EEE has been already initialised we have to
155 	 * modify the PLS bit in the LPI ctrl & status reg according
156 	 * to the PHY link status. For this reason.
157 	 */
158 	if (priv->eee_enabled)
159 		priv->hw->mac->set_eee_pls(priv->ioaddr, ndev->phydev->link);
160 }
161 
162 /**
163  * sxgbe_clk_csr_set - dynamically set the MDC clock
164  * @priv: driver private structure
165  * Description: this is to dynamically set the MDC clock according to the csr
166  * clock input.
167  */
168 static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv)
169 {
170 	u32 clk_rate = clk_get_rate(priv->sxgbe_clk);
171 
172 	/* assign the proper divider, this will be used during
173 	 * mdio communication
174 	 */
175 	if (clk_rate < SXGBE_CSR_F_150M)
176 		priv->clk_csr = SXGBE_CSR_100_150M;
177 	else if (clk_rate <= SXGBE_CSR_F_250M)
178 		priv->clk_csr = SXGBE_CSR_150_250M;
179 	else if (clk_rate <= SXGBE_CSR_F_300M)
180 		priv->clk_csr = SXGBE_CSR_250_300M;
181 	else if (clk_rate <= SXGBE_CSR_F_350M)
182 		priv->clk_csr = SXGBE_CSR_300_350M;
183 	else if (clk_rate <= SXGBE_CSR_F_400M)
184 		priv->clk_csr = SXGBE_CSR_350_400M;
185 	else if (clk_rate <= SXGBE_CSR_F_500M)
186 		priv->clk_csr = SXGBE_CSR_400_500M;
187 }
188 
189 /* minimum number of free TX descriptors required to wake up TX process */
190 #define SXGBE_TX_THRESH(x)	(x->dma_tx_size/4)
191 
192 static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
193 {
194 	return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
195 }
196 
197 /**
198  * sxgbe_adjust_link
199  * @dev: net device structure
200  * Description: it adjusts the link parameters.
201  */
202 static void sxgbe_adjust_link(struct net_device *dev)
203 {
204 	struct sxgbe_priv_data *priv = netdev_priv(dev);
205 	struct phy_device *phydev = dev->phydev;
206 	u8 new_state = 0;
207 	u8 speed = 0xff;
208 
209 	if (!phydev)
210 		return;
211 
212 	/* SXGBE is not supporting auto-negotiation and
213 	 * half duplex mode. so, not handling duplex change
214 	 * in this function. only handling speed and link status
215 	 */
216 	if (phydev->link) {
217 		if (phydev->speed != priv->speed) {
218 			new_state = 1;
219 			switch (phydev->speed) {
220 			case SPEED_10000:
221 				speed = SXGBE_SPEED_10G;
222 				break;
223 			case SPEED_2500:
224 				speed = SXGBE_SPEED_2_5G;
225 				break;
226 			case SPEED_1000:
227 				speed = SXGBE_SPEED_1G;
228 				break;
229 			default:
230 				netif_err(priv, link, dev,
231 					  "Speed (%d) not supported\n",
232 					  phydev->speed);
233 			}
234 
235 			priv->speed = phydev->speed;
236 			priv->hw->mac->set_speed(priv->ioaddr, speed);
237 		}
238 
239 		if (!priv->oldlink) {
240 			new_state = 1;
241 			priv->oldlink = 1;
242 		}
243 	} else if (priv->oldlink) {
244 		new_state = 1;
245 		priv->oldlink = 0;
246 		priv->speed = SPEED_UNKNOWN;
247 	}
248 
249 	if (new_state & netif_msg_link(priv))
250 		phy_print_status(phydev);
251 
252 	/* Alter the MAC settings for EEE */
253 	sxgbe_eee_adjust(priv);
254 }
255 
256 /**
257  * sxgbe_init_phy - PHY initialization
258  * @ndev: net device structure
259  * Description: it initializes the driver's PHY state, and attaches the PHY
260  * to the mac driver.
261  *  Return value:
262  *  0 on success
263  */
264 static int sxgbe_init_phy(struct net_device *ndev)
265 {
266 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
267 	char bus_id[MII_BUS_ID_SIZE];
268 	struct phy_device *phydev;
269 	struct sxgbe_priv_data *priv = netdev_priv(ndev);
270 	int phy_iface = priv->plat->interface;
271 
272 	/* assign default link status */
273 	priv->oldlink = 0;
274 	priv->speed = SPEED_UNKNOWN;
275 	priv->oldduplex = DUPLEX_UNKNOWN;
276 
277 	if (priv->plat->phy_bus_name)
278 		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
279 			 priv->plat->phy_bus_name, priv->plat->bus_id);
280 	else
281 		snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
282 			 priv->plat->bus_id);
283 
284 	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
285 		 priv->plat->phy_addr);
286 	netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);
287 
288 	phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);
289 
290 	if (IS_ERR(phydev)) {
291 		netdev_err(ndev, "Could not attach to PHY\n");
292 		return PTR_ERR(phydev);
293 	}
294 
295 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
296 	if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
297 	    (phy_iface == PHY_INTERFACE_MODE_RMII))
298 		phy_set_max_speed(phydev, SPEED_1000);
299 
300 	if (phydev->phy_id == 0) {
301 		phy_disconnect(phydev);
302 		return -ENODEV;
303 	}
304 
305 	netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
306 		   __func__, phydev->phy_id, phydev->link);
307 
308 	return 0;
309 }
310 
311 /**
312  * sxgbe_clear_descriptors: clear descriptors
313  * @priv: driver private structure
314  * Description: this function is called to clear the tx and rx descriptors
315  * in case of both basic and extended descriptors are used.
316  */
317 static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv)
318 {
319 	int i, j;
320 	unsigned int txsize = priv->dma_tx_size;
321 	unsigned int rxsize = priv->dma_rx_size;
322 
323 	/* Clear the Rx/Tx descriptors */
324 	for (j = 0; j < SXGBE_RX_QUEUES; j++) {
325 		for (i = 0; i < rxsize; i++)
326 			priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
327 						     priv->use_riwt, priv->mode,
328 						     (i == rxsize - 1));
329 	}
330 
331 	for (j = 0; j < SXGBE_TX_QUEUES; j++) {
332 		for (i = 0; i < txsize; i++)
333 			priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]);
334 	}
335 }
336 
337 static int sxgbe_init_rx_buffers(struct net_device *dev,
338 				 struct sxgbe_rx_norm_desc *p, int i,
339 				 unsigned int dma_buf_sz,
340 				 struct sxgbe_rx_queue *rx_ring)
341 {
342 	struct sxgbe_priv_data *priv = netdev_priv(dev);
343 	struct sk_buff *skb;
344 
345 	skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
346 	if (!skb)
347 		return -ENOMEM;
348 
349 	rx_ring->rx_skbuff[i] = skb;
350 	rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
351 						   dma_buf_sz, DMA_FROM_DEVICE);
352 
353 	if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
354 		netdev_err(dev, "%s: DMA mapping error\n", __func__);
355 		dev_kfree_skb_any(skb);
356 		return -EINVAL;
357 	}
358 
359 	p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];
360 
361 	return 0;
362 }
363 
364 /**
365  * sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated
366  * @dev: net device structure
367  * @p: dec pointer
368  * @i: index
369  * @dma_buf_sz: size
370  * @rx_ring: ring to be freed
371  *
372  * Description:  this function initializes the DMA RX descriptor
373  */
374 static void sxgbe_free_rx_buffers(struct net_device *dev,
375 				  struct sxgbe_rx_norm_desc *p, int i,
376 				  unsigned int dma_buf_sz,
377 				  struct sxgbe_rx_queue *rx_ring)
378 {
379 	struct sxgbe_priv_data *priv = netdev_priv(dev);
380 
381 	kfree_skb(rx_ring->rx_skbuff[i]);
382 	dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i],
383 			 dma_buf_sz, DMA_FROM_DEVICE);
384 }
385 
386 /**
387  * init_tx_ring - init the TX descriptor ring
388  * @dev: net device structure
389  * @queue_no: queue
390  * @tx_ring: ring to be initialised
391  * @tx_rsize: ring size
392  * Description:  this function initializes the DMA TX descriptor
393  */
394 static int init_tx_ring(struct device *dev, u8 queue_no,
395 			struct sxgbe_tx_queue *tx_ring,	int tx_rsize)
396 {
397 	/* TX ring is not allcoated */
398 	if (!tx_ring) {
399 		dev_err(dev, "No memory for TX queue of SXGBE\n");
400 		return -ENOMEM;
401 	}
402 
403 	/* allocate memory for TX descriptors */
404 	tx_ring->dma_tx = dma_alloc_coherent(dev,
405 					     tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
406 					     &tx_ring->dma_tx_phy, GFP_KERNEL);
407 	if (!tx_ring->dma_tx)
408 		return -ENOMEM;
409 
410 	/* allocate memory for TX skbuff array */
411 	tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
412 					      sizeof(dma_addr_t), GFP_KERNEL);
413 	if (!tx_ring->tx_skbuff_dma)
414 		goto dmamem_err;
415 
416 	tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
417 					  sizeof(struct sk_buff *), GFP_KERNEL);
418 
419 	if (!tx_ring->tx_skbuff)
420 		goto dmamem_err;
421 
422 	/* assign queue number */
423 	tx_ring->queue_no = queue_no;
424 
425 	/* initialise counters */
426 	tx_ring->dirty_tx = 0;
427 	tx_ring->cur_tx = 0;
428 
429 	return 0;
430 
431 dmamem_err:
432 	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
433 			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
434 	return -ENOMEM;
435 }
436 
437 /**
438  * free_rx_ring - free the RX descriptor ring
439  * @dev: net device structure
440  * @rx_ring: ring to be initialised
441  * @rx_rsize: ring size
442  * Description:  this function initializes the DMA RX descriptor
443  */
444 static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
445 			 int rx_rsize)
446 {
447 	dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
448 			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
449 	kfree(rx_ring->rx_skbuff_dma);
450 	kfree(rx_ring->rx_skbuff);
451 }
452 
453 /**
454  * init_rx_ring - init the RX descriptor ring
455  * @dev: net device structure
456  * @queue_no: queue
457  * @rx_ring: ring to be initialised
458  * @rx_rsize: ring size
459  * Description:  this function initializes the DMA RX descriptor
460  */
461 static int init_rx_ring(struct net_device *dev, u8 queue_no,
462 			struct sxgbe_rx_queue *rx_ring,	int rx_rsize)
463 {
464 	struct sxgbe_priv_data *priv = netdev_priv(dev);
465 	int desc_index;
466 	unsigned int bfsize = 0;
467 	unsigned int ret = 0;
468 
469 	/* Set the max buffer size according to the MTU. */
470 	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
471 
472 	netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);
473 
474 	/* RX ring is not allcoated */
475 	if (rx_ring == NULL) {
476 		netdev_err(dev, "No memory for RX queue\n");
477 		return -ENOMEM;
478 	}
479 
480 	/* assign queue number */
481 	rx_ring->queue_no = queue_no;
482 
483 	/* allocate memory for RX descriptors */
484 	rx_ring->dma_rx = dma_alloc_coherent(priv->device,
485 					     rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
486 					     &rx_ring->dma_rx_phy, GFP_KERNEL);
487 
488 	if (rx_ring->dma_rx == NULL)
489 		return -ENOMEM;
490 
491 	/* allocate memory for RX skbuff array */
492 	rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
493 					       sizeof(dma_addr_t), GFP_KERNEL);
494 	if (!rx_ring->rx_skbuff_dma) {
495 		ret = -ENOMEM;
496 		goto err_free_dma_rx;
497 	}
498 
499 	rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
500 					   sizeof(struct sk_buff *), GFP_KERNEL);
501 	if (!rx_ring->rx_skbuff) {
502 		ret = -ENOMEM;
503 		goto err_free_skbuff_dma;
504 	}
505 
506 	/* initialise the buffers */
507 	for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
508 		struct sxgbe_rx_norm_desc *p;
509 		p = rx_ring->dma_rx + desc_index;
510 		ret = sxgbe_init_rx_buffers(dev, p, desc_index,
511 					    bfsize, rx_ring);
512 		if (ret)
513 			goto err_free_rx_buffers;
514 	}
515 
516 	/* initialise counters */
517 	rx_ring->cur_rx = 0;
518 	rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
519 	priv->dma_buf_sz = bfsize;
520 
521 	return 0;
522 
523 err_free_rx_buffers:
524 	while (--desc_index >= 0) {
525 		struct sxgbe_rx_norm_desc *p;
526 
527 		p = rx_ring->dma_rx + desc_index;
528 		sxgbe_free_rx_buffers(dev, p, desc_index, bfsize, rx_ring);
529 	}
530 	kfree(rx_ring->rx_skbuff);
531 err_free_skbuff_dma:
532 	kfree(rx_ring->rx_skbuff_dma);
533 err_free_dma_rx:
534 	dma_free_coherent(priv->device,
535 			  rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
536 			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
537 
538 	return ret;
539 }
540 /**
541  * free_tx_ring - free the TX descriptor ring
542  * @dev: net device structure
543  * @tx_ring: ring to be initialised
544  * @tx_rsize: ring size
545  * Description:  this function initializes the DMA TX descriptor
546  */
547 static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
548 			 int tx_rsize)
549 {
550 	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
551 			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
552 }
553 
554 /**
555  * init_dma_desc_rings - init the RX/TX descriptor rings
556  * @netd: net device structure
557  * Description:  this function initializes the DMA RX/TX descriptors
558  * and allocates the socket buffers. It suppors the chained and ring
559  * modes.
560  */
561 static int init_dma_desc_rings(struct net_device *netd)
562 {
563 	int queue_num, ret;
564 	struct sxgbe_priv_data *priv = netdev_priv(netd);
565 	int tx_rsize = priv->dma_tx_size;
566 	int rx_rsize = priv->dma_rx_size;
567 
568 	/* Allocate memory for queue structures and TX descs */
569 	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
570 		ret = init_tx_ring(priv->device, queue_num,
571 				   priv->txq[queue_num], tx_rsize);
572 		if (ret) {
573 			dev_err(&netd->dev, "TX DMA ring allocation failed!\n");
574 			goto txalloc_err;
575 		}
576 
577 		/* save private pointer in each ring this
578 		 * pointer is needed during cleaing TX queue
579 		 */
580 		priv->txq[queue_num]->priv_ptr = priv;
581 	}
582 
583 	/* Allocate memory for queue structures and RX descs */
584 	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
585 		ret = init_rx_ring(netd, queue_num,
586 				   priv->rxq[queue_num], rx_rsize);
587 		if (ret) {
588 			netdev_err(netd, "RX DMA ring allocation failed!!\n");
589 			goto rxalloc_err;
590 		}
591 
592 		/* save private pointer in each ring this
593 		 * pointer is needed during cleaing TX queue
594 		 */
595 		priv->rxq[queue_num]->priv_ptr = priv;
596 	}
597 
598 	sxgbe_clear_descriptors(priv);
599 
600 	return 0;
601 
602 txalloc_err:
603 	while (queue_num--)
604 		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
605 	return ret;
606 
607 rxalloc_err:
608 	while (queue_num--)
609 		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
610 	return ret;
611 }
612 
613 static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue)
614 {
615 	int dma_desc;
616 	struct sxgbe_priv_data *priv = txqueue->priv_ptr;
617 	int tx_rsize = priv->dma_tx_size;
618 
619 	for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) {
620 		struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc;
621 
622 		if (txqueue->tx_skbuff_dma[dma_desc])
623 			dma_unmap_single(priv->device,
624 					 txqueue->tx_skbuff_dma[dma_desc],
625 					 priv->hw->desc->get_tx_len(tdesc),
626 					 DMA_TO_DEVICE);
627 
628 		dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]);
629 		txqueue->tx_skbuff[dma_desc] = NULL;
630 		txqueue->tx_skbuff_dma[dma_desc] = 0;
631 	}
632 }
633 
634 
635 static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv)
636 {
637 	int queue_num;
638 
639 	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
640 		struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
641 		tx_free_ring_skbufs(tqueue);
642 	}
643 }
644 
645 static void free_dma_desc_resources(struct sxgbe_priv_data *priv)
646 {
647 	int queue_num;
648 	int tx_rsize = priv->dma_tx_size;
649 	int rx_rsize = priv->dma_rx_size;
650 
651 	/* Release the DMA TX buffers */
652 	dma_free_tx_skbufs(priv);
653 
654 	/* Release the TX ring memory also */
655 	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
656 		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
657 	}
658 
659 	/* Release the RX ring memory also */
660 	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
661 		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
662 	}
663 }
664 
665 static int txring_mem_alloc(struct sxgbe_priv_data *priv)
666 {
667 	int queue_num;
668 
669 	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
670 		priv->txq[queue_num] = devm_kmalloc(priv->device,
671 						    sizeof(struct sxgbe_tx_queue), GFP_KERNEL);
672 		if (!priv->txq[queue_num])
673 			return -ENOMEM;
674 	}
675 
676 	return 0;
677 }
678 
679 static int rxring_mem_alloc(struct sxgbe_priv_data *priv)
680 {
681 	int queue_num;
682 
683 	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
684 		priv->rxq[queue_num] = devm_kmalloc(priv->device,
685 						    sizeof(struct sxgbe_rx_queue), GFP_KERNEL);
686 		if (!priv->rxq[queue_num])
687 			return -ENOMEM;
688 	}
689 
690 	return 0;
691 }
692 
693 /**
694  *  sxgbe_mtl_operation_mode - HW MTL operation mode
695  *  @priv: driver private structure
696  *  Description: it sets the MTL operation mode: tx/rx MTL thresholds
697  *  or Store-And-Forward capability.
698  */
699 static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
700 {
701 	int queue_num;
702 
703 	/* TX/RX threshold control */
704 	if (likely(priv->plat->force_sf_dma_mode)) {
705 		/* set TC mode for TX QUEUES */
706 		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
707 			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
708 						       SXGBE_MTL_SFMODE);
709 		priv->tx_tc = SXGBE_MTL_SFMODE;
710 
711 		/* set TC mode for RX QUEUES */
712 		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
713 			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
714 						       SXGBE_MTL_SFMODE);
715 		priv->rx_tc = SXGBE_MTL_SFMODE;
716 	} else if (unlikely(priv->plat->force_thresh_dma_mode)) {
717 		/* set TC mode for TX QUEUES */
718 		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
719 			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
720 						       priv->tx_tc);
721 		/* set TC mode for RX QUEUES */
722 		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
723 			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
724 						       priv->rx_tc);
725 	} else {
726 		pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__);
727 	}
728 }
729 
730 /**
731  * sxgbe_tx_queue_clean:
732  * @tqueue: queue pointer
733  * Description: it reclaims resources after transmission completes.
734  */
735 static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
736 {
737 	struct sxgbe_priv_data *priv = tqueue->priv_ptr;
738 	unsigned int tx_rsize = priv->dma_tx_size;
739 	struct netdev_queue *dev_txq;
740 	u8 queue_no = tqueue->queue_no;
741 
742 	dev_txq = netdev_get_tx_queue(priv->dev, queue_no);
743 
744 	__netif_tx_lock(dev_txq, smp_processor_id());
745 
746 	priv->xstats.tx_clean++;
747 	while (tqueue->dirty_tx != tqueue->cur_tx) {
748 		unsigned int entry = tqueue->dirty_tx % tx_rsize;
749 		struct sk_buff *skb = tqueue->tx_skbuff[entry];
750 		struct sxgbe_tx_norm_desc *p;
751 
752 		p = tqueue->dma_tx + entry;
753 
754 		/* Check if the descriptor is owned by the DMA. */
755 		if (priv->hw->desc->get_tx_owner(p))
756 			break;
757 
758 		if (netif_msg_tx_done(priv))
759 			pr_debug("%s: curr %d, dirty %d\n",
760 				 __func__, tqueue->cur_tx, tqueue->dirty_tx);
761 
762 		if (likely(tqueue->tx_skbuff_dma[entry])) {
763 			dma_unmap_single(priv->device,
764 					 tqueue->tx_skbuff_dma[entry],
765 					 priv->hw->desc->get_tx_len(p),
766 					 DMA_TO_DEVICE);
767 			tqueue->tx_skbuff_dma[entry] = 0;
768 		}
769 
770 		if (likely(skb)) {
771 			dev_kfree_skb(skb);
772 			tqueue->tx_skbuff[entry] = NULL;
773 		}
774 
775 		priv->hw->desc->release_tx_desc(p);
776 
777 		tqueue->dirty_tx++;
778 	}
779 
780 	/* wake up queue */
781 	if (unlikely(netif_tx_queue_stopped(dev_txq) &&
782 	    sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
783 		if (netif_msg_tx_done(priv))
784 			pr_debug("%s: restart transmit\n", __func__);
785 		netif_tx_wake_queue(dev_txq);
786 	}
787 
788 	__netif_tx_unlock(dev_txq);
789 }
790 
791 /**
792  * sxgbe_tx_clean:
793  * @priv: driver private structure
794  * Description: it reclaims resources after transmission completes.
795  */
796 static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv)
797 {
798 	u8 queue_num;
799 
800 	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
801 		struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
802 
803 		sxgbe_tx_queue_clean(tqueue);
804 	}
805 
806 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
807 		sxgbe_enable_eee_mode(priv);
808 		mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
809 	}
810 }
811 
812 /**
813  * sxgbe_restart_tx_queue: irq tx error mng function
814  * @priv: driver private structure
815  * @queue_num: queue number
816  * Description: it cleans the descriptors and restarts the transmission
817  * in case of errors.
818  */
819 static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
820 {
821 	struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num];
822 	struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev,
823 							   queue_num);
824 
825 	/* stop the queue */
826 	netif_tx_stop_queue(dev_txq);
827 
828 	/* stop the tx dma */
829 	priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num);
830 
831 	/* free the skbuffs of the ring */
832 	tx_free_ring_skbufs(tx_ring);
833 
834 	/* initialise counters */
835 	tx_ring->cur_tx = 0;
836 	tx_ring->dirty_tx = 0;
837 
838 	/* start the tx dma */
839 	priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num);
840 
841 	priv->dev->stats.tx_errors++;
842 
843 	/* wakeup the queue */
844 	netif_tx_wake_queue(dev_txq);
845 }
846 
847 /**
848  * sxgbe_reset_all_tx_queues: irq tx error mng function
849  * @priv: driver private structure
850  * Description: it cleans all the descriptors and
851  * restarts the transmission on all queues in case of errors.
852  */
853 static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv)
854 {
855 	int queue_num;
856 
857 	/* On TX timeout of net device, resetting of all queues
858 	 * may not be proper way, revisit this later if needed
859 	 */
860 	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
861 		sxgbe_restart_tx_queue(priv, queue_num);
862 }
863 
864 /**
865  * sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register.
866  * @priv: driver private structure
867  * Description:
868  *  new GMAC chip generations have a new register to indicate the
869  *  presence of the optional feature/functions.
870  *  This can be also used to override the value passed through the
871  *  platform and necessary for old MAC10/100 and GMAC chips.
872  */
873 static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
874 {
875 	int rval = 0;
876 	struct sxgbe_hw_features *features = &priv->hw_cap;
877 
878 	/* Read First Capability Register CAP[0] */
879 	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0);
880 	if (rval) {
881 		features->pmt_remote_wake_up =
882 			SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval);
883 		features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval);
884 		features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval);
885 		features->tx_csum_offload =
886 			SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval);
887 		features->rx_csum_offload =
888 			SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval);
889 		features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval);
890 		features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval);
891 		features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval);
892 		features->eee = SXGBE_HW_FEAT_EEE(rval);
893 	}
894 
895 	/* Read First Capability Register CAP[1] */
896 	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1);
897 	if (rval) {
898 		features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval);
899 		features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
900 		features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
901 		features->dcb_enable = SXGBE_HW_FEAT_DCB(rval);
902 		features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval);
903 		features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval);
904 		features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval);
905 		features->rss_enable = SXGBE_HW_FEAT_RSS(rval);
906 		features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval);
907 		features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval);
908 	}
909 
910 	/* Read First Capability Register CAP[2] */
911 	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2);
912 	if (rval) {
913 		features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval);
914 		features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval);
915 		features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval);
916 		features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval);
917 		features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval);
918 		features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval);
919 	}
920 
921 	return rval;
922 }
923 
924 /**
925  * sxgbe_check_ether_addr: check if the MAC addr is valid
926  * @priv: driver private structure
927  * Description:
928  * it is to verify if the MAC address is valid, in case of failures it
929  * generates a random MAC address
930  */
931 static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
932 {
933 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
934 		priv->hw->mac->get_umac_addr((void __iomem *)
935 					     priv->ioaddr,
936 					     priv->dev->dev_addr, 0);
937 		if (!is_valid_ether_addr(priv->dev->dev_addr))
938 			eth_hw_addr_random(priv->dev);
939 	}
940 	dev_info(priv->device, "device MAC address %pM\n",
941 		 priv->dev->dev_addr);
942 }
943 
944 /**
945  * sxgbe_init_dma_engine: DMA init.
946  * @priv: driver private structure
947  * Description:
948  * It inits the DMA invoking the specific SXGBE callback.
949  * Some DMA parameters can be passed from the platform;
950  * in case of these are not passed a default is kept for the MAC or GMAC.
951  */
952 static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv)
953 {
954 	int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0;
955 	int queue_num;
956 
957 	if (priv->plat->dma_cfg) {
958 		pbl = priv->plat->dma_cfg->pbl;
959 		fixed_burst = priv->plat->dma_cfg->fixed_burst;
960 		burst_map = priv->plat->dma_cfg->burst_map;
961 	}
962 
963 	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
964 		priv->hw->dma->cha_init(priv->ioaddr, queue_num,
965 					fixed_burst, pbl,
966 					(priv->txq[queue_num])->dma_tx_phy,
967 					(priv->rxq[queue_num])->dma_rx_phy,
968 					priv->dma_tx_size, priv->dma_rx_size);
969 
970 	return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map);
971 }
972 
973 /**
974  * sxgbe_init_mtl_engine: MTL init.
975  * @priv: driver private structure
976  * Description:
977  * It inits the MTL invoking the specific SXGBE callback.
978  */
979 static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv)
980 {
981 	int queue_num;
982 
983 	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
984 		priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num,
985 						  priv->hw_cap.tx_mtl_qsize);
986 		priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num);
987 	}
988 }
989 
990 /**
991  * sxgbe_disable_mtl_engine: MTL disable.
992  * @priv: driver private structure
993  * Description:
994  * It disables the MTL queues by invoking the specific SXGBE callback.
995  */
996 static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
997 {
998 	int queue_num;
999 
1000 	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
1001 		priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num);
1002 }
1003 
1004 
1005 /**
1006  * sxgbe_tx_timer: mitigation sw timer for tx.
1007  * @t: timer pointer
1008  * Description:
1009  * This is the timer handler to directly invoke the sxgbe_tx_clean.
1010  */
1011 static void sxgbe_tx_timer(struct timer_list *t)
1012 {
1013 	struct sxgbe_tx_queue *p = from_timer(p, t, txtimer);
1014 	sxgbe_tx_queue_clean(p);
1015 }
1016 
1017 /**
1018  * sxgbe_init_tx_coalesce: init tx mitigation options.
1019  * @priv: driver private structure
1020  * Description:
1021  * This inits the transmit coalesce parameters: i.e. timer rate,
1022  * timer handler and default threshold used for enabling the
1023  * interrupt on completion bit.
1024  */
1025 static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
1026 {
1027 	u8 queue_num;
1028 
1029 	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
1030 		struct sxgbe_tx_queue *p = priv->txq[queue_num];
1031 		p->tx_coal_frames =  SXGBE_TX_FRAMES;
1032 		p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
1033 		timer_setup(&p->txtimer, sxgbe_tx_timer, 0);
1034 		p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
1035 		add_timer(&p->txtimer);
1036 	}
1037 }
1038 
1039 static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
1040 {
1041 	u8 queue_num;
1042 
1043 	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
1044 		struct sxgbe_tx_queue *p = priv->txq[queue_num];
1045 		del_timer_sync(&p->txtimer);
1046 	}
1047 }
1048 
1049 /**
1050  *  sxgbe_open - open entry point of the driver
1051  *  @dev : pointer to the device structure.
1052  *  Description:
1053  *  This function is the open entry point of the driver.
1054  *  Return value:
1055  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1056  *  file on failure.
1057  */
1058 static int sxgbe_open(struct net_device *dev)
1059 {
1060 	struct sxgbe_priv_data *priv = netdev_priv(dev);
1061 	int ret, queue_num;
1062 
1063 	clk_prepare_enable(priv->sxgbe_clk);
1064 
1065 	sxgbe_check_ether_addr(priv);
1066 
1067 	/* Init the phy */
1068 	ret = sxgbe_init_phy(dev);
1069 	if (ret) {
1070 		netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n",
1071 			   __func__, ret);
1072 		goto phy_error;
1073 	}
1074 
1075 	/* Create and initialize the TX/RX descriptors chains. */
1076 	priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE);
1077 	priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE);
1078 	priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE);
1079 	priv->tx_tc = TC_DEFAULT;
1080 	priv->rx_tc = TC_DEFAULT;
1081 	init_dma_desc_rings(dev);
1082 
1083 	/* DMA initialization and SW reset */
1084 	ret = sxgbe_init_dma_engine(priv);
1085 	if (ret < 0) {
1086 		netdev_err(dev, "%s: DMA initialization failed\n", __func__);
1087 		goto init_error;
1088 	}
1089 
1090 	/*  MTL initialization */
1091 	sxgbe_init_mtl_engine(priv);
1092 
1093 	/* Copy the MAC addr into the HW  */
1094 	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
1095 
1096 	/* Initialize the MAC Core */
1097 	priv->hw->mac->core_init(priv->ioaddr);
1098 	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
1099 		priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num);
1100 	}
1101 
1102 	/* Request the IRQ lines */
1103 	ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
1104 			       IRQF_SHARED, dev->name, dev);
1105 	if (unlikely(ret < 0)) {
1106 		netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n",
1107 			   __func__, priv->irq, ret);
1108 		goto init_error;
1109 	}
1110 
1111 	/* If the LPI irq is different from the mac irq
1112 	 * register a dedicated handler
1113 	 */
1114 	if (priv->lpi_irq != dev->irq) {
1115 		ret = devm_request_irq(priv->device, priv->lpi_irq,
1116 				       sxgbe_common_interrupt,
1117 				       IRQF_SHARED, dev->name, dev);
1118 		if (unlikely(ret < 0)) {
1119 			netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1120 				   __func__, priv->lpi_irq, ret);
1121 			goto init_error;
1122 		}
1123 	}
1124 
1125 	/* Request TX DMA irq lines */
1126 	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
1127 		ret = devm_request_irq(priv->device,
1128 				       (priv->txq[queue_num])->irq_no,
1129 				       sxgbe_tx_interrupt, 0,
1130 				       dev->name, priv->txq[queue_num]);
1131 		if (unlikely(ret < 0)) {
1132 			netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
1133 				   __func__, priv->irq, ret);
1134 			goto init_error;
1135 		}
1136 	}
1137 
1138 	/* Request RX DMA irq lines */
1139 	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
1140 		ret = devm_request_irq(priv->device,
1141 				       (priv->rxq[queue_num])->irq_no,
1142 				       sxgbe_rx_interrupt, 0,
1143 				       dev->name, priv->rxq[queue_num]);
1144 		if (unlikely(ret < 0)) {
1145 			netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
1146 				   __func__, priv->irq, ret);
1147 			goto init_error;
1148 		}
1149 	}
1150 
1151 	/* Enable the MAC Rx/Tx */
1152 	priv->hw->mac->enable_tx(priv->ioaddr, true);
1153 	priv->hw->mac->enable_rx(priv->ioaddr, true);
1154 
1155 	/* Set the HW DMA mode and the COE */
1156 	sxgbe_mtl_operation_mode(priv);
1157 
1158 	/* Extra statistics */
1159 	memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats));
1160 
1161 	priv->xstats.tx_threshold = priv->tx_tc;
1162 	priv->xstats.rx_threshold = priv->rx_tc;
1163 
1164 	/* Start the ball rolling... */
1165 	netdev_dbg(dev, "DMA RX/TX processes started...\n");
1166 	priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
1167 	priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
1168 
1169 	if (dev->phydev)
1170 		phy_start(dev->phydev);
1171 
1172 	/* initialise TX coalesce parameters */
1173 	sxgbe_tx_init_coalesce(priv);
1174 
1175 	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1176 		priv->rx_riwt = SXGBE_MAX_DMA_RIWT;
1177 		priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT);
1178 	}
1179 
1180 	priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER;
1181 	priv->eee_enabled = sxgbe_eee_init(priv);
1182 
1183 	napi_enable(&priv->napi);
1184 	netif_start_queue(dev);
1185 
1186 	return 0;
1187 
1188 init_error:
1189 	free_dma_desc_resources(priv);
1190 	if (dev->phydev)
1191 		phy_disconnect(dev->phydev);
1192 phy_error:
1193 	clk_disable_unprepare(priv->sxgbe_clk);
1194 
1195 	return ret;
1196 }
1197 
1198 /**
1199  *  sxgbe_release - close entry point of the driver
1200  *  @dev : device pointer.
1201  *  Description:
1202  *  This is the stop entry point of the driver.
1203  */
1204 static int sxgbe_release(struct net_device *dev)
1205 {
1206 	struct sxgbe_priv_data *priv = netdev_priv(dev);
1207 
1208 	if (priv->eee_enabled)
1209 		del_timer_sync(&priv->eee_ctrl_timer);
1210 
1211 	/* Stop and disconnect the PHY */
1212 	if (dev->phydev) {
1213 		phy_stop(dev->phydev);
1214 		phy_disconnect(dev->phydev);
1215 	}
1216 
1217 	netif_tx_stop_all_queues(dev);
1218 
1219 	napi_disable(&priv->napi);
1220 
1221 	/* delete TX timers */
1222 	sxgbe_tx_del_timer(priv);
1223 
1224 	/* Stop TX/RX DMA and clear the descriptors */
1225 	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
1226 	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
1227 
1228 	/* disable MTL queue */
1229 	sxgbe_disable_mtl_engine(priv);
1230 
1231 	/* Release and free the Rx/Tx resources */
1232 	free_dma_desc_resources(priv);
1233 
1234 	/* Disable the MAC Rx/Tx */
1235 	priv->hw->mac->enable_tx(priv->ioaddr, false);
1236 	priv->hw->mac->enable_rx(priv->ioaddr, false);
1237 
1238 	clk_disable_unprepare(priv->sxgbe_clk);
1239 
1240 	return 0;
1241 }
1242 /* Prepare first Tx descriptor for doing TSO operation */
1243 static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
1244 			      struct sxgbe_tx_norm_desc *first_desc,
1245 			      struct sk_buff *skb)
1246 {
1247 	unsigned int total_hdr_len, tcp_hdr_len;
1248 
1249 	/* Write first Tx descriptor with appropriate value */
1250 	tcp_hdr_len = tcp_hdrlen(skb);
1251 	total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len;
1252 
1253 	first_desc->tdes01 = dma_map_single(priv->device, skb->data,
1254 					    total_hdr_len, DMA_TO_DEVICE);
1255 	if (dma_mapping_error(priv->device, first_desc->tdes01))
1256 		pr_err("%s: TX dma mapping failed!!\n", __func__);
1257 
1258 	first_desc->tdes23.tx_rd_des23.first_desc = 1;
1259 	priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len,
1260 					   tcp_hdr_len,
1261 					   skb->len - total_hdr_len);
1262 }
1263 
1264 /**
1265  *  sxgbe_xmit: Tx entry point of the driver
1266  *  @skb : the socket buffer
1267  *  @dev : device pointer
1268  *  Description : this is the tx entry point of the driver.
1269  *  It programs the chain or the ring and supports oversized frames
1270  *  and SG feature.
1271  */
1272 static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
1273 {
1274 	unsigned int entry, frag_num;
1275 	int cksum_flag = 0;
1276 	struct netdev_queue *dev_txq;
1277 	unsigned txq_index = skb_get_queue_mapping(skb);
1278 	struct sxgbe_priv_data *priv = netdev_priv(dev);
1279 	unsigned int tx_rsize = priv->dma_tx_size;
1280 	struct sxgbe_tx_queue *tqueue = priv->txq[txq_index];
1281 	struct sxgbe_tx_norm_desc *tx_desc, *first_desc;
1282 	struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL;
1283 	int nr_frags = skb_shinfo(skb)->nr_frags;
1284 	int no_pagedlen = skb_headlen(skb);
1285 	int is_jumbo = 0;
1286 	u16 cur_mss = skb_shinfo(skb)->gso_size;
1287 	u32 ctxt_desc_req = 0;
1288 
1289 	/* get the TX queue handle */
1290 	dev_txq = netdev_get_tx_queue(dev, txq_index);
1291 
1292 	if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
1293 		ctxt_desc_req = 1;
1294 
1295 	if (unlikely(skb_vlan_tag_present(skb) ||
1296 		     ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1297 		      tqueue->hwts_tx_en)))
1298 		ctxt_desc_req = 1;
1299 
1300 	if (priv->tx_path_in_lpi_mode)
1301 		sxgbe_disable_eee_mode(priv);
1302 
1303 	if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) {
1304 		if (!netif_tx_queue_stopped(dev_txq)) {
1305 			netif_tx_stop_queue(dev_txq);
1306 			netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
1307 				   __func__, txq_index);
1308 		}
1309 		return NETDEV_TX_BUSY;
1310 	}
1311 
1312 	entry = tqueue->cur_tx % tx_rsize;
1313 	tx_desc = tqueue->dma_tx + entry;
1314 
1315 	first_desc = tx_desc;
1316 	if (ctxt_desc_req)
1317 		ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc;
1318 
1319 	/* save the skb address */
1320 	tqueue->tx_skbuff[entry] = skb;
1321 
1322 	if (!is_jumbo) {
1323 		if (likely(skb_is_gso(skb))) {
1324 			/* TSO support */
1325 			if (unlikely(tqueue->prev_mss != cur_mss)) {
1326 				priv->hw->desc->tx_ctxt_desc_set_mss(
1327 						ctxt_desc, cur_mss);
1328 				priv->hw->desc->tx_ctxt_desc_set_tcmssv(
1329 						ctxt_desc);
1330 				priv->hw->desc->tx_ctxt_desc_reset_ostc(
1331 						ctxt_desc);
1332 				priv->hw->desc->tx_ctxt_desc_set_ctxt(
1333 						ctxt_desc);
1334 				priv->hw->desc->tx_ctxt_desc_set_owner(
1335 						ctxt_desc);
1336 
1337 				entry = (++tqueue->cur_tx) % tx_rsize;
1338 				first_desc = tqueue->dma_tx + entry;
1339 
1340 				tqueue->prev_mss = cur_mss;
1341 			}
1342 			sxgbe_tso_prepare(priv, first_desc, skb);
1343 		} else {
1344 			tx_desc->tdes01 = dma_map_single(priv->device,
1345 							 skb->data, no_pagedlen, DMA_TO_DEVICE);
1346 			if (dma_mapping_error(priv->device, tx_desc->tdes01))
1347 				netdev_err(dev, "%s: TX dma mapping failed!!\n",
1348 					   __func__);
1349 
1350 			priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
1351 							no_pagedlen, cksum_flag);
1352 		}
1353 	}
1354 
1355 	for (frag_num = 0; frag_num < nr_frags; frag_num++) {
1356 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
1357 		int len = skb_frag_size(frag);
1358 
1359 		entry = (++tqueue->cur_tx) % tx_rsize;
1360 		tx_desc = tqueue->dma_tx + entry;
1361 		tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len,
1362 						   DMA_TO_DEVICE);
1363 
1364 		tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01;
1365 		tqueue->tx_skbuff[entry] = NULL;
1366 
1367 		/* prepare the descriptor */
1368 		priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
1369 						len, cksum_flag);
1370 		/* memory barrier to flush descriptor */
1371 		wmb();
1372 
1373 		/* set the owner */
1374 		priv->hw->desc->set_tx_owner(tx_desc);
1375 	}
1376 
1377 	/* close the descriptors */
1378 	priv->hw->desc->close_tx_desc(tx_desc);
1379 
1380 	/* memory barrier to flush descriptor */
1381 	wmb();
1382 
1383 	tqueue->tx_count_frames += nr_frags + 1;
1384 	if (tqueue->tx_count_frames > tqueue->tx_coal_frames) {
1385 		priv->hw->desc->clear_tx_ic(tx_desc);
1386 		priv->xstats.tx_reset_ic_bit++;
1387 		mod_timer(&tqueue->txtimer,
1388 			  SXGBE_COAL_TIMER(tqueue->tx_coal_timer));
1389 	} else {
1390 		tqueue->tx_count_frames = 0;
1391 	}
1392 
1393 	/* set owner for first desc */
1394 	priv->hw->desc->set_tx_owner(first_desc);
1395 
1396 	/* memory barrier to flush descriptor */
1397 	wmb();
1398 
1399 	tqueue->cur_tx++;
1400 
1401 	/* display current ring */
1402 	netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
1403 		  __func__, tqueue->cur_tx % tx_rsize,
1404 		  tqueue->dirty_tx % tx_rsize, entry,
1405 		  first_desc, nr_frags);
1406 
1407 	if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) {
1408 		netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n",
1409 			  __func__);
1410 		netif_tx_stop_queue(dev_txq);
1411 	}
1412 
1413 	dev->stats.tx_bytes += skb->len;
1414 
1415 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1416 		     tqueue->hwts_tx_en)) {
1417 		/* declare that device is doing timestamping */
1418 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1419 		priv->hw->desc->tx_enable_tstamp(first_desc);
1420 	}
1421 
1422 	skb_tx_timestamp(skb);
1423 
1424 	priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);
1425 
1426 	return NETDEV_TX_OK;
1427 }
1428 
1429 /**
1430  * sxgbe_rx_refill: refill used skb preallocated buffers
1431  * @priv: driver private structure
1432  * Description : this is to reallocate the skb for the reception process
1433  * that is based on zero-copy.
1434  */
1435 static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
1436 {
1437 	unsigned int rxsize = priv->dma_rx_size;
1438 	int bfsize = priv->dma_buf_sz;
1439 	u8 qnum = priv->cur_rx_qnum;
1440 
1441 	for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
1442 	     priv->rxq[qnum]->dirty_rx++) {
1443 		unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
1444 		struct sxgbe_rx_norm_desc *p;
1445 
1446 		p = priv->rxq[qnum]->dma_rx + entry;
1447 
1448 		if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
1449 			struct sk_buff *skb;
1450 
1451 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
1452 
1453 			if (unlikely(skb == NULL))
1454 				break;
1455 
1456 			priv->rxq[qnum]->rx_skbuff[entry] = skb;
1457 			priv->rxq[qnum]->rx_skbuff_dma[entry] =
1458 				dma_map_single(priv->device, skb->data, bfsize,
1459 					       DMA_FROM_DEVICE);
1460 
1461 			p->rdes23.rx_rd_des23.buf2_addr =
1462 				priv->rxq[qnum]->rx_skbuff_dma[entry];
1463 		}
1464 
1465 		/* Added memory barrier for RX descriptor modification */
1466 		wmb();
1467 		priv->hw->desc->set_rx_owner(p);
1468 		priv->hw->desc->set_rx_int_on_com(p);
1469 		/* Added memory barrier for RX descriptor modification */
1470 		wmb();
1471 	}
1472 }
1473 
1474 /**
1475  * sxgbe_rx: receive the frames from the remote host
1476  * @priv: driver private structure
1477  * @limit: napi bugget.
1478  * Description :  this the function called by the napi poll method.
1479  * It gets all the frames inside the ring.
1480  */
1481 static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
1482 {
1483 	u8 qnum = priv->cur_rx_qnum;
1484 	unsigned int rxsize = priv->dma_rx_size;
1485 	unsigned int entry = priv->rxq[qnum]->cur_rx;
1486 	unsigned int next_entry = 0;
1487 	unsigned int count = 0;
1488 	int checksum;
1489 	int status;
1490 
1491 	while (count < limit) {
1492 		struct sxgbe_rx_norm_desc *p;
1493 		struct sk_buff *skb;
1494 		int frame_len;
1495 
1496 		p = priv->rxq[qnum]->dma_rx + entry;
1497 
1498 		if (priv->hw->desc->get_rx_owner(p))
1499 			break;
1500 
1501 		count++;
1502 
1503 		next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
1504 		prefetch(priv->rxq[qnum]->dma_rx + next_entry);
1505 
1506 		/* Read the status of the incoming frame and also get checksum
1507 		 * value based on whether it is enabled in SXGBE hardware or
1508 		 * not.
1509 		 */
1510 		status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
1511 						     &checksum);
1512 		if (unlikely(status < 0)) {
1513 			entry = next_entry;
1514 			continue;
1515 		}
1516 		if (unlikely(!priv->rxcsum_insertion))
1517 			checksum = CHECKSUM_NONE;
1518 
1519 		skb = priv->rxq[qnum]->rx_skbuff[entry];
1520 
1521 		if (unlikely(!skb))
1522 			netdev_err(priv->dev, "rx descriptor is not consistent\n");
1523 
1524 		prefetch(skb->data - NET_IP_ALIGN);
1525 		priv->rxq[qnum]->rx_skbuff[entry] = NULL;
1526 
1527 		frame_len = priv->hw->desc->get_rx_frame_len(p);
1528 
1529 		skb_put(skb, frame_len);
1530 
1531 		skb->ip_summed = checksum;
1532 		if (checksum == CHECKSUM_NONE)
1533 			netif_receive_skb(skb);
1534 		else
1535 			napi_gro_receive(&priv->napi, skb);
1536 
1537 		entry = next_entry;
1538 	}
1539 
1540 	sxgbe_rx_refill(priv);
1541 
1542 	return count;
1543 }
1544 
1545 /**
1546  *  sxgbe_poll - sxgbe poll method (NAPI)
1547  *  @napi : pointer to the napi structure.
1548  *  @budget : maximum number of packets that the current CPU can receive from
1549  *	      all interfaces.
1550  *  Description :
1551  *  To look at the incoming frames and clear the tx resources.
1552  */
1553 static int sxgbe_poll(struct napi_struct *napi, int budget)
1554 {
1555 	struct sxgbe_priv_data *priv = container_of(napi,
1556 						    struct sxgbe_priv_data, napi);
1557 	int work_done = 0;
1558 	u8 qnum = priv->cur_rx_qnum;
1559 
1560 	priv->xstats.napi_poll++;
1561 	/* first, clean the tx queues */
1562 	sxgbe_tx_all_clean(priv);
1563 
1564 	work_done = sxgbe_rx(priv, budget);
1565 	if (work_done < budget) {
1566 		napi_complete_done(napi, work_done);
1567 		priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
1568 	}
1569 
1570 	return work_done;
1571 }
1572 
1573 /**
1574  *  sxgbe_tx_timeout
1575  *  @dev : Pointer to net device structure
1576  *  @txqueue: index of the hanging queue
1577  *  Description: this function is called when a packet transmission fails to
1578  *   complete within a reasonable time. The driver will mark the error in the
1579  *   netdev structure and arrange for the device to be reset to a sane state
1580  *   in order to transmit a new packet.
1581  */
1582 static void sxgbe_tx_timeout(struct net_device *dev, unsigned int txqueue)
1583 {
1584 	struct sxgbe_priv_data *priv = netdev_priv(dev);
1585 
1586 	sxgbe_reset_all_tx_queues(priv);
1587 }
1588 
1589 /**
1590  *  sxgbe_common_interrupt - main ISR
1591  *  @irq: interrupt number.
1592  *  @dev_id: to pass the net device pointer.
1593  *  Description: this is the main driver interrupt service routine.
1594  *  It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
1595  *  interrupts.
1596  */
1597 static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id)
1598 {
1599 	struct net_device *netdev = (struct net_device *)dev_id;
1600 	struct sxgbe_priv_data *priv = netdev_priv(netdev);
1601 	int status;
1602 
1603 	status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats);
1604 	/* For LPI we need to save the tx status */
1605 	if (status & TX_ENTRY_LPI_MODE) {
1606 		priv->xstats.tx_lpi_entry_n++;
1607 		priv->tx_path_in_lpi_mode = true;
1608 	}
1609 	if (status & TX_EXIT_LPI_MODE) {
1610 		priv->xstats.tx_lpi_exit_n++;
1611 		priv->tx_path_in_lpi_mode = false;
1612 	}
1613 	if (status & RX_ENTRY_LPI_MODE)
1614 		priv->xstats.rx_lpi_entry_n++;
1615 	if (status & RX_EXIT_LPI_MODE)
1616 		priv->xstats.rx_lpi_exit_n++;
1617 
1618 	return IRQ_HANDLED;
1619 }
1620 
1621 /**
1622  *  sxgbe_tx_interrupt - TX DMA ISR
1623  *  @irq: interrupt number.
1624  *  @dev_id: to pass the net device pointer.
1625  *  Description: this is the tx dma interrupt service routine.
1626  */
1627 static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id)
1628 {
1629 	int status;
1630 	struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id;
1631 	struct sxgbe_priv_data *priv = txq->priv_ptr;
1632 
1633 	/* get the channel status */
1634 	status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no,
1635 						  &priv->xstats);
1636 	/* check for normal path */
1637 	if (likely((status & handle_tx)))
1638 		napi_schedule(&priv->napi);
1639 
1640 	/* check for unrecoverable error */
1641 	if (unlikely((status & tx_hard_error)))
1642 		sxgbe_restart_tx_queue(priv, txq->queue_no);
1643 
1644 	/* check for TC configuration change */
1645 	if (unlikely((status & tx_bump_tc) &&
1646 		     (priv->tx_tc != SXGBE_MTL_SFMODE) &&
1647 		     (priv->tx_tc < 512))) {
1648 		/* step of TX TC is 32 till 128, otherwise 64 */
1649 		priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64;
1650 		priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr,
1651 					       txq->queue_no, priv->tx_tc);
1652 		priv->xstats.tx_threshold = priv->tx_tc;
1653 	}
1654 
1655 	return IRQ_HANDLED;
1656 }
1657 
1658 /**
1659  *  sxgbe_rx_interrupt - RX DMA ISR
1660  *  @irq: interrupt number.
1661  *  @dev_id: to pass the net device pointer.
1662  *  Description: this is the rx dma interrupt service routine.
1663  */
1664 static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id)
1665 {
1666 	int status;
1667 	struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
1668 	struct sxgbe_priv_data *priv = rxq->priv_ptr;
1669 
1670 	/* get the channel status */
1671 	status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
1672 						  &priv->xstats);
1673 
1674 	if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) {
1675 		priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
1676 		__napi_schedule(&priv->napi);
1677 	}
1678 
1679 	/* check for TC configuration change */
1680 	if (unlikely((status & rx_bump_tc) &&
1681 		     (priv->rx_tc != SXGBE_MTL_SFMODE) &&
1682 		     (priv->rx_tc < 128))) {
1683 		/* step of TC is 32 */
1684 		priv->rx_tc += 32;
1685 		priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr,
1686 					       rxq->queue_no, priv->rx_tc);
1687 		priv->xstats.rx_threshold = priv->rx_tc;
1688 	}
1689 
1690 	return IRQ_HANDLED;
1691 }
1692 
1693 static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
1694 {
1695 	u64 val = readl(ioaddr + reg_lo);
1696 
1697 	val |= ((u64)readl(ioaddr + reg_hi)) << 32;
1698 
1699 	return val;
1700 }
1701 
1702 
1703 /*  sxgbe_get_stats64 - entry point to see statistical information of device
1704  *  @dev : device pointer.
1705  *  @stats : pointer to hold all the statistical information of device.
1706  *  Description:
1707  *  This function is a driver entry point whenever ifconfig command gets
1708  *  executed to see device statistics. Statistics are number of
1709  *  bytes sent or received, errors occurred etc.
1710  */
1711 static void sxgbe_get_stats64(struct net_device *dev,
1712 			      struct rtnl_link_stats64 *stats)
1713 {
1714 	struct sxgbe_priv_data *priv = netdev_priv(dev);
1715 	void __iomem *ioaddr = priv->ioaddr;
1716 	u64 count;
1717 
1718 	spin_lock(&priv->stats_lock);
1719 	/* Freeze the counter registers before reading value otherwise it may
1720 	 * get updated by hardware while we are reading them
1721 	 */
1722 	writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);
1723 
1724 	stats->rx_bytes = sxgbe_get_stat64(ioaddr,
1725 					   SXGBE_MMC_RXOCTETLO_GCNT_REG,
1726 					   SXGBE_MMC_RXOCTETHI_GCNT_REG);
1727 
1728 	stats->rx_packets = sxgbe_get_stat64(ioaddr,
1729 					     SXGBE_MMC_RXFRAMELO_GBCNT_REG,
1730 					     SXGBE_MMC_RXFRAMEHI_GBCNT_REG);
1731 
1732 	stats->multicast = sxgbe_get_stat64(ioaddr,
1733 					    SXGBE_MMC_RXMULTILO_GCNT_REG,
1734 					    SXGBE_MMC_RXMULTIHI_GCNT_REG);
1735 
1736 	stats->rx_crc_errors = sxgbe_get_stat64(ioaddr,
1737 						SXGBE_MMC_RXCRCERRLO_REG,
1738 						SXGBE_MMC_RXCRCERRHI_REG);
1739 
1740 	stats->rx_length_errors = sxgbe_get_stat64(ioaddr,
1741 						  SXGBE_MMC_RXLENERRLO_REG,
1742 						  SXGBE_MMC_RXLENERRHI_REG);
1743 
1744 	stats->rx_missed_errors = sxgbe_get_stat64(ioaddr,
1745 						   SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG,
1746 						   SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG);
1747 
1748 	stats->tx_bytes = sxgbe_get_stat64(ioaddr,
1749 					   SXGBE_MMC_TXOCTETLO_GCNT_REG,
1750 					   SXGBE_MMC_TXOCTETHI_GCNT_REG);
1751 
1752 	count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG,
1753 				 SXGBE_MMC_TXFRAMEHI_GBCNT_REG);
1754 
1755 	stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG,
1756 					    SXGBE_MMC_TXFRAMEHI_GCNT_REG);
1757 	stats->tx_errors = count - stats->tx_errors;
1758 	stats->tx_packets = count;
1759 	stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG,
1760 						 SXGBE_MMC_TXUFLWHI_GBCNT_REG);
1761 	writel(0, ioaddr + SXGBE_MMC_CTL_REG);
1762 	spin_unlock(&priv->stats_lock);
1763 }
1764 
1765 /*  sxgbe_set_features - entry point to set offload features of the device.
1766  *  @dev : device pointer.
1767  *  @features : features which are required to be set.
1768  *  Description:
1769  *  This function is a driver entry point and called by Linux kernel whenever
1770  *  any device features are set or reset by user.
1771  *  Return value:
1772  *  This function returns 0 after setting or resetting device features.
1773  */
1774 static int sxgbe_set_features(struct net_device *dev,
1775 			      netdev_features_t features)
1776 {
1777 	struct sxgbe_priv_data *priv = netdev_priv(dev);
1778 	netdev_features_t changed = dev->features ^ features;
1779 
1780 	if (changed & NETIF_F_RXCSUM) {
1781 		if (features & NETIF_F_RXCSUM) {
1782 			priv->hw->mac->enable_rx_csum(priv->ioaddr);
1783 			priv->rxcsum_insertion = true;
1784 		} else {
1785 			priv->hw->mac->disable_rx_csum(priv->ioaddr);
1786 			priv->rxcsum_insertion = false;
1787 		}
1788 	}
1789 
1790 	return 0;
1791 }
1792 
1793 /*  sxgbe_change_mtu - entry point to change MTU size for the device.
1794  *  @dev : device pointer.
1795  *  @new_mtu : the new MTU size for the device.
1796  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
1797  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
1798  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
1799  *  Return value:
1800  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1801  *  file on failure.
1802  */
1803 static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
1804 {
1805 	dev->mtu = new_mtu;
1806 
1807 	if (!netif_running(dev))
1808 		return 0;
1809 
1810 	/* Recevice ring buffer size is needed to be set based on MTU. If MTU is
1811 	 * changed then reinitilisation of the receive ring buffers need to be
1812 	 * done. Hence bring interface down and bring interface back up
1813 	 */
1814 	sxgbe_release(dev);
1815 	return sxgbe_open(dev);
1816 }
1817 
1818 static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
1819 				unsigned int reg_n)
1820 {
1821 	unsigned long data;
1822 
1823 	data = (addr[5] << 8) | addr[4];
1824 	/* For MAC Addr registers se have to set the Address Enable (AE)
1825 	 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
1826 	 * is RO.
1827 	 */
1828 	writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
1829 	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
1830 	writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
1831 }
1832 
1833 /**
1834  * sxgbe_set_rx_mode - entry point for setting different receive mode of
1835  * a device. unicast, multicast addressing
1836  * @dev : pointer to the device structure
1837  * Description:
1838  * This function is a driver entry point which gets called by the kernel
1839  * whenever different receive mode like unicast, multicast and promiscuous
1840  * must be enabled/disabled.
1841  * Return value:
1842  * void.
1843  */
1844 static void sxgbe_set_rx_mode(struct net_device *dev)
1845 {
1846 	struct sxgbe_priv_data *priv = netdev_priv(dev);
1847 	void __iomem *ioaddr = (void __iomem *)priv->ioaddr;
1848 	unsigned int value = 0;
1849 	u32 mc_filter[2];
1850 	struct netdev_hw_addr *ha;
1851 	int reg = 1;
1852 
1853 	netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
1854 		   __func__, netdev_mc_count(dev), netdev_uc_count(dev));
1855 
1856 	if (dev->flags & IFF_PROMISC) {
1857 		value = SXGBE_FRAME_FILTER_PR;
1858 
1859 	} else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) ||
1860 		   (dev->flags & IFF_ALLMULTI)) {
1861 		value = SXGBE_FRAME_FILTER_PM;	/* pass all multi */
1862 		writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
1863 		writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);
1864 
1865 	} else if (!netdev_mc_empty(dev)) {
1866 		/* Hash filter for multicast */
1867 		value = SXGBE_FRAME_FILTER_HMC;
1868 
1869 		memset(mc_filter, 0, sizeof(mc_filter));
1870 		netdev_for_each_mc_addr(ha, dev) {
1871 			/* The upper 6 bits of the calculated CRC are used to
1872 			 * index the contens of the hash table
1873 			 */
1874 			int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
1875 
1876 			/* The most significant bit determines the register to
1877 			 * use (H/L) while the other 5 bits determine the bit
1878 			 * within the register.
1879 			 */
1880 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1881 		}
1882 		writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
1883 		writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
1884 	}
1885 
1886 	/* Handle multiple unicast addresses (perfect filtering) */
1887 	if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES)
1888 		/* Switch to promiscuous mode if more than 16 addrs
1889 		 * are required
1890 		 */
1891 		value |= SXGBE_FRAME_FILTER_PR;
1892 	else {
1893 		netdev_for_each_uc_addr(ha, dev) {
1894 			sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
1895 			reg++;
1896 		}
1897 	}
1898 #ifdef FRAME_FILTER_DEBUG
1899 	/* Enable Receive all mode (to debug filtering_fail errors) */
1900 	value |= SXGBE_FRAME_FILTER_RA;
1901 #endif
1902 	writel(value, ioaddr + SXGBE_FRAME_FILTER);
1903 
1904 	netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
1905 		   readl(ioaddr + SXGBE_FRAME_FILTER),
1906 		   readl(ioaddr + SXGBE_HASH_HIGH),
1907 		   readl(ioaddr + SXGBE_HASH_LOW));
1908 }
1909 
1910 #ifdef CONFIG_NET_POLL_CONTROLLER
1911 /**
1912  * sxgbe_poll_controller - entry point for polling receive by device
1913  * @dev : pointer to the device structure
1914  * Description:
1915  * This function is used by NETCONSOLE and other diagnostic tools
1916  * to allow network I/O with interrupts disabled.
1917  * Return value:
1918  * Void.
1919  */
1920 static void sxgbe_poll_controller(struct net_device *dev)
1921 {
1922 	struct sxgbe_priv_data *priv = netdev_priv(dev);
1923 
1924 	disable_irq(priv->irq);
1925 	sxgbe_rx_interrupt(priv->irq, dev);
1926 	enable_irq(priv->irq);
1927 }
1928 #endif
1929 
1930 /*  sxgbe_ioctl - Entry point for the Ioctl
1931  *  @dev: Device pointer.
1932  *  @rq: An IOCTL specefic structure, that can contain a pointer to
1933  *  a proprietary structure used to pass information to the driver.
1934  *  @cmd: IOCTL command
1935  *  Description:
1936  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
1937  */
1938 static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1939 {
1940 	int ret = -EOPNOTSUPP;
1941 
1942 	if (!netif_running(dev))
1943 		return -EINVAL;
1944 
1945 	switch (cmd) {
1946 	case SIOCGMIIPHY:
1947 	case SIOCGMIIREG:
1948 	case SIOCSMIIREG:
1949 		ret = phy_do_ioctl(dev, rq, cmd);
1950 		break;
1951 	default:
1952 		break;
1953 	}
1954 
1955 	return ret;
1956 }
1957 
1958 static const struct net_device_ops sxgbe_netdev_ops = {
1959 	.ndo_open		= sxgbe_open,
1960 	.ndo_start_xmit		= sxgbe_xmit,
1961 	.ndo_stop		= sxgbe_release,
1962 	.ndo_get_stats64	= sxgbe_get_stats64,
1963 	.ndo_change_mtu		= sxgbe_change_mtu,
1964 	.ndo_set_features	= sxgbe_set_features,
1965 	.ndo_set_rx_mode	= sxgbe_set_rx_mode,
1966 	.ndo_tx_timeout		= sxgbe_tx_timeout,
1967 	.ndo_do_ioctl		= sxgbe_ioctl,
1968 #ifdef CONFIG_NET_POLL_CONTROLLER
1969 	.ndo_poll_controller	= sxgbe_poll_controller,
1970 #endif
1971 	.ndo_set_mac_address	= eth_mac_addr,
1972 };
1973 
1974 /* Get the hardware ops */
1975 static void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr)
1976 {
1977 	ops_ptr->mac		= sxgbe_get_core_ops();
1978 	ops_ptr->desc		= sxgbe_get_desc_ops();
1979 	ops_ptr->dma		= sxgbe_get_dma_ops();
1980 	ops_ptr->mtl		= sxgbe_get_mtl_ops();
1981 
1982 	/* set the MDIO communication Address/Data regisers */
1983 	ops_ptr->mii.addr	= SXGBE_MDIO_SCMD_ADD_REG;
1984 	ops_ptr->mii.data	= SXGBE_MDIO_SCMD_DATA_REG;
1985 
1986 	/* Assigning the default link settings
1987 	 * no SXGBE defined default values to be set in registers,
1988 	 * so assigning as 0 for port and duplex
1989 	 */
1990 	ops_ptr->link.port	= 0;
1991 	ops_ptr->link.duplex	= 0;
1992 	ops_ptr->link.speed	= SXGBE_SPEED_10G;
1993 }
1994 
1995 /**
1996  *  sxgbe_hw_init - Init the GMAC device
1997  *  @priv: driver private structure
1998  *  Description: this function checks the HW capability
1999  *  (if supported) and sets the driver's features.
2000  */
2001 static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
2002 {
2003 	u32 ctrl_ids;
2004 
2005 	priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
2006 	if(!priv->hw)
2007 		return -ENOMEM;
2008 
2009 	/* get the hardware ops */
2010 	sxgbe_get_ops(priv->hw);
2011 
2012 	/* get the controller id */
2013 	ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr);
2014 	priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;
2015 	priv->hw->ctrl_id = (ctrl_ids & 0x000000ff);
2016 	pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
2017 		priv->hw->ctrl_uid, priv->hw->ctrl_id);
2018 
2019 	/* get the H/W features */
2020 	if (!sxgbe_get_hw_features(priv))
2021 		pr_info("Hardware features not found\n");
2022 
2023 	if (priv->hw_cap.tx_csum_offload)
2024 		pr_info("TX Checksum offload supported\n");
2025 
2026 	if (priv->hw_cap.rx_csum_offload)
2027 		pr_info("RX Checksum offload supported\n");
2028 
2029 	return 0;
2030 }
2031 
2032 static int sxgbe_sw_reset(void __iomem *addr)
2033 {
2034 	int retry_count = 10;
2035 
2036 	writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG);
2037 	while (retry_count--) {
2038 		if (!(readl(addr + SXGBE_DMA_MODE_REG) &
2039 		      SXGBE_DMA_SOFT_RESET))
2040 			break;
2041 		mdelay(10);
2042 	}
2043 
2044 	if (retry_count < 0)
2045 		return -EBUSY;
2046 
2047 	return 0;
2048 }
2049 
2050 /**
2051  * sxgbe_drv_probe
2052  * @device: device pointer
2053  * @plat_dat: platform data pointer
2054  * @addr: iobase memory address
2055  * Description: this is the main probe function used to
2056  * call the alloc_etherdev, allocate the priv structure.
2057  */
2058 struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
2059 					struct sxgbe_plat_data *plat_dat,
2060 					void __iomem *addr)
2061 {
2062 	struct sxgbe_priv_data *priv;
2063 	struct net_device *ndev;
2064 	int ret;
2065 	u8 queue_num;
2066 
2067 	ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
2068 				  SXGBE_TX_QUEUES, SXGBE_RX_QUEUES);
2069 	if (!ndev)
2070 		return NULL;
2071 
2072 	SET_NETDEV_DEV(ndev, device);
2073 
2074 	priv = netdev_priv(ndev);
2075 	priv->device = device;
2076 	priv->dev = ndev;
2077 
2078 	sxgbe_set_ethtool_ops(ndev);
2079 	priv->plat = plat_dat;
2080 	priv->ioaddr = addr;
2081 
2082 	ret = sxgbe_sw_reset(priv->ioaddr);
2083 	if (ret)
2084 		goto error_free_netdev;
2085 
2086 	/* Verify driver arguments */
2087 	sxgbe_verify_args();
2088 
2089 	/* Init MAC and get the capabilities */
2090 	ret = sxgbe_hw_init(priv);
2091 	if (ret)
2092 		goto error_free_netdev;
2093 
2094 	/* allocate memory resources for Descriptor rings */
2095 	ret = txring_mem_alloc(priv);
2096 	if (ret)
2097 		goto error_free_hw;
2098 
2099 	ret = rxring_mem_alloc(priv);
2100 	if (ret)
2101 		goto error_free_hw;
2102 
2103 	ndev->netdev_ops = &sxgbe_netdev_ops;
2104 
2105 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2106 		NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 |
2107 		NETIF_F_GRO;
2108 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
2109 	ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);
2110 
2111 	/* assign filtering support */
2112 	ndev->priv_flags |= IFF_UNICAST_FLT;
2113 
2114 	/* MTU range: 68 - 9000 */
2115 	ndev->min_mtu = MIN_MTU;
2116 	ndev->max_mtu = MAX_MTU;
2117 
2118 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
2119 
2120 	/* Enable TCP segmentation offload for all DMA channels */
2121 	if (priv->hw_cap.tcpseg_offload) {
2122 		SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
2123 			priv->hw->dma->enable_tso(priv->ioaddr, queue_num);
2124 		}
2125 	}
2126 
2127 	/* Enable Rx checksum offload */
2128 	if (priv->hw_cap.rx_csum_offload) {
2129 		priv->hw->mac->enable_rx_csum(priv->ioaddr);
2130 		priv->rxcsum_insertion = true;
2131 	}
2132 
2133 	/* Initialise pause frame settings */
2134 	priv->rx_pause = 1;
2135 	priv->tx_pause = 1;
2136 
2137 	/* Rx Watchdog is available, enable depend on platform data */
2138 	if (!priv->plat->riwt_off) {
2139 		priv->use_riwt = 1;
2140 		pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
2141 	}
2142 
2143 	netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);
2144 
2145 	spin_lock_init(&priv->stats_lock);
2146 
2147 	priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
2148 	if (IS_ERR(priv->sxgbe_clk)) {
2149 		netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
2150 			    __func__);
2151 		goto error_napi_del;
2152 	}
2153 
2154 	/* If a specific clk_csr value is passed from the platform
2155 	 * this means that the CSR Clock Range selection cannot be
2156 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
2157 	 * set the MDC clock dynamically according to the csr actual
2158 	 * clock input.
2159 	 */
2160 	if (!priv->plat->clk_csr)
2161 		sxgbe_clk_csr_set(priv);
2162 	else
2163 		priv->clk_csr = priv->plat->clk_csr;
2164 
2165 	/* MDIO bus Registration */
2166 	ret = sxgbe_mdio_register(ndev);
2167 	if (ret < 0) {
2168 		netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
2169 			   __func__, priv->plat->bus_id);
2170 		goto error_clk_put;
2171 	}
2172 
2173 	ret = register_netdev(ndev);
2174 	if (ret) {
2175 		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
2176 		goto error_mdio_unregister;
2177 	}
2178 
2179 	sxgbe_check_ether_addr(priv);
2180 
2181 	return priv;
2182 
2183 error_mdio_unregister:
2184 	sxgbe_mdio_unregister(ndev);
2185 error_clk_put:
2186 	clk_put(priv->sxgbe_clk);
2187 error_napi_del:
2188 	netif_napi_del(&priv->napi);
2189 error_free_hw:
2190 	kfree(priv->hw);
2191 error_free_netdev:
2192 	free_netdev(ndev);
2193 
2194 	return NULL;
2195 }
2196 
2197 /**
2198  * sxgbe_drv_remove
2199  * @ndev: net device pointer
2200  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
2201  * changes the link status, releases the DMA descriptor rings.
2202  */
2203 int sxgbe_drv_remove(struct net_device *ndev)
2204 {
2205 	struct sxgbe_priv_data *priv = netdev_priv(ndev);
2206 	u8 queue_num;
2207 
2208 	netdev_info(ndev, "%s: removing driver\n", __func__);
2209 
2210 	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
2211 		priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num);
2212 	}
2213 
2214 	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
2215 	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
2216 
2217 	priv->hw->mac->enable_tx(priv->ioaddr, false);
2218 	priv->hw->mac->enable_rx(priv->ioaddr, false);
2219 
2220 	unregister_netdev(ndev);
2221 
2222 	sxgbe_mdio_unregister(ndev);
2223 
2224 	clk_put(priv->sxgbe_clk);
2225 
2226 	netif_napi_del(&priv->napi);
2227 
2228 	kfree(priv->hw);
2229 
2230 	free_netdev(ndev);
2231 
2232 	return 0;
2233 }
2234 
2235 #ifdef CONFIG_PM
2236 int sxgbe_suspend(struct net_device *ndev)
2237 {
2238 	return 0;
2239 }
2240 
2241 int sxgbe_resume(struct net_device *ndev)
2242 {
2243 	return 0;
2244 }
2245 
2246 int sxgbe_freeze(struct net_device *ndev)
2247 {
2248 	return -ENOSYS;
2249 }
2250 
2251 int sxgbe_restore(struct net_device *ndev)
2252 {
2253 	return -ENOSYS;
2254 }
2255 #endif /* CONFIG_PM */
2256 
2257 /* Driver is configured as Platform driver */
2258 static int __init sxgbe_init(void)
2259 {
2260 	int ret;
2261 
2262 	ret = sxgbe_register_platform();
2263 	if (ret)
2264 		goto err;
2265 	return 0;
2266 err:
2267 	pr_err("driver registration failed\n");
2268 	return ret;
2269 }
2270 
2271 static void __exit sxgbe_exit(void)
2272 {
2273 	sxgbe_unregister_platform();
2274 }
2275 
2276 module_init(sxgbe_init);
2277 module_exit(sxgbe_exit);
2278 
2279 #ifndef MODULE
2280 static int __init sxgbe_cmdline_opt(char *str)
2281 {
2282 	char *opt;
2283 
2284 	if (!str || !*str)
2285 		return -EINVAL;
2286 	while ((opt = strsep(&str, ",")) != NULL) {
2287 		if (!strncmp(opt, "eee_timer:", 10)) {
2288 			if (kstrtoint(opt + 10, 0, &eee_timer))
2289 				goto err;
2290 		}
2291 	}
2292 	return 0;
2293 
2294 err:
2295 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
2296 	return -EINVAL;
2297 }
2298 
2299 __setup("sxgbeeth=", sxgbe_cmdline_opt);
2300 #endif /* MODULE */
2301 
2302 
2303 
2304 MODULE_DESCRIPTION("Samsung 10G/2.5G/1G Ethernet PLATFORM driver");
2305 
2306 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
2307 MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");
2308 
2309 MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
2310 MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
2311 MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
2312 MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");
2313 
2314 MODULE_LICENSE("GPL");
2315