14fa9c49fSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
27ac6653aSJeff Kirsher /*******************************************************************************
37ac6653aSJeff Kirsher   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
47ac6653aSJeff Kirsher   ST Ethernet IPs are built around a Synopsys IP Core.
57ac6653aSJeff Kirsher 
6286a8372SGiuseppe CAVALLARO 	Copyright(C) 2007-2011 STMicroelectronics Ltd
77ac6653aSJeff Kirsher 
87ac6653aSJeff Kirsher 
97ac6653aSJeff Kirsher   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
107ac6653aSJeff Kirsher 
117ac6653aSJeff Kirsher   Documentation available at:
127ac6653aSJeff Kirsher 	http://www.stlinux.com
137ac6653aSJeff Kirsher   Support available at:
147ac6653aSJeff Kirsher 	https://bugzilla.stlinux.com/
157ac6653aSJeff Kirsher *******************************************************************************/
167ac6653aSJeff Kirsher 
176a81c26fSViresh Kumar #include <linux/clk.h>
187ac6653aSJeff Kirsher #include <linux/kernel.h>
197ac6653aSJeff Kirsher #include <linux/interrupt.h>
207ac6653aSJeff Kirsher #include <linux/ip.h>
217ac6653aSJeff Kirsher #include <linux/tcp.h>
227ac6653aSJeff Kirsher #include <linux/skbuff.h>
237ac6653aSJeff Kirsher #include <linux/ethtool.h>
247ac6653aSJeff Kirsher #include <linux/if_ether.h>
257ac6653aSJeff Kirsher #include <linux/crc32.h>
267ac6653aSJeff Kirsher #include <linux/mii.h>
2701789349SJiri Pirko #include <linux/if.h>
287ac6653aSJeff Kirsher #include <linux/if_vlan.h>
297ac6653aSJeff Kirsher #include <linux/dma-mapping.h>
307ac6653aSJeff Kirsher #include <linux/slab.h>
317ac6653aSJeff Kirsher #include <linux/prefetch.h>
32db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h>
3350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
347ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h>
357ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h>
3650fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
37891434b1SRayagond Kokatanur #include <linux/net_tstamp.h>
38eeef2f6bSJose Abreu #include <linux/phylink.h>
394dbbe8ddSJose Abreu #include <net/pkt_cls.h>
40891434b1SRayagond Kokatanur #include "stmmac_ptp.h"
41286a8372SGiuseppe CAVALLARO #include "stmmac.h"
42c5e4ddbdSChen-Yu Tsai #include <linux/reset.h>
435790cf3cSMathieu Olivari #include <linux/of_mdio.h>
4419d857c9SPhil Reid #include "dwmac1000.h"
457d9e6c5aSJose Abreu #include "dwxgmac2.h"
4642de047dSJose Abreu #include "hwif.h"
477ac6653aSJeff Kirsher 
489939a46dSEugeniy Paltsev #define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
49f748be53SAlexandre TORGUE #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
507ac6653aSJeff Kirsher 
517ac6653aSJeff Kirsher /* Module parameters */
5232ceabcaSGiuseppe CAVALLARO #define TX_TIMEO	5000
537ac6653aSJeff Kirsher static int watchdog = TX_TIMEO;
54d3757ba4SJoe Perches module_param(watchdog, int, 0644);
5532ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
567ac6653aSJeff Kirsher 
5732ceabcaSGiuseppe CAVALLARO static int debug = -1;
58d3757ba4SJoe Perches module_param(debug, int, 0644);
5932ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
607ac6653aSJeff Kirsher 
6147d1f71fSstephen hemminger static int phyaddr = -1;
62d3757ba4SJoe Perches module_param(phyaddr, int, 0444);
637ac6653aSJeff Kirsher MODULE_PARM_DESC(phyaddr, "Physical device address");
647ac6653aSJeff Kirsher 
65e3ad57c9SGiuseppe Cavallaro #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
66120e87f9SGiuseppe Cavallaro #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
677ac6653aSJeff Kirsher 
68e9989339SJose Abreu static int flow_ctrl = FLOW_AUTO;
69d3757ba4SJoe Perches module_param(flow_ctrl, int, 0644);
707ac6653aSJeff Kirsher MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
717ac6653aSJeff Kirsher 
727ac6653aSJeff Kirsher static int pause = PAUSE_TIME;
73d3757ba4SJoe Perches module_param(pause, int, 0644);
747ac6653aSJeff Kirsher MODULE_PARM_DESC(pause, "Flow Control Pause Time");
757ac6653aSJeff Kirsher 
767ac6653aSJeff Kirsher #define TC_DEFAULT 64
777ac6653aSJeff Kirsher static int tc = TC_DEFAULT;
78d3757ba4SJoe Perches module_param(tc, int, 0644);
797ac6653aSJeff Kirsher MODULE_PARM_DESC(tc, "DMA threshold control value");
807ac6653aSJeff Kirsher 
81d916701cSGiuseppe CAVALLARO #define	DEFAULT_BUFSIZE	1536
82d916701cSGiuseppe CAVALLARO static int buf_sz = DEFAULT_BUFSIZE;
83d3757ba4SJoe Perches module_param(buf_sz, int, 0644);
847ac6653aSJeff Kirsher MODULE_PARM_DESC(buf_sz, "DMA buffer size");
857ac6653aSJeff Kirsher 
8622ad3838SGiuseppe Cavallaro #define	STMMAC_RX_COPYBREAK	256
8722ad3838SGiuseppe Cavallaro 
887ac6653aSJeff Kirsher static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
897ac6653aSJeff Kirsher 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
907ac6653aSJeff Kirsher 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
917ac6653aSJeff Kirsher 
92d765955dSGiuseppe CAVALLARO #define STMMAC_DEFAULT_LPI_TIMER	1000
93d765955dSGiuseppe CAVALLARO static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
94d3757ba4SJoe Perches module_param(eee_timer, int, 0644);
95d765955dSGiuseppe CAVALLARO MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
96f5351ef7SGiuseppe CAVALLARO #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
97d765955dSGiuseppe CAVALLARO 
9822d3efe5SPavel Machek /* By default the driver uses ring mode to manage the tx and rx descriptors,
9922d3efe5SPavel Machek  * but the user can force the use of chain mode instead of ring mode.
1004a7d666aSGiuseppe CAVALLARO  */
1014a7d666aSGiuseppe CAVALLARO static unsigned int chain_mode;
102d3757ba4SJoe Perches module_param(chain_mode, int, 0444);
1034a7d666aSGiuseppe CAVALLARO MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
1044a7d666aSGiuseppe CAVALLARO 
1057ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
1067ac6653aSJeff Kirsher 
10750fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
1088d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev);
109466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev);
110bfab27a1SGiuseppe CAVALLARO #endif
111bfab27a1SGiuseppe CAVALLARO 
1129125cdd1SGiuseppe CAVALLARO #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
1139125cdd1SGiuseppe CAVALLARO 
1147ac6653aSJeff Kirsher /**
1157ac6653aSJeff Kirsher  * stmmac_verify_args - verify the driver parameters.
116732fdf0eSGiuseppe CAVALLARO  * Description: it checks the driver parameters and sets a default in case of
117732fdf0eSGiuseppe CAVALLARO  * errors.
1187ac6653aSJeff Kirsher  */
1197ac6653aSJeff Kirsher static void stmmac_verify_args(void)
1207ac6653aSJeff Kirsher {
1217ac6653aSJeff Kirsher 	if (unlikely(watchdog < 0))
1227ac6653aSJeff Kirsher 		watchdog = TX_TIMEO;
123d916701cSGiuseppe CAVALLARO 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
124d916701cSGiuseppe CAVALLARO 		buf_sz = DEFAULT_BUFSIZE;
1257ac6653aSJeff Kirsher 	if (unlikely(flow_ctrl > 1))
1267ac6653aSJeff Kirsher 		flow_ctrl = FLOW_AUTO;
1277ac6653aSJeff Kirsher 	else if (likely(flow_ctrl < 0))
1287ac6653aSJeff Kirsher 		flow_ctrl = FLOW_OFF;
1297ac6653aSJeff Kirsher 	if (unlikely((pause < 0) || (pause > 0xffff)))
1307ac6653aSJeff Kirsher 		pause = PAUSE_TIME;
131d765955dSGiuseppe CAVALLARO 	if (eee_timer < 0)
132d765955dSGiuseppe CAVALLARO 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
1337ac6653aSJeff Kirsher }
1347ac6653aSJeff Kirsher 
13532ceabcaSGiuseppe CAVALLARO /**
136c22a3f48SJoao Pinto  * stmmac_disable_all_queues - Disable all queues
137c22a3f48SJoao Pinto  * @priv: driver private structure
138c22a3f48SJoao Pinto  */
139c22a3f48SJoao Pinto static void stmmac_disable_all_queues(struct stmmac_priv *priv)
140c22a3f48SJoao Pinto {
141c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
1428fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
1438fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
144c22a3f48SJoao Pinto 	u32 queue;
145c22a3f48SJoao Pinto 
1468fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
1478fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
148c22a3f48SJoao Pinto 
1494ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
1504ccb4585SJose Abreu 			napi_disable(&ch->rx_napi);
1514ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
1524ccb4585SJose Abreu 			napi_disable(&ch->tx_napi);
153c22a3f48SJoao Pinto 	}
154c22a3f48SJoao Pinto }
155c22a3f48SJoao Pinto 
156c22a3f48SJoao Pinto /**
157c22a3f48SJoao Pinto  * stmmac_enable_all_queues - Enable all queues
158c22a3f48SJoao Pinto  * @priv: driver private structure
159c22a3f48SJoao Pinto  */
160c22a3f48SJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv)
161c22a3f48SJoao Pinto {
162c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
1638fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
1648fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
165c22a3f48SJoao Pinto 	u32 queue;
166c22a3f48SJoao Pinto 
1678fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
1688fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
169c22a3f48SJoao Pinto 
1704ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
1714ccb4585SJose Abreu 			napi_enable(&ch->rx_napi);
1724ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
1734ccb4585SJose Abreu 			napi_enable(&ch->tx_napi);
174c22a3f48SJoao Pinto 	}
175c22a3f48SJoao Pinto }
176c22a3f48SJoao Pinto 
177c22a3f48SJoao Pinto /**
178c22a3f48SJoao Pinto  * stmmac_stop_all_queues - Stop all queues
179c22a3f48SJoao Pinto  * @priv: driver private structure
180c22a3f48SJoao Pinto  */
181c22a3f48SJoao Pinto static void stmmac_stop_all_queues(struct stmmac_priv *priv)
182c22a3f48SJoao Pinto {
183c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
184c22a3f48SJoao Pinto 	u32 queue;
185c22a3f48SJoao Pinto 
186c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
187c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
188c22a3f48SJoao Pinto }
189c22a3f48SJoao Pinto 
190c22a3f48SJoao Pinto /**
191c22a3f48SJoao Pinto  * stmmac_start_all_queues - Start all queues
192c22a3f48SJoao Pinto  * @priv: driver private structure
193c22a3f48SJoao Pinto  */
194c22a3f48SJoao Pinto static void stmmac_start_all_queues(struct stmmac_priv *priv)
195c22a3f48SJoao Pinto {
196c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
197c22a3f48SJoao Pinto 	u32 queue;
198c22a3f48SJoao Pinto 
199c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
200c22a3f48SJoao Pinto 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
201c22a3f48SJoao Pinto }
202c22a3f48SJoao Pinto 
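/**
 * stmmac_service_event_schedule - schedule the service task
 * @priv: driver private structure
 * Description: queue the service work unless the interface is going down or
 * the task has already been scheduled.
 */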
20334877a15SJose Abreu static void stmmac_service_event_schedule(struct stmmac_priv *priv)
20434877a15SJose Abreu {
20534877a15SJose Abreu 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
20634877a15SJose Abreu 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
20734877a15SJose Abreu 		queue_work(priv->wq, &priv->service_task);
20834877a15SJose Abreu }
20934877a15SJose Abreu 
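/**
 * stmmac_global_err - handle a fatal (global) error
 * @priv: driver private structure
 * Description: turn the carrier off, flag that a reset has been requested and
 * schedule the service task that performs the recovery.
 */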
21034877a15SJose Abreu static void stmmac_global_err(struct stmmac_priv *priv)
21134877a15SJose Abreu {
21234877a15SJose Abreu 	netif_carrier_off(priv->dev);
21334877a15SJose Abreu 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
21434877a15SJose Abreu 	stmmac_service_event_schedule(priv);
21534877a15SJose Abreu }
21634877a15SJose Abreu 
217c22a3f48SJoao Pinto /**
21832ceabcaSGiuseppe CAVALLARO  * stmmac_clk_csr_set - dynamically set the MDC clock
21932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
22032ceabcaSGiuseppe CAVALLARO  * Description: this is to dynamically set the MDC clock according to the csr
22132ceabcaSGiuseppe CAVALLARO  * clock input.
22232ceabcaSGiuseppe CAVALLARO  * Note:
22332ceabcaSGiuseppe CAVALLARO  *	If a specific clk_csr value is passed from the platform
22432ceabcaSGiuseppe CAVALLARO  *	this means that the CSR Clock Range selection cannot be
22532ceabcaSGiuseppe CAVALLARO  *	changed at run-time and it is fixed (as reported in the driver
22632ceabcaSGiuseppe CAVALLARO  *	documentation). Otherwise, the driver will try to set the MDC
22732ceabcaSGiuseppe CAVALLARO  *	clock dynamically according to the actual clock input.
22832ceabcaSGiuseppe CAVALLARO  */
229cd7201f4SGiuseppe CAVALLARO static void stmmac_clk_csr_set(struct stmmac_priv *priv)
230cd7201f4SGiuseppe CAVALLARO {
231cd7201f4SGiuseppe CAVALLARO 	u32 clk_rate;
232cd7201f4SGiuseppe CAVALLARO 
233f573c0b9Sjpinto 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
234cd7201f4SGiuseppe CAVALLARO 
235cd7201f4SGiuseppe CAVALLARO 	/* The platform-provided default clk_csr is assumed valid
236ceb69499SGiuseppe CAVALLARO 	 * in all other cases except the ones handled below.
237ceb69499SGiuseppe CAVALLARO 	 * For rates higher than the IEEE 802.3 specified frequency
238ceb69499SGiuseppe CAVALLARO 	 * we cannot estimate the proper divider because the frequency
239ceb69499SGiuseppe CAVALLARO 	 * of clk_csr_i is not known. So we do not change the default
240ceb69499SGiuseppe CAVALLARO 	 * divider.
241ceb69499SGiuseppe CAVALLARO 	 */
242cd7201f4SGiuseppe CAVALLARO 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
243cd7201f4SGiuseppe CAVALLARO 		if (clk_rate < CSR_F_35M)
244cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_20_35M;
245cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
246cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_35_60M;
247cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
248cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_60_100M;
249cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
250cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_100_150M;
251cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
252cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_150_250M;
25319d857c9SPhil Reid 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
254cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_250_300M;
255ceb69499SGiuseppe CAVALLARO 	}
2569f93ac8dSLABBE Corentin 
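	/* For the cores handled below, clk_csr holds a raw divider selector
	 * specific to those IPs rather than one of the STMMAC_CSR_* ranges
	 * used above.
	 */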
2579f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i) {
2589f93ac8dSLABBE Corentin 		if (clk_rate > 160000000)
2599f93ac8dSLABBE Corentin 			priv->clk_csr = 0x03;
2609f93ac8dSLABBE Corentin 		else if (clk_rate > 80000000)
2619f93ac8dSLABBE Corentin 			priv->clk_csr = 0x02;
2629f93ac8dSLABBE Corentin 		else if (clk_rate > 40000000)
2639f93ac8dSLABBE Corentin 			priv->clk_csr = 0x01;
2649f93ac8dSLABBE Corentin 		else
2659f93ac8dSLABBE Corentin 			priv->clk_csr = 0;
2669f93ac8dSLABBE Corentin 	}
2677d9e6c5aSJose Abreu 
2687d9e6c5aSJose Abreu 	if (priv->plat->has_xgmac) {
2697d9e6c5aSJose Abreu 		if (clk_rate > 400000000)
2707d9e6c5aSJose Abreu 			priv->clk_csr = 0x5;
2717d9e6c5aSJose Abreu 		else if (clk_rate > 350000000)
2727d9e6c5aSJose Abreu 			priv->clk_csr = 0x4;
2737d9e6c5aSJose Abreu 		else if (clk_rate > 300000000)
2747d9e6c5aSJose Abreu 			priv->clk_csr = 0x3;
2757d9e6c5aSJose Abreu 		else if (clk_rate > 250000000)
2767d9e6c5aSJose Abreu 			priv->clk_csr = 0x2;
2777d9e6c5aSJose Abreu 		else if (clk_rate > 150000000)
2787d9e6c5aSJose Abreu 			priv->clk_csr = 0x1;
2797d9e6c5aSJose Abreu 		else
2807d9e6c5aSJose Abreu 			priv->clk_csr = 0x0;
2817d9e6c5aSJose Abreu 	}
282cd7201f4SGiuseppe CAVALLARO }
283cd7201f4SGiuseppe CAVALLARO 
2847ac6653aSJeff Kirsher static void print_pkt(unsigned char *buf, int len)
2857ac6653aSJeff Kirsher {
286424c4f78SAndy Shevchenko 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
287424c4f78SAndy Shevchenko 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
2887ac6653aSJeff Kirsher }
2897ac6653aSJeff Kirsher 
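/**
 * stmmac_tx_avail - get the number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: return how many descriptors are still available in the given
 * TX ring; one entry is kept unused so that a full ring can be told apart
 * from an empty one.
 */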
290ce736788SJoao Pinto static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
2917ac6653aSJeff Kirsher {
292ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
293a6a3e026SLABBE Corentin 	u32 avail;
294e3ad57c9SGiuseppe Cavallaro 
295ce736788SJoao Pinto 	if (tx_q->dirty_tx > tx_q->cur_tx)
296ce736788SJoao Pinto 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
297e3ad57c9SGiuseppe Cavallaro 	else
298ce736788SJoao Pinto 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
299e3ad57c9SGiuseppe Cavallaro 
300e3ad57c9SGiuseppe Cavallaro 	return avail;
301e3ad57c9SGiuseppe Cavallaro }
302e3ad57c9SGiuseppe Cavallaro 
30354139cf3SJoao Pinto /**
30454139cf3SJoao Pinto  * stmmac_rx_dirty - Get RX queue dirty
30554139cf3SJoao Pinto  * @priv: driver private structure
30654139cf3SJoao Pinto  * @queue: RX queue index
30754139cf3SJoao Pinto  */
30854139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
309e3ad57c9SGiuseppe Cavallaro {
31054139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
311a6a3e026SLABBE Corentin 	u32 dirty;
312e3ad57c9SGiuseppe Cavallaro 
31354139cf3SJoao Pinto 	if (rx_q->dirty_rx <= rx_q->cur_rx)
31454139cf3SJoao Pinto 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
315e3ad57c9SGiuseppe Cavallaro 	else
31654139cf3SJoao Pinto 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
317e3ad57c9SGiuseppe Cavallaro 
318e3ad57c9SGiuseppe Cavallaro 	return dirty;
3197ac6653aSJeff Kirsher }
3207ac6653aSJeff Kirsher 
32132ceabcaSGiuseppe CAVALLARO /**
322732fdf0eSGiuseppe CAVALLARO  * stmmac_enable_eee_mode - check and enter LPI mode
32332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
324732fdf0eSGiuseppe CAVALLARO  * Description: this function verifies that no TX queue has pending work and,
325732fdf0eSGiuseppe CAVALLARO  * in that case, enters LPI mode for EEE.
32632ceabcaSGiuseppe CAVALLARO  */
327d765955dSGiuseppe CAVALLARO static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
328d765955dSGiuseppe CAVALLARO {
329ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
330ce736788SJoao Pinto 	u32 queue;
331ce736788SJoao Pinto 
332ce736788SJoao Pinto 	/* check if all TX queues have the work finished */
333ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
334ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
335ce736788SJoao Pinto 
336ce736788SJoao Pinto 		if (tx_q->dirty_tx != tx_q->cur_tx)
337ce736788SJoao Pinto 			return; /* still unfinished work */
338ce736788SJoao Pinto 	}
339ce736788SJoao Pinto 
340d765955dSGiuseppe CAVALLARO 	/* Check and enter in LPI mode */
341ce736788SJoao Pinto 	if (!priv->tx_path_in_lpi_mode)
342c10d4c82SJose Abreu 		stmmac_set_eee_mode(priv, priv->hw,
343b4b7b772Sjpinto 				priv->plat->en_tx_lpi_clockgating);
344d765955dSGiuseppe CAVALLARO }
345d765955dSGiuseppe CAVALLARO 
34632ceabcaSGiuseppe CAVALLARO /**
347732fdf0eSGiuseppe CAVALLARO  * stmmac_disable_eee_mode - disable and exit from LPI mode
34832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
34932ceabcaSGiuseppe CAVALLARO  * Description: this function exits and disables EEE in case the
35032ceabcaSGiuseppe CAVALLARO  * LPI state is active. It is called from the xmit path.
35132ceabcaSGiuseppe CAVALLARO  */
352d765955dSGiuseppe CAVALLARO void stmmac_disable_eee_mode(struct stmmac_priv *priv)
353d765955dSGiuseppe CAVALLARO {
354c10d4c82SJose Abreu 	stmmac_reset_eee_mode(priv, priv->hw);
355d765955dSGiuseppe CAVALLARO 	del_timer_sync(&priv->eee_ctrl_timer);
356d765955dSGiuseppe CAVALLARO 	priv->tx_path_in_lpi_mode = false;
357d765955dSGiuseppe CAVALLARO }
358d765955dSGiuseppe CAVALLARO 
359d765955dSGiuseppe CAVALLARO /**
360732fdf0eSGiuseppe CAVALLARO  * stmmac_eee_ctrl_timer - EEE TX SW timer.
361d765955dSGiuseppe CAVALLARO  * @t: timer_list pointer used to retrieve the private structure
362d765955dSGiuseppe CAVALLARO  * Description:
36332ceabcaSGiuseppe CAVALLARO  *  if there is no data transfer and if we are not in LPI state,
364d765955dSGiuseppe CAVALLARO  *  then the MAC transmitter can be moved to the LPI state.
365d765955dSGiuseppe CAVALLARO  */
366e99e88a9SKees Cook static void stmmac_eee_ctrl_timer(struct timer_list *t)
367d765955dSGiuseppe CAVALLARO {
368e99e88a9SKees Cook 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
369d765955dSGiuseppe CAVALLARO 
370d765955dSGiuseppe CAVALLARO 	stmmac_enable_eee_mode(priv);
371f5351ef7SGiuseppe CAVALLARO 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
372d765955dSGiuseppe CAVALLARO }
373d765955dSGiuseppe CAVALLARO 
374d765955dSGiuseppe CAVALLARO /**
375732fdf0eSGiuseppe CAVALLARO  * stmmac_eee_init - init EEE
37632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
377d765955dSGiuseppe CAVALLARO  * Description:
378732fdf0eSGiuseppe CAVALLARO  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
379732fdf0eSGiuseppe CAVALLARO  *  can also manage EEE, this function enables the LPI state and starts the
380732fdf0eSGiuseppe CAVALLARO  *  related timer.
381d765955dSGiuseppe CAVALLARO  */
382d765955dSGiuseppe CAVALLARO bool stmmac_eee_init(struct stmmac_priv *priv)
383d765955dSGiuseppe CAVALLARO {
38474371272SJose Abreu 	int tx_lpi_timer = priv->tx_lpi_timer;
385879626e3SJerome Brunet 
386f5351ef7SGiuseppe CAVALLARO 	/* Using the PCS we cannot deal with the PHY registers at this stage
387f5351ef7SGiuseppe CAVALLARO 	 * so we do not support extra features like EEE.
388f5351ef7SGiuseppe CAVALLARO 	 */
3893fe5cadbSGiuseppe CAVALLARO 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
3903fe5cadbSGiuseppe CAVALLARO 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
3913fe5cadbSGiuseppe CAVALLARO 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
39274371272SJose Abreu 		return false;
393f5351ef7SGiuseppe CAVALLARO 
39474371272SJose Abreu 	/* Check if MAC core supports the EEE feature. */
39574371272SJose Abreu 	if (!priv->dma_cap.eee)
39674371272SJose Abreu 		return false;
397d765955dSGiuseppe CAVALLARO 
39829555fa3SThierry Reding 	mutex_lock(&priv->lock);
39974371272SJose Abreu 
40074371272SJose Abreu 	/* Check if it needs to be deactivated */
401177d935aSJon Hunter 	if (!priv->eee_active) {
402177d935aSJon Hunter 		if (priv->eee_enabled) {
40338ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "disable EEE\n");
40483bf79b6SGiuseppe CAVALLARO 			del_timer_sync(&priv->eee_ctrl_timer);
40574371272SJose Abreu 			stmmac_set_eee_timer(priv, priv->hw, 0, tx_lpi_timer);
406177d935aSJon Hunter 		}
4070867bb97SJon Hunter 		mutex_unlock(&priv->lock);
40874371272SJose Abreu 		return false;
40974371272SJose Abreu 	}
41074371272SJose Abreu 
41174371272SJose Abreu 	if (priv->eee_active && !priv->eee_enabled) {
41274371272SJose Abreu 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
41374371272SJose Abreu 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
41474371272SJose Abreu 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
41583bf79b6SGiuseppe CAVALLARO 				     tx_lpi_timer);
41683bf79b6SGiuseppe CAVALLARO 	}
41774371272SJose Abreu 
41829555fa3SThierry Reding 	mutex_unlock(&priv->lock);
41938ddc59dSLABBE Corentin 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
42074371272SJose Abreu 	return true;
421d765955dSGiuseppe CAVALLARO }
422d765955dSGiuseppe CAVALLARO 
423732fdf0eSGiuseppe CAVALLARO /* stmmac_get_tx_hwtstamp - get HW TX timestamps
42432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
425ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
426891434b1SRayagond Kokatanur  * @skb : the socket buffer
427891434b1SRayagond Kokatanur  * Description :
428891434b1SRayagond Kokatanur  * This function reads the timestamp from the descriptor and passes it to the
429891434b1SRayagond Kokatanur  * stack. It also performs some sanity checks.
430891434b1SRayagond Kokatanur  */
431891434b1SRayagond Kokatanur static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
432ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *p, struct sk_buff *skb)
433891434b1SRayagond Kokatanur {
434891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps shhwtstamp;
43525e80cd0SJose Abreu 	bool found = false;
436df103170SNathan Chancellor 	u64 ns = 0;
437891434b1SRayagond Kokatanur 
438891434b1SRayagond Kokatanur 	if (!priv->hwts_tx_en)
439891434b1SRayagond Kokatanur 		return;
440891434b1SRayagond Kokatanur 
441ceb69499SGiuseppe CAVALLARO 	/* exit if skb doesn't support hw tstamp */
44275e4364fSdamuzi000 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
443891434b1SRayagond Kokatanur 		return;
444891434b1SRayagond Kokatanur 
445891434b1SRayagond Kokatanur 	/* check tx tstamp status */
44642de047dSJose Abreu 	if (stmmac_get_tx_timestamp_status(priv, p)) {
44742de047dSJose Abreu 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
44825e80cd0SJose Abreu 		found = true;
44925e80cd0SJose Abreu 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
45025e80cd0SJose Abreu 		found = true;
45125e80cd0SJose Abreu 	}
452891434b1SRayagond Kokatanur 
45325e80cd0SJose Abreu 	if (found) {
454891434b1SRayagond Kokatanur 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
455891434b1SRayagond Kokatanur 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
456ba1ffd74SGiuseppe CAVALLARO 
45733d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
458891434b1SRayagond Kokatanur 		/* pass tstamp to stack */
459891434b1SRayagond Kokatanur 		skb_tstamp_tx(skb, &shhwtstamp);
460ba1ffd74SGiuseppe CAVALLARO 	}
461891434b1SRayagond Kokatanur }
462891434b1SRayagond Kokatanur 
463732fdf0eSGiuseppe CAVALLARO /* stmmac_get_rx_hwtstamp - get HW RX timestamps
46432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
465ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
466ba1ffd74SGiuseppe CAVALLARO  * @np : next descriptor pointer
467891434b1SRayagond Kokatanur  * @skb : the socket buffer
468891434b1SRayagond Kokatanur  * Description :
469891434b1SRayagond Kokatanur  * This function reads the received packet's timestamp from the descriptor
470891434b1SRayagond Kokatanur  * and passes it to the stack. It also performs some sanity checks.
471891434b1SRayagond Kokatanur  */
472ba1ffd74SGiuseppe CAVALLARO static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
473ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *np, struct sk_buff *skb)
474891434b1SRayagond Kokatanur {
475891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
47698870943SJose Abreu 	struct dma_desc *desc = p;
477df103170SNathan Chancellor 	u64 ns = 0;
478891434b1SRayagond Kokatanur 
479891434b1SRayagond Kokatanur 	if (!priv->hwts_rx_en)
480891434b1SRayagond Kokatanur 		return;
481ba1ffd74SGiuseppe CAVALLARO 	/* For GMAC4, the valid timestamp is from CTX next desc. */
4827d9e6c5aSJose Abreu 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
48398870943SJose Abreu 		desc = np;
484891434b1SRayagond Kokatanur 
48598870943SJose Abreu 	/* Check if timestamp is available */
48642de047dSJose Abreu 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
48742de047dSJose Abreu 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
48833d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
489891434b1SRayagond Kokatanur 		shhwtstamp = skb_hwtstamps(skb);
490891434b1SRayagond Kokatanur 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
491891434b1SRayagond Kokatanur 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
492ba1ffd74SGiuseppe CAVALLARO 	} else  {
49333d4c482SMario Molitor 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
494ba1ffd74SGiuseppe CAVALLARO 	}
495891434b1SRayagond Kokatanur }
496891434b1SRayagond Kokatanur 
497891434b1SRayagond Kokatanur /**
498d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_set - control hardware timestamping.
499891434b1SRayagond Kokatanur  *  @dev: device pointer.
5008d45e42bSLABBE Corentin  *  @ifr: An IOCTL specific structure, that can contain a pointer to
501891434b1SRayagond Kokatanur  *  a proprietary structure used to pass information to the driver.
502891434b1SRayagond Kokatanur  *  Description:
503891434b1SRayagond Kokatanur  *  This function configures the MAC to enable/disable both outgoing (TX)
504891434b1SRayagond Kokatanur  *  and incoming (RX) packet timestamping based on user input.
505891434b1SRayagond Kokatanur  *  Return Value:
506891434b1SRayagond Kokatanur  *  0 on success and an appropriate -ve integer on failure.
507891434b1SRayagond Kokatanur  */
508d6228b7cSArtem Panfilov static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
509891434b1SRayagond Kokatanur {
510891434b1SRayagond Kokatanur 	struct stmmac_priv *priv = netdev_priv(dev);
511891434b1SRayagond Kokatanur 	struct hwtstamp_config config;
5120a624155SArnd Bergmann 	struct timespec64 now;
513891434b1SRayagond Kokatanur 	u64 temp = 0;
514891434b1SRayagond Kokatanur 	u32 ptp_v2 = 0;
515891434b1SRayagond Kokatanur 	u32 tstamp_all = 0;
516891434b1SRayagond Kokatanur 	u32 ptp_over_ipv4_udp = 0;
517891434b1SRayagond Kokatanur 	u32 ptp_over_ipv6_udp = 0;
518891434b1SRayagond Kokatanur 	u32 ptp_over_ethernet = 0;
519891434b1SRayagond Kokatanur 	u32 snap_type_sel = 0;
520891434b1SRayagond Kokatanur 	u32 ts_master_en = 0;
521891434b1SRayagond Kokatanur 	u32 ts_event_en = 0;
522df103170SNathan Chancellor 	u32 sec_inc = 0;
523891434b1SRayagond Kokatanur 	u32 value = 0;
5247d9e6c5aSJose Abreu 	bool xmac;
5257d9e6c5aSJose Abreu 
5267d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
527891434b1SRayagond Kokatanur 
528891434b1SRayagond Kokatanur 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
529891434b1SRayagond Kokatanur 		netdev_alert(priv->dev, "No support for HW time stamping\n");
530891434b1SRayagond Kokatanur 		priv->hwts_tx_en = 0;
531891434b1SRayagond Kokatanur 		priv->hwts_rx_en = 0;
532891434b1SRayagond Kokatanur 
533891434b1SRayagond Kokatanur 		return -EOPNOTSUPP;
534891434b1SRayagond Kokatanur 	}
535891434b1SRayagond Kokatanur 
536891434b1SRayagond Kokatanur 	if (copy_from_user(&config, ifr->ifr_data,
537d6228b7cSArtem Panfilov 			   sizeof(config)))
538891434b1SRayagond Kokatanur 		return -EFAULT;
539891434b1SRayagond Kokatanur 
54038ddc59dSLABBE Corentin 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
541891434b1SRayagond Kokatanur 		   __func__, config.flags, config.tx_type, config.rx_filter);
542891434b1SRayagond Kokatanur 
543891434b1SRayagond Kokatanur 	/* reserved for future extensions */
544891434b1SRayagond Kokatanur 	if (config.flags)
545891434b1SRayagond Kokatanur 		return -EINVAL;
546891434b1SRayagond Kokatanur 
5475f3da328SBen Hutchings 	if (config.tx_type != HWTSTAMP_TX_OFF &&
5485f3da328SBen Hutchings 	    config.tx_type != HWTSTAMP_TX_ON)
549891434b1SRayagond Kokatanur 		return -ERANGE;
550891434b1SRayagond Kokatanur 
551891434b1SRayagond Kokatanur 	if (priv->adv_ts) {
552891434b1SRayagond Kokatanur 		switch (config.rx_filter) {
553891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_NONE:
554ceb69499SGiuseppe CAVALLARO 			/* time stamp no incoming packet at all */
555891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_NONE;
556891434b1SRayagond Kokatanur 			break;
557891434b1SRayagond Kokatanur 
558891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
559ceb69499SGiuseppe CAVALLARO 			/* PTP v1, UDP, any kind of event packet */
560891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
5617d8e249fSIlias Apalodimas 			/* 'xmac' hardware can support Sync, Pdelay_Req and
5627d8e249fSIlias Apalodimas 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
5637d8e249fSIlias Apalodimas 			 * This leaves Delay_Req timestamps out.
5647d8e249fSIlias Apalodimas 			 * Enable all events *and* general purpose message
5657d8e249fSIlias Apalodimas 			 * timestamping
5667d8e249fSIlias Apalodimas 			 */
567891434b1SRayagond Kokatanur 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
568891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
569891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
570891434b1SRayagond Kokatanur 			break;
571891434b1SRayagond Kokatanur 
572891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
573ceb69499SGiuseppe CAVALLARO 			/* PTP v1, UDP, Sync packet */
574891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
575891434b1SRayagond Kokatanur 			/* take time stamp for SYNC messages only */
576891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
577891434b1SRayagond Kokatanur 
578891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
579891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
580891434b1SRayagond Kokatanur 			break;
581891434b1SRayagond Kokatanur 
582891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
583ceb69499SGiuseppe CAVALLARO 			/* PTP v1, UDP, Delay_req packet */
584891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
585891434b1SRayagond Kokatanur 			/* take time stamp for Delay_Req messages only */
586891434b1SRayagond Kokatanur 			ts_master_en = PTP_TCR_TSMSTRENA;
587891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
588891434b1SRayagond Kokatanur 
589891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
590891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
591891434b1SRayagond Kokatanur 			break;
592891434b1SRayagond Kokatanur 
593891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
594ceb69499SGiuseppe CAVALLARO 			/* PTP v2, UDP, any kind of event packet */
595891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
596891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
597891434b1SRayagond Kokatanur 			/* take time stamp for all event messages */
598891434b1SRayagond Kokatanur 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
599891434b1SRayagond Kokatanur 
600891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
601891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
602891434b1SRayagond Kokatanur 			break;
603891434b1SRayagond Kokatanur 
604891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
605ceb69499SGiuseppe CAVALLARO 			/* PTP v2, UDP, Sync packet */
606891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
607891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
608891434b1SRayagond Kokatanur 			/* take time stamp for SYNC messages only */
609891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
610891434b1SRayagond Kokatanur 
611891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
612891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
613891434b1SRayagond Kokatanur 			break;
614891434b1SRayagond Kokatanur 
615891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
616ceb69499SGiuseppe CAVALLARO 			/* PTP v2, UDP, Delay_req packet */
617891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
618891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
619891434b1SRayagond Kokatanur 			/* take time stamp for Delay_Req messages only */
620891434b1SRayagond Kokatanur 			ts_master_en = PTP_TCR_TSMSTRENA;
621891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
622891434b1SRayagond Kokatanur 
623891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
624891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
625891434b1SRayagond Kokatanur 			break;
626891434b1SRayagond Kokatanur 
627891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
628ceb69499SGiuseppe CAVALLARO 			/* PTP v2/802.1AS, any layer, any kind of event packet */
629891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
630891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
631891434b1SRayagond Kokatanur 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
632891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
633891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
634891434b1SRayagond Kokatanur 			ptp_over_ethernet = PTP_TCR_TSIPENA;
635891434b1SRayagond Kokatanur 			break;
636891434b1SRayagond Kokatanur 
637891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
638ceb69499SGiuseppe CAVALLARO 			/* PTP v2/802.1AS, any layer, Sync packet */
639891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
640891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
641891434b1SRayagond Kokatanur 			/* take time stamp for SYNC messages only */
642891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
643891434b1SRayagond Kokatanur 
644891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
645891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
646891434b1SRayagond Kokatanur 			ptp_over_ethernet = PTP_TCR_TSIPENA;
647891434b1SRayagond Kokatanur 			break;
648891434b1SRayagond Kokatanur 
649891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
650ceb69499SGiuseppe CAVALLARO 			/* PTP v2/802.1AS, any layer, Delay_req packet */
651891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
652891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
653891434b1SRayagond Kokatanur 			/* take time stamp for Delay_Req messages only */
654891434b1SRayagond Kokatanur 			ts_master_en = PTP_TCR_TSMSTRENA;
655891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
656891434b1SRayagond Kokatanur 
657891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
658891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
659891434b1SRayagond Kokatanur 			ptp_over_ethernet = PTP_TCR_TSIPENA;
660891434b1SRayagond Kokatanur 			break;
661891434b1SRayagond Kokatanur 
662e3412575SMiroslav Lichvar 		case HWTSTAMP_FILTER_NTP_ALL:
663891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_ALL:
664ceb69499SGiuseppe CAVALLARO 			/* time stamp any incoming packet */
665891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_ALL;
666891434b1SRayagond Kokatanur 			tstamp_all = PTP_TCR_TSENALL;
667891434b1SRayagond Kokatanur 			break;
668891434b1SRayagond Kokatanur 
669891434b1SRayagond Kokatanur 		default:
670891434b1SRayagond Kokatanur 			return -ERANGE;
671891434b1SRayagond Kokatanur 		}
672891434b1SRayagond Kokatanur 	} else {
673891434b1SRayagond Kokatanur 		switch (config.rx_filter) {
674891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_NONE:
675891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_NONE;
676891434b1SRayagond Kokatanur 			break;
677891434b1SRayagond Kokatanur 		default:
678891434b1SRayagond Kokatanur 			/* PTP v1, UDP, any kind of event packet */
679891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
680891434b1SRayagond Kokatanur 			break;
681891434b1SRayagond Kokatanur 		}
682891434b1SRayagond Kokatanur 	}
683891434b1SRayagond Kokatanur 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
6845f3da328SBen Hutchings 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
685891434b1SRayagond Kokatanur 
686891434b1SRayagond Kokatanur 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
687cc4c9001SJose Abreu 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
688891434b1SRayagond Kokatanur 	else {
689891434b1SRayagond Kokatanur 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
690891434b1SRayagond Kokatanur 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
691891434b1SRayagond Kokatanur 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
692891434b1SRayagond Kokatanur 			 ts_master_en | snap_type_sel);
693cc4c9001SJose Abreu 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
694891434b1SRayagond Kokatanur 
695891434b1SRayagond Kokatanur 		/* program Sub Second Increment reg */
696cc4c9001SJose Abreu 		stmmac_config_sub_second_increment(priv,
697f573c0b9Sjpinto 				priv->ptpaddr, priv->plat->clk_ptp_rate,
6987d9e6c5aSJose Abreu 				xmac, &sec_inc);
69919d857c9SPhil Reid 		temp = div_u64(1000000000ULL, sec_inc);
700891434b1SRayagond Kokatanur 
7019a8a02c9SJose Abreu 		/* Store sub second increment and flags for later use */
7029a8a02c9SJose Abreu 		priv->sub_second_inc = sec_inc;
7039a8a02c9SJose Abreu 		priv->systime_flags = value;
7049a8a02c9SJose Abreu 
705891434b1SRayagond Kokatanur 		/* calculate the default addend value:
706891434b1SRayagond Kokatanur 		 * formula is:
707891434b1SRayagond Kokatanur 		 * addend = (2^32 * freq_div_ratio) / clk_ptp_rate;
70819d857c9SPhil Reid 		 * where freq_div_ratio = 1e9 ns / sec_inc
709891434b1SRayagond Kokatanur 		 */
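		/* The addend is chosen so that, with the fine correction method
		 * (PTP_TCR_TSCFUPDT set above), clk_ptp_rate scaled by
		 * addend / 2^32 matches the nominal 1e9 / sec_inc update rate
		 * of the sub-second register.
		 */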
71019d857c9SPhil Reid 		temp = (u64)(temp << 32);
711f573c0b9Sjpinto 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
712cc4c9001SJose Abreu 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
713891434b1SRayagond Kokatanur 
714891434b1SRayagond Kokatanur 		/* initialize system time */
7150a624155SArnd Bergmann 		ktime_get_real_ts64(&now);
7160a624155SArnd Bergmann 
7170a624155SArnd Bergmann 		/* lower 32 bits of tv_sec are safe until y2106 */
718cc4c9001SJose Abreu 		stmmac_init_systime(priv, priv->ptpaddr,
719cc4c9001SJose Abreu 				(u32)now.tv_sec, now.tv_nsec);
720891434b1SRayagond Kokatanur 	}
721891434b1SRayagond Kokatanur 
722d6228b7cSArtem Panfilov 	memcpy(&priv->tstamp_config, &config, sizeof(config));
723d6228b7cSArtem Panfilov 
724891434b1SRayagond Kokatanur 	return copy_to_user(ifr->ifr_data, &config,
725d6228b7cSArtem Panfilov 			    sizeof(config)) ? -EFAULT : 0;
726d6228b7cSArtem Panfilov }
727d6228b7cSArtem Panfilov 
728d6228b7cSArtem Panfilov /**
729d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_get - read hardware timestamping.
730d6228b7cSArtem Panfilov  *  @dev: device pointer.
731d6228b7cSArtem Panfilov  *  @ifr: An IOCTL specific structure, that can contain a pointer to
732d6228b7cSArtem Panfilov  *  a proprietary structure used to pass information to the driver.
733d6228b7cSArtem Panfilov  *  Description:
734d6228b7cSArtem Panfilov  *  This function obtains the current hardware timestamping settings
735d6228b7cSArtem Panfilov  *  as requested.
736d6228b7cSArtem Panfilov  */
737d6228b7cSArtem Panfilov static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
738d6228b7cSArtem Panfilov {
739d6228b7cSArtem Panfilov 	struct stmmac_priv *priv = netdev_priv(dev);
740d6228b7cSArtem Panfilov 	struct hwtstamp_config *config = &priv->tstamp_config;
741d6228b7cSArtem Panfilov 
742d6228b7cSArtem Panfilov 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
743d6228b7cSArtem Panfilov 		return -EOPNOTSUPP;
744d6228b7cSArtem Panfilov 
745d6228b7cSArtem Panfilov 	return copy_to_user(ifr->ifr_data, config,
746d6228b7cSArtem Panfilov 			    sizeof(*config)) ? -EFAULT : 0;
747891434b1SRayagond Kokatanur }
748891434b1SRayagond Kokatanur 
74932ceabcaSGiuseppe CAVALLARO /**
750732fdf0eSGiuseppe CAVALLARO  * stmmac_init_ptp - init PTP
75132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
752732fdf0eSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
75332ceabcaSGiuseppe CAVALLARO  * This is done by looking at the HW cap. register.
754732fdf0eSGiuseppe CAVALLARO  * This function also registers the ptp driver.
75532ceabcaSGiuseppe CAVALLARO  */
75692ba6888SRayagond Kokatanur static int stmmac_init_ptp(struct stmmac_priv *priv)
757891434b1SRayagond Kokatanur {
7587d9e6c5aSJose Abreu 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
7597d9e6c5aSJose Abreu 
76092ba6888SRayagond Kokatanur 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
76192ba6888SRayagond Kokatanur 		return -EOPNOTSUPP;
76292ba6888SRayagond Kokatanur 
763891434b1SRayagond Kokatanur 	priv->adv_ts = 0;
7647d9e6c5aSJose Abreu 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
7657d9e6c5aSJose Abreu 	if (xmac && priv->dma_cap.atime_stamp)
766be9b3174SGiuseppe CAVALLARO 		priv->adv_ts = 1;
767be9b3174SGiuseppe CAVALLARO 	/* Dwmac 3.x core with extend_desc can support adv_ts */
768be9b3174SGiuseppe CAVALLARO 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
769891434b1SRayagond Kokatanur 		priv->adv_ts = 1;
7707cd01399SVince Bridgers 
771be9b3174SGiuseppe CAVALLARO 	if (priv->dma_cap.time_stamp)
772be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
7737cd01399SVince Bridgers 
774be9b3174SGiuseppe CAVALLARO 	if (priv->adv_ts)
775be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev,
776be9b3174SGiuseppe CAVALLARO 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
777891434b1SRayagond Kokatanur 
778891434b1SRayagond Kokatanur 	priv->hwts_tx_en = 0;
779891434b1SRayagond Kokatanur 	priv->hwts_rx_en = 0;
78092ba6888SRayagond Kokatanur 
781c30a70d3SGiuseppe CAVALLARO 	stmmac_ptp_register(priv);
782c30a70d3SGiuseppe CAVALLARO 
783c30a70d3SGiuseppe CAVALLARO 	return 0;
78492ba6888SRayagond Kokatanur }
78592ba6888SRayagond Kokatanur 
78692ba6888SRayagond Kokatanur static void stmmac_release_ptp(struct stmmac_priv *priv)
78792ba6888SRayagond Kokatanur {
788f573c0b9Sjpinto 	if (priv->plat->clk_ptp_ref)
789f573c0b9Sjpinto 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
79092ba6888SRayagond Kokatanur 	stmmac_ptp_unregister(priv);
791891434b1SRayagond Kokatanur }
792891434b1SRayagond Kokatanur 
7937ac6653aSJeff Kirsher /**
79429feff39SJoao Pinto  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
79529feff39SJoao Pinto  *  @priv: driver private structure
79629feff39SJoao Pinto  *  Description: It is used for configuring the flow control in all queues
79729feff39SJoao Pinto  */
79829feff39SJoao Pinto static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
79929feff39SJoao Pinto {
80029feff39SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
80129feff39SJoao Pinto 
802c10d4c82SJose Abreu 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
80329feff39SJoao Pinto 			priv->pause, tx_cnt);
80429feff39SJoao Pinto }
80529feff39SJoao Pinto 
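/* phylink validate callback: build the set of link modes supported by the
 * MAC for the current configuration and mask the supported and advertising
 * bitmaps with it. Gigabit modes are cut down when the platform limits
 * max_speed below 1000, and half-duplex modes are removed when more than
 * one TX queue is in use.
 */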
806eeef2f6bSJose Abreu static void stmmac_validate(struct phylink_config *config,
807eeef2f6bSJose Abreu 			    unsigned long *supported,
808eeef2f6bSJose Abreu 			    struct phylink_link_state *state)
809eeef2f6bSJose Abreu {
810eeef2f6bSJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
8115b0d7d7dSJose Abreu 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
812eeef2f6bSJose Abreu 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
813eeef2f6bSJose Abreu 	int tx_cnt = priv->plat->tx_queues_to_use;
814eeef2f6bSJose Abreu 	int max_speed = priv->plat->max_speed;
815eeef2f6bSJose Abreu 
8165b0d7d7dSJose Abreu 	phylink_set(mac_supported, 10baseT_Half);
8175b0d7d7dSJose Abreu 	phylink_set(mac_supported, 10baseT_Full);
8185b0d7d7dSJose Abreu 	phylink_set(mac_supported, 100baseT_Half);
8195b0d7d7dSJose Abreu 	phylink_set(mac_supported, 100baseT_Full);
820df7699c7SJose Abreu 	phylink_set(mac_supported, 1000baseT_Half);
821df7699c7SJose Abreu 	phylink_set(mac_supported, 1000baseT_Full);
822df7699c7SJose Abreu 	phylink_set(mac_supported, 1000baseKX_Full);
8235b0d7d7dSJose Abreu 
8245b0d7d7dSJose Abreu 	phylink_set(mac_supported, Autoneg);
8255b0d7d7dSJose Abreu 	phylink_set(mac_supported, Pause);
8265b0d7d7dSJose Abreu 	phylink_set(mac_supported, Asym_Pause);
8275b0d7d7dSJose Abreu 	phylink_set_port_modes(mac_supported);
8285b0d7d7dSJose Abreu 
829eeef2f6bSJose Abreu 	/* Cut down 1G if asked to */
830eeef2f6bSJose Abreu 	if ((max_speed > 0) && (max_speed < 1000)) {
831eeef2f6bSJose Abreu 		phylink_set(mask, 1000baseT_Full);
832eeef2f6bSJose Abreu 		phylink_set(mask, 1000baseX_Full);
8335b0d7d7dSJose Abreu 	} else if (priv->plat->has_xgmac) {
8345b0d7d7dSJose Abreu 		phylink_set(mac_supported, 2500baseT_Full);
8355b0d7d7dSJose Abreu 		phylink_set(mac_supported, 5000baseT_Full);
8365b0d7d7dSJose Abreu 		phylink_set(mac_supported, 10000baseSR_Full);
8375b0d7d7dSJose Abreu 		phylink_set(mac_supported, 10000baseLR_Full);
8385b0d7d7dSJose Abreu 		phylink_set(mac_supported, 10000baseER_Full);
8395b0d7d7dSJose Abreu 		phylink_set(mac_supported, 10000baseLRM_Full);
8405b0d7d7dSJose Abreu 		phylink_set(mac_supported, 10000baseT_Full);
8415b0d7d7dSJose Abreu 		phylink_set(mac_supported, 10000baseKX4_Full);
8425b0d7d7dSJose Abreu 		phylink_set(mac_supported, 10000baseKR_Full);
843eeef2f6bSJose Abreu 	}
844eeef2f6bSJose Abreu 
845eeef2f6bSJose Abreu 	/* Half-Duplex can only work with single queue */
846eeef2f6bSJose Abreu 	if (tx_cnt > 1) {
847eeef2f6bSJose Abreu 		phylink_set(mask, 10baseT_Half);
848eeef2f6bSJose Abreu 		phylink_set(mask, 100baseT_Half);
849eeef2f6bSJose Abreu 		phylink_set(mask, 1000baseT_Half);
850eeef2f6bSJose Abreu 	}
851eeef2f6bSJose Abreu 
8525b0d7d7dSJose Abreu 	bitmap_and(supported, supported, mac_supported,
8535b0d7d7dSJose Abreu 		   __ETHTOOL_LINK_MODE_MASK_NBITS);
8545b0d7d7dSJose Abreu 	bitmap_andnot(supported, supported, mask,
8555b0d7d7dSJose Abreu 		      __ETHTOOL_LINK_MODE_MASK_NBITS);
8565b0d7d7dSJose Abreu 	bitmap_and(state->advertising, state->advertising, mac_supported,
8575b0d7d7dSJose Abreu 		   __ETHTOOL_LINK_MODE_MASK_NBITS);
858eeef2f6bSJose Abreu 	bitmap_andnot(state->advertising, state->advertising, mask,
859eeef2f6bSJose Abreu 		      __ETHTOOL_LINK_MODE_MASK_NBITS);
860eeef2f6bSJose Abreu }
861eeef2f6bSJose Abreu 
862eeef2f6bSJose Abreu static int stmmac_mac_link_state(struct phylink_config *config,
863eeef2f6bSJose Abreu 				 struct phylink_link_state *state)
864eeef2f6bSJose Abreu {
865eeef2f6bSJose Abreu 	return -EOPNOTSUPP;
866eeef2f6bSJose Abreu }
867eeef2f6bSJose Abreu 
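/* phylink mac_config callback: program the speed and duplex bits of
 * MAC_CTRL_REG according to the resolved link state, invoke the platform
 * fix_mac_speed() hook when provided, and configure flow control.
 */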
86874371272SJose Abreu static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
86974371272SJose Abreu 			      const struct phylink_link_state *state)
8709ad372fcSJose Abreu {
87174371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
8729ad372fcSJose Abreu 	u32 ctrl;
8739ad372fcSJose Abreu 
8749ad372fcSJose Abreu 	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
8759ad372fcSJose Abreu 	ctrl &= ~priv->hw->link.speed_mask;
8769ad372fcSJose Abreu 
8775b0d7d7dSJose Abreu 	if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
87874371272SJose Abreu 		switch (state->speed) {
8795b0d7d7dSJose Abreu 		case SPEED_10000:
8805b0d7d7dSJose Abreu 			ctrl |= priv->hw->link.xgmii.speed10000;
8815b0d7d7dSJose Abreu 			break;
8825b0d7d7dSJose Abreu 		case SPEED_5000:
8835b0d7d7dSJose Abreu 			ctrl |= priv->hw->link.xgmii.speed5000;
8845b0d7d7dSJose Abreu 			break;
8855b0d7d7dSJose Abreu 		case SPEED_2500:
8865b0d7d7dSJose Abreu 			ctrl |= priv->hw->link.xgmii.speed2500;
8875b0d7d7dSJose Abreu 			break;
8885b0d7d7dSJose Abreu 		default:
8895b0d7d7dSJose Abreu 			return;
8905b0d7d7dSJose Abreu 		}
8915b0d7d7dSJose Abreu 	} else {
8925b0d7d7dSJose Abreu 		switch (state->speed) {
8935b0d7d7dSJose Abreu 		case SPEED_2500:
8945b0d7d7dSJose Abreu 			ctrl |= priv->hw->link.speed2500;
8955b0d7d7dSJose Abreu 			break;
8969ad372fcSJose Abreu 		case SPEED_1000:
8979ad372fcSJose Abreu 			ctrl |= priv->hw->link.speed1000;
8989ad372fcSJose Abreu 			break;
8999ad372fcSJose Abreu 		case SPEED_100:
9009ad372fcSJose Abreu 			ctrl |= priv->hw->link.speed100;
9019ad372fcSJose Abreu 			break;
9029ad372fcSJose Abreu 		case SPEED_10:
9039ad372fcSJose Abreu 			ctrl |= priv->hw->link.speed10;
9049ad372fcSJose Abreu 			break;
9059ad372fcSJose Abreu 		default:
90674371272SJose Abreu 			return;
9079ad372fcSJose Abreu 		}
9085b0d7d7dSJose Abreu 	}
9099ad372fcSJose Abreu 
91074371272SJose Abreu 	priv->speed = state->speed;
9119ad372fcSJose Abreu 
91274371272SJose Abreu 	if (priv->plat->fix_mac_speed)
91374371272SJose Abreu 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, state->speed);
9149ad372fcSJose Abreu 
91574371272SJose Abreu 	if (!state->duplex)
9169ad372fcSJose Abreu 		ctrl &= ~priv->hw->link.duplex;
9179ad372fcSJose Abreu 	else
9189ad372fcSJose Abreu 		ctrl |= priv->hw->link.duplex;
9199ad372fcSJose Abreu 
9209ad372fcSJose Abreu 	/* Flow Control operation */
92174371272SJose Abreu 	if (state->pause)
92274371272SJose Abreu 		stmmac_mac_flow_ctrl(priv, state->duplex);
9239ad372fcSJose Abreu 
9249ad372fcSJose Abreu 	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
9259ad372fcSJose Abreu }
9269ad372fcSJose Abreu 
927eeef2f6bSJose Abreu static void stmmac_mac_an_restart(struct phylink_config *config)
928eeef2f6bSJose Abreu {
929eeef2f6bSJose Abreu 	/* Not Supported */
930eeef2f6bSJose Abreu }
931eeef2f6bSJose Abreu 
93274371272SJose Abreu static void stmmac_mac_link_down(struct phylink_config *config,
93374371272SJose Abreu 				 unsigned int mode, phy_interface_t interface)
9349ad372fcSJose Abreu {
93574371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
9369ad372fcSJose Abreu 
9379ad372fcSJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
93874371272SJose Abreu 	priv->eee_active = false;
93974371272SJose Abreu 	stmmac_eee_init(priv);
94074371272SJose Abreu 	stmmac_set_eee_pls(priv, priv->hw, false);
9419ad372fcSJose Abreu }
9429ad372fcSJose Abreu 
94374371272SJose Abreu static void stmmac_mac_link_up(struct phylink_config *config,
94474371272SJose Abreu 			       unsigned int mode, phy_interface_t interface,
94574371272SJose Abreu 			       struct phy_device *phy)
9469ad372fcSJose Abreu {
94774371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
9489ad372fcSJose Abreu 
9499ad372fcSJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
9505b111770SJose Abreu 	if (phy && priv->dma_cap.eee) {
95174371272SJose Abreu 		priv->eee_active = phy_init_eee(phy, 1) >= 0;
95274371272SJose Abreu 		priv->eee_enabled = stmmac_eee_init(priv);
95374371272SJose Abreu 		stmmac_set_eee_pls(priv, priv->hw, true);
95474371272SJose Abreu 	}
9559ad372fcSJose Abreu }
9569ad372fcSJose Abreu 
95774371272SJose Abreu static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
958eeef2f6bSJose Abreu 	.validate = stmmac_validate,
959eeef2f6bSJose Abreu 	.mac_link_state = stmmac_mac_link_state,
96074371272SJose Abreu 	.mac_config = stmmac_mac_config,
961eeef2f6bSJose Abreu 	.mac_an_restart = stmmac_mac_an_restart,
96274371272SJose Abreu 	.mac_link_down = stmmac_mac_link_down,
96374371272SJose Abreu 	.mac_link_up = stmmac_mac_link_up,
964eeef2f6bSJose Abreu };
965eeef2f6bSJose Abreu 
96629feff39SJoao Pinto /**
967732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
96832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
96932ceabcaSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the Physical Coding
97032ceabcaSGiuseppe CAVALLARO  * Sublayer (PCS) interface, which can be used when the MAC is
97132ceabcaSGiuseppe CAVALLARO  * configured for the TBI, RTBI, or SGMII PHY interface.
97232ceabcaSGiuseppe CAVALLARO  */
973e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
974e58bb43fSGiuseppe CAVALLARO {
975e58bb43fSGiuseppe CAVALLARO 	int interface = priv->plat->interface;
976e58bb43fSGiuseppe CAVALLARO 
977e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
9780d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
9790d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
9800d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
9810d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
98238ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
9833fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
9840d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
98538ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
9863fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
987e58bb43fSGiuseppe CAVALLARO 		}
988e58bb43fSGiuseppe CAVALLARO 	}
989e58bb43fSGiuseppe CAVALLARO }
990e58bb43fSGiuseppe CAVALLARO 
9917ac6653aSJeff Kirsher /**
9927ac6653aSJeff Kirsher  * stmmac_init_phy - PHY initialization
9937ac6653aSJeff Kirsher  * @dev: net device structure
9947ac6653aSJeff Kirsher  * Description: it initializes the driver's PHY state, and attaches the PHY
9957ac6653aSJeff Kirsher  * to the mac driver.
9967ac6653aSJeff Kirsher  *  Return value:
9977ac6653aSJeff Kirsher  *  0 on success
9987ac6653aSJeff Kirsher  */
9997ac6653aSJeff Kirsher static int stmmac_init_phy(struct net_device *dev)
10007ac6653aSJeff Kirsher {
10017ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
100274371272SJose Abreu 	struct device_node *node;
100374371272SJose Abreu 	int ret;
10047ac6653aSJeff Kirsher 
10054838a540SJose Abreu 	node = priv->plat->phylink_node;
100674371272SJose Abreu 
100742e87024SJose Abreu 	if (node)
100874371272SJose Abreu 		ret = phylink_of_phy_connect(priv->phylink, node, 0);
100942e87024SJose Abreu 
101042e87024SJose Abreu 	/* Some DT bindings do not set up the PHY handle. Let's try to
101142e87024SJose Abreu 	 * manually parse it
101242e87024SJose Abreu 	 */
101342e87024SJose Abreu 	if (!node || ret) {
101474371272SJose Abreu 		int addr = priv->plat->phy_addr;
101574371272SJose Abreu 		struct phy_device *phydev;
1016f142af2eSSrinivas Kandagatla 
101774371272SJose Abreu 		phydev = mdiobus_get_phy(priv->mii, addr);
101874371272SJose Abreu 		if (!phydev) {
101974371272SJose Abreu 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
10207ac6653aSJeff Kirsher 			return -ENODEV;
10217ac6653aSJeff Kirsher 		}
10228e99fc5fSGiuseppe Cavallaro 
102374371272SJose Abreu 		ret = phylink_connect_phy(priv->phylink, phydev);
102474371272SJose Abreu 	}
1025c51e424dSFlorian Fainelli 
102674371272SJose Abreu 	return ret;
102774371272SJose Abreu }
102874371272SJose Abreu 
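/**
 * stmmac_phy_setup - set up phylink for this interface
 * @priv: driver private structure
 * Description: fills the phylink_config structure and creates the phylink
 * instance from the firmware node and the interface mode provided by the
 * platform data. The instance is later used by stmmac_init_phy() to
 * actually connect the PHY.
 */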
102974371272SJose Abreu static int stmmac_phy_setup(struct stmmac_priv *priv)
103074371272SJose Abreu {
1031c63d1e5cSArnd Bergmann 	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
103274371272SJose Abreu 	int mode = priv->plat->interface;
103374371272SJose Abreu 	struct phylink *phylink;
103474371272SJose Abreu 
103574371272SJose Abreu 	priv->phylink_config.dev = &priv->dev->dev;
103674371272SJose Abreu 	priv->phylink_config.type = PHYLINK_NETDEV;
103774371272SJose Abreu 
1038c63d1e5cSArnd Bergmann 	phylink = phylink_create(&priv->phylink_config, fwnode,
103974371272SJose Abreu 				 mode, &stmmac_phylink_mac_ops);
104074371272SJose Abreu 	if (IS_ERR(phylink))
104174371272SJose Abreu 		return PTR_ERR(phylink);
104274371272SJose Abreu 
104374371272SJose Abreu 	priv->phylink = phylink;
10447ac6653aSJeff Kirsher 	return 0;
10457ac6653aSJeff Kirsher }
10467ac6653aSJeff Kirsher 
104771fedb01SJoao Pinto static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1048c24602efSGiuseppe CAVALLARO {
104954139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
105071fedb01SJoao Pinto 	void *head_rx;
105154139cf3SJoao Pinto 	u32 queue;
105254139cf3SJoao Pinto 
105354139cf3SJoao Pinto 	/* Display RX rings */
105454139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
105554139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
105654139cf3SJoao Pinto 
105754139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1058d0225e7dSAlexandre TORGUE 
105971fedb01SJoao Pinto 		if (priv->extend_desc)
106054139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
106171fedb01SJoao Pinto 		else
106254139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
106371fedb01SJoao Pinto 
106471fedb01SJoao Pinto 		/* Display RX ring */
106542de047dSJose Abreu 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
10665bacd778SLABBE Corentin 	}
106754139cf3SJoao Pinto }
1068d0225e7dSAlexandre TORGUE 
106971fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv)
107071fedb01SJoao Pinto {
1071ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
107271fedb01SJoao Pinto 	void *head_tx;
1073ce736788SJoao Pinto 	u32 queue;
1074ce736788SJoao Pinto 
1075ce736788SJoao Pinto 	/* Display TX rings */
1076ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1077ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1078ce736788SJoao Pinto 
1079ce736788SJoao Pinto 		pr_info("\tTX Queue %u rings\n", queue);
108071fedb01SJoao Pinto 
108171fedb01SJoao Pinto 		if (priv->extend_desc)
1082ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
108371fedb01SJoao Pinto 		else
1084ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
108571fedb01SJoao Pinto 
108642de047dSJose Abreu 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1087c24602efSGiuseppe CAVALLARO 	}
1088ce736788SJoao Pinto }
1089c24602efSGiuseppe CAVALLARO 
109071fedb01SJoao Pinto static void stmmac_display_rings(struct stmmac_priv *priv)
109171fedb01SJoao Pinto {
109271fedb01SJoao Pinto 	/* Display RX ring */
109371fedb01SJoao Pinto 	stmmac_display_rx_rings(priv);
109471fedb01SJoao Pinto 
109571fedb01SJoao Pinto 	/* Display TX ring */
109671fedb01SJoao Pinto 	stmmac_display_tx_rings(priv);
109771fedb01SJoao Pinto }
109871fedb01SJoao Pinto 
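/**
 * stmmac_set_bfsize - select the DMA buffer size for a given MTU
 * @mtu: MTU of the net device
 * @bufsize: current DMA buffer size
 * Description: maps the MTU to one of the supported DMA buffer sizes
 * (DEFAULT_BUFSIZE, 2 KiB, 4 KiB or 8 KiB): e.g. the default MTU keeps
 * DEFAULT_BUFSIZE while an MTU of 4 KiB or more selects BUF_SIZE_8KiB.
 * The 16 KiB case is handled separately by the caller.
 */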
1099286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1100286a8372SGiuseppe CAVALLARO {
1101286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1102286a8372SGiuseppe CAVALLARO 
1103286a8372SGiuseppe CAVALLARO 	if (mtu >= BUF_SIZE_4KiB)
1104286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1105286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1106286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1107d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1108286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1109286a8372SGiuseppe CAVALLARO 	else
1110d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1111286a8372SGiuseppe CAVALLARO 
1112286a8372SGiuseppe CAVALLARO 	return ret;
1113286a8372SGiuseppe CAVALLARO }
1114286a8372SGiuseppe CAVALLARO 
111532ceabcaSGiuseppe CAVALLARO /**
111671fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
111732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
111854139cf3SJoao Pinto  * @queue: RX queue index
111971fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors
112032ceabcaSGiuseppe CAVALLARO  * whether basic or extended descriptors are in use.
112132ceabcaSGiuseppe CAVALLARO  */
112254139cf3SJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1123c24602efSGiuseppe CAVALLARO {
112454139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
11255bacd778SLABBE Corentin 	int i;
1126c24602efSGiuseppe CAVALLARO 
112771fedb01SJoao Pinto 	/* Clear the RX descriptors */
11285bacd778SLABBE Corentin 	for (i = 0; i < DMA_RX_SIZE; i++)
11295bacd778SLABBE Corentin 		if (priv->extend_desc)
113042de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
11315bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1132583e6361SAaro Koskinen 					(i == DMA_RX_SIZE - 1),
1133583e6361SAaro Koskinen 					priv->dma_buf_sz);
11345bacd778SLABBE Corentin 		else
113542de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
11365bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1137583e6361SAaro Koskinen 					(i == DMA_RX_SIZE - 1),
1138583e6361SAaro Koskinen 					priv->dma_buf_sz);
113971fedb01SJoao Pinto }
114071fedb01SJoao Pinto 
114171fedb01SJoao Pinto /**
114271fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear TX descriptors
114371fedb01SJoao Pinto  * @priv: driver private structure
1144ce736788SJoao Pinto  * @queue: TX queue index.
114571fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors
114671fedb01SJoao Pinto  * whether basic or extended descriptors are in use.
114771fedb01SJoao Pinto  */
1148ce736788SJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
114971fedb01SJoao Pinto {
1150ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
115171fedb01SJoao Pinto 	int i;
115271fedb01SJoao Pinto 
115371fedb01SJoao Pinto 	/* Clear the TX descriptors */
11545bacd778SLABBE Corentin 	for (i = 0; i < DMA_TX_SIZE; i++)
11555bacd778SLABBE Corentin 		if (priv->extend_desc)
115642de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
115742de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
11585bacd778SLABBE Corentin 		else
115942de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
116042de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1161c24602efSGiuseppe CAVALLARO }
1162c24602efSGiuseppe CAVALLARO 
1163732fdf0eSGiuseppe CAVALLARO /**
116471fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
116571fedb01SJoao Pinto  * @priv: driver private structure
116671fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors
116771fedb01SJoao Pinto  * whether basic or extended descriptors are in use.
116871fedb01SJoao Pinto  */
116971fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv)
117071fedb01SJoao Pinto {
117154139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1172ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
117354139cf3SJoao Pinto 	u32 queue;
117454139cf3SJoao Pinto 
117571fedb01SJoao Pinto 	/* Clear the RX descriptors */
117654139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
117754139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
117871fedb01SJoao Pinto 
117971fedb01SJoao Pinto 	/* Clear the TX descriptors */
1180ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1181ce736788SJoao Pinto 		stmmac_clear_tx_descriptors(priv, queue);
118271fedb01SJoao Pinto }
118371fedb01SJoao Pinto 
118471fedb01SJoao Pinto /**
1185732fdf0eSGiuseppe CAVALLARO  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1186732fdf0eSGiuseppe CAVALLARO  * @priv: driver private structure
1187732fdf0eSGiuseppe CAVALLARO  * @p: descriptor pointer
1188732fdf0eSGiuseppe CAVALLARO  * @i: descriptor index
118954139cf3SJoao Pinto  * @flags: gfp flag
119054139cf3SJoao Pinto  * @queue: RX queue index
1191732fdf0eSGiuseppe CAVALLARO  * Description: this function is called to allocate a receive buffer, perform
1192732fdf0eSGiuseppe CAVALLARO  * the DMA mapping and init the descriptor.
1193732fdf0eSGiuseppe CAVALLARO  */
1194c24602efSGiuseppe CAVALLARO static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
119554139cf3SJoao Pinto 				  int i, gfp_t flags, u32 queue)
1196c24602efSGiuseppe CAVALLARO {
119754139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
11982af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1199c24602efSGiuseppe CAVALLARO 
12002af6106aSJose Abreu 	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
12012af6106aSJose Abreu 	if (!buf->page)
120256329137SBartlomiej Zolnierkiewicz 		return -ENOMEM;
1203c24602efSGiuseppe CAVALLARO 
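	/* When Split Header (SPH) is enabled, a second page is taken from
	 * the pool and programmed as the secondary buffer address of the
	 * descriptor.
	 */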
120467afd6d1SJose Abreu 	if (priv->sph) {
120567afd6d1SJose Abreu 		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
120667afd6d1SJose Abreu 		if (!buf->sec_page)
120767afd6d1SJose Abreu 			return -ENOMEM;
120867afd6d1SJose Abreu 
120967afd6d1SJose Abreu 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
121067afd6d1SJose Abreu 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
121167afd6d1SJose Abreu 	} else {
121267afd6d1SJose Abreu 		buf->sec_page = NULL;
121367afd6d1SJose Abreu 	}
121467afd6d1SJose Abreu 
12152af6106aSJose Abreu 	buf->addr = page_pool_get_dma_addr(buf->page);
12162af6106aSJose Abreu 	stmmac_set_desc_addr(priv, p, buf->addr);
12172c520b1cSJose Abreu 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
12182c520b1cSJose Abreu 		stmmac_init_desc3(priv, p);
1219c24602efSGiuseppe CAVALLARO 
1220c24602efSGiuseppe CAVALLARO 	return 0;
1221c24602efSGiuseppe CAVALLARO }
1222c24602efSGiuseppe CAVALLARO 
122371fedb01SJoao Pinto /**
122471fedb01SJoao Pinto  * stmmac_free_rx_buffer - free RX dma buffers
122571fedb01SJoao Pinto  * @priv: private structure
122654139cf3SJoao Pinto  * @queue: RX queue index
122771fedb01SJoao Pinto  * @i: buffer index.
122871fedb01SJoao Pinto  */
122954139cf3SJoao Pinto static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
123056329137SBartlomiej Zolnierkiewicz {
123154139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
12322af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
123354139cf3SJoao Pinto 
12342af6106aSJose Abreu 	if (buf->page)
12352af6106aSJose Abreu 		page_pool_put_page(rx_q->page_pool, buf->page, false);
12362af6106aSJose Abreu 	buf->page = NULL;
123767afd6d1SJose Abreu 
123867afd6d1SJose Abreu 	if (buf->sec_page)
123967afd6d1SJose Abreu 		page_pool_put_page(rx_q->page_pool, buf->sec_page, false);
124067afd6d1SJose Abreu 	buf->sec_page = NULL;
124156329137SBartlomiej Zolnierkiewicz }
124256329137SBartlomiej Zolnierkiewicz 
12437ac6653aSJeff Kirsher /**
124471fedb01SJoao Pinto  * stmmac_free_tx_buffer - free TX dma buffers
124571fedb01SJoao Pinto  * @priv: private structure
1246ce736788SJoao Pinto  * @queue: TX queue index
124771fedb01SJoao Pinto  * @i: buffer index.
124871fedb01SJoao Pinto  */
1249ce736788SJoao Pinto static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
125071fedb01SJoao Pinto {
1251ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1252ce736788SJoao Pinto 
1253ce736788SJoao Pinto 	if (tx_q->tx_skbuff_dma[i].buf) {
1254ce736788SJoao Pinto 		if (tx_q->tx_skbuff_dma[i].map_as_page)
125571fedb01SJoao Pinto 			dma_unmap_page(priv->device,
1256ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].buf,
1257ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].len,
125871fedb01SJoao Pinto 				       DMA_TO_DEVICE);
125971fedb01SJoao Pinto 		else
126071fedb01SJoao Pinto 			dma_unmap_single(priv->device,
1261ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].buf,
1262ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].len,
126371fedb01SJoao Pinto 					 DMA_TO_DEVICE);
126471fedb01SJoao Pinto 	}
126571fedb01SJoao Pinto 
1266ce736788SJoao Pinto 	if (tx_q->tx_skbuff[i]) {
1267ce736788SJoao Pinto 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1268ce736788SJoao Pinto 		tx_q->tx_skbuff[i] = NULL;
1269ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].buf = 0;
1270ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].map_as_page = false;
127171fedb01SJoao Pinto 	}
127271fedb01SJoao Pinto }
127371fedb01SJoao Pinto 
127471fedb01SJoao Pinto /**
127571fedb01SJoao Pinto  * init_dma_rx_desc_rings - init the RX descriptor rings
12767ac6653aSJeff Kirsher  * @dev: net device structure
12775bacd778SLABBE Corentin  * @flags: gfp flag.
127871fedb01SJoao Pinto  * Description: this function initializes the DMA RX descriptors
12795bacd778SLABBE Corentin  * and allocates the socket buffers. It supports the chained and ring
1280286a8372SGiuseppe CAVALLARO  * modes.
12817ac6653aSJeff Kirsher  */
128271fedb01SJoao Pinto static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
12837ac6653aSJeff Kirsher {
12847ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
128554139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
12865bacd778SLABBE Corentin 	int ret = -ENOMEM;
12872c520b1cSJose Abreu 	int bfsize = 0;
12881d3028f4SColin Ian King 	int queue;
128954139cf3SJoao Pinto 	int i;
12907ac6653aSJeff Kirsher 
12912c520b1cSJose Abreu 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
12922c520b1cSJose Abreu 	if (bfsize < 0)
12932c520b1cSJose Abreu 		bfsize = 0;
12945bacd778SLABBE Corentin 
12955bacd778SLABBE Corentin 	if (bfsize < BUF_SIZE_16KiB)
12965bacd778SLABBE Corentin 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
12975bacd778SLABBE Corentin 
12985bacd778SLABBE Corentin 	priv->dma_buf_sz = bfsize;
12992618abb7SVince Bridgers 
130054139cf3SJoao Pinto 	/* RX INITIALIZATION */
13015bacd778SLABBE Corentin 	netif_dbg(priv, probe, priv->dev,
13025bacd778SLABBE Corentin 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
13035bacd778SLABBE Corentin 
130454139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
130554139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
130654139cf3SJoao Pinto 
130754139cf3SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
130854139cf3SJoao Pinto 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
130954139cf3SJoao Pinto 			  (u32)rx_q->dma_rx_phy);
131054139cf3SJoao Pinto 
1311cbcf0999SJose Abreu 		stmmac_clear_rx_descriptors(priv, queue);
1312cbcf0999SJose Abreu 
13135bacd778SLABBE Corentin 		for (i = 0; i < DMA_RX_SIZE; i++) {
13145bacd778SLABBE Corentin 			struct dma_desc *p;
13155bacd778SLABBE Corentin 
131654139cf3SJoao Pinto 			if (priv->extend_desc)
131754139cf3SJoao Pinto 				p = &((rx_q->dma_erx + i)->basic);
131854139cf3SJoao Pinto 			else
131954139cf3SJoao Pinto 				p = rx_q->dma_rx + i;
132054139cf3SJoao Pinto 
132154139cf3SJoao Pinto 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
132254139cf3SJoao Pinto 						     queue);
13235bacd778SLABBE Corentin 			if (ret)
13245bacd778SLABBE Corentin 				goto err_init_rx_buffers;
13255bacd778SLABBE Corentin 		}
132654139cf3SJoao Pinto 
132754139cf3SJoao Pinto 		rx_q->cur_rx = 0;
132854139cf3SJoao Pinto 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
132954139cf3SJoao Pinto 
1330c24602efSGiuseppe CAVALLARO 		/* Setup the chained descriptor addresses */
1331c24602efSGiuseppe CAVALLARO 		if (priv->mode == STMMAC_CHAIN_MODE) {
133271fedb01SJoao Pinto 			if (priv->extend_desc)
13332c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_erx,
13342c520b1cSJose Abreu 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
133571fedb01SJoao Pinto 			else
13362c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_rx,
13372c520b1cSJose Abreu 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
133871fedb01SJoao Pinto 		}
133954139cf3SJoao Pinto 	}
134054139cf3SJoao Pinto 
134154139cf3SJoao Pinto 	buf_sz = bfsize;
134271fedb01SJoao Pinto 
134371fedb01SJoao Pinto 	return 0;
134454139cf3SJoao Pinto 
134571fedb01SJoao Pinto err_init_rx_buffers:
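	/* Roll back: free the buffers [0, i) of the queue that failed, then
	 * all the buffers of the queues that were already fully initialized.
	 */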
134654139cf3SJoao Pinto 	while (queue >= 0) {
134771fedb01SJoao Pinto 		while (--i >= 0)
134854139cf3SJoao Pinto 			stmmac_free_rx_buffer(priv, queue, i);
134954139cf3SJoao Pinto 
135054139cf3SJoao Pinto 		if (queue == 0)
135154139cf3SJoao Pinto 			break;
135254139cf3SJoao Pinto 
135354139cf3SJoao Pinto 		i = DMA_RX_SIZE;
135454139cf3SJoao Pinto 		queue--;
135554139cf3SJoao Pinto 	}
135654139cf3SJoao Pinto 
135771fedb01SJoao Pinto 	return ret;
135871fedb01SJoao Pinto }
135971fedb01SJoao Pinto 
136071fedb01SJoao Pinto /**
136171fedb01SJoao Pinto  * init_dma_tx_desc_rings - init the TX descriptor rings
136271fedb01SJoao Pinto  * @dev: net device structure.
136371fedb01SJoao Pinto  * Description: this function initializes the DMA TX descriptors
136471fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
136571fedb01SJoao Pinto  * modes.
136671fedb01SJoao Pinto  */
136771fedb01SJoao Pinto static int init_dma_tx_desc_rings(struct net_device *dev)
136871fedb01SJoao Pinto {
136971fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
1370ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1371ce736788SJoao Pinto 	u32 queue;
137271fedb01SJoao Pinto 	int i;
137371fedb01SJoao Pinto 
1374ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1375ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1376ce736788SJoao Pinto 
137771fedb01SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
1378ce736788SJoao Pinto 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1379ce736788SJoao Pinto 			 (u32)tx_q->dma_tx_phy);
138071fedb01SJoao Pinto 
138171fedb01SJoao Pinto 		/* Setup the chained descriptor addresses */
138271fedb01SJoao Pinto 		if (priv->mode == STMMAC_CHAIN_MODE) {
138371fedb01SJoao Pinto 			if (priv->extend_desc)
13842c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_etx,
13852c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
138671fedb01SJoao Pinto 			else
13872c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_tx,
13882c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1389c24602efSGiuseppe CAVALLARO 		}
1390286a8372SGiuseppe CAVALLARO 
1391e3ad57c9SGiuseppe Cavallaro 		for (i = 0; i < DMA_TX_SIZE; i++) {
1392c24602efSGiuseppe CAVALLARO 			struct dma_desc *p;
1393c24602efSGiuseppe CAVALLARO 			if (priv->extend_desc)
1394ce736788SJoao Pinto 				p = &((tx_q->dma_etx + i)->basic);
1395c24602efSGiuseppe CAVALLARO 			else
1396ce736788SJoao Pinto 				p = tx_q->dma_tx + i;
1397f748be53SAlexandre TORGUE 
139844c67f85SJose Abreu 			stmmac_clear_desc(priv, p);
1399f748be53SAlexandre TORGUE 
1400ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].buf = 0;
1401ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1402ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].len = 0;
1403ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].last_segment = false;
1404ce736788SJoao Pinto 			tx_q->tx_skbuff[i] = NULL;
14054a7d666aSGiuseppe CAVALLARO 		}
1406c24602efSGiuseppe CAVALLARO 
1407ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
1408ce736788SJoao Pinto 		tx_q->cur_tx = 0;
14098d212a9eSNiklas Cassel 		tx_q->mss = 0;
1410ce736788SJoao Pinto 
1411c22a3f48SJoao Pinto 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1412c22a3f48SJoao Pinto 	}
14137ac6653aSJeff Kirsher 
141471fedb01SJoao Pinto 	return 0;
141571fedb01SJoao Pinto }
141671fedb01SJoao Pinto 
141771fedb01SJoao Pinto /**
141871fedb01SJoao Pinto  * init_dma_desc_rings - init the RX/TX descriptor rings
141971fedb01SJoao Pinto  * @dev: net device structure
142071fedb01SJoao Pinto  * @flags: gfp flag.
142171fedb01SJoao Pinto  * Description: this function initializes the DMA RX/TX descriptors
142271fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
142371fedb01SJoao Pinto  * modes.
142471fedb01SJoao Pinto  */
142571fedb01SJoao Pinto static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
142671fedb01SJoao Pinto {
142771fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
142871fedb01SJoao Pinto 	int ret;
142971fedb01SJoao Pinto 
143071fedb01SJoao Pinto 	ret = init_dma_rx_desc_rings(dev, flags);
143171fedb01SJoao Pinto 	if (ret)
143271fedb01SJoao Pinto 		return ret;
143371fedb01SJoao Pinto 
143471fedb01SJoao Pinto 	ret = init_dma_tx_desc_rings(dev);
143571fedb01SJoao Pinto 
14365bacd778SLABBE Corentin 	stmmac_clear_descriptors(priv);
14377ac6653aSJeff Kirsher 
1438c24602efSGiuseppe CAVALLARO 	if (netif_msg_hw(priv))
1439c24602efSGiuseppe CAVALLARO 		stmmac_display_rings(priv);
144056329137SBartlomiej Zolnierkiewicz 
144156329137SBartlomiej Zolnierkiewicz 	return ret;
14427ac6653aSJeff Kirsher }
14437ac6653aSJeff Kirsher 
144471fedb01SJoao Pinto /**
144571fedb01SJoao Pinto  * dma_free_rx_skbufs - free RX dma buffers
144671fedb01SJoao Pinto  * @priv: private structure
144754139cf3SJoao Pinto  * @queue: RX queue index
144871fedb01SJoao Pinto  */
144954139cf3SJoao Pinto static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
14507ac6653aSJeff Kirsher {
14517ac6653aSJeff Kirsher 	int i;
14527ac6653aSJeff Kirsher 
1453e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_RX_SIZE; i++)
145454139cf3SJoao Pinto 		stmmac_free_rx_buffer(priv, queue, i);
14557ac6653aSJeff Kirsher }
14567ac6653aSJeff Kirsher 
145771fedb01SJoao Pinto /**
145871fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
145971fedb01SJoao Pinto  * @priv: private structure
1460ce736788SJoao Pinto  * @queue: TX queue index
146171fedb01SJoao Pinto  */
1462ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
14637ac6653aSJeff Kirsher {
14647ac6653aSJeff Kirsher 	int i;
14657ac6653aSJeff Kirsher 
146671fedb01SJoao Pinto 	for (i = 0; i < DMA_TX_SIZE; i++)
1467ce736788SJoao Pinto 		stmmac_free_tx_buffer(priv, queue, i);
14687ac6653aSJeff Kirsher }
14697ac6653aSJeff Kirsher 
1470732fdf0eSGiuseppe CAVALLARO /**
147154139cf3SJoao Pinto  * free_dma_rx_desc_resources - free RX dma desc resources
147254139cf3SJoao Pinto  * @priv: private structure
147354139cf3SJoao Pinto  */
147454139cf3SJoao Pinto static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
147554139cf3SJoao Pinto {
147654139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
147754139cf3SJoao Pinto 	u32 queue;
147854139cf3SJoao Pinto 
147954139cf3SJoao Pinto 	/* Free RX queue resources */
148054139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
148154139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
148254139cf3SJoao Pinto 
148354139cf3SJoao Pinto 		/* Release the DMA RX socket buffers */
148454139cf3SJoao Pinto 		dma_free_rx_skbufs(priv, queue);
148554139cf3SJoao Pinto 
148654139cf3SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
148754139cf3SJoao Pinto 		if (!priv->extend_desc)
148854139cf3SJoao Pinto 			dma_free_coherent(priv->device,
148954139cf3SJoao Pinto 					  DMA_RX_SIZE * sizeof(struct dma_desc),
149054139cf3SJoao Pinto 					  rx_q->dma_rx, rx_q->dma_rx_phy);
149154139cf3SJoao Pinto 		else
149254139cf3SJoao Pinto 			dma_free_coherent(priv->device, DMA_RX_SIZE *
149354139cf3SJoao Pinto 					  sizeof(struct dma_extended_desc),
149454139cf3SJoao Pinto 					  rx_q->dma_erx, rx_q->dma_rx_phy);
149554139cf3SJoao Pinto 
14962af6106aSJose Abreu 		kfree(rx_q->buf_pool);
14972af6106aSJose Abreu 		if (rx_q->page_pool) {
14982af6106aSJose Abreu 			page_pool_request_shutdown(rx_q->page_pool);
14992af6106aSJose Abreu 			page_pool_destroy(rx_q->page_pool);
15002af6106aSJose Abreu 		}
150154139cf3SJoao Pinto 	}
150254139cf3SJoao Pinto }
150354139cf3SJoao Pinto 
150454139cf3SJoao Pinto /**
1505ce736788SJoao Pinto  * free_dma_tx_desc_resources - free TX dma desc resources
1506ce736788SJoao Pinto  * @priv: private structure
1507ce736788SJoao Pinto  */
1508ce736788SJoao Pinto static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1509ce736788SJoao Pinto {
1510ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
151162242260SChristophe Jaillet 	u32 queue;
1512ce736788SJoao Pinto 
1513ce736788SJoao Pinto 	/* Free TX queue resources */
1514ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1515ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1516ce736788SJoao Pinto 
1517ce736788SJoao Pinto 		/* Release the DMA TX socket buffers */
1518ce736788SJoao Pinto 		dma_free_tx_skbufs(priv, queue);
1519ce736788SJoao Pinto 
1520ce736788SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
1521ce736788SJoao Pinto 		if (!priv->extend_desc)
1522ce736788SJoao Pinto 			dma_free_coherent(priv->device,
1523ce736788SJoao Pinto 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1524ce736788SJoao Pinto 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1525ce736788SJoao Pinto 		else
1526ce736788SJoao Pinto 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1527ce736788SJoao Pinto 					  sizeof(struct dma_extended_desc),
1528ce736788SJoao Pinto 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1529ce736788SJoao Pinto 
1530ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff_dma);
1531ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff);
1532ce736788SJoao Pinto 	}
1533ce736788SJoao Pinto }
1534ce736788SJoao Pinto 
1535ce736788SJoao Pinto /**
153671fedb01SJoao Pinto  * alloc_dma_rx_desc_resources - alloc RX resources.
1537732fdf0eSGiuseppe CAVALLARO  * @priv: private structure
1538732fdf0eSGiuseppe CAVALLARO  * Description: according to which descriptor can be used (extend or basic)
1539732fdf0eSGiuseppe CAVALLARO  * this function allocates the resources for the RX path. The RX buffers
1540732fdf0eSGiuseppe CAVALLARO  * are pre-allocated from a per-queue page pool in order to allow a
1541732fdf0eSGiuseppe CAVALLARO  * zero-copy mechanism.
1542732fdf0eSGiuseppe CAVALLARO  */
154371fedb01SJoao Pinto static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
154409f8d696SSrinivas Kandagatla {
154554139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
15465bacd778SLABBE Corentin 	int ret = -ENOMEM;
154754139cf3SJoao Pinto 	u32 queue;
154809f8d696SSrinivas Kandagatla 
154954139cf3SJoao Pinto 	/* RX queues buffers and DMA */
155054139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
155154139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
15522af6106aSJose Abreu 		struct page_pool_params pp_params = { 0 };
155354139cf3SJoao Pinto 
155454139cf3SJoao Pinto 		rx_q->queue_index = queue;
155554139cf3SJoao Pinto 		rx_q->priv_data = priv;
155654139cf3SJoao Pinto 
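		/* One page_pool per RX queue: pages come out of the pool
		 * already DMA mapped (PP_FLAG_DMA_MAP) and are recycled on
		 * free instead of going back to the page allocator.
		 */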
15572af6106aSJose Abreu 		pp_params.flags = PP_FLAG_DMA_MAP;
15582af6106aSJose Abreu 		pp_params.pool_size = DMA_RX_SIZE;
15592af6106aSJose Abreu 		pp_params.order = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
15602af6106aSJose Abreu 		pp_params.nid = dev_to_node(priv->device);
15612af6106aSJose Abreu 		pp_params.dev = priv->device;
15622af6106aSJose Abreu 		pp_params.dma_dir = DMA_FROM_DEVICE;
15635bacd778SLABBE Corentin 
15642af6106aSJose Abreu 		rx_q->page_pool = page_pool_create(&pp_params);
15652af6106aSJose Abreu 		if (IS_ERR(rx_q->page_pool)) {
15662af6106aSJose Abreu 			ret = PTR_ERR(rx_q->page_pool);
15672af6106aSJose Abreu 			rx_q->page_pool = NULL;
15682af6106aSJose Abreu 			goto err_dma;
15692af6106aSJose Abreu 		}
15702af6106aSJose Abreu 
1571ec5e5ce1SJose Abreu 		rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
15725bacd778SLABBE Corentin 					 GFP_KERNEL);
15732af6106aSJose Abreu 		if (!rx_q->buf_pool)
157454139cf3SJoao Pinto 			goto err_dma;
15755bacd778SLABBE Corentin 
15765bacd778SLABBE Corentin 		if (priv->extend_desc) {
1577750afb08SLuis Chamberlain 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1578750afb08SLuis Chamberlain 							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
157954139cf3SJoao Pinto 							   &rx_q->dma_rx_phy,
15805bacd778SLABBE Corentin 							   GFP_KERNEL);
158154139cf3SJoao Pinto 			if (!rx_q->dma_erx)
15825bacd778SLABBE Corentin 				goto err_dma;
15835bacd778SLABBE Corentin 
158471fedb01SJoao Pinto 		} else {
1585750afb08SLuis Chamberlain 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1586750afb08SLuis Chamberlain 							  DMA_RX_SIZE * sizeof(struct dma_desc),
158754139cf3SJoao Pinto 							  &rx_q->dma_rx_phy,
158871fedb01SJoao Pinto 							  GFP_KERNEL);
158954139cf3SJoao Pinto 			if (!rx_q->dma_rx)
159071fedb01SJoao Pinto 				goto err_dma;
159171fedb01SJoao Pinto 		}
159254139cf3SJoao Pinto 	}
159371fedb01SJoao Pinto 
159471fedb01SJoao Pinto 	return 0;
159571fedb01SJoao Pinto 
159671fedb01SJoao Pinto err_dma:
159754139cf3SJoao Pinto 	free_dma_rx_desc_resources(priv);
159854139cf3SJoao Pinto 
159971fedb01SJoao Pinto 	return ret;
160071fedb01SJoao Pinto }
160171fedb01SJoao Pinto 
160271fedb01SJoao Pinto /**
160371fedb01SJoao Pinto  * alloc_dma_tx_desc_resources - alloc TX resources.
160471fedb01SJoao Pinto  * @priv: private structure
160571fedb01SJoao Pinto  * Description: according to which descriptor can be used (extend or basic)
160671fedb01SJoao Pinto  * this function allocates the resources for the TX path, i.e. the TX
160771fedb01SJoao Pinto  * descriptor rings and the per-descriptor skb and DMA bookkeeping
160871fedb01SJoao Pinto  * arrays.
160971fedb01SJoao Pinto  */
161071fedb01SJoao Pinto static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
161171fedb01SJoao Pinto {
1612ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
161371fedb01SJoao Pinto 	int ret = -ENOMEM;
1614ce736788SJoao Pinto 	u32 queue;
161571fedb01SJoao Pinto 
1616ce736788SJoao Pinto 	/* TX queues buffers and DMA */
1617ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1618ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1619ce736788SJoao Pinto 
1620ce736788SJoao Pinto 		tx_q->queue_index = queue;
1621ce736788SJoao Pinto 		tx_q->priv_data = priv;
1622ce736788SJoao Pinto 
1623ec5e5ce1SJose Abreu 		tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
1624ce736788SJoao Pinto 					      sizeof(*tx_q->tx_skbuff_dma),
162571fedb01SJoao Pinto 					      GFP_KERNEL);
1626ce736788SJoao Pinto 		if (!tx_q->tx_skbuff_dma)
162762242260SChristophe Jaillet 			goto err_dma;
162871fedb01SJoao Pinto 
1629ec5e5ce1SJose Abreu 		tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
1630ce736788SJoao Pinto 					  sizeof(struct sk_buff *),
163171fedb01SJoao Pinto 					  GFP_KERNEL);
1632ce736788SJoao Pinto 		if (!tx_q->tx_skbuff)
163362242260SChristophe Jaillet 			goto err_dma;
163471fedb01SJoao Pinto 
163571fedb01SJoao Pinto 		if (priv->extend_desc) {
1636750afb08SLuis Chamberlain 			tx_q->dma_etx = dma_alloc_coherent(priv->device,
1637750afb08SLuis Chamberlain 							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1638ce736788SJoao Pinto 							   &tx_q->dma_tx_phy,
16395bacd778SLABBE Corentin 							   GFP_KERNEL);
1640ce736788SJoao Pinto 			if (!tx_q->dma_etx)
164162242260SChristophe Jaillet 				goto err_dma;
16425bacd778SLABBE Corentin 		} else {
1643750afb08SLuis Chamberlain 			tx_q->dma_tx = dma_alloc_coherent(priv->device,
1644750afb08SLuis Chamberlain 							  DMA_TX_SIZE * sizeof(struct dma_desc),
1645ce736788SJoao Pinto 							  &tx_q->dma_tx_phy,
16465bacd778SLABBE Corentin 							  GFP_KERNEL);
1647ce736788SJoao Pinto 			if (!tx_q->dma_tx)
164862242260SChristophe Jaillet 				goto err_dma;
1649ce736788SJoao Pinto 		}
16505bacd778SLABBE Corentin 	}
16515bacd778SLABBE Corentin 
16525bacd778SLABBE Corentin 	return 0;
16535bacd778SLABBE Corentin 
165462242260SChristophe Jaillet err_dma:
1655ce736788SJoao Pinto 	free_dma_tx_desc_resources(priv);
1656ce736788SJoao Pinto 
165709f8d696SSrinivas Kandagatla 	return ret;
16585bacd778SLABBE Corentin }
165909f8d696SSrinivas Kandagatla 
166071fedb01SJoao Pinto /**
166171fedb01SJoao Pinto  * alloc_dma_desc_resources - alloc TX/RX resources.
166271fedb01SJoao Pinto  * @priv: private structure
166371fedb01SJoao Pinto  * Description: according to which descriptor can be used (extend or basic)
166471fedb01SJoao Pinto  * this function allocates the resources for TX and RX paths. In case of
166571fedb01SJoao Pinto  * reception, for example, it pre-allocates the RX buffers in order to
166671fedb01SJoao Pinto  * allow a zero-copy mechanism.
166771fedb01SJoao Pinto  */
166871fedb01SJoao Pinto static int alloc_dma_desc_resources(struct stmmac_priv *priv)
16695bacd778SLABBE Corentin {
167054139cf3SJoao Pinto 	/* RX Allocation */
167171fedb01SJoao Pinto 	int ret = alloc_dma_rx_desc_resources(priv);
167271fedb01SJoao Pinto 
167371fedb01SJoao Pinto 	if (ret)
167471fedb01SJoao Pinto 		return ret;
167571fedb01SJoao Pinto 
167671fedb01SJoao Pinto 	ret = alloc_dma_tx_desc_resources(priv);
167771fedb01SJoao Pinto 
167871fedb01SJoao Pinto 	return ret;
167971fedb01SJoao Pinto }
168071fedb01SJoao Pinto 
168171fedb01SJoao Pinto /**
168271fedb01SJoao Pinto  * free_dma_desc_resources - free dma desc resources
168371fedb01SJoao Pinto  * @priv: private structure
168471fedb01SJoao Pinto  */
168571fedb01SJoao Pinto static void free_dma_desc_resources(struct stmmac_priv *priv)
168671fedb01SJoao Pinto {
168771fedb01SJoao Pinto 	/* Release the DMA RX socket buffers */
168871fedb01SJoao Pinto 	free_dma_rx_desc_resources(priv);
168971fedb01SJoao Pinto 
169071fedb01SJoao Pinto 	/* Release the DMA TX socket buffers */
169171fedb01SJoao Pinto 	free_dma_tx_desc_resources(priv);
169271fedb01SJoao Pinto }
169371fedb01SJoao Pinto 
169471fedb01SJoao Pinto /**
16959eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
16969eb12474Sjpinto  *  @priv: driver private structure
16979eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
16989eb12474Sjpinto  */
16999eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
17009eb12474Sjpinto {
17014f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
17024f6046f5SJoao Pinto 	int queue;
17034f6046f5SJoao Pinto 	u8 mode;
17049eb12474Sjpinto 
17054f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
17064f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1707c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
17084f6046f5SJoao Pinto 	}
17099eb12474Sjpinto }
17109eb12474Sjpinto 
17119eb12474Sjpinto /**
1712ae4f0d46SJoao Pinto  * stmmac_start_rx_dma - start RX DMA channel
1713ae4f0d46SJoao Pinto  * @priv: driver private structure
1714ae4f0d46SJoao Pinto  * @chan: RX channel index
1715ae4f0d46SJoao Pinto  * Description:
1716ae4f0d46SJoao Pinto  * This starts a RX DMA channel
1717ae4f0d46SJoao Pinto  */
1718ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1719ae4f0d46SJoao Pinto {
1720ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1721a4e887faSJose Abreu 	stmmac_start_rx(priv, priv->ioaddr, chan);
1722ae4f0d46SJoao Pinto }
1723ae4f0d46SJoao Pinto 
1724ae4f0d46SJoao Pinto /**
1725ae4f0d46SJoao Pinto  * stmmac_start_tx_dma - start TX DMA channel
1726ae4f0d46SJoao Pinto  * @priv: driver private structure
1727ae4f0d46SJoao Pinto  * @chan: TX channel index
1728ae4f0d46SJoao Pinto  * Description:
1729ae4f0d46SJoao Pinto  * This starts a TX DMA channel
1730ae4f0d46SJoao Pinto  */
1731ae4f0d46SJoao Pinto static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1732ae4f0d46SJoao Pinto {
1733ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1734a4e887faSJose Abreu 	stmmac_start_tx(priv, priv->ioaddr, chan);
1735ae4f0d46SJoao Pinto }
1736ae4f0d46SJoao Pinto 
1737ae4f0d46SJoao Pinto /**
1738ae4f0d46SJoao Pinto  * stmmac_stop_rx_dma - stop RX DMA channel
1739ae4f0d46SJoao Pinto  * @priv: driver private structure
1740ae4f0d46SJoao Pinto  * @chan: RX channel index
1741ae4f0d46SJoao Pinto  * Description:
1742ae4f0d46SJoao Pinto  * This stops a RX DMA channel
1743ae4f0d46SJoao Pinto  */
1744ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1745ae4f0d46SJoao Pinto {
1746ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1747a4e887faSJose Abreu 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1748ae4f0d46SJoao Pinto }
1749ae4f0d46SJoao Pinto 
1750ae4f0d46SJoao Pinto /**
1751ae4f0d46SJoao Pinto  * stmmac_stop_tx_dma - stop TX DMA channel
1752ae4f0d46SJoao Pinto  * @priv: driver private structure
1753ae4f0d46SJoao Pinto  * @chan: TX channel index
1754ae4f0d46SJoao Pinto  * Description:
1755ae4f0d46SJoao Pinto  * This stops a TX DMA channel
1756ae4f0d46SJoao Pinto  */
1757ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1758ae4f0d46SJoao Pinto {
1759ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1760a4e887faSJose Abreu 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1761ae4f0d46SJoao Pinto }
1762ae4f0d46SJoao Pinto 
1763ae4f0d46SJoao Pinto /**
1764ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
1765ae4f0d46SJoao Pinto  * @priv: driver private structure
1766ae4f0d46SJoao Pinto  * Description:
1767ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
1768ae4f0d46SJoao Pinto  */
1769ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
1770ae4f0d46SJoao Pinto {
1771ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1772ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1773ae4f0d46SJoao Pinto 	u32 chan = 0;
1774ae4f0d46SJoao Pinto 
1775ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1776ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
1777ae4f0d46SJoao Pinto 
1778ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1779ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
1780ae4f0d46SJoao Pinto }
1781ae4f0d46SJoao Pinto 
1782ae4f0d46SJoao Pinto /**
1783ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1784ae4f0d46SJoao Pinto  * @priv: driver private structure
1785ae4f0d46SJoao Pinto  * Description:
1786ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
1787ae4f0d46SJoao Pinto  */
1788ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1789ae4f0d46SJoao Pinto {
1790ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1791ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1792ae4f0d46SJoao Pinto 	u32 chan = 0;
1793ae4f0d46SJoao Pinto 
1794ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1795ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
1796ae4f0d46SJoao Pinto 
1797ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1798ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
1799ae4f0d46SJoao Pinto }
1800ae4f0d46SJoao Pinto 
1801ae4f0d46SJoao Pinto /**
18027ac6653aSJeff Kirsher  *  stmmac_dma_operation_mode - HW DMA operation mode
180332ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
1804732fdf0eSGiuseppe CAVALLARO  *  Description: it is used for configuring the DMA operation mode register in
1805732fdf0eSGiuseppe CAVALLARO  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
18067ac6653aSJeff Kirsher  */
18077ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
18087ac6653aSJeff Kirsher {
18096deee222SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
18106deee222SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1811f88203a2SVince Bridgers 	int rxfifosz = priv->plat->rx_fifo_size;
181252a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
18136deee222SJoao Pinto 	u32 txmode = 0;
18146deee222SJoao Pinto 	u32 rxmode = 0;
18156deee222SJoao Pinto 	u32 chan = 0;
1816a0daae13SJose Abreu 	u8 qmode = 0;
1817f88203a2SVince Bridgers 
181811fbf811SThierry Reding 	if (rxfifosz == 0)
181911fbf811SThierry Reding 		rxfifosz = priv->dma_cap.rx_fifo_size;
182052a76235SJose Abreu 	if (txfifosz == 0)
182152a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
182252a76235SJose Abreu 
182352a76235SJose Abreu 	/* Adjust for real per queue fifo size */
182452a76235SJose Abreu 	rxfifosz /= rx_channels_count;
182552a76235SJose Abreu 	txfifosz /= tx_channels_count;
182611fbf811SThierry Reding 
18276deee222SJoao Pinto 	if (priv->plat->force_thresh_dma_mode) {
18286deee222SJoao Pinto 		txmode = tc;
18296deee222SJoao Pinto 		rxmode = tc;
18306deee222SJoao Pinto 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
18317ac6653aSJeff Kirsher 		/*
18327ac6653aSJeff Kirsher 		 * In case of GMAC, SF mode can be enabled
18337ac6653aSJeff Kirsher 		 * to perform the TX COE in HW. This depends on:
18347ac6653aSJeff Kirsher 		 * 1) TX COE if actually supported
18357ac6653aSJeff Kirsher 		 * 2) There is no bugged Jumbo frame support
18367ac6653aSJeff Kirsher 		 *    that needs to not insert csum in the TDES.
18377ac6653aSJeff Kirsher 		 */
18386deee222SJoao Pinto 		txmode = SF_DMA_MODE;
18396deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
1840b2dec116SSonic Zhang 		priv->xstats.threshold = SF_DMA_MODE;
18416deee222SJoao Pinto 	} else {
18426deee222SJoao Pinto 		txmode = tc;
18436deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
18446deee222SJoao Pinto 	}
18456deee222SJoao Pinto 
18466deee222SJoao Pinto 	/* configure all channels */
1847a0daae13SJose Abreu 	for (chan = 0; chan < rx_channels_count; chan++) {
1848a0daae13SJose Abreu 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
18496deee222SJoao Pinto 
1850a4e887faSJose Abreu 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1851a0daae13SJose Abreu 				rxfifosz, qmode);
18524205c88eSJose Abreu 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
18534205c88eSJose Abreu 				chan);
1854a0daae13SJose Abreu 	}
1855a0daae13SJose Abreu 
1856a0daae13SJose Abreu 	for (chan = 0; chan < tx_channels_count; chan++) {
1857a0daae13SJose Abreu 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1858a0daae13SJose Abreu 
1859a4e887faSJose Abreu 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1860a0daae13SJose Abreu 				txfifosz, qmode);
1861a0daae13SJose Abreu 	}
18627ac6653aSJeff Kirsher }
18637ac6653aSJeff Kirsher 
18647ac6653aSJeff Kirsher /**
1865732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_clean - to manage the transmission completion
186632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
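 * @budget: napi budget limiting the maximum number of descriptors to clean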
1867ce736788SJoao Pinto  * @queue: TX queue index
1868732fdf0eSGiuseppe CAVALLARO  * Description: it reclaims the transmit resources after transmission completes.
18697ac6653aSJeff Kirsher  */
18708fce3331SJose Abreu static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
18717ac6653aSJeff Kirsher {
1872ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
187338979574SBeniamino Galvani 	unsigned int bytes_compl = 0, pkts_compl = 0;
18748fce3331SJose Abreu 	unsigned int entry, count = 0;
18757ac6653aSJeff Kirsher 
18768fce3331SJose Abreu 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1877a9097a96SGiuseppe CAVALLARO 
18789125cdd1SGiuseppe CAVALLARO 	priv->xstats.tx_clean++;
18799125cdd1SGiuseppe CAVALLARO 
18808d5f4b07SBernd Edlinger 	entry = tx_q->dirty_tx;
18818fce3331SJose Abreu 	while ((entry != tx_q->cur_tx) && (count < budget)) {
1882ce736788SJoao Pinto 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1883c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
1884c363b658SFabrice Gasnier 		int status;
1885c24602efSGiuseppe CAVALLARO 
1886c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
1887ce736788SJoao Pinto 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1888c24602efSGiuseppe CAVALLARO 		else
1889ce736788SJoao Pinto 			p = tx_q->dma_tx + entry;
18907ac6653aSJeff Kirsher 
189142de047dSJose Abreu 		status = stmmac_tx_status(priv, &priv->dev->stats,
189242de047dSJose Abreu 				&priv->xstats, p, priv->ioaddr);
1893c363b658SFabrice Gasnier 		/* Check if the descriptor is owned by the DMA */
1894c363b658SFabrice Gasnier 		if (unlikely(status & tx_dma_own))
1895c363b658SFabrice Gasnier 			break;
1896c363b658SFabrice Gasnier 
18978fce3331SJose Abreu 		count++;
18988fce3331SJose Abreu 
1899a6b25da5SNiklas Cassel 		/* Make sure descriptor fields are read after reading
1900a6b25da5SNiklas Cassel 		 * the own bit.
1901a6b25da5SNiklas Cassel 		 */
1902a6b25da5SNiklas Cassel 		dma_rmb();
1903a6b25da5SNiklas Cassel 
1904c363b658SFabrice Gasnier 		/* Just consider the last segment and ...*/
1905c363b658SFabrice Gasnier 		if (likely(!(status & tx_not_ls))) {
1906c363b658SFabrice Gasnier 			/* ... verify the status error condition */
1907c363b658SFabrice Gasnier 			if (unlikely(status & tx_err)) {
1908c363b658SFabrice Gasnier 				priv->dev->stats.tx_errors++;
1909c363b658SFabrice Gasnier 			} else {
19107ac6653aSJeff Kirsher 				priv->dev->stats.tx_packets++;
19117ac6653aSJeff Kirsher 				priv->xstats.tx_pkt_n++;
1912c363b658SFabrice Gasnier 			}
1913ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_tx_hwtstamp(priv, p, skb);
19147ac6653aSJeff Kirsher 		}
19157ac6653aSJeff Kirsher 
1916ce736788SJoao Pinto 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1917ce736788SJoao Pinto 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1918362b37beSGiuseppe CAVALLARO 				dma_unmap_page(priv->device,
1919ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].buf,
1920ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].len,
19217ac6653aSJeff Kirsher 					       DMA_TO_DEVICE);
1922362b37beSGiuseppe CAVALLARO 			else
1923362b37beSGiuseppe CAVALLARO 				dma_unmap_single(priv->device,
1924ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].buf,
1925ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].len,
1926362b37beSGiuseppe CAVALLARO 						 DMA_TO_DEVICE);
1927ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].buf = 0;
1928ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].len = 0;
1929ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1930cf32deecSRayagond Kokatanur 		}
1931f748be53SAlexandre TORGUE 
19322c520b1cSJose Abreu 		stmmac_clean_desc3(priv, tx_q, p);
1933f748be53SAlexandre TORGUE 
1934ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1935ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
19367ac6653aSJeff Kirsher 
19377ac6653aSJeff Kirsher 		if (likely(skb != NULL)) {
193838979574SBeniamino Galvani 			pkts_compl++;
193938979574SBeniamino Galvani 			bytes_compl += skb->len;
19407c565c33SEric W. Biederman 			dev_consume_skb_any(skb);
1941ce736788SJoao Pinto 			tx_q->tx_skbuff[entry] = NULL;
19427ac6653aSJeff Kirsher 		}
19437ac6653aSJeff Kirsher 
194442de047dSJose Abreu 		stmmac_release_tx_desc(priv, p, priv->mode);
19457ac6653aSJeff Kirsher 
1946e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
19477ac6653aSJeff Kirsher 	}
1948ce736788SJoao Pinto 	tx_q->dirty_tx = entry;
194938979574SBeniamino Galvani 
1950c22a3f48SJoao Pinto 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1951c22a3f48SJoao Pinto 				  pkts_compl, bytes_compl);
195238979574SBeniamino Galvani 
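	/* Wake the TX queue if it was stopped and enough descriptors have
	 * been reclaimed (more than STMMAC_TX_THRESH are now free).
	 */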
1953c22a3f48SJoao Pinto 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1954c22a3f48SJoao Pinto 								queue))) &&
1955c22a3f48SJoao Pinto 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1956c22a3f48SJoao Pinto 
1957b3e51069SLABBE Corentin 		netif_dbg(priv, tx_done, priv->dev,
1958b3e51069SLABBE Corentin 			  "%s: restart transmit\n", __func__);
1959c22a3f48SJoao Pinto 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
19607ac6653aSJeff Kirsher 	}
1961d765955dSGiuseppe CAVALLARO 
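	/* Now that some descriptors have been reclaimed, try to enter the
	 * EEE low power state and re-arm the LPI timer.
	 */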
1962d765955dSGiuseppe CAVALLARO 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1963d765955dSGiuseppe CAVALLARO 		stmmac_enable_eee_mode(priv);
1964f5351ef7SGiuseppe CAVALLARO 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1965d765955dSGiuseppe CAVALLARO 	}
19668fce3331SJose Abreu 
19674ccb4585SJose Abreu 	/* We still have pending packets, let's call for a new scheduling */
19684ccb4585SJose Abreu 	if (tx_q->dirty_tx != tx_q->cur_tx)
19694ccb4585SJose Abreu 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
19704ccb4585SJose Abreu 
19718fce3331SJose Abreu 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
19728fce3331SJose Abreu 
19738fce3331SJose Abreu 	return count;
19747ac6653aSJeff Kirsher }
19757ac6653aSJeff Kirsher 
19767ac6653aSJeff Kirsher /**
1977732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
197832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
19795bacd778SLABBE Corentin  * @chan: channel index
19807ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
1981732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
19827ac6653aSJeff Kirsher  */
19835bacd778SLABBE Corentin static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
19847ac6653aSJeff Kirsher {
1985ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1986c24602efSGiuseppe CAVALLARO 	int i;
1987ce736788SJoao Pinto 
1988c22a3f48SJoao Pinto 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
19897ac6653aSJeff Kirsher 
1990ae4f0d46SJoao Pinto 	stmmac_stop_tx_dma(priv, chan);
1991ce736788SJoao Pinto 	dma_free_tx_skbufs(priv, chan);
1992e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_TX_SIZE; i++)
1993c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
199442de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
199542de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1996c24602efSGiuseppe CAVALLARO 		else
199742de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
199842de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1999ce736788SJoao Pinto 	tx_q->dirty_tx = 0;
2000ce736788SJoao Pinto 	tx_q->cur_tx = 0;
20018d212a9eSNiklas Cassel 	tx_q->mss = 0;
2002c22a3f48SJoao Pinto 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2003ae4f0d46SJoao Pinto 	stmmac_start_tx_dma(priv, chan);
20047ac6653aSJeff Kirsher 
20057ac6653aSJeff Kirsher 	priv->dev->stats.tx_errors++;
2006c22a3f48SJoao Pinto 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
20077ac6653aSJeff Kirsher }
20087ac6653aSJeff Kirsher 
200932ceabcaSGiuseppe CAVALLARO /**
20106deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
20116deee222SJoao Pinto  *  @priv: driver private structure
20126deee222SJoao Pinto  *  @txmode: TX operating mode
20136deee222SJoao Pinto  *  @rxmode: RX operating mode
20146deee222SJoao Pinto  *  @chan: channel index
20156deee222SJoao Pinto  *  Description: it is used for configuring the DMA operation mode at
20166deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
20176deee222SJoao Pinto  *  mode.
20186deee222SJoao Pinto  */
20196deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
20206deee222SJoao Pinto 					  u32 rxmode, u32 chan)
20216deee222SJoao Pinto {
2022a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2023a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
202452a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
202552a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
20266deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
202752a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
20286deee222SJoao Pinto 
20296deee222SJoao Pinto 	if (rxfifosz == 0)
20306deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
203152a76235SJose Abreu 	if (txfifosz == 0)
203252a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
203352a76235SJose Abreu 
203452a76235SJose Abreu 	/* Adjust for real per queue fifo size */
203552a76235SJose Abreu 	rxfifosz /= rx_channels_count;
203652a76235SJose Abreu 	txfifosz /= tx_channels_count;
20376deee222SJoao Pinto 
2038ab0204e3SJose Abreu 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2039ab0204e3SJose Abreu 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
20406deee222SJoao Pinto }
20416deee222SJoao Pinto 
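/**
 * stmmac_safety_feat_interrupt - handle the safety features interrupt
 * @priv: driver private structure
 * Description: checks the safety features interrupt status and, if an
 * error is reported, triggers the global error recovery. Returns true
 * when such an error has been handled, false otherwise.
 */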
20428bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
20438bf993a5SJose Abreu {
204463a550fcSJose Abreu 	int ret;
20458bf993a5SJose Abreu 
2046c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
20478bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2048c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
20498bf993a5SJose Abreu 		stmmac_global_err(priv);
2050c10d4c82SJose Abreu 		return true;
2051c10d4c82SJose Abreu 	}
2052c10d4c82SJose Abreu 
2053c10d4c82SJose Abreu 	return false;
20548bf993a5SJose Abreu }
20558bf993a5SJose Abreu 
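/**
 * stmmac_napi_check - check the DMA status and schedule NAPI
 * @priv: driver private structure
 * @chan: channel index
 * Description: reads the DMA interrupt status of @chan and schedules the
 * RX and/or TX NAPI instances of that channel accordingly, disabling the
 * channel DMA interrupts when the RX NAPI poll is scheduled.
 * Returns the interrupt status.
 */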
20568fce3331SJose Abreu static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
20578fce3331SJose Abreu {
20588fce3331SJose Abreu 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
20598fce3331SJose Abreu 						 &priv->xstats, chan);
20608fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[chan];
20618fce3331SJose Abreu 
20624ccb4585SJose Abreu 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
20633ba07debSJose Abreu 		if (napi_schedule_prep(&ch->rx_napi)) {
20648fce3331SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
20653ba07debSJose Abreu 			__napi_schedule_irqoff(&ch->rx_napi);
20663ba07debSJose Abreu 			status |= handle_tx;
20673ba07debSJose Abreu 		}
20684ccb4585SJose Abreu 	}
20694ccb4585SJose Abreu 
2070a66b5884SJose Abreu 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use))
20714ccb4585SJose Abreu 		napi_schedule_irqoff(&ch->tx_napi);
20728fce3331SJose Abreu 
20738fce3331SJose Abreu 	return status;
20748fce3331SJose Abreu }
20758fce3331SJose Abreu 
20766deee222SJoao Pinto /**
2077732fdf0eSGiuseppe CAVALLARO  * stmmac_dma_interrupt - DMA ISR
207832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
207932ceabcaSGiuseppe CAVALLARO  * Description: this is the DMA ISR. It is called by the main ISR.
2080732fdf0eSGiuseppe CAVALLARO  * It calls the dwmac dma routine and schedules the NAPI poll method in case
2081732fdf0eSGiuseppe CAVALLARO  * some work can be done.
208232ceabcaSGiuseppe CAVALLARO  */
20837ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv)
20847ac6653aSJeff Kirsher {
2085d62a107aSJoao Pinto 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
20865a6a0445SNiklas Cassel 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
20875a6a0445SNiklas Cassel 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
20885a6a0445SNiklas Cassel 				tx_channel_count : rx_channel_count;
2089d62a107aSJoao Pinto 	u32 chan;
20908ac60ffbSKees Cook 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
20918ac60ffbSKees Cook 
20928ac60ffbSKees Cook 	/* Make sure we never check beyond our status buffer. */
20938ac60ffbSKees Cook 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
20948ac60ffbSKees Cook 		channels_to_check = ARRAY_SIZE(status);
209568e5cfafSJoao Pinto 
20965a6a0445SNiklas Cassel 	for (chan = 0; chan < channels_to_check; chan++)
20978fce3331SJose Abreu 		status[chan] = stmmac_napi_check(priv, chan);
2098d62a107aSJoao Pinto 
20995a6a0445SNiklas Cassel 	for (chan = 0; chan < tx_channel_count; chan++) {
21005a6a0445SNiklas Cassel 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
21017ac6653aSJeff Kirsher 			/* Try to bump up the dma threshold on this failure */
2102b2dec116SSonic Zhang 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2103b2dec116SSonic Zhang 			    (tc <= 256)) {
21047ac6653aSJeff Kirsher 				tc += 64;
2105c405abe2SSonic Zhang 				if (priv->plat->force_thresh_dma_mode)
2106d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2107d62a107aSJoao Pinto 								      tc,
2108d62a107aSJoao Pinto 								      tc,
2109d62a107aSJoao Pinto 								      chan);
2110c405abe2SSonic Zhang 				else
2111d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2112d62a107aSJoao Pinto 								    tc,
2113d62a107aSJoao Pinto 								    SF_DMA_MODE,
2114d62a107aSJoao Pinto 								    chan);
21157ac6653aSJeff Kirsher 				priv->xstats.threshold = tc;
21167ac6653aSJeff Kirsher 			}
21175a6a0445SNiklas Cassel 		} else if (unlikely(status[chan] == tx_hard_error)) {
21184e593262SJoao Pinto 			stmmac_tx_err(priv, chan);
21197ac6653aSJeff Kirsher 		}
2120d62a107aSJoao Pinto 	}
2121d62a107aSJoao Pinto }
21227ac6653aSJeff Kirsher 
212332ceabcaSGiuseppe CAVALLARO /**
212432ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
212532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
212632ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
212732ceabcaSGiuseppe CAVALLARO  */
21281c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
21291c901a46SGiuseppe CAVALLARO {
21301c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
21311c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
21321c901a46SGiuseppe CAVALLARO 
21333b1dd2c5SJose Abreu 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
21344f795b25SGiuseppe CAVALLARO 
21354f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
21363b1dd2c5SJose Abreu 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
21371c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
21384f795b25SGiuseppe CAVALLARO 	} else
213938ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
21401c901a46SGiuseppe CAVALLARO }
21411c901a46SGiuseppe CAVALLARO 
2142732fdf0eSGiuseppe CAVALLARO /**
2143732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
214432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
214519e30c14SGiuseppe CAVALLARO  * Description:
214619e30c14SGiuseppe CAVALLARO  *  newer GMAC chip generations have a register to indicate the
2147e7434821SGiuseppe CAVALLARO  *  presence of the optional features/functions.
214819e30c14SGiuseppe CAVALLARO  *  This can also be used to override the value passed through the
214919e30c14SGiuseppe CAVALLARO  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2150e7434821SGiuseppe CAVALLARO  */
2151e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2152e7434821SGiuseppe CAVALLARO {
2153a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2154e7434821SGiuseppe CAVALLARO }
2155e7434821SGiuseppe CAVALLARO 
215632ceabcaSGiuseppe CAVALLARO /**
2157732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
215832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
215932ceabcaSGiuseppe CAVALLARO  * Description:
216032ceabcaSGiuseppe CAVALLARO  * it verifies that the MAC address is valid; in case of failure it
216132ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
216232ceabcaSGiuseppe CAVALLARO  */
2163bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2164bfab27a1SGiuseppe CAVALLARO {
2165bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2166c10d4c82SJose Abreu 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2167bfab27a1SGiuseppe CAVALLARO 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2168f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
2169af649352SJisheng Zhang 		dev_info(priv->device, "device MAC address %pM\n",
2170bfab27a1SGiuseppe CAVALLARO 			 priv->dev->dev_addr);
2171bfab27a1SGiuseppe CAVALLARO 	}
2172c88460b7SHans de Goede }
2173bfab27a1SGiuseppe CAVALLARO 
217432ceabcaSGiuseppe CAVALLARO /**
2175732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
217632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
217732ceabcaSGiuseppe CAVALLARO  * Description:
217832ceabcaSGiuseppe CAVALLARO  * It inits the DMA invoking the specific MAC/GMAC callback.
217932ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
218032ceabcaSGiuseppe CAVALLARO  * if they are not passed, a default is kept for the MAC or GMAC.
218132ceabcaSGiuseppe CAVALLARO  */
21820f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv)
21830f1f88a8SGiuseppe CAVALLARO {
218447f2a9ceSJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
218547f2a9ceSJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
218624aaed0cSJose Abreu 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
218754139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q;
2188ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
218947f2a9ceSJoao Pinto 	u32 chan = 0;
2190c24602efSGiuseppe CAVALLARO 	int atds = 0;
2191495db273SGiuseppe Cavallaro 	int ret = 0;
21920f1f88a8SGiuseppe CAVALLARO 
2193a332e2faSNiklas Cassel 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2194a332e2faSNiklas Cassel 		dev_err(priv->device, "Invalid DMA configuration\n");
219589ab75bfSNiklas Cassel 		return -EINVAL;
21960f1f88a8SGiuseppe CAVALLARO 	}
21970f1f88a8SGiuseppe CAVALLARO 
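	/* Extended descriptors in ring mode need the alternate descriptor
	 * size (ATDS) flag when the DMA is programmed below.
	 */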
2198c24602efSGiuseppe CAVALLARO 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2199c24602efSGiuseppe CAVALLARO 		atds = 1;
2200c24602efSGiuseppe CAVALLARO 
2201a4e887faSJose Abreu 	ret = stmmac_reset(priv, priv->ioaddr);
2202495db273SGiuseppe Cavallaro 	if (ret) {
2203495db273SGiuseppe Cavallaro 		dev_err(priv->device, "Failed to reset the dma\n");
2204495db273SGiuseppe Cavallaro 		return ret;
2205495db273SGiuseppe Cavallaro 	}
2206495db273SGiuseppe Cavallaro 
22077d9e6c5aSJose Abreu 	/* DMA Configuration */
22087d9e6c5aSJose Abreu 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
22097d9e6c5aSJose Abreu 
22107d9e6c5aSJose Abreu 	if (priv->plat->axi)
22117d9e6c5aSJose Abreu 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
22127d9e6c5aSJose Abreu 
2213af8f3fb7SWeifeng Voon 	/* DMA CSR Channel configuration */
2214af8f3fb7SWeifeng Voon 	for (chan = 0; chan < dma_csr_ch; chan++)
2215af8f3fb7SWeifeng Voon 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2216af8f3fb7SWeifeng Voon 
221747f2a9ceSJoao Pinto 	/* DMA RX Channel Configuration */
221847f2a9ceSJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++) {
221954139cf3SJoao Pinto 		rx_q = &priv->rx_queue[chan];
222054139cf3SJoao Pinto 
222124aaed0cSJose Abreu 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
222224aaed0cSJose Abreu 				    rx_q->dma_rx_phy, chan);
222347f2a9ceSJoao Pinto 
222454139cf3SJoao Pinto 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2225f748be53SAlexandre TORGUE 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2226a4e887faSJose Abreu 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2227a4e887faSJose Abreu 				       rx_q->rx_tail_addr, chan);
222847f2a9ceSJoao Pinto 	}
222947f2a9ceSJoao Pinto 
223047f2a9ceSJoao Pinto 	/* DMA TX Channel Configuration */
223147f2a9ceSJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++) {
2232ce736788SJoao Pinto 		tx_q = &priv->tx_queue[chan];
2233ce736788SJoao Pinto 
223424aaed0cSJose Abreu 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
223524aaed0cSJose Abreu 				    tx_q->dma_tx_phy, chan);
2236f748be53SAlexandre TORGUE 
22370431100bSJose Abreu 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2238a4e887faSJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2239a4e887faSJose Abreu 				       tx_q->tx_tail_addr, chan);
224047f2a9ceSJoao Pinto 	}
224124aaed0cSJose Abreu 
2242495db273SGiuseppe Cavallaro 	return ret;
22430f1f88a8SGiuseppe CAVALLARO }
22440f1f88a8SGiuseppe CAVALLARO 
22458fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
22468fce3331SJose Abreu {
22478fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
22488fce3331SJose Abreu 
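	/* (Re)arm the per-queue SW TX coalescing timer */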
22498fce3331SJose Abreu 	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
22508fce3331SJose Abreu }
22518fce3331SJose Abreu 
2252bfab27a1SGiuseppe CAVALLARO /**
2253732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
22549125cdd1SGiuseppe CAVALLARO  * @t: timer_list pointer embedded in the TX queue structure
22559125cdd1SGiuseppe CAVALLARO  * Description:
22569125cdd1SGiuseppe CAVALLARO  * This is the timer handler that schedules the TX NAPI to invoke stmmac_tx_clean.
22579125cdd1SGiuseppe CAVALLARO  */
2258e99e88a9SKees Cook static void stmmac_tx_timer(struct timer_list *t)
22599125cdd1SGiuseppe CAVALLARO {
22608fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
22618fce3331SJose Abreu 	struct stmmac_priv *priv = tx_q->priv_data;
22628fce3331SJose Abreu 	struct stmmac_channel *ch;
22639125cdd1SGiuseppe CAVALLARO 
22648fce3331SJose Abreu 	ch = &priv->channel[tx_q->queue_index];
22658fce3331SJose Abreu 
22664ccb4585SJose Abreu 	/*
22674ccb4585SJose Abreu 	 * If NAPI is already running we can miss some events. Let's rearm
22684ccb4585SJose Abreu 	 * the timer and try again.
22694ccb4585SJose Abreu 	 */
22704ccb4585SJose Abreu 	if (likely(napi_schedule_prep(&ch->tx_napi)))
22714ccb4585SJose Abreu 		__napi_schedule(&ch->tx_napi);
22724ccb4585SJose Abreu 	else
22734ccb4585SJose Abreu 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
22749125cdd1SGiuseppe CAVALLARO }
22759125cdd1SGiuseppe CAVALLARO 
22769125cdd1SGiuseppe CAVALLARO /**
2277d429b66eSJose Abreu  * stmmac_init_coalesce - init mitigation options.
227832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
22799125cdd1SGiuseppe CAVALLARO  * Description:
2280d429b66eSJose Abreu  * This inits the coalesce parameters: i.e. timer rate,
22819125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
22829125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
22839125cdd1SGiuseppe CAVALLARO  */
2284d429b66eSJose Abreu static void stmmac_init_coalesce(struct stmmac_priv *priv)
22859125cdd1SGiuseppe CAVALLARO {
22868fce3331SJose Abreu 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
22878fce3331SJose Abreu 	u32 chan;
22888fce3331SJose Abreu 
22899125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
22909125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2291d429b66eSJose Abreu 	priv->rx_coal_frames = STMMAC_RX_FRAMES;
22928fce3331SJose Abreu 
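	/* One SW mitigation timer per TX queue */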
22938fce3331SJose Abreu 	for (chan = 0; chan < tx_channel_count; chan++) {
22948fce3331SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
22958fce3331SJose Abreu 
22968fce3331SJose Abreu 		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
22978fce3331SJose Abreu 	}
22989125cdd1SGiuseppe CAVALLARO }
22999125cdd1SGiuseppe CAVALLARO 
23004854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
23014854ab99SJoao Pinto {
23024854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
23034854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
23044854ab99SJoao Pinto 	u32 chan;
23054854ab99SJoao Pinto 
23064854ab99SJoao Pinto 	/* set TX ring length */
23074854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2308a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
23094854ab99SJoao Pinto 				(DMA_TX_SIZE - 1), chan);
23104854ab99SJoao Pinto 
23114854ab99SJoao Pinto 	/* set RX ring length */
23124854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2313a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
23144854ab99SJoao Pinto 				(DMA_RX_SIZE - 1), chan);
23154854ab99SJoao Pinto }
23164854ab99SJoao Pinto 
23179125cdd1SGiuseppe CAVALLARO /**
23186a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
23196a3a7193SJoao Pinto  *  @priv: driver private structure
23206a3a7193SJoao Pinto  *  Description: It is used for setting TX queues weight
23216a3a7193SJoao Pinto  */
23226a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
23236a3a7193SJoao Pinto {
23246a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
23256a3a7193SJoao Pinto 	u32 weight;
23266a3a7193SJoao Pinto 	u32 queue;
23276a3a7193SJoao Pinto 
23286a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
23296a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
2330c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
23316a3a7193SJoao Pinto 	}
23326a3a7193SJoao Pinto }
23336a3a7193SJoao Pinto 
23346a3a7193SJoao Pinto /**
233519d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
233619d91873SJoao Pinto  *  @priv: driver private structure
233719d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
233819d91873SJoao Pinto  */
233919d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
234019d91873SJoao Pinto {
234119d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
234219d91873SJoao Pinto 	u32 mode_to_use;
234319d91873SJoao Pinto 	u32 queue;
234419d91873SJoao Pinto 
234544781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
234644781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
234719d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
234819d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
234919d91873SJoao Pinto 			continue;
235019d91873SJoao Pinto 
2351c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
235219d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
235319d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
235419d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
235519d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
235619d91873SJoao Pinto 				queue);
235719d91873SJoao Pinto 	}
235819d91873SJoao Pinto }
235919d91873SJoao Pinto 
236019d91873SJoao Pinto /**
2361d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2362d43042f4SJoao Pinto  *  @priv: driver private structure
2363d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
2364d43042f4SJoao Pinto  */
2365d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2366d43042f4SJoao Pinto {
2367d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2368d43042f4SJoao Pinto 	u32 queue;
2369d43042f4SJoao Pinto 	u32 chan;
2370d43042f4SJoao Pinto 
2371d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2372d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
2373c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2374d43042f4SJoao Pinto 	}
2375d43042f4SJoao Pinto }
2376d43042f4SJoao Pinto 
2377d43042f4SJoao Pinto /**
2378a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2379a8f5102aSJoao Pinto  *  @priv: driver private structure
2380a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
2381a8f5102aSJoao Pinto  */
2382a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2383a8f5102aSJoao Pinto {
2384a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2385a8f5102aSJoao Pinto 	u32 queue;
2386a8f5102aSJoao Pinto 	u32 prio;
2387a8f5102aSJoao Pinto 
2388a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2389a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2390a8f5102aSJoao Pinto 			continue;
2391a8f5102aSJoao Pinto 
2392a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
2393c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2394a8f5102aSJoao Pinto 	}
2395a8f5102aSJoao Pinto }
2396a8f5102aSJoao Pinto 
2397a8f5102aSJoao Pinto /**
2398a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2399a8f5102aSJoao Pinto  *  @priv: driver private structure
2400a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
2401a8f5102aSJoao Pinto  */
2402a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2403a8f5102aSJoao Pinto {
2404a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2405a8f5102aSJoao Pinto 	u32 queue;
2406a8f5102aSJoao Pinto 	u32 prio;
2407a8f5102aSJoao Pinto 
2408a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
2409a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2410a8f5102aSJoao Pinto 			continue;
2411a8f5102aSJoao Pinto 
2412a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
2413c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2414a8f5102aSJoao Pinto 	}
2415a8f5102aSJoao Pinto }
2416a8f5102aSJoao Pinto 
2417a8f5102aSJoao Pinto /**
2418abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2419abe80fdcSJoao Pinto  *  @priv: driver private structure
2420abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
2421abe80fdcSJoao Pinto  */
2422abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2423abe80fdcSJoao Pinto {
2424abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2425abe80fdcSJoao Pinto 	u32 queue;
2426abe80fdcSJoao Pinto 	u8 packet;
2427abe80fdcSJoao Pinto 
2428abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2429abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
2430abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2431abe80fdcSJoao Pinto 			continue;
2432abe80fdcSJoao Pinto 
2433abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2434c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2435abe80fdcSJoao Pinto 	}
2436abe80fdcSJoao Pinto }
2437abe80fdcSJoao Pinto 
243876067459SJose Abreu static void stmmac_mac_config_rss(struct stmmac_priv *priv)
243976067459SJose Abreu {
244076067459SJose Abreu 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
244176067459SJose Abreu 		priv->rss.enable = false;
244276067459SJose Abreu 		return;
244376067459SJose Abreu 	}
244476067459SJose Abreu 
244576067459SJose Abreu 	if (priv->dev->features & NETIF_F_RXHASH)
244676067459SJose Abreu 		priv->rss.enable = true;
244776067459SJose Abreu 	else
244876067459SJose Abreu 		priv->rss.enable = false;
244976067459SJose Abreu 
245076067459SJose Abreu 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
245176067459SJose Abreu 			     priv->plat->rx_queues_to_use);
245276067459SJose Abreu }
245376067459SJose Abreu 
2454abe80fdcSJoao Pinto /**
2455d0a9c9f9SJoao Pinto  *  stmmac_mtl_configuration - Configure MTL
2456d0a9c9f9SJoao Pinto  *  @priv: driver private structure
2457d0a9c9f9SJoao Pinto  *  Description: It is used for configuring MTL
2458d0a9c9f9SJoao Pinto  */
2459d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2460d0a9c9f9SJoao Pinto {
2461d0a9c9f9SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2462d0a9c9f9SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2463d0a9c9f9SJoao Pinto 
2464c10d4c82SJose Abreu 	if (tx_queues_count > 1)
24656a3a7193SJoao Pinto 		stmmac_set_tx_queue_weight(priv);
24666a3a7193SJoao Pinto 
2467d0a9c9f9SJoao Pinto 	/* Configure MTL RX algorithms */
2468c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2469c10d4c82SJose Abreu 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2470d0a9c9f9SJoao Pinto 				priv->plat->rx_sched_algorithm);
2471d0a9c9f9SJoao Pinto 
2472d0a9c9f9SJoao Pinto 	/* Configure MTL TX algorithms */
2473c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2474c10d4c82SJose Abreu 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2475d0a9c9f9SJoao Pinto 				priv->plat->tx_sched_algorithm);
2476d0a9c9f9SJoao Pinto 
247719d91873SJoao Pinto 	/* Configure CBS in AVB TX queues */
2478c10d4c82SJose Abreu 	if (tx_queues_count > 1)
247919d91873SJoao Pinto 		stmmac_configure_cbs(priv);
248019d91873SJoao Pinto 
2481d43042f4SJoao Pinto 	/* Map RX MTL to DMA channels */
2482d43042f4SJoao Pinto 	stmmac_rx_queue_dma_chan_map(priv);
2483d43042f4SJoao Pinto 
2484d0a9c9f9SJoao Pinto 	/* Enable MAC RX Queues */
2485d0a9c9f9SJoao Pinto 	stmmac_mac_enable_rx_queues(priv);
24866deee222SJoao Pinto 
2487a8f5102aSJoao Pinto 	/* Set RX priorities */
2488c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2489a8f5102aSJoao Pinto 		stmmac_mac_config_rx_queues_prio(priv);
2490a8f5102aSJoao Pinto 
2491a8f5102aSJoao Pinto 	/* Set TX priorities */
2492c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2493a8f5102aSJoao Pinto 		stmmac_mac_config_tx_queues_prio(priv);
2494abe80fdcSJoao Pinto 
2495abe80fdcSJoao Pinto 	/* Set RX routing */
2496c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2497abe80fdcSJoao Pinto 		stmmac_mac_config_rx_queues_routing(priv);
249876067459SJose Abreu 
249976067459SJose Abreu 	/* Receive Side Scaling */
250076067459SJose Abreu 	if (rx_queues_count > 1)
250176067459SJose Abreu 		stmmac_mac_config_rss(priv);
2502d0a9c9f9SJoao Pinto }
2503d0a9c9f9SJoao Pinto 
25048bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
25058bf993a5SJose Abreu {
2506c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
25078bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
2508c10d4c82SJose Abreu 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
25098bf993a5SJose Abreu 	} else {
25108bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
25118bf993a5SJose Abreu 	}
25128bf993a5SJose Abreu }
25138bf993a5SJose Abreu 
2514d0a9c9f9SJoao Pinto /**
2515732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_setup - setup mac in a usable state.
2516523f11b5SSrinivas Kandagatla  *  @dev : pointer to the device structure.
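 *  @init_ptp: if true, also initialize the PTP support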
2517523f11b5SSrinivas Kandagatla  *  Description:
2518732fdf0eSGiuseppe CAVALLARO  *  this is the main function to setup the HW in a usable state: the
2519732fdf0eSGiuseppe CAVALLARO  *  dma engine is reset, the core registers are configured (e.g. AXI,
2520732fdf0eSGiuseppe CAVALLARO  *  Checksum features, timers) and the DMA is ready to start receiving
2521732fdf0eSGiuseppe CAVALLARO  *  and transmitting.
2522523f11b5SSrinivas Kandagatla  *  Return value:
2523523f11b5SSrinivas Kandagatla  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2524523f11b5SSrinivas Kandagatla  *  file on failure.
2525523f11b5SSrinivas Kandagatla  */
2526fe131929SHuacai Chen static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2527523f11b5SSrinivas Kandagatla {
2528523f11b5SSrinivas Kandagatla 	struct stmmac_priv *priv = netdev_priv(dev);
25293c55d4d0SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2530146617b8SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2531146617b8SJoao Pinto 	u32 chan;
2532523f11b5SSrinivas Kandagatla 	int ret;
2533523f11b5SSrinivas Kandagatla 
2534523f11b5SSrinivas Kandagatla 	/* DMA initialization and SW reset */
2535523f11b5SSrinivas Kandagatla 	ret = stmmac_init_dma_engine(priv);
2536523f11b5SSrinivas Kandagatla 	if (ret < 0) {
253738ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
253838ddc59dSLABBE Corentin 			   __func__);
2539523f11b5SSrinivas Kandagatla 		return ret;
2540523f11b5SSrinivas Kandagatla 	}
2541523f11b5SSrinivas Kandagatla 
2542523f11b5SSrinivas Kandagatla 	/* Copy the MAC addr into the HW  */
2543c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2544523f11b5SSrinivas Kandagatla 
254502e57b9dSGiuseppe CAVALLARO 	/* PS and related bits will be programmed according to the speed */
254602e57b9dSGiuseppe CAVALLARO 	if (priv->hw->pcs) {
254702e57b9dSGiuseppe CAVALLARO 		int speed = priv->plat->mac_port_sel_speed;
254802e57b9dSGiuseppe CAVALLARO 
254902e57b9dSGiuseppe CAVALLARO 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
255002e57b9dSGiuseppe CAVALLARO 		    (speed == SPEED_1000)) {
255102e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = speed;
255202e57b9dSGiuseppe CAVALLARO 		} else {
255302e57b9dSGiuseppe CAVALLARO 			dev_warn(priv->device, "invalid port speed\n");
255402e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = 0;
255502e57b9dSGiuseppe CAVALLARO 		}
255602e57b9dSGiuseppe CAVALLARO 	}
255702e57b9dSGiuseppe CAVALLARO 
2558523f11b5SSrinivas Kandagatla 	/* Initialize the MAC Core */
2559c10d4c82SJose Abreu 	stmmac_core_init(priv, priv->hw, dev);
2560523f11b5SSrinivas Kandagatla 
2561d0a9c9f9SJoao Pinto 	/* Initialize MTL*/
2562d0a9c9f9SJoao Pinto 	stmmac_mtl_configuration(priv);
25639eb12474Sjpinto 
25648bf993a5SJose Abreu 	/* Initialize Safety Features */
25658bf993a5SJose Abreu 	stmmac_safety_feat_configuration(priv);
25668bf993a5SJose Abreu 
2567c10d4c82SJose Abreu 	ret = stmmac_rx_ipc(priv, priv->hw);
2568978aded4SGiuseppe CAVALLARO 	if (!ret) {
256938ddc59dSLABBE Corentin 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2570978aded4SGiuseppe CAVALLARO 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2571d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
2572978aded4SGiuseppe CAVALLARO 	}
2573978aded4SGiuseppe CAVALLARO 
2574523f11b5SSrinivas Kandagatla 	/* Enable the MAC Rx/Tx */
2575c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
2576523f11b5SSrinivas Kandagatla 
2577b4f0a661SJoao Pinto 	/* Set the HW DMA mode and the COE */
2578b4f0a661SJoao Pinto 	stmmac_dma_operation_mode(priv);
2579b4f0a661SJoao Pinto 
2580523f11b5SSrinivas Kandagatla 	stmmac_mmc_setup(priv);
2581523f11b5SSrinivas Kandagatla 
2582fe131929SHuacai Chen 	if (init_ptp) {
25830ad2be79SThierry Reding 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
25840ad2be79SThierry Reding 		if (ret < 0)
25850ad2be79SThierry Reding 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
25860ad2be79SThierry Reding 
2587523f11b5SSrinivas Kandagatla 		ret = stmmac_init_ptp(priv);
2588722eef28SHeiner Kallweit 		if (ret == -EOPNOTSUPP)
2589722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2590722eef28SHeiner Kallweit 		else if (ret)
2591722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP init failed\n");
2592fe131929SHuacai Chen 	}
2593523f11b5SSrinivas Kandagatla 
2594523f11b5SSrinivas Kandagatla 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2595523f11b5SSrinivas Kandagatla 
2596a4e887faSJose Abreu 	if (priv->use_riwt) {
259701d1689dSJose Abreu 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MIN_DMA_RIWT, rx_cnt);
2598a4e887faSJose Abreu 		if (!ret)
259901d1689dSJose Abreu 			priv->rx_riwt = MIN_DMA_RIWT;
2600523f11b5SSrinivas Kandagatla 	}
2601523f11b5SSrinivas Kandagatla 
2602c10d4c82SJose Abreu 	if (priv->hw->pcs)
2603c10d4c82SJose Abreu 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2604523f11b5SSrinivas Kandagatla 
26054854ab99SJoao Pinto 	/* set TX and RX rings length */
26064854ab99SJoao Pinto 	stmmac_set_rings_length(priv);
26074854ab99SJoao Pinto 
2608f748be53SAlexandre TORGUE 	/* Enable TSO */
2609146617b8SJoao Pinto 	if (priv->tso) {
2610146617b8SJoao Pinto 		for (chan = 0; chan < tx_cnt; chan++)
2611a4e887faSJose Abreu 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2612146617b8SJoao Pinto 	}
2613f748be53SAlexandre TORGUE 
261467afd6d1SJose Abreu 	/* Enable Split Header */
261567afd6d1SJose Abreu 	if (priv->sph && priv->hw->rx_csum) {
261667afd6d1SJose Abreu 		for (chan = 0; chan < rx_cnt; chan++)
261767afd6d1SJose Abreu 			stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
261867afd6d1SJose Abreu 	}
261967afd6d1SJose Abreu 
262030d93227SJose Abreu 	/* VLAN Tag Insertion */
262130d93227SJose Abreu 	if (priv->dma_cap.vlins)
262230d93227SJose Abreu 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
262330d93227SJose Abreu 
26247d9e6c5aSJose Abreu 	/* Start the ball rolling... */
26257d9e6c5aSJose Abreu 	stmmac_start_all_dma(priv);
26267d9e6c5aSJose Abreu 
2627523f11b5SSrinivas Kandagatla 	return 0;
2628523f11b5SSrinivas Kandagatla }
2629523f11b5SSrinivas Kandagatla 
2630c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev)
2631c66f6c37SThierry Reding {
2632c66f6c37SThierry Reding 	struct stmmac_priv *priv = netdev_priv(dev);
2633c66f6c37SThierry Reding 
2634c66f6c37SThierry Reding 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2635c66f6c37SThierry Reding }
2636c66f6c37SThierry Reding 
2637523f11b5SSrinivas Kandagatla /**
26387ac6653aSJeff Kirsher  *  stmmac_open - open entry point of the driver
26397ac6653aSJeff Kirsher  *  @dev : pointer to the device structure.
26407ac6653aSJeff Kirsher  *  Description:
26417ac6653aSJeff Kirsher  *  This function is the open entry point of the driver.
26427ac6653aSJeff Kirsher  *  Return value:
26437ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
26447ac6653aSJeff Kirsher  *  file on failure.
26457ac6653aSJeff Kirsher  */
26467ac6653aSJeff Kirsher static int stmmac_open(struct net_device *dev)
26477ac6653aSJeff Kirsher {
26487ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
26498fce3331SJose Abreu 	u32 chan;
26507ac6653aSJeff Kirsher 	int ret;
26517ac6653aSJeff Kirsher 
26523fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
26533fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
26543fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
26557ac6653aSJeff Kirsher 		ret = stmmac_init_phy(dev);
2656e58bb43fSGiuseppe CAVALLARO 		if (ret) {
265738ddc59dSLABBE Corentin 			netdev_err(priv->dev,
265838ddc59dSLABBE Corentin 				   "%s: Cannot attach to PHY (error: %d)\n",
2659e58bb43fSGiuseppe CAVALLARO 				   __func__, ret);
266089df20d9SHans de Goede 			return ret;
26617ac6653aSJeff Kirsher 		}
2662e58bb43fSGiuseppe CAVALLARO 	}
26637ac6653aSJeff Kirsher 
2664523f11b5SSrinivas Kandagatla 	/* Extra statistics */
2665523f11b5SSrinivas Kandagatla 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2666523f11b5SSrinivas Kandagatla 	priv->xstats.threshold = tc;
2667523f11b5SSrinivas Kandagatla 
26685bacd778SLABBE Corentin 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
266922ad3838SGiuseppe Cavallaro 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
267056329137SBartlomiej Zolnierkiewicz 
26715bacd778SLABBE Corentin 	ret = alloc_dma_desc_resources(priv);
26725bacd778SLABBE Corentin 	if (ret < 0) {
26735bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
26745bacd778SLABBE Corentin 			   __func__);
26755bacd778SLABBE Corentin 		goto dma_desc_error;
26765bacd778SLABBE Corentin 	}
26775bacd778SLABBE Corentin 
26785bacd778SLABBE Corentin 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
26795bacd778SLABBE Corentin 	if (ret < 0) {
26805bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
26815bacd778SLABBE Corentin 			   __func__);
26825bacd778SLABBE Corentin 		goto init_error;
26835bacd778SLABBE Corentin 	}
26845bacd778SLABBE Corentin 
2685fe131929SHuacai Chen 	ret = stmmac_hw_setup(dev, true);
268656329137SBartlomiej Zolnierkiewicz 	if (ret < 0) {
268738ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2688c9324d18SGiuseppe CAVALLARO 		goto init_error;
26897ac6653aSJeff Kirsher 	}
26907ac6653aSJeff Kirsher 
2691d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
2692777da230SGiuseppe CAVALLARO 
269374371272SJose Abreu 	phylink_start(priv->phylink);
26947ac6653aSJeff Kirsher 
26957ac6653aSJeff Kirsher 	/* Request the IRQ lines */
26967ac6653aSJeff Kirsher 	ret = request_irq(dev->irq, stmmac_interrupt,
26977ac6653aSJeff Kirsher 			  IRQF_SHARED, dev->name, dev);
26987ac6653aSJeff Kirsher 	if (unlikely(ret < 0)) {
269938ddc59dSLABBE Corentin 		netdev_err(priv->dev,
270038ddc59dSLABBE Corentin 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
27017ac6653aSJeff Kirsher 			   __func__, dev->irq, ret);
27026c1e5abeSThierry Reding 		goto irq_error;
27037ac6653aSJeff Kirsher 	}
27047ac6653aSJeff Kirsher 
27057a13f8f5SFrancesco Virlinzi 	/* Request the Wake IRQ in case of another line is used for WoL */
27067a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq) {
27077a13f8f5SFrancesco Virlinzi 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
27087a13f8f5SFrancesco Virlinzi 				  IRQF_SHARED, dev->name, dev);
27097a13f8f5SFrancesco Virlinzi 		if (unlikely(ret < 0)) {
271038ddc59dSLABBE Corentin 			netdev_err(priv->dev,
271138ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2712ceb69499SGiuseppe CAVALLARO 				   __func__, priv->wol_irq, ret);
2713c9324d18SGiuseppe CAVALLARO 			goto wolirq_error;
27147a13f8f5SFrancesco Virlinzi 		}
27157a13f8f5SFrancesco Virlinzi 	}
27167a13f8f5SFrancesco Virlinzi 
2717d765955dSGiuseppe CAVALLARO 	/* Request the IRQ lines */
2718d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0) {
2719d765955dSGiuseppe CAVALLARO 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2720d765955dSGiuseppe CAVALLARO 				  dev->name, dev);
2721d765955dSGiuseppe CAVALLARO 		if (unlikely(ret < 0)) {
272238ddc59dSLABBE Corentin 			netdev_err(priv->dev,
272338ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2724d765955dSGiuseppe CAVALLARO 				   __func__, priv->lpi_irq, ret);
2725c9324d18SGiuseppe CAVALLARO 			goto lpiirq_error;
2726d765955dSGiuseppe CAVALLARO 		}
2727d765955dSGiuseppe CAVALLARO 	}
2728d765955dSGiuseppe CAVALLARO 
2729c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
2730c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
27317ac6653aSJeff Kirsher 
27327ac6653aSJeff Kirsher 	return 0;
27337ac6653aSJeff Kirsher 
2734c9324d18SGiuseppe CAVALLARO lpiirq_error:
2735d765955dSGiuseppe CAVALLARO 	if (priv->wol_irq != dev->irq)
2736d765955dSGiuseppe CAVALLARO 		free_irq(priv->wol_irq, dev);
2737c9324d18SGiuseppe CAVALLARO wolirq_error:
27387a13f8f5SFrancesco Virlinzi 	free_irq(dev->irq, dev);
27396c1e5abeSThierry Reding irq_error:
274074371272SJose Abreu 	phylink_stop(priv->phylink);
27417a13f8f5SFrancesco Virlinzi 
27428fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
27438fce3331SJose Abreu 		del_timer_sync(&priv->tx_queue[chan].txtimer);
27448fce3331SJose Abreu 
2745c66f6c37SThierry Reding 	stmmac_hw_teardown(dev);
2746c9324d18SGiuseppe CAVALLARO init_error:
2747c9324d18SGiuseppe CAVALLARO 	free_dma_desc_resources(priv);
27485bacd778SLABBE Corentin dma_desc_error:
274974371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
27507ac6653aSJeff Kirsher 	return ret;
27517ac6653aSJeff Kirsher }
27527ac6653aSJeff Kirsher 
27537ac6653aSJeff Kirsher /**
27547ac6653aSJeff Kirsher  *  stmmac_release - close entry point of the driver
27557ac6653aSJeff Kirsher  *  @dev : device pointer.
27567ac6653aSJeff Kirsher  *  Description:
27577ac6653aSJeff Kirsher  *  This is the stop entry point of the driver.
27587ac6653aSJeff Kirsher  */
27597ac6653aSJeff Kirsher static int stmmac_release(struct net_device *dev)
27607ac6653aSJeff Kirsher {
27617ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
27628fce3331SJose Abreu 	u32 chan;
27637ac6653aSJeff Kirsher 
2764d765955dSGiuseppe CAVALLARO 	if (priv->eee_enabled)
2765d765955dSGiuseppe CAVALLARO 		del_timer_sync(&priv->eee_ctrl_timer);
2766d765955dSGiuseppe CAVALLARO 
27677ac6653aSJeff Kirsher 	/* Stop and disconnect the PHY */
276874371272SJose Abreu 	phylink_stop(priv->phylink);
276974371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
27707ac6653aSJeff Kirsher 
2771c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
27727ac6653aSJeff Kirsher 
2773c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
27747ac6653aSJeff Kirsher 
27758fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
27768fce3331SJose Abreu 		del_timer_sync(&priv->tx_queue[chan].txtimer);
27779125cdd1SGiuseppe CAVALLARO 
27787ac6653aSJeff Kirsher 	/* Free the IRQ lines */
27797ac6653aSJeff Kirsher 	free_irq(dev->irq, dev);
27807a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq)
27817a13f8f5SFrancesco Virlinzi 		free_irq(priv->wol_irq, dev);
2782d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0)
2783d765955dSGiuseppe CAVALLARO 		free_irq(priv->lpi_irq, dev);
27847ac6653aSJeff Kirsher 
27857ac6653aSJeff Kirsher 	/* Stop TX/RX DMA and clear the descriptors */
2786ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
27877ac6653aSJeff Kirsher 
27887ac6653aSJeff Kirsher 	/* Release and free the Rx/Tx resources */
27897ac6653aSJeff Kirsher 	free_dma_desc_resources(priv);
27907ac6653aSJeff Kirsher 
27917ac6653aSJeff Kirsher 	/* Disable the MAC Rx/Tx */
2792c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
27937ac6653aSJeff Kirsher 
27947ac6653aSJeff Kirsher 	netif_carrier_off(dev);
27957ac6653aSJeff Kirsher 
279692ba6888SRayagond Kokatanur 	stmmac_release_ptp(priv);
279792ba6888SRayagond Kokatanur 
27987ac6653aSJeff Kirsher 	return 0;
27997ac6653aSJeff Kirsher }
28007ac6653aSJeff Kirsher 
280130d93227SJose Abreu static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
280230d93227SJose Abreu 			       struct stmmac_tx_queue *tx_q)
280330d93227SJose Abreu {
280430d93227SJose Abreu 	u16 tag = 0x0, inner_tag = 0x0;
280530d93227SJose Abreu 	u32 inner_type = 0x0;
280630d93227SJose Abreu 	struct dma_desc *p;
280730d93227SJose Abreu 
280830d93227SJose Abreu 	if (!priv->dma_cap.vlins)
280930d93227SJose Abreu 		return false;
281030d93227SJose Abreu 	if (!skb_vlan_tag_present(skb))
281130d93227SJose Abreu 		return false;
281230d93227SJose Abreu 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
281330d93227SJose Abreu 		inner_tag = skb_vlan_tag_get(skb);
281430d93227SJose Abreu 		inner_type = STMMAC_VLAN_INSERT;
281530d93227SJose Abreu 	}
281630d93227SJose Abreu 
281730d93227SJose Abreu 	tag = skb_vlan_tag_get(skb);
281830d93227SJose Abreu 
281930d93227SJose Abreu 	p = tx_q->dma_tx + tx_q->cur_tx;
282030d93227SJose Abreu 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
282130d93227SJose Abreu 		return false;
282230d93227SJose Abreu 
282330d93227SJose Abreu 	stmmac_set_tx_owner(priv, p);
282430d93227SJose Abreu 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
282530d93227SJose Abreu 	return true;
282630d93227SJose Abreu }
282730d93227SJose Abreu 
28287ac6653aSJeff Kirsher /**
2829f748be53SAlexandre TORGUE  *  stmmac_tso_allocator - Allocate TSO descriptors and fill them
2830f748be53SAlexandre TORGUE  *  @priv: driver private structure
2831f748be53SAlexandre TORGUE  *  @des: buffer start address
2832f748be53SAlexandre TORGUE  *  @total_len: total length to fill in descriptors
2833f748be53SAlexandre TORGUE  *  @last_segment: condition for the last descriptor
2834ce736788SJoao Pinto  *  @queue: TX queue index
2835f748be53SAlexandre TORGUE  *  Description:
2836f748be53SAlexandre TORGUE  *  This function fills the descriptors and requests new descriptors
2837f748be53SAlexandre TORGUE  *  according to the buffer length to fill
2838f748be53SAlexandre TORGUE  */
2839a993db88SJose Abreu static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
2840ce736788SJoao Pinto 				 int total_len, bool last_segment, u32 queue)
2841f748be53SAlexandre TORGUE {
2842ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2843f748be53SAlexandre TORGUE 	struct dma_desc *desc;
28445bacd778SLABBE Corentin 	u32 buff_size;
2845ce736788SJoao Pinto 	int tmp_len;
2846f748be53SAlexandre TORGUE 
2847f748be53SAlexandre TORGUE 	tmp_len = total_len;
2848f748be53SAlexandre TORGUE 
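	/* Split the remaining payload into chunks of at most
	 * TSO_MAX_BUFF_SIZE bytes, one descriptor per chunk.
	 */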
2849f748be53SAlexandre TORGUE 	while (tmp_len > 0) {
2850a993db88SJose Abreu 		dma_addr_t curr_addr;
2851a993db88SJose Abreu 
2852ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2853b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2854ce736788SJoao Pinto 		desc = tx_q->dma_tx + tx_q->cur_tx;
2855f748be53SAlexandre TORGUE 
2856a993db88SJose Abreu 		curr_addr = des + (total_len - tmp_len);
2857a993db88SJose Abreu 		if (priv->dma_cap.addr64 <= 32)
2858a993db88SJose Abreu 			desc->des0 = cpu_to_le32(curr_addr);
2859a993db88SJose Abreu 		else
2860a993db88SJose Abreu 			stmmac_set_desc_addr(priv, desc, curr_addr);
2861a993db88SJose Abreu 
2862f748be53SAlexandre TORGUE 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2863f748be53SAlexandre TORGUE 			    TSO_MAX_BUFF_SIZE : tmp_len;
2864f748be53SAlexandre TORGUE 
286542de047dSJose Abreu 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2866f748be53SAlexandre TORGUE 				0, 1,
2867426849e6SNiklas Cassel 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2868f748be53SAlexandre TORGUE 				0, 0);
2869f748be53SAlexandre TORGUE 
2870f748be53SAlexandre TORGUE 		tmp_len -= TSO_MAX_BUFF_SIZE;
2871f748be53SAlexandre TORGUE 	}
2872f748be53SAlexandre TORGUE }
2873f748be53SAlexandre TORGUE 
2874f748be53SAlexandre TORGUE /**
2875f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2876f748be53SAlexandre TORGUE  *  @skb : the socket buffer
2877f748be53SAlexandre TORGUE  *  @dev : device pointer
2878f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
2879f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
2880f748be53SAlexandre TORGUE  *  Diagram below shows the ring programming in case of TSO frames:
2881f748be53SAlexandre TORGUE  *
2882f748be53SAlexandre TORGUE  *  First Descriptor
2883f748be53SAlexandre TORGUE  *   --------
2884f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
2885f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
2886f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
2887f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2888f748be53SAlexandre TORGUE  *   --------
2889f748be53SAlexandre TORGUE  *	|
2890f748be53SAlexandre TORGUE  *     ...
2891f748be53SAlexandre TORGUE  *	|
2892f748be53SAlexandre TORGUE  *   --------
2893f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2894f748be53SAlexandre TORGUE  *   | DES1 | --|
2895f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
2896f748be53SAlexandre TORGUE  *   | DES3 |
2897f748be53SAlexandre TORGUE  *   --------
2898f748be53SAlexandre TORGUE  *
2899f748be53SAlexandre TORGUE  * mss is fixed when TSO is enabled, so the TDES3 ctx field does not need to be reprogrammed for each frame.
2900f748be53SAlexandre TORGUE  */
2901f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2902f748be53SAlexandre TORGUE {
2903ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
2904f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
2905f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
2906ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
2907ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
290830d93227SJose Abreu 	unsigned int first_entry;
2909ce736788SJoao Pinto 	int tmp_pay_len = 0;
2910ce736788SJoao Pinto 	u32 pay_len, mss;
2911f748be53SAlexandre TORGUE 	u8 proto_hdr_len;
2912a993db88SJose Abreu 	dma_addr_t des;
291330d93227SJose Abreu 	bool has_vlan;
2914f748be53SAlexandre TORGUE 	int i;
2915f748be53SAlexandre TORGUE 
2916ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
2917ce736788SJoao Pinto 
2918f748be53SAlexandre TORGUE 	/* Compute header lengths */
2919f748be53SAlexandre TORGUE 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2920f748be53SAlexandre TORGUE 
2921f748be53SAlexandre TORGUE 	/* Desc availability based on threshold should be enough safe */
2922ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
2923f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2924c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2925c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2926c22a3f48SJoao Pinto 								queue));
2927f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
292838ddc59dSLABBE Corentin 			netdev_err(priv->dev,
292938ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
293038ddc59dSLABBE Corentin 				   __func__);
2931f748be53SAlexandre TORGUE 		}
2932f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
2933f748be53SAlexandre TORGUE 	}
2934f748be53SAlexandre TORGUE 
2935f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2936f748be53SAlexandre TORGUE 
2937f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
2938f748be53SAlexandre TORGUE 
2939f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
29408d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
2941ce736788SJoao Pinto 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
294242de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
29438d212a9eSNiklas Cassel 		tx_q->mss = mss;
2944ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2945b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2946f748be53SAlexandre TORGUE 	}
2947f748be53SAlexandre TORGUE 
2948f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
2949f748be53SAlexandre TORGUE 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2950f748be53SAlexandre TORGUE 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2951f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2952f748be53SAlexandre TORGUE 			skb->data_len);
2953f748be53SAlexandre TORGUE 	}
2954f748be53SAlexandre TORGUE 
295530d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
295630d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
295730d93227SJose Abreu 
2958ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
2959b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2960f748be53SAlexandre TORGUE 
2961ce736788SJoao Pinto 	desc = tx_q->dma_tx + first_entry;
2962f748be53SAlexandre TORGUE 	first = desc;
2963f748be53SAlexandre TORGUE 
296430d93227SJose Abreu 	if (has_vlan)
296530d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
296630d93227SJose Abreu 
2967f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
2968f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2969f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
2970f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
2971f748be53SAlexandre TORGUE 		goto dma_map_err;
2972f748be53SAlexandre TORGUE 
2973ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2974ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2975f748be53SAlexandre TORGUE 
2976a993db88SJose Abreu 	if (priv->dma_cap.addr64 <= 32) {
2977f8be0d78SMichael Weiser 		first->des0 = cpu_to_le32(des);
2978f748be53SAlexandre TORGUE 
2979f748be53SAlexandre TORGUE 		/* Fill start of payload in buff2 of first descriptor */
2980f748be53SAlexandre TORGUE 		if (pay_len)
2981f8be0d78SMichael Weiser 			first->des1 = cpu_to_le32(des + proto_hdr_len);
2982f748be53SAlexandre TORGUE 
2983f748be53SAlexandre TORGUE 		/* If needed take extra descriptors to fill the remaining payload */
2984f748be53SAlexandre TORGUE 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2985a993db88SJose Abreu 	} else {
2986a993db88SJose Abreu 		stmmac_set_desc_addr(priv, first, des);
2987a993db88SJose Abreu 		tmp_pay_len = pay_len;
2988a993db88SJose Abreu 	}
2989f748be53SAlexandre TORGUE 
2990ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2991f748be53SAlexandre TORGUE 
2992f748be53SAlexandre TORGUE 	/* Prepare fragments */
2993f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
2994f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2995f748be53SAlexandre TORGUE 
2996f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
2997f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
2998f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
2999937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
3000937071c1SThierry Reding 			goto dma_map_err;
3001f748be53SAlexandre TORGUE 
3002f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3003ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
3004f748be53SAlexandre TORGUE 
3005ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3006ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3007ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3008f748be53SAlexandre TORGUE 	}
3009f748be53SAlexandre TORGUE 
3010ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3011f748be53SAlexandre TORGUE 
301205cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
301305cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
301405cf0d1bSNiklas Cassel 
301505cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
301605cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
301705cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
301805cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
301905cf0d1bSNiklas Cassel 	 */
3020ce736788SJoao Pinto 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3021f748be53SAlexandre TORGUE 
3022ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3023b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
302438ddc59dSLABBE Corentin 			  __func__);
3025c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3026f748be53SAlexandre TORGUE 	}
3027f748be53SAlexandre TORGUE 
3028f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
3029f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
3030f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
3031f748be53SAlexandre TORGUE 
3032f748be53SAlexandre TORGUE 	/* Manage tx mitigation */
30338fce3331SJose Abreu 	tx_q->tx_count_frames += nfrags + 1;
3034d0bb82fdSRoland Hii 	if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3035d0bb82fdSRoland Hii 	    !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
3036d0bb82fdSRoland Hii 	    (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3037d0bb82fdSRoland Hii 	    priv->hwts_tx_en)) {
3038d0bb82fdSRoland Hii 		stmmac_tx_timer_arm(priv, queue);
3039d0bb82fdSRoland Hii 	} else {
3040d0bb82fdSRoland Hii 		tx_q->tx_count_frames = 0;
304142de047dSJose Abreu 		stmmac_set_tx_ic(priv, desc);
3042f748be53SAlexandre TORGUE 		priv->xstats.tx_set_ic_bit++;
3043f748be53SAlexandre TORGUE 	}
3044f748be53SAlexandre TORGUE 
30458000ddc0SJose Abreu 	if (priv->sarc_type)
30468000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
30478000ddc0SJose Abreu 
3048f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
3049f748be53SAlexandre TORGUE 
3050f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3051f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
3052f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
3053f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
305442de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
3055f748be53SAlexandre TORGUE 	}
3056f748be53SAlexandre TORGUE 
3057f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
305842de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
3059f748be53SAlexandre TORGUE 			proto_hdr_len,
3060f748be53SAlexandre TORGUE 			pay_len,
3061ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3062f748be53SAlexandre TORGUE 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
3063f748be53SAlexandre TORGUE 
3064f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
306515d2ee42SNiklas Cassel 	if (mss_desc) {
306615d2ee42SNiklas Cassel 		/* Make sure that first descriptor has been completely
306715d2ee42SNiklas Cassel 		 * written, including its own bit. This is because MSS is
306815d2ee42SNiklas Cassel 		 * actually before first descriptor, so we need to make
306915d2ee42SNiklas Cassel 		 * sure that MSS's own bit is the last thing written.
307015d2ee42SNiklas Cassel 		 */
307115d2ee42SNiklas Cassel 		dma_wmb();
307242de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
307315d2ee42SNiklas Cassel 	}
3074f748be53SAlexandre TORGUE 
3075f748be53SAlexandre TORGUE 	/* The own bit must be the latest setting done when prepare the
3076f748be53SAlexandre TORGUE 	 * descriptor and then barrier is needed to make sure that
3077f748be53SAlexandre TORGUE 	 * all is coherent before granting the DMA engine.
3078f748be53SAlexandre TORGUE 	 */
307995eb930aSNiklas Cassel 	wmb();
3080f748be53SAlexandre TORGUE 
3081f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
3082f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3083ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3084ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
3085f748be53SAlexandre TORGUE 
308642de047dSJose Abreu 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3087f748be53SAlexandre TORGUE 
3088f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
3089f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
3090f748be53SAlexandre TORGUE 	}
3091f748be53SAlexandre TORGUE 
3092c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3093f748be53SAlexandre TORGUE 
30940431100bSJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3095a4e887faSJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3096f748be53SAlexandre TORGUE 
3097f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3098f748be53SAlexandre TORGUE 
3099f748be53SAlexandre TORGUE dma_map_err:
3100f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
3101f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
3102f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
3103f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3104f748be53SAlexandre TORGUE }
3105f748be53SAlexandre TORGUE 
3106f748be53SAlexandre TORGUE /**
3107732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
31087ac6653aSJeff Kirsher  *  @skb : the socket buffer
31097ac6653aSJeff Kirsher  *  @dev : device pointer
311032ceabcaSGiuseppe CAVALLARO  *  Description : this is the tx entry point of the driver.
311132ceabcaSGiuseppe CAVALLARO  *  It programs the chain or the ring and supports oversized frames
311232ceabcaSGiuseppe CAVALLARO  *  and the SG feature.
31137ac6653aSJeff Kirsher  */
31147ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
31157ac6653aSJeff Kirsher {
31167ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
31170e80bdc9SGiuseppe Cavallaro 	unsigned int nopaged_len = skb_headlen(skb);
31184a7d666aSGiuseppe CAVALLARO 	int i, csum_insertion = 0, is_jumbo = 0;
3119ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
31207ac6653aSJeff Kirsher 	int nfrags = skb_shinfo(skb)->nr_frags;
31217ac6653aSJeff Kirsher 	struct dma_desc *desc, *first;
3122ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
3123a993db88SJose Abreu 	unsigned int first_entry;
31240e80bdc9SGiuseppe Cavallaro 	unsigned int enh_desc;
3125a993db88SJose Abreu 	dma_addr_t des;
312630d93227SJose Abreu 	bool has_vlan;
3127a993db88SJose Abreu 	int entry;
3128f748be53SAlexandre TORGUE 
3129ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
3130ce736788SJoao Pinto 
3131e2cd682dSJose Abreu 	if (priv->tx_path_in_lpi_mode)
3132e2cd682dSJose Abreu 		stmmac_disable_eee_mode(priv);
3133e2cd682dSJose Abreu 
3134f748be53SAlexandre TORGUE 	/* Manage oversized TCP frames for GMAC4 device */
3135f748be53SAlexandre TORGUE 	if (skb_is_gso(skb) && priv->tso) {
31364993e5b3SJose Abreu 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3137f748be53SAlexandre TORGUE 			return stmmac_tso_xmit(skb, dev);
3138f748be53SAlexandre TORGUE 	}
31397ac6653aSJeff Kirsher 
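	/* Make sure there are enough free descriptors for this frame
	 * (head plus all fragments); if not, stop the queue and let the
	 * stack retry later.
	 */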
3140ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3141c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3142c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3143c22a3f48SJoao Pinto 								queue));
31447ac6653aSJeff Kirsher 			/* This is a hard error, log it. */
314538ddc59dSLABBE Corentin 			netdev_err(priv->dev,
314638ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
314738ddc59dSLABBE Corentin 				   __func__);
31487ac6653aSJeff Kirsher 		}
31497ac6653aSJeff Kirsher 		return NETDEV_TX_BUSY;
31507ac6653aSJeff Kirsher 	}
31517ac6653aSJeff Kirsher 
315230d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
315330d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
315430d93227SJose Abreu 
3155ce736788SJoao Pinto 	entry = tx_q->cur_tx;
31560e80bdc9SGiuseppe Cavallaro 	first_entry = entry;
3157b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
31587ac6653aSJeff Kirsher 
31597ac6653aSJeff Kirsher 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
31607ac6653aSJeff Kirsher 
31610e80bdc9SGiuseppe Cavallaro 	if (likely(priv->extend_desc))
3162ce736788SJoao Pinto 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3163c24602efSGiuseppe CAVALLARO 	else
3164ce736788SJoao Pinto 		desc = tx_q->dma_tx + entry;
3165c24602efSGiuseppe CAVALLARO 
31667ac6653aSJeff Kirsher 	first = desc;
31677ac6653aSJeff Kirsher 
316830d93227SJose Abreu 	if (has_vlan)
316930d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
317030d93227SJose Abreu 
31710e80bdc9SGiuseppe Cavallaro 	enh_desc = priv->plat->enh_desc;
31724a7d666aSGiuseppe CAVALLARO 	/* To program the descriptors according to the size of the frame */
317329896a67SGiuseppe CAVALLARO 	if (enh_desc)
31742c520b1cSJose Abreu 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
317529896a67SGiuseppe CAVALLARO 
317663a550fcSJose Abreu 	if (unlikely(is_jumbo)) {
31772c520b1cSJose Abreu 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
317863a550fcSJose Abreu 		if (unlikely(entry < 0) && (entry != -EINVAL))
3179362b37beSGiuseppe CAVALLARO 			goto dma_map_err;
318029896a67SGiuseppe CAVALLARO 	}
31817ac6653aSJeff Kirsher 
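	/* Map every fragment and program one descriptor per fragment. The OWN
	 * bit is set for these right away; the first descriptor is completed
	 * last so the DMA never sees a half-built chain.
	 */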
31827ac6653aSJeff Kirsher 	for (i = 0; i < nfrags; i++) {
31839e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
31849e903e08SEric Dumazet 		int len = skb_frag_size(frag);
3185be434d50SGiuseppe Cavallaro 		bool last_segment = (i == (nfrags - 1));
31867ac6653aSJeff Kirsher 
3187e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3188b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[entry]);
3189e3ad57c9SGiuseppe Cavallaro 
31900e80bdc9SGiuseppe Cavallaro 		if (likely(priv->extend_desc))
3191ce736788SJoao Pinto 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3192c24602efSGiuseppe CAVALLARO 		else
3193ce736788SJoao Pinto 			desc = tx_q->dma_tx + entry;
31947ac6653aSJeff Kirsher 
3195f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3196f722380dSIan Campbell 				       DMA_TO_DEVICE);
3197f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
3198362b37beSGiuseppe CAVALLARO 			goto dma_map_err; /* should reuse desc w/o issues */
3199362b37beSGiuseppe CAVALLARO 
3200ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].buf = des;
32016844171dSJose Abreu 
32026844171dSJose Abreu 		stmmac_set_desc_addr(priv, desc, des);
3203f748be53SAlexandre TORGUE 
3204ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3205ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].len = len;
3206ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
32070e80bdc9SGiuseppe Cavallaro 
32080e80bdc9SGiuseppe Cavallaro 		/* Prepare the descriptor and set the own bit too */
320942de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
321042de047dSJose Abreu 				priv->mode, 1, last_segment, skb->len);
32117ac6653aSJeff Kirsher 	}
32127ac6653aSJeff Kirsher 
321305cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
321405cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[entry] = skb;
3215e3ad57c9SGiuseppe Cavallaro 
321605cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
321705cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
321805cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
321905cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
322005cf0d1bSNiklas Cassel 	 */
322105cf0d1bSNiklas Cassel 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3222ce736788SJoao Pinto 	tx_q->cur_tx = entry;
32237ac6653aSJeff Kirsher 
32247ac6653aSJeff Kirsher 	if (netif_msg_pktdata(priv)) {
3225d0225e7dSAlexandre TORGUE 		void *tx_head;
3226d0225e7dSAlexandre TORGUE 
322738ddc59dSLABBE Corentin 		netdev_dbg(priv->dev,
322838ddc59dSLABBE Corentin 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3229ce736788SJoao Pinto 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
32300e80bdc9SGiuseppe Cavallaro 			   entry, first, nfrags);
323183d7af64SGiuseppe CAVALLARO 
3232c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
3233ce736788SJoao Pinto 			tx_head = (void *)tx_q->dma_etx;
3234c24602efSGiuseppe CAVALLARO 		else
3235ce736788SJoao Pinto 			tx_head = (void *)tx_q->dma_tx;
3236d0225e7dSAlexandre TORGUE 
323742de047dSJose Abreu 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3238c24602efSGiuseppe CAVALLARO 
323938ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
32407ac6653aSJeff Kirsher 		print_pkt(skb->data, skb->len);
32417ac6653aSJeff Kirsher 	}
32420e80bdc9SGiuseppe Cavallaro 
3243ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3244b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3245b3e51069SLABBE Corentin 			  __func__);
3246c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
32477ac6653aSJeff Kirsher 	}
32487ac6653aSJeff Kirsher 
32497ac6653aSJeff Kirsher 	dev->stats.tx_bytes += skb->len;
32507ac6653aSJeff Kirsher 
32510e80bdc9SGiuseppe Cavallaro 	/* According to the coalesce parameter, the IC bit for the latest
32520e80bdc9SGiuseppe Cavallaro 	 * segment is reset and the timer is re-started to clean the Tx status.
32530e80bdc9SGiuseppe Cavallaro 	 * This approach takes care of the fragments: desc is the first
32540e80bdc9SGiuseppe Cavallaro 	 * element in the no-SG case.
32550e80bdc9SGiuseppe Cavallaro 	 */
32568fce3331SJose Abreu 	tx_q->tx_count_frames += nfrags + 1;
3257d0bb82fdSRoland Hii 	if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3258d0bb82fdSRoland Hii 	    !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
3259d0bb82fdSRoland Hii 	    (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3260d0bb82fdSRoland Hii 	    priv->hwts_tx_en)) {
3261d0bb82fdSRoland Hii 		stmmac_tx_timer_arm(priv, queue);
3262d0bb82fdSRoland Hii 	} else {
3263d0bb82fdSRoland Hii 		tx_q->tx_count_frames = 0;
326442de047dSJose Abreu 		stmmac_set_tx_ic(priv, desc);
32650e80bdc9SGiuseppe Cavallaro 		priv->xstats.tx_set_ic_bit++;
32660e80bdc9SGiuseppe Cavallaro 	}
32670e80bdc9SGiuseppe Cavallaro 
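	/* Let the MAC replace or insert the source address if requested. */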
32688000ddc0SJose Abreu 	if (priv->sarc_type)
32698000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
32708000ddc0SJose Abreu 
32710e80bdc9SGiuseppe Cavallaro 	skb_tx_timestamp(skb);
32720e80bdc9SGiuseppe Cavallaro 
32730e80bdc9SGiuseppe Cavallaro 	/* Ready to fill the first descriptor and set the OWN bit without any
32740e80bdc9SGiuseppe Cavallaro 	 * problem, because all the other descriptors are already prepared to
32750e80bdc9SGiuseppe Cavallaro 	 * be passed to the DMA engine.
32760e80bdc9SGiuseppe Cavallaro 	 */
32770e80bdc9SGiuseppe Cavallaro 	if (likely(!is_jumbo)) {
32780e80bdc9SGiuseppe Cavallaro 		bool last_segment = (nfrags == 0);
32790e80bdc9SGiuseppe Cavallaro 
3280f748be53SAlexandre TORGUE 		des = dma_map_single(priv->device, skb->data,
32810e80bdc9SGiuseppe Cavallaro 				     nopaged_len, DMA_TO_DEVICE);
3282f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
32830e80bdc9SGiuseppe Cavallaro 			goto dma_map_err;
32840e80bdc9SGiuseppe Cavallaro 
3285ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].buf = des;
32866844171dSJose Abreu 
32876844171dSJose Abreu 		stmmac_set_desc_addr(priv, first, des);
3288f748be53SAlexandre TORGUE 
3289ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3290ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
32910e80bdc9SGiuseppe Cavallaro 
3292891434b1SRayagond Kokatanur 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3293891434b1SRayagond Kokatanur 			     priv->hwts_tx_en)) {
3294891434b1SRayagond Kokatanur 			/* declare that device is doing timestamping */
3295891434b1SRayagond Kokatanur 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
329642de047dSJose Abreu 			stmmac_enable_tx_timestamp(priv, first);
3297891434b1SRayagond Kokatanur 		}
3298891434b1SRayagond Kokatanur 
32990e80bdc9SGiuseppe Cavallaro 		/* Prepare the first descriptor setting the OWN bit too */
330042de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
330142de047dSJose Abreu 				csum_insertion, priv->mode, 1, last_segment,
330242de047dSJose Abreu 				skb->len);
330380acbed9SAaro Koskinen 	} else {
330480acbed9SAaro Koskinen 		stmmac_set_tx_owner(priv, first);
330580acbed9SAaro Koskinen 	}
33060e80bdc9SGiuseppe Cavallaro 
33070e80bdc9SGiuseppe Cavallaro 	/* The own bit must be the latest setting done when preparing the
33080e80bdc9SGiuseppe Cavallaro 	 * descriptor, and then a barrier is needed to make sure that
33090e80bdc9SGiuseppe Cavallaro 	 * everything is coherent before granting control to the DMA engine.
33100e80bdc9SGiuseppe Cavallaro 	 */
331195eb930aSNiklas Cassel 	wmb();
33127ac6653aSJeff Kirsher 
3313c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3314f748be53SAlexandre TORGUE 
3315a4e887faSJose Abreu 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
33168fce3331SJose Abreu 
33170431100bSJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3318f1565c60SJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
33197ac6653aSJeff Kirsher 
3320362b37beSGiuseppe CAVALLARO 	return NETDEV_TX_OK;
3321a9097a96SGiuseppe CAVALLARO 
3322362b37beSGiuseppe CAVALLARO dma_map_err:
332338ddc59dSLABBE Corentin 	netdev_err(priv->dev, "Tx DMA map failed\n");
3324362b37beSGiuseppe CAVALLARO 	dev_kfree_skb(skb);
3325362b37beSGiuseppe CAVALLARO 	priv->dev->stats.tx_dropped++;
33267ac6653aSJeff Kirsher 	return NETDEV_TX_OK;
33277ac6653aSJeff Kirsher }
33287ac6653aSJeff Kirsher 
3329b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3330b9381985SVince Bridgers {
3331ab188e8fSElad Nachman 	struct vlan_ethhdr *veth;
3332ab188e8fSElad Nachman 	__be16 vlan_proto;
3333b9381985SVince Bridgers 	u16 vlanid;
3334b9381985SVince Bridgers 
3335ab188e8fSElad Nachman 	veth = (struct vlan_ethhdr *)skb->data;
3336ab188e8fSElad Nachman 	vlan_proto = veth->h_vlan_proto;
3337ab188e8fSElad Nachman 
3338ab188e8fSElad Nachman 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3339ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3340ab188e8fSElad Nachman 	    (vlan_proto == htons(ETH_P_8021AD) &&
3341ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3342b9381985SVince Bridgers 		/* pop the vlan tag */
3343ab188e8fSElad Nachman 		vlanid = ntohs(veth->h_vlan_TCI);
3344ab188e8fSElad Nachman 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3345b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
3346ab188e8fSElad Nachman 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3347b9381985SVince Bridgers 	}
3348b9381985SVince Bridgers }
3349b9381985SVince Bridgers 
3350b9381985SVince Bridgers 
335154139cf3SJoao Pinto static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3352120e87f9SGiuseppe Cavallaro {
335354139cf3SJoao Pinto 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3354120e87f9SGiuseppe Cavallaro 		return 0;
3355120e87f9SGiuseppe Cavallaro 
3356120e87f9SGiuseppe Cavallaro 	return 1;
3357120e87f9SGiuseppe Cavallaro }
3358120e87f9SGiuseppe Cavallaro 
335932ceabcaSGiuseppe CAVALLARO /**
3360732fdf0eSGiuseppe CAVALLARO  * stmmac_rx_refill - refill the used RX buffers
336132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
336254139cf3SJoao Pinto  * @queue: RX queue index
336332ceabcaSGiuseppe CAVALLARO  * Description : this reallocates the RX buffers (page pool based) used by
336432ceabcaSGiuseppe CAVALLARO  * the zero-copy reception process.
336532ceabcaSGiuseppe CAVALLARO  */
336654139cf3SJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
33677ac6653aSJeff Kirsher {
336854139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
33693caa61c2SJose Abreu 	int len, dirty = stmmac_rx_dirty(priv, queue);
337054139cf3SJoao Pinto 	unsigned int entry = rx_q->dirty_rx;
337154139cf3SJoao Pinto 
33723caa61c2SJose Abreu 	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
33733caa61c2SJose Abreu 
3374e3ad57c9SGiuseppe Cavallaro 	while (dirty-- > 0) {
33752af6106aSJose Abreu 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3376c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
3377d429b66eSJose Abreu 		bool use_rx_wd;
3378c24602efSGiuseppe CAVALLARO 
3379c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
338054139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3381c24602efSGiuseppe CAVALLARO 		else
338254139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3383c24602efSGiuseppe CAVALLARO 
33842af6106aSJose Abreu 		if (!buf->page) {
33852af6106aSJose Abreu 			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
33862af6106aSJose Abreu 			if (!buf->page)
33877ac6653aSJeff Kirsher 				break;
3388120e87f9SGiuseppe Cavallaro 		}
33897ac6653aSJeff Kirsher 
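		/* With Split Header enabled, a second page holds the payload. */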
339067afd6d1SJose Abreu 		if (priv->sph && !buf->sec_page) {
339167afd6d1SJose Abreu 			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
339267afd6d1SJose Abreu 			if (!buf->sec_page)
339367afd6d1SJose Abreu 				break;
339467afd6d1SJose Abreu 
339567afd6d1SJose Abreu 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
339667afd6d1SJose Abreu 
339767afd6d1SJose Abreu 			dma_sync_single_for_device(priv->device, buf->sec_addr,
339867afd6d1SJose Abreu 						   len, DMA_FROM_DEVICE);
339967afd6d1SJose Abreu 		}
340067afd6d1SJose Abreu 
34012af6106aSJose Abreu 		buf->addr = page_pool_get_dma_addr(buf->page);
34023caa61c2SJose Abreu 
34033caa61c2SJose Abreu 		/* Sync whole allocation to device. This will invalidate old
34043caa61c2SJose Abreu 		 * data.
34053caa61c2SJose Abreu 		 */
34063caa61c2SJose Abreu 		dma_sync_single_for_device(priv->device, buf->addr, len,
34073caa61c2SJose Abreu 					   DMA_FROM_DEVICE);
34083caa61c2SJose Abreu 
34092af6106aSJose Abreu 		stmmac_set_desc_addr(priv, p, buf->addr);
341067afd6d1SJose Abreu 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
34112c520b1cSJose Abreu 		stmmac_refill_desc3(priv, rx_q, p);
3412286a8372SGiuseppe CAVALLARO 
3413d429b66eSJose Abreu 		rx_q->rx_count_frames++;
3414d429b66eSJose Abreu 		rx_q->rx_count_frames %= priv->rx_coal_frames;
3415d429b66eSJose Abreu 		use_rx_wd = priv->use_riwt && rx_q->rx_count_frames;
3416d429b66eSJose Abreu 
3417ad688cdbSPavel Machek 		dma_wmb();
34182af6106aSJose Abreu 		stmmac_set_rx_owner(priv, p, use_rx_wd);
3419e3ad57c9SGiuseppe Cavallaro 
3420e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
34217ac6653aSJeff Kirsher 	}
342254139cf3SJoao Pinto 	rx_q->dirty_rx = entry;
3423858a31ffSJose Abreu 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3424858a31ffSJose Abreu 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
34254523a561SBiao Huang 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
34267ac6653aSJeff Kirsher }
34277ac6653aSJeff Kirsher 
342832ceabcaSGiuseppe CAVALLARO /**
3429732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
343032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
343154139cf3SJoao Pinto  * @limit: napi budget
343254139cf3SJoao Pinto  * @queue: RX queue index.
343332ceabcaSGiuseppe CAVALLARO  * Description : this is the function called by the napi poll method.
343432ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
343532ceabcaSGiuseppe CAVALLARO  */
343654139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
34377ac6653aSJeff Kirsher {
343854139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
34398fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[queue];
3440ec222003SJose Abreu 	unsigned int count = 0, error = 0, len = 0;
3441ec222003SJose Abreu 	int status = 0, coe = priv->hw->rx_csum;
344207b39753SAaro Koskinen 	unsigned int next_entry = rx_q->cur_rx;
3443ec222003SJose Abreu 	struct sk_buff *skb = NULL;
34447ac6653aSJeff Kirsher 
344583d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
3446d0225e7dSAlexandre TORGUE 		void *rx_head;
3447d0225e7dSAlexandre TORGUE 
344838ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3449c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
345054139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
3451c24602efSGiuseppe CAVALLARO 		else
345254139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
3453d0225e7dSAlexandre TORGUE 
345442de047dSJose Abreu 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
34557ac6653aSJeff Kirsher 	}
3456c24602efSGiuseppe CAVALLARO 	while (count < limit) {
345767afd6d1SJose Abreu 		unsigned int hlen = 0, prev_len = 0;
3458ec222003SJose Abreu 		enum pkt_hash_types hash_type;
34592af6106aSJose Abreu 		struct stmmac_rx_buffer *buf;
34602af6106aSJose Abreu 		struct dma_desc *np, *p;
346167afd6d1SJose Abreu 		unsigned int sec_len;
3462ec222003SJose Abreu 		int entry;
3463ec222003SJose Abreu 		u32 hash;
34647ac6653aSJeff Kirsher 
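		/* Resume a frame left incomplete by the previous poll, if any. */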
3465ec222003SJose Abreu 		if (!count && rx_q->state_saved) {
3466ec222003SJose Abreu 			skb = rx_q->state.skb;
3467ec222003SJose Abreu 			error = rx_q->state.error;
3468ec222003SJose Abreu 			len = rx_q->state.len;
3469ec222003SJose Abreu 		} else {
3470ec222003SJose Abreu 			rx_q->state_saved = false;
3471ec222003SJose Abreu 			skb = NULL;
3472ec222003SJose Abreu 			error = 0;
3473ec222003SJose Abreu 			len = 0;
3474ec222003SJose Abreu 		}
3475ec222003SJose Abreu 
3476ec222003SJose Abreu 		if (count >= limit)
3477ec222003SJose Abreu 			break;
3478ec222003SJose Abreu 
3479ec222003SJose Abreu read_again:
348067afd6d1SJose Abreu 		sec_len = 0;
348107b39753SAaro Koskinen 		entry = next_entry;
34822af6106aSJose Abreu 		buf = &rx_q->buf_pool[entry];
348307b39753SAaro Koskinen 
3484c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
348554139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3486c24602efSGiuseppe CAVALLARO 		else
348754139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3488c24602efSGiuseppe CAVALLARO 
3489c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
349042de047dSJose Abreu 		status = stmmac_rx_status(priv, &priv->dev->stats,
3491c1fa3212SFabrice Gasnier 				&priv->xstats, p);
3492c1fa3212SFabrice Gasnier 		/* check if managed by the DMA otherwise go ahead */
3493c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
34947ac6653aSJeff Kirsher 			break;
34957ac6653aSJeff Kirsher 
34967ac6653aSJeff Kirsher 		count++;
34977ac6653aSJeff Kirsher 
349854139cf3SJoao Pinto 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
349954139cf3SJoao Pinto 		next_entry = rx_q->cur_rx;
3500e3ad57c9SGiuseppe Cavallaro 
3501c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
350254139cf3SJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3503c24602efSGiuseppe CAVALLARO 		else
350454139cf3SJoao Pinto 			np = rx_q->dma_rx + next_entry;
3505ba1ffd74SGiuseppe CAVALLARO 
3506ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
350767afd6d1SJose Abreu 		prefetch(page_address(buf->page));
35087ac6653aSJeff Kirsher 
350942de047dSJose Abreu 		if (priv->extend_desc)
351042de047dSJose Abreu 			stmmac_rx_extended_status(priv, &priv->dev->stats,
351142de047dSJose Abreu 					&priv->xstats, rx_q->dma_erx + entry);
3512891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
35132af6106aSJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
35142af6106aSJose Abreu 			buf->page = NULL;
3515ec222003SJose Abreu 			error = 1;
35160b273ca4SJose Abreu 			if (!priv->hwts_rx_en)
35170b273ca4SJose Abreu 				priv->dev->stats.rx_errors++;
3518ec222003SJose Abreu 		}
3519f748be53SAlexandre TORGUE 
3520ec222003SJose Abreu 		if (unlikely(error && (status & rx_not_ls)))
3521ec222003SJose Abreu 			goto read_again;
3522ec222003SJose Abreu 		if (unlikely(error)) {
3523ec222003SJose Abreu 			dev_kfree_skb(skb);
352407b39753SAaro Koskinen 			continue;
3525e527c4a7SGiuseppe CAVALLARO 		}
3526e527c4a7SGiuseppe CAVALLARO 
3527ec222003SJose Abreu 		/* Buffer is good. Go on. */
3528ec222003SJose Abreu 
3529ec222003SJose Abreu 		if (likely(status & rx_not_ls)) {
3530ec222003SJose Abreu 			len += priv->dma_buf_sz;
3531ec222003SJose Abreu 		} else {
3532ec222003SJose Abreu 			prev_len = len;
3533ec222003SJose Abreu 			len = stmmac_get_rx_frame_len(priv, p, coe);
3534ec222003SJose Abreu 
35357ac6653aSJeff Kirsher 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3536ceb69499SGiuseppe CAVALLARO 			 * Type frames (LLC/LLC-SNAP)
3537565020aaSJose Abreu 			 *
3538565020aaSJose Abreu 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3539565020aaSJose Abreu 			 * feature is always disabled and packets need to be
3540565020aaSJose Abreu 			 * stripped manually.
3541ceb69499SGiuseppe CAVALLARO 			 */
3542565020aaSJose Abreu 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3543565020aaSJose Abreu 			    unlikely(status != llc_snap))
3544ec222003SJose Abreu 				len -= ETH_FCS_LEN;
354583d7af64SGiuseppe CAVALLARO 		}
354622ad3838SGiuseppe Cavallaro 
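		/* First descriptor of the frame: allocate the SKB and copy this
		 * buffer's data into its linear area (only the header when
		 * Split Header applies).
		 */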
3547ec222003SJose Abreu 		if (!skb) {
354867afd6d1SJose Abreu 			int ret = stmmac_get_rx_header_len(priv, p, &hlen);
354967afd6d1SJose Abreu 
355067afd6d1SJose Abreu 			if (priv->sph && !ret && (hlen > 0)) {
355167afd6d1SJose Abreu 				sec_len = len;
355267afd6d1SJose Abreu 				if (!(status & rx_not_ls))
355367afd6d1SJose Abreu 					sec_len = sec_len - hlen;
355467afd6d1SJose Abreu 				len = hlen;
355567afd6d1SJose Abreu 
355667afd6d1SJose Abreu 				prefetch(page_address(buf->sec_page));
3557b5418e13SJose Abreu 				priv->xstats.rx_split_hdr_pkt_n++;
355867afd6d1SJose Abreu 			}
355967afd6d1SJose Abreu 
3560ec222003SJose Abreu 			skb = napi_alloc_skb(&ch->rx_napi, len);
3561ec222003SJose Abreu 			if (!skb) {
356222ad3838SGiuseppe Cavallaro 				priv->dev->stats.rx_dropped++;
356307b39753SAaro Koskinen 				continue;
356422ad3838SGiuseppe Cavallaro 			}
356522ad3838SGiuseppe Cavallaro 
3566ec222003SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr, len,
3567ec222003SJose Abreu 						DMA_FROM_DEVICE);
35682af6106aSJose Abreu 			skb_copy_to_linear_data(skb, page_address(buf->page),
3569ec222003SJose Abreu 						len);
3570ec222003SJose Abreu 			skb_put(skb, len);
357122ad3838SGiuseppe Cavallaro 
3572ec222003SJose Abreu 			/* Data payload copied into SKB, page ready for recycle */
3573ec222003SJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
3574ec222003SJose Abreu 			buf->page = NULL;
3575ec222003SJose Abreu 		} else {
3576ec222003SJose Abreu 			unsigned int buf_len = len - prev_len;
3577ec222003SJose Abreu 
3578ec222003SJose Abreu 			if (likely(status & rx_not_ls))
3579ec222003SJose Abreu 				buf_len = priv->dma_buf_sz;
3580ec222003SJose Abreu 
3581ec222003SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
3582ec222003SJose Abreu 						buf_len, DMA_FROM_DEVICE);
3583ec222003SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3584ec222003SJose Abreu 					buf->page, 0, buf_len,
3585ec222003SJose Abreu 					priv->dma_buf_sz);
3586ec222003SJose Abreu 
3587ec222003SJose Abreu 			/* Data payload appended into SKB */
3588ec222003SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->page);
3589ec222003SJose Abreu 			buf->page = NULL;
35907ac6653aSJeff Kirsher 		}
359183d7af64SGiuseppe CAVALLARO 
359267afd6d1SJose Abreu 		if (sec_len > 0) {
359367afd6d1SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
359467afd6d1SJose Abreu 						sec_len, DMA_FROM_DEVICE);
359567afd6d1SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
359667afd6d1SJose Abreu 					buf->sec_page, 0, sec_len,
359767afd6d1SJose Abreu 					priv->dma_buf_sz);
359867afd6d1SJose Abreu 
359967afd6d1SJose Abreu 			len += sec_len;
360067afd6d1SJose Abreu 
360167afd6d1SJose Abreu 			/* Data payload appended into SKB */
360267afd6d1SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->sec_page);
360367afd6d1SJose Abreu 			buf->sec_page = NULL;
360467afd6d1SJose Abreu 		}
360567afd6d1SJose Abreu 
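		/* More descriptors belong to this frame: keep collecting them. */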
3606ec222003SJose Abreu 		if (likely(status & rx_not_ls))
3607ec222003SJose Abreu 			goto read_again;
3608ec222003SJose Abreu 
3609ec222003SJose Abreu 		/* Got entire packet into SKB. Finish it. */
3610ec222003SJose Abreu 
3611ba1ffd74SGiuseppe CAVALLARO 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
3612b9381985SVince Bridgers 		stmmac_rx_vlan(priv->dev, skb);
36137ac6653aSJeff Kirsher 		skb->protocol = eth_type_trans(skb, priv->dev);
36147ac6653aSJeff Kirsher 
3615ceb69499SGiuseppe CAVALLARO 		if (unlikely(!coe))
36167ac6653aSJeff Kirsher 			skb_checksum_none_assert(skb);
361762a2ab93SGiuseppe CAVALLARO 		else
36187ac6653aSJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
361962a2ab93SGiuseppe CAVALLARO 
362076067459SJose Abreu 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
362176067459SJose Abreu 			skb_set_hash(skb, hash, hash_type);
362276067459SJose Abreu 
362376067459SJose Abreu 		skb_record_rx_queue(skb, queue);
36244ccb4585SJose Abreu 		napi_gro_receive(&ch->rx_napi, skb);
36257ac6653aSJeff Kirsher 
36267ac6653aSJeff Kirsher 		priv->dev->stats.rx_packets++;
3627ec222003SJose Abreu 		priv->dev->stats.rx_bytes += len;
36287ac6653aSJeff Kirsher 	}
3629ec222003SJose Abreu 
3630ec222003SJose Abreu 	if (status & rx_not_ls) {
3631ec222003SJose Abreu 		rx_q->state_saved = true;
3632ec222003SJose Abreu 		rx_q->state.skb = skb;
3633ec222003SJose Abreu 		rx_q->state.error = error;
3634ec222003SJose Abreu 		rx_q->state.len = len;
36357ac6653aSJeff Kirsher 	}
36367ac6653aSJeff Kirsher 
363754139cf3SJoao Pinto 	stmmac_rx_refill(priv, queue);
36387ac6653aSJeff Kirsher 
36397ac6653aSJeff Kirsher 	priv->xstats.rx_pkt_n += count;
36407ac6653aSJeff Kirsher 
36417ac6653aSJeff Kirsher 	return count;
36427ac6653aSJeff Kirsher }
36437ac6653aSJeff Kirsher 
36444ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
36457ac6653aSJeff Kirsher {
36468fce3331SJose Abreu 	struct stmmac_channel *ch =
36474ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, rx_napi);
36488fce3331SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
36498fce3331SJose Abreu 	u32 chan = ch->index;
36504ccb4585SJose Abreu 	int work_done;
36517ac6653aSJeff Kirsher 
36529125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
3653ce736788SJoao Pinto 
36544ccb4585SJose Abreu 	work_done = stmmac_rx(priv, budget, chan);
36554ccb4585SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done))
36564ccb4585SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
36574ccb4585SJose Abreu 	return work_done;
36584ccb4585SJose Abreu }
3659ce736788SJoao Pinto 
36604ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
36614ccb4585SJose Abreu {
36624ccb4585SJose Abreu 	struct stmmac_channel *ch =
36634ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, tx_napi);
36644ccb4585SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
36654ccb4585SJose Abreu 	struct stmmac_tx_queue *tx_q;
36664ccb4585SJose Abreu 	u32 chan = ch->index;
36674ccb4585SJose Abreu 	int work_done;
36684ccb4585SJose Abreu 
36694ccb4585SJose Abreu 	priv->xstats.napi_poll++;
36704ccb4585SJose Abreu 
36714ccb4585SJose Abreu 	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3672fa0be0a4SJose Abreu 	work_done = min(work_done, budget);
36738fce3331SJose Abreu 
3674a66b5884SJose Abreu 	if (work_done < budget)
3675a66b5884SJose Abreu 		napi_complete_done(napi, work_done);
36764ccb4585SJose Abreu 
36774ccb4585SJose Abreu 	/* Force transmission restart */
36784ccb4585SJose Abreu 	tx_q = &priv->tx_queue[chan];
36794ccb4585SJose Abreu 	if (tx_q->cur_tx != tx_q->dirty_tx) {
36804ccb4585SJose Abreu 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
36814ccb4585SJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
36824ccb4585SJose Abreu 				       chan);
3683fa0be0a4SJose Abreu 	}
36848fce3331SJose Abreu 
36857ac6653aSJeff Kirsher 	return work_done;
36867ac6653aSJeff Kirsher }
36877ac6653aSJeff Kirsher 
36887ac6653aSJeff Kirsher /**
36897ac6653aSJeff Kirsher  *  stmmac_tx_timeout
36907ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
36917ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
36927284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
36937ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
36947ac6653aSJeff Kirsher  *   in order to transmit a new packet.
36957ac6653aSJeff Kirsher  */
36967ac6653aSJeff Kirsher static void stmmac_tx_timeout(struct net_device *dev)
36977ac6653aSJeff Kirsher {
36987ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
36997ac6653aSJeff Kirsher 
370034877a15SJose Abreu 	stmmac_global_err(priv);
37017ac6653aSJeff Kirsher }
37027ac6653aSJeff Kirsher 
37037ac6653aSJeff Kirsher /**
370401789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
37057ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
37067ac6653aSJeff Kirsher  *  Description:
37077ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
37087ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
37097ac6653aSJeff Kirsher  *  Return value:
37107ac6653aSJeff Kirsher  *  void.
37117ac6653aSJeff Kirsher  */
371201789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
37137ac6653aSJeff Kirsher {
37147ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
37157ac6653aSJeff Kirsher 
3716c10d4c82SJose Abreu 	stmmac_set_filter(priv, priv->hw, dev);
37177ac6653aSJeff Kirsher }
37187ac6653aSJeff Kirsher 
37197ac6653aSJeff Kirsher /**
37207ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
37217ac6653aSJeff Kirsher  *  @dev : device pointer.
37227ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
37237ac6653aSJeff Kirsher  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
37247ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
37257ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
37267ac6653aSJeff Kirsher  *  Return value:
37277ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
37287ac6653aSJeff Kirsher  *  file on failure.
37297ac6653aSJeff Kirsher  */
37307ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
37317ac6653aSJeff Kirsher {
373238ddc59dSLABBE Corentin 	struct stmmac_priv *priv = netdev_priv(dev);
373338ddc59dSLABBE Corentin 
37347ac6653aSJeff Kirsher 	if (netif_running(dev)) {
373538ddc59dSLABBE Corentin 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
37367ac6653aSJeff Kirsher 		return -EBUSY;
37377ac6653aSJeff Kirsher 	}
37387ac6653aSJeff Kirsher 
37397ac6653aSJeff Kirsher 	dev->mtu = new_mtu;
3740f748be53SAlexandre TORGUE 
37417ac6653aSJeff Kirsher 	netdev_update_features(dev);
37427ac6653aSJeff Kirsher 
37437ac6653aSJeff Kirsher 	return 0;
37447ac6653aSJeff Kirsher }
37457ac6653aSJeff Kirsher 
3746c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
3747c8f44affSMichał Mirosław 					     netdev_features_t features)
37487ac6653aSJeff Kirsher {
37497ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
37507ac6653aSJeff Kirsher 
375138912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
37527ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
3753d2afb5bdSGiuseppe CAVALLARO 
37547ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
3755a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
37567ac6653aSJeff Kirsher 
37577ac6653aSJeff Kirsher 	/* Some GMAC devices have buggy Jumbo frame support that
37587ac6653aSJeff Kirsher 	 * requires the Tx COE to be disabled for oversized frames
37597ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
3760ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and do not use SF.
3761ceb69499SGiuseppe CAVALLARO 	 */
37627ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3763a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
37647ac6653aSJeff Kirsher 
3765f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
3766f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3767f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
3768f748be53SAlexandre TORGUE 			priv->tso = true;
3769f748be53SAlexandre TORGUE 		else
3770f748be53SAlexandre TORGUE 			priv->tso = false;
3771f748be53SAlexandre TORGUE 	}
3772f748be53SAlexandre TORGUE 
37737ac6653aSJeff Kirsher 	return features;
37747ac6653aSJeff Kirsher }
37757ac6653aSJeff Kirsher 
3776d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
3777d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
3778d2afb5bdSGiuseppe CAVALLARO {
3779d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
378067afd6d1SJose Abreu 	bool sph_en;
378167afd6d1SJose Abreu 	u32 chan;
3782d2afb5bdSGiuseppe CAVALLARO 
3783d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type in case of csum is supporting */
3784d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
3785d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
3786d2afb5bdSGiuseppe CAVALLARO 	else
3787d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
3788d2afb5bdSGiuseppe CAVALLARO 	/* No check is needed because rx_coe has already been set and it will
3789d2afb5bdSGiuseppe CAVALLARO 	 * be fixed up in case of any issue.
3790d2afb5bdSGiuseppe CAVALLARO 	 */
3791c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
3792d2afb5bdSGiuseppe CAVALLARO 
379367afd6d1SJose Abreu 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
379467afd6d1SJose Abreu 	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
379567afd6d1SJose Abreu 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
379667afd6d1SJose Abreu 
3797d2afb5bdSGiuseppe CAVALLARO 	return 0;
3798d2afb5bdSGiuseppe CAVALLARO }
3799d2afb5bdSGiuseppe CAVALLARO 
380032ceabcaSGiuseppe CAVALLARO /**
380132ceabcaSGiuseppe CAVALLARO  *  stmmac_interrupt - main ISR
380232ceabcaSGiuseppe CAVALLARO  *  @irq: interrupt number.
380332ceabcaSGiuseppe CAVALLARO  *  @dev_id: to pass the net device pointer.
380432ceabcaSGiuseppe CAVALLARO  *  Description: this is the main driver interrupt service routine.
3805732fdf0eSGiuseppe CAVALLARO  *  It can call:
3806732fdf0eSGiuseppe CAVALLARO  *  o DMA service routine (to manage incoming frame reception and transmission
3807732fdf0eSGiuseppe CAVALLARO  *    status)
3808732fdf0eSGiuseppe CAVALLARO  *  o Core interrupts to manage: remote wake-up, management counter, LPI
380932ceabcaSGiuseppe CAVALLARO  *    interrupts.
381032ceabcaSGiuseppe CAVALLARO  */
38117ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
38127ac6653aSJeff Kirsher {
38137ac6653aSJeff Kirsher 	struct net_device *dev = (struct net_device *)dev_id;
38147ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
38157bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
38167bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
38177bac4e1eSJoao Pinto 	u32 queues_count;
38187bac4e1eSJoao Pinto 	u32 queue;
38197d9e6c5aSJose Abreu 	bool xmac;
38207bac4e1eSJoao Pinto 
38217d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
38227bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
38237ac6653aSJeff Kirsher 
382489f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
382589f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
382689f7f2cfSSrinivas Kandagatla 
38277ac6653aSJeff Kirsher 	if (unlikely(!dev)) {
382838ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
38297ac6653aSJeff Kirsher 		return IRQ_NONE;
38307ac6653aSJeff Kirsher 	}
38317ac6653aSJeff Kirsher 
383234877a15SJose Abreu 	/* Check if adapter is up */
383334877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
383434877a15SJose Abreu 		return IRQ_HANDLED;
38358bf993a5SJose Abreu 	/* Check if a fatal error happened */
38368bf993a5SJose Abreu 	if (stmmac_safety_feat_interrupt(priv))
38378bf993a5SJose Abreu 		return IRQ_HANDLED;
383834877a15SJose Abreu 
38397ac6653aSJeff Kirsher 	/* To handle GMAC own interrupts */
38407d9e6c5aSJose Abreu 	if ((priv->plat->has_gmac) || xmac) {
3841c10d4c82SJose Abreu 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
384261fac60aSJose Abreu 		int mtl_status;
38438f71a88dSJoao Pinto 
3844d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
3845d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
38460982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3847d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
38480982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3849d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
38507bac4e1eSJoao Pinto 		}
38517bac4e1eSJoao Pinto 
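		/* Handle the per-queue MTL interrupts; on an RX FIFO overflow,
		 * re-program the RX tail pointer to restart the DMA.
		 */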
38527bac4e1eSJoao Pinto 		for (queue = 0; queue < queues_count; queue++) {
385361fac60aSJose Abreu 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
385454139cf3SJoao Pinto 
385561fac60aSJose Abreu 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
385661fac60aSJose Abreu 								queue);
385761fac60aSJose Abreu 			if (mtl_status != -EINVAL)
385861fac60aSJose Abreu 				status |= mtl_status;
38597bac4e1eSJoao Pinto 
3860a4e887faSJose Abreu 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
386161fac60aSJose Abreu 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
386254139cf3SJoao Pinto 						       rx_q->rx_tail_addr,
38637bac4e1eSJoao Pinto 						       queue);
38647bac4e1eSJoao Pinto 		}
386570523e63SGiuseppe CAVALLARO 
386670523e63SGiuseppe CAVALLARO 		/* PCS link status */
38673fe5cadbSGiuseppe CAVALLARO 		if (priv->hw->pcs) {
386870523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
386970523e63SGiuseppe CAVALLARO 				netif_carrier_on(dev);
387070523e63SGiuseppe CAVALLARO 			else
387170523e63SGiuseppe CAVALLARO 				netif_carrier_off(dev);
387270523e63SGiuseppe CAVALLARO 		}
3873d765955dSGiuseppe CAVALLARO 	}
3874d765955dSGiuseppe CAVALLARO 
3875d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
38767ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
38777ac6653aSJeff Kirsher 
38787ac6653aSJeff Kirsher 	return IRQ_HANDLED;
38797ac6653aSJeff Kirsher }
38807ac6653aSJeff Kirsher 
38817ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
38827ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools
3883ceb69499SGiuseppe CAVALLARO  * to allow network I/O with interrupts disabled.
3884ceb69499SGiuseppe CAVALLARO  */
38857ac6653aSJeff Kirsher static void stmmac_poll_controller(struct net_device *dev)
38867ac6653aSJeff Kirsher {
38877ac6653aSJeff Kirsher 	disable_irq(dev->irq);
38887ac6653aSJeff Kirsher 	stmmac_interrupt(dev->irq, dev);
38897ac6653aSJeff Kirsher 	enable_irq(dev->irq);
38907ac6653aSJeff Kirsher }
38917ac6653aSJeff Kirsher #endif
38927ac6653aSJeff Kirsher 
38937ac6653aSJeff Kirsher /**
38947ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
38957ac6653aSJeff Kirsher  *  @dev: Device pointer.
38967ac6653aSJeff Kirsher  *  @rq: An IOCTL-specific structure that can contain a pointer to
38977ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
38987ac6653aSJeff Kirsher  *  @cmd: IOCTL command
38997ac6653aSJeff Kirsher  *  Description:
390032ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
39017ac6653aSJeff Kirsher  */
39027ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39037ac6653aSJeff Kirsher {
390474371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(dev);
3905891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
39067ac6653aSJeff Kirsher 
39077ac6653aSJeff Kirsher 	if (!netif_running(dev))
39087ac6653aSJeff Kirsher 		return -EINVAL;
39097ac6653aSJeff Kirsher 
3910891434b1SRayagond Kokatanur 	switch (cmd) {
3911891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
3912891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
3913891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
391474371272SJose Abreu 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
3915891434b1SRayagond Kokatanur 		break;
3916891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
3917d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_set(dev, rq);
3918d6228b7cSArtem Panfilov 		break;
3919d6228b7cSArtem Panfilov 	case SIOCGHWTSTAMP:
3920d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_get(dev, rq);
3921891434b1SRayagond Kokatanur 		break;
3922891434b1SRayagond Kokatanur 	default:
3923891434b1SRayagond Kokatanur 		break;
3924891434b1SRayagond Kokatanur 	}
39257ac6653aSJeff Kirsher 
39267ac6653aSJeff Kirsher 	return ret;
39277ac6653aSJeff Kirsher }
39287ac6653aSJeff Kirsher 
39294dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
39304dbbe8ddSJose Abreu 				    void *cb_priv)
39314dbbe8ddSJose Abreu {
39324dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
39334dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
39344dbbe8ddSJose Abreu 
3935425eabddSJose Abreu 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
3936425eabddSJose Abreu 		return ret;
3937425eabddSJose Abreu 
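	/* Quiesce all queues/NAPI while the TC configuration is changed. */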
39384dbbe8ddSJose Abreu 	stmmac_disable_all_queues(priv);
39394dbbe8ddSJose Abreu 
39404dbbe8ddSJose Abreu 	switch (type) {
39414dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
39424dbbe8ddSJose Abreu 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
39434dbbe8ddSJose Abreu 		break;
3944425eabddSJose Abreu 	case TC_SETUP_CLSFLOWER:
3945425eabddSJose Abreu 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
3946425eabddSJose Abreu 		break;
39474dbbe8ddSJose Abreu 	default:
39484dbbe8ddSJose Abreu 		break;
39494dbbe8ddSJose Abreu 	}
39504dbbe8ddSJose Abreu 
39514dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
39524dbbe8ddSJose Abreu 	return ret;
39534dbbe8ddSJose Abreu }
39544dbbe8ddSJose Abreu 
3955955bcb6eSPablo Neira Ayuso static LIST_HEAD(stmmac_block_cb_list);
3956955bcb6eSPablo Neira Ayuso 
39574dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
39584dbbe8ddSJose Abreu 			   void *type_data)
39594dbbe8ddSJose Abreu {
39604dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
39614dbbe8ddSJose Abreu 
39624dbbe8ddSJose Abreu 	switch (type) {
39634dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
3964955bcb6eSPablo Neira Ayuso 		return flow_block_cb_setup_simple(type_data,
3965955bcb6eSPablo Neira Ayuso 						  &stmmac_block_cb_list,
39664e95bc26SPablo Neira Ayuso 						  stmmac_setup_tc_block_cb,
39674e95bc26SPablo Neira Ayuso 						  priv, priv, true);
39681f705bc6SJose Abreu 	case TC_SETUP_QDISC_CBS:
39691f705bc6SJose Abreu 		return stmmac_tc_setup_cbs(priv, priv, type_data);
39704dbbe8ddSJose Abreu 	default:
39714dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
39724dbbe8ddSJose Abreu 	}
39734dbbe8ddSJose Abreu }
39744dbbe8ddSJose Abreu 
39754993e5b3SJose Abreu static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
39764993e5b3SJose Abreu 			       struct net_device *sb_dev)
39774993e5b3SJose Abreu {
39784993e5b3SJose Abreu 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
39794993e5b3SJose Abreu 		/*
39804993e5b3SJose Abreu 		 * There is no way to determine the number of TSO-capable
39814993e5b3SJose Abreu 		 * queues. Let's always use queue 0, because if TSO is
39824993e5b3SJose Abreu 		 * supported then at least this one will be capable.
39844993e5b3SJose Abreu 		 */
39854993e5b3SJose Abreu 		return 0;
39864993e5b3SJose Abreu 	}
39874993e5b3SJose Abreu 
39884993e5b3SJose Abreu 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
39894993e5b3SJose Abreu }
39904993e5b3SJose Abreu 
3991a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3992a830405eSBhadram Varka {
3993a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
3994a830405eSBhadram Varka 	int ret = 0;
3995a830405eSBhadram Varka 
3996a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
3997a830405eSBhadram Varka 	if (ret)
3998a830405eSBhadram Varka 		return ret;
3999a830405eSBhadram Varka 
4000c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4001a830405eSBhadram Varka 
4002a830405eSBhadram Varka 	return ret;
4003a830405eSBhadram Varka }
4004a830405eSBhadram Varka 
400550fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
40067ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
40077ac29055SGiuseppe CAVALLARO 
4008c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
4009c24602efSGiuseppe CAVALLARO 			       struct seq_file *seq)
40107ac29055SGiuseppe CAVALLARO {
40117ac29055SGiuseppe CAVALLARO 	int i;
4012c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4013c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
40147ac29055SGiuseppe CAVALLARO 
4015c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
4016c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
4017c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4018c24602efSGiuseppe CAVALLARO 				   i, (unsigned int)virt_to_phys(ep),
4019f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
4020f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
4021f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
4022f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
4023c24602efSGiuseppe CAVALLARO 			ep++;
4024c24602efSGiuseppe CAVALLARO 		} else {
4025c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
402666c25f6eSNiklas Cassel 				   i, (unsigned int)virt_to_phys(p),
4027f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4028f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4029c24602efSGiuseppe CAVALLARO 			p++;
4030c24602efSGiuseppe CAVALLARO 		}
40317ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
40327ac29055SGiuseppe CAVALLARO 	}
4033c24602efSGiuseppe CAVALLARO }
40347ac29055SGiuseppe CAVALLARO 
4035fb0d9c63SYangtao Li static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4036c24602efSGiuseppe CAVALLARO {
4037c24602efSGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
4038c24602efSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
403954139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
4040ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
404154139cf3SJoao Pinto 	u32 queue;
404254139cf3SJoao Pinto 
40435f2b8b62SThierry Reding 	if ((dev->flags & IFF_UP) == 0)
40445f2b8b62SThierry Reding 		return 0;
40455f2b8b62SThierry Reding 
404654139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
404754139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
404854139cf3SJoao Pinto 
404954139cf3SJoao Pinto 		seq_printf(seq, "RX Queue %d:\n", queue);
40507ac29055SGiuseppe CAVALLARO 
4051c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc) {
405254139cf3SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
405354139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_erx,
405454139cf3SJoao Pinto 					   DMA_RX_SIZE, 1, seq);
405554139cf3SJoao Pinto 		} else {
405654139cf3SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
405754139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_rx,
405854139cf3SJoao Pinto 					   DMA_RX_SIZE, 0, seq);
405954139cf3SJoao Pinto 		}
406054139cf3SJoao Pinto 	}
406154139cf3SJoao Pinto 
4062ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
4063ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4064ce736788SJoao Pinto 
4065ce736788SJoao Pinto 		seq_printf(seq, "TX Queue %d:\n", queue);
4066ce736788SJoao Pinto 
406754139cf3SJoao Pinto 		if (priv->extend_desc) {
4068ce736788SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
4069ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_etx,
4070ce736788SJoao Pinto 					   DMA_TX_SIZE, 1, seq);
4071c24602efSGiuseppe CAVALLARO 		} else {
4072ce736788SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
4073ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_tx,
4074ce736788SJoao Pinto 					   DMA_TX_SIZE, 0, seq);
4075ce736788SJoao Pinto 		}
40767ac29055SGiuseppe CAVALLARO 	}
40777ac29055SGiuseppe CAVALLARO 
40787ac29055SGiuseppe CAVALLARO 	return 0;
40797ac29055SGiuseppe CAVALLARO }
4080fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
40817ac29055SGiuseppe CAVALLARO 
4082fb0d9c63SYangtao Li static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4083e7434821SGiuseppe CAVALLARO {
4084e7434821SGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
4085e7434821SGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
4086e7434821SGiuseppe CAVALLARO 
408719e30c14SGiuseppe CAVALLARO 	if (!priv->hw_cap_support) {
4088e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "DMA HW features not supported\n");
4089e7434821SGiuseppe CAVALLARO 		return 0;
4090e7434821SGiuseppe CAVALLARO 	}
4091e7434821SGiuseppe CAVALLARO 
4092e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
4093e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tDMA HW features\n");
4094e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
4095e7434821SGiuseppe CAVALLARO 
409622d3efe5SPavel Machek 	seq_printf(seq, "\t10/100 Mbps: %s\n",
4097e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
409822d3efe5SPavel Machek 	seq_printf(seq, "\t1000 Mbps: %s\n",
4099e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
410022d3efe5SPavel Machek 	seq_printf(seq, "\tHalf duplex: %s\n",
4101e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
4102e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tHash Filter: %s\n",
4103e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
4104e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4105e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
41068d45e42bSLABBE Corentin 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4107e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pcs) ? "Y" : "N");
4108e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4109e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
4110e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
4111e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4112e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
4113e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4114e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRMON module: %s\n",
4115e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rmon) ? "Y" : "N");
4116e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4117e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
4118e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4119e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
412022d3efe5SPavel Machek 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4121e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.eee) ? "Y" : "N");
4122e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4123e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4124e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
4125f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4126f748be53SAlexandre TORGUE 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4127f748be53SAlexandre TORGUE 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
4128f748be53SAlexandre TORGUE 	} else {
4129e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4130e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4131e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4132e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4133f748be53SAlexandre TORGUE 	}
4134e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4135e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4136e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4137e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_rx_channel);
4138e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4139e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_tx_channel);
4140e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4141e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
4142e7434821SGiuseppe CAVALLARO 
4143e7434821SGiuseppe CAVALLARO 	return 0;
4144e7434821SGiuseppe CAVALLARO }
4145fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4146e7434821SGiuseppe CAVALLARO 
41478d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev)
41487ac29055SGiuseppe CAVALLARO {
4149466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
41507ac29055SGiuseppe CAVALLARO 
4151466c5ac8SMathieu Olivari 	/* Create per netdev entries */
4152466c5ac8SMathieu Olivari 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4153466c5ac8SMathieu Olivari 
41547ac29055SGiuseppe CAVALLARO 	/* Entry to report DMA RX/TX rings */
41558d72ab11SGreg Kroah-Hartman 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
41567ac29055SGiuseppe CAVALLARO 			    &stmmac_rings_status_fops);
41577ac29055SGiuseppe CAVALLARO 
4158e7434821SGiuseppe CAVALLARO 	/* Entry to report the DMA HW features */
41598d72ab11SGreg Kroah-Hartman 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
41608d72ab11SGreg Kroah-Hartman 			    &stmmac_dma_cap_fops);
41617ac29055SGiuseppe CAVALLARO }
41627ac29055SGiuseppe CAVALLARO 
4163466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev)
41647ac29055SGiuseppe CAVALLARO {
4165466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
4166466c5ac8SMathieu Olivari 
4167466c5ac8SMathieu Olivari 	debugfs_remove_recursive(priv->dbgfs_dir);
41687ac29055SGiuseppe CAVALLARO }
416950fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
41707ac29055SGiuseppe CAVALLARO 
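/* CRC32 (little-endian bit order) over the 12-bit VLAN ID; the caller
 * bit-reverses the result and keeps only the high-order bits to index
 * the MAC VLAN hash filter.
 */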
41713cd1cfcbSJose Abreu static u32 stmmac_vid_crc32_le(__le16 vid_le)
41723cd1cfcbSJose Abreu {
41733cd1cfcbSJose Abreu 	unsigned char *data = (unsigned char *)&vid_le;
41743cd1cfcbSJose Abreu 	unsigned char data_byte = 0;
41753cd1cfcbSJose Abreu 	u32 crc = ~0x0;
41763cd1cfcbSJose Abreu 	u32 temp = 0;
41773cd1cfcbSJose Abreu 	int i, bits;
41783cd1cfcbSJose Abreu 
41793cd1cfcbSJose Abreu 	bits = get_bitmask_order(VLAN_VID_MASK);
41803cd1cfcbSJose Abreu 	for (i = 0; i < bits; i++) {
41813cd1cfcbSJose Abreu 		if ((i % 8) == 0)
41823cd1cfcbSJose Abreu 			data_byte = data[i / 8];
41833cd1cfcbSJose Abreu 
41843cd1cfcbSJose Abreu 		temp = ((crc & 1) ^ data_byte) & 1;
41853cd1cfcbSJose Abreu 		crc >>= 1;
41863cd1cfcbSJose Abreu 		data_byte >>= 1;
41873cd1cfcbSJose Abreu 
41883cd1cfcbSJose Abreu 		if (temp)
41893cd1cfcbSJose Abreu 			crc ^= 0xedb88320;
41903cd1cfcbSJose Abreu 	}
41913cd1cfcbSJose Abreu 
41923cd1cfcbSJose Abreu 	return crc;
41933cd1cfcbSJose Abreu }
41943cd1cfcbSJose Abreu 
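/* Rebuild the VLAN hash filter from the set of active VIDs: for each VID,
 * the top 4 bits of the bit-reversed, inverted CRC select one of 16 hash
 * bins, so "hash" ends up as a 16-bit bitmap that is handed to the MAC via
 * stmmac_update_vlan_hash().
 */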
41953cd1cfcbSJose Abreu static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
41963cd1cfcbSJose Abreu {
41973cd1cfcbSJose Abreu 	u32 crc, hash = 0;
41983cd1cfcbSJose Abreu 	u16 vid;
41993cd1cfcbSJose Abreu 
42003cd1cfcbSJose Abreu 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
42013cd1cfcbSJose Abreu 		__le16 vid_le = cpu_to_le16(vid);
42023cd1cfcbSJose Abreu 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
42033cd1cfcbSJose Abreu 		hash |= (1 << crc);
42043cd1cfcbSJose Abreu 	}
42053cd1cfcbSJose Abreu 
42063cd1cfcbSJose Abreu 	return stmmac_update_vlan_hash(priv, priv->hw, hash, is_double);
42073cd1cfcbSJose Abreu }
42083cd1cfcbSJose Abreu 
42093cd1cfcbSJose Abreu static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
42103cd1cfcbSJose Abreu {
42113cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
42123cd1cfcbSJose Abreu 	bool is_double = false;
42133cd1cfcbSJose Abreu 	int ret;
42143cd1cfcbSJose Abreu 
42153cd1cfcbSJose Abreu 	if (!priv->dma_cap.vlhash)
42163cd1cfcbSJose Abreu 		return -EOPNOTSUPP;
42173cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
42183cd1cfcbSJose Abreu 		is_double = true;
42193cd1cfcbSJose Abreu 
42203cd1cfcbSJose Abreu 	set_bit(vid, priv->active_vlans);
42213cd1cfcbSJose Abreu 	ret = stmmac_vlan_update(priv, is_double);
42223cd1cfcbSJose Abreu 	if (ret) {
42233cd1cfcbSJose Abreu 		clear_bit(vid, priv->active_vlans);
42243cd1cfcbSJose Abreu 		return ret;
42253cd1cfcbSJose Abreu 	}
42263cd1cfcbSJose Abreu 
42273cd1cfcbSJose Abreu 	return ret;
42283cd1cfcbSJose Abreu }
42293cd1cfcbSJose Abreu 
42303cd1cfcbSJose Abreu static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
42313cd1cfcbSJose Abreu {
42323cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
42333cd1cfcbSJose Abreu 	bool is_double = false;
42343cd1cfcbSJose Abreu 
42353cd1cfcbSJose Abreu 	if (!priv->dma_cap.vlhash)
42363cd1cfcbSJose Abreu 		return -EOPNOTSUPP;
42373cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
42383cd1cfcbSJose Abreu 		is_double = true;
42393cd1cfcbSJose Abreu 
42403cd1cfcbSJose Abreu 	clear_bit(vid, priv->active_vlans);
42413cd1cfcbSJose Abreu 	return stmmac_vlan_update(priv, is_double);
42423cd1cfcbSJose Abreu }
42433cd1cfcbSJose Abreu 
42447ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = {
42457ac6653aSJeff Kirsher 	.ndo_open = stmmac_open,
42467ac6653aSJeff Kirsher 	.ndo_start_xmit = stmmac_xmit,
42477ac6653aSJeff Kirsher 	.ndo_stop = stmmac_release,
42487ac6653aSJeff Kirsher 	.ndo_change_mtu = stmmac_change_mtu,
42497ac6653aSJeff Kirsher 	.ndo_fix_features = stmmac_fix_features,
4250d2afb5bdSGiuseppe CAVALLARO 	.ndo_set_features = stmmac_set_features,
425101789349SJiri Pirko 	.ndo_set_rx_mode = stmmac_set_rx_mode,
42527ac6653aSJeff Kirsher 	.ndo_tx_timeout = stmmac_tx_timeout,
42537ac6653aSJeff Kirsher 	.ndo_do_ioctl = stmmac_ioctl,
42544dbbe8ddSJose Abreu 	.ndo_setup_tc = stmmac_setup_tc,
42554993e5b3SJose Abreu 	.ndo_select_queue = stmmac_select_queue,
42567ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
42577ac6653aSJeff Kirsher 	.ndo_poll_controller = stmmac_poll_controller,
42587ac6653aSJeff Kirsher #endif
4259a830405eSBhadram Varka 	.ndo_set_mac_address = stmmac_set_mac_address,
42603cd1cfcbSJose Abreu 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
42613cd1cfcbSJose Abreu 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
42627ac6653aSJeff Kirsher };
42637ac6653aSJeff Kirsher 
426434877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv)
426534877a15SJose Abreu {
426634877a15SJose Abreu 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
426734877a15SJose Abreu 		return;
426834877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
426934877a15SJose Abreu 		return;
427034877a15SJose Abreu 
427134877a15SJose Abreu 	netdev_err(priv->dev, "Reset adapter.\n");
427234877a15SJose Abreu 
427334877a15SJose Abreu 	rtnl_lock();
427434877a15SJose Abreu 	netif_trans_update(priv->dev);
427534877a15SJose Abreu 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
427634877a15SJose Abreu 		usleep_range(1000, 2000);
427734877a15SJose Abreu 
427834877a15SJose Abreu 	set_bit(STMMAC_DOWN, &priv->state);
427934877a15SJose Abreu 	dev_close(priv->dev);
428000f54e68SPetr Machata 	dev_open(priv->dev, NULL);
428134877a15SJose Abreu 	clear_bit(STMMAC_DOWN, &priv->state);
428234877a15SJose Abreu 	clear_bit(STMMAC_RESETING, &priv->state);
428334877a15SJose Abreu 	rtnl_unlock();
428434877a15SJose Abreu }
428534877a15SJose Abreu 
428634877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
428734877a15SJose Abreu {
428834877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
428934877a15SJose Abreu 			service_task);
429034877a15SJose Abreu 
429134877a15SJose Abreu 	stmmac_reset_subtask(priv);
429234877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
429334877a15SJose Abreu }
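
/* Note: the reset subtask above only runs when another part of the driver
 * (typically an error or TX timeout path) has set STMMAC_RESET_REQUESTED in
 * priv->state and scheduled priv->service_task.
 */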
429434877a15SJose Abreu 
42957ac6653aSJeff Kirsher /**
4296cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
429732ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
4298732fdf0eSGiuseppe CAVALLARO  *  Description: this function configures the MAC device according to
4299732fdf0eSGiuseppe CAVALLARO  *  some platform parameters or the HW capability register. It prepares the
4300732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain mode and to set up either enhanced or
4301732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
4302cf3f047bSGiuseppe CAVALLARO  */
4303cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
4304cf3f047bSGiuseppe CAVALLARO {
43055f0456b4SJose Abreu 	int ret;
4306cf3f047bSGiuseppe CAVALLARO 
43079f93ac8dSLABBE Corentin 	/* dwmac-sun8i only works in chain mode */
43089f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i)
43099f93ac8dSLABBE Corentin 		chain_mode = 1;
43105f0456b4SJose Abreu 	priv->chain_mode = chain_mode;
43119f93ac8dSLABBE Corentin 
43125f0456b4SJose Abreu 	/* Initialize HW Interface */
43135f0456b4SJose Abreu 	ret = stmmac_hwif_init(priv);
43145f0456b4SJose Abreu 	if (ret)
43155f0456b4SJose Abreu 		return ret;
43164a7d666aSGiuseppe CAVALLARO 
4317cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capability (new GMAC newer than 3.50a) */
4318cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4319cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
432038ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
4321cf3f047bSGiuseppe CAVALLARO 
4322cf3f047bSGiuseppe CAVALLARO 		/* We can override some gmac/dma configuration fields
4323cf3f047bSGiuseppe CAVALLARO 		 * (e.g. enh_desc, tx_coe) that are passed through the
4324cf3f047bSGiuseppe CAVALLARO 		 * platform with the values from the HW capability
4325cf3f047bSGiuseppe CAVALLARO 		 * register (if supported).
4326cf3f047bSGiuseppe CAVALLARO 		 */
4327cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4328cf3f047bSGiuseppe CAVALLARO 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
43293fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
4330b8ef7020SBiao Huang 		if (priv->dma_cap.hash_tb_sz) {
4331b8ef7020SBiao Huang 			priv->hw->multicast_filter_bins =
4332b8ef7020SBiao Huang 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
4333b8ef7020SBiao Huang 			priv->hw->mcast_bits_log2 =
4334b8ef7020SBiao Huang 					ilog2(priv->hw->multicast_filter_bins);
4335b8ef7020SBiao Huang 		}
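		/* For example (illustrative): hash_tb_sz == 2 gives
		 * BIT(2) << 5 == 128 multicast filter bins and
		 * mcast_bits_log2 == ilog2(128) == 7.
		 */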
433638912bdbSDeepak SIKRI 
4337a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
4338a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
4339a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
4340a8df35d4SEzequiel Garcia 		else
434138912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4342a8df35d4SEzequiel Garcia 
4343f748be53SAlexandre TORGUE 		/* In case of GMAC4 rx_coe is from HW cap register. */
4344f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
434538912bdbSDeepak SIKRI 
434638912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
434738912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
434838912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
434938912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
435038912bdbSDeepak SIKRI 
435138ddc59dSLABBE Corentin 	} else {
435238ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
435338ddc59dSLABBE Corentin 	}
4354cf3f047bSGiuseppe CAVALLARO 
4355d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
4356d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
435738ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4358f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
435938ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4360d2afb5bdSGiuseppe CAVALLARO 	}
4361cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
436238ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
4363cf3f047bSGiuseppe CAVALLARO 
4364cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
436538ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On LAN supported\n");
4366cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
4367cf3f047bSGiuseppe CAVALLARO 	}
4368cf3f047bSGiuseppe CAVALLARO 
4369f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
437038ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
4371f748be53SAlexandre TORGUE 
43727cfde0afSJose Abreu 	/* Run HW quirks, if any */
43737cfde0afSJose Abreu 	if (priv->hwif_quirks) {
43747cfde0afSJose Abreu 		ret = priv->hwif_quirks(priv);
43757cfde0afSJose Abreu 		if (ret)
43767cfde0afSJose Abreu 			return ret;
43777cfde0afSJose Abreu 	}
43787cfde0afSJose Abreu 
43793b509466SJose Abreu 	/* Rx Watchdog is available in cores newer than 3.40.
43803b509466SJose Abreu 	 * In some cases, for example on buggy HW, this feature
43813b509466SJose Abreu 	 * has to be disabled; this can be done by passing the
43823b509466SJose Abreu 	 * riwt_off field from the platform.
43833b509466SJose Abreu 	 */
43843b509466SJose Abreu 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
43853b509466SJose Abreu 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
43863b509466SJose Abreu 		priv->use_riwt = 1;
43873b509466SJose Abreu 		dev_info(priv->device,
43883b509466SJose Abreu 			 "Enable RX Mitigation via HW Watchdog Timer\n");
43893b509466SJose Abreu 	}
43903b509466SJose Abreu 
4391c24602efSGiuseppe CAVALLARO 	return 0;
4392cf3f047bSGiuseppe CAVALLARO }
4393cf3f047bSGiuseppe CAVALLARO 
4394cf3f047bSGiuseppe CAVALLARO /**
4395bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
4396bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
4397ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
4398e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
4399bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function used to
4400bfab27a1SGiuseppe CAVALLARO  * allocate the network device and initialize the private structure.
44019afec6efSAndy Shevchenko  * Return:
440215ffac73SJoachim Eastwood  * 0 on success, otherwise errno.
44037ac6653aSJeff Kirsher  */
440415ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
4405cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
4406e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
44077ac6653aSJeff Kirsher {
4408bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
4409bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
441076067459SJose Abreu 	u32 queue, rxq, maxq;
441176067459SJose Abreu 	int i, ret = 0;
44127ac6653aSJeff Kirsher 
44139737070cSJisheng Zhang 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
44149737070cSJisheng Zhang 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
441541de8d4cSJoe Perches 	if (!ndev)
441615ffac73SJoachim Eastwood 		return -ENOMEM;
44177ac6653aSJeff Kirsher 
4418bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
44197ac6653aSJeff Kirsher 
4420bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
4421bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
4422bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
4423bfab27a1SGiuseppe CAVALLARO 
4424bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
4425cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
4426cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
4427e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
4428e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
4429e56788cfSJoachim Eastwood 
4430e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
4431e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
4432e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
4433e56788cfSJoachim Eastwood 
4434a51645f7SPetr Štetiar 	if (!IS_ERR_OR_NULL(res->mac))
4435e56788cfSJoachim Eastwood 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4436bfab27a1SGiuseppe CAVALLARO 
4437a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
4438803f8fc4SJoachim Eastwood 
4439cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
4440cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
4441cf3f047bSGiuseppe CAVALLARO 
444234877a15SJose Abreu 	/* Allocate workqueue */
444334877a15SJose Abreu 	priv->wq = create_singlethread_workqueue("stmmac_wq");
444434877a15SJose Abreu 	if (!priv->wq) {
444534877a15SJose Abreu 		dev_err(priv->device, "failed to create workqueue\n");
44469737070cSJisheng Zhang 		return -ENOMEM;
444734877a15SJose Abreu 	}
444834877a15SJose Abreu 
444934877a15SJose Abreu 	INIT_WORK(&priv->service_task, stmmac_service_task);
445034877a15SJose Abreu 
4451cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied XXX CRS XXX
4452ceb69499SGiuseppe CAVALLARO 	 * this needs to have multiple instances
4453ceb69499SGiuseppe CAVALLARO 	 */
4454cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
4455cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
4456cf3f047bSGiuseppe CAVALLARO 
445790f522a2SEugeniy Paltsev 	if (priv->plat->stmmac_rst) {
445890f522a2SEugeniy Paltsev 		ret = reset_control_assert(priv->plat->stmmac_rst);
4459f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
446090f522a2SEugeniy Paltsev 		/* Some reset controllers have only reset callback instead of
446190f522a2SEugeniy Paltsev 		 * assert + deassert callbacks pair.
446290f522a2SEugeniy Paltsev 		 */
446390f522a2SEugeniy Paltsev 		if (ret == -ENOTSUPP)
446490f522a2SEugeniy Paltsev 			reset_control_reset(priv->plat->stmmac_rst);
446590f522a2SEugeniy Paltsev 	}
4466c5e4ddbdSChen-Yu Tsai 
4467cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
4468c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
4469c24602efSGiuseppe CAVALLARO 	if (ret)
447062866e98SChen-Yu Tsai 		goto error_hw_init;
4471cf3f047bSGiuseppe CAVALLARO 
4472b561af36SVinod Koul 	stmmac_check_ether_addr(priv);
4473b561af36SVinod Koul 
4474c22a3f48SJoao Pinto 	/* Configure real RX and TX queues */
4475c02b7a91SJoao Pinto 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4476c02b7a91SJoao Pinto 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4477c22a3f48SJoao Pinto 
4478cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
4479cf3f047bSGiuseppe CAVALLARO 
4480cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4481cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
4482f748be53SAlexandre TORGUE 
44834dbbe8ddSJose Abreu 	ret = stmmac_tc_init(priv, priv);
44844dbbe8ddSJose Abreu 	if (!ret)
44854dbbe8ddSJose Abreu 		ndev->hw_features |= NETIF_F_HW_TC;
44874dbbe8ddSJose Abreu 
4488f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
44899edfa7daSNiklas Cassel 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4490f748be53SAlexandre TORGUE 		priv->tso = true;
449138ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
4492f748be53SAlexandre TORGUE 	}
4493a993db88SJose Abreu 
449467afd6d1SJose Abreu 	if (priv->dma_cap.sphen) {
449567afd6d1SJose Abreu 		ndev->hw_features |= NETIF_F_GRO;
449667afd6d1SJose Abreu 		priv->sph = true;
449767afd6d1SJose Abreu 		dev_info(priv->device, "SPH feature enabled\n");
449867afd6d1SJose Abreu 	}
449967afd6d1SJose Abreu 
4500a993db88SJose Abreu 	if (priv->dma_cap.addr64) {
4501a993db88SJose Abreu 		ret = dma_set_mask_and_coherent(device,
4502a993db88SJose Abreu 				DMA_BIT_MASK(priv->dma_cap.addr64));
4503a993db88SJose Abreu 		if (!ret) {
4504a993db88SJose Abreu 			dev_info(priv->device, "Using %d bits DMA width\n",
4505a993db88SJose Abreu 				 priv->dma_cap.addr64);
4506a993db88SJose Abreu 		} else {
4507a993db88SJose Abreu 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
4508a993db88SJose Abreu 			if (ret) {
4509a993db88SJose Abreu 				dev_err(priv->device, "Failed to set DMA Mask\n");
4510a993db88SJose Abreu 				goto error_hw_init;
4511a993db88SJose Abreu 			}
4512a993db88SJose Abreu 
4513a993db88SJose Abreu 			priv->dma_cap.addr64 = 32;
4514a993db88SJose Abreu 		}
4515a993db88SJose Abreu 	}
4516a993db88SJose Abreu 
4517bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4518bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
45197ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
45207ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
4521ab188e8fSElad Nachman 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
45223cd1cfcbSJose Abreu 	if (priv->dma_cap.vlhash) {
45233cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
45243cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
45253cd1cfcbSJose Abreu 	}
452630d93227SJose Abreu 	if (priv->dma_cap.vlins) {
452730d93227SJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
452830d93227SJose Abreu 		if (priv->dma_cap.dvlan)
452930d93227SJose Abreu 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
453030d93227SJose Abreu 	}
45317ac6653aSJeff Kirsher #endif
45327ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
45337ac6653aSJeff Kirsher 
453476067459SJose Abreu 	/* Initialize RSS */
453576067459SJose Abreu 	rxq = priv->plat->rx_queues_to_use;
453676067459SJose Abreu 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
453776067459SJose Abreu 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
453876067459SJose Abreu 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
453976067459SJose Abreu 
454076067459SJose Abreu 	if (priv->dma_cap.rssen && priv->plat->rss_en)
454176067459SJose Abreu 		ndev->features |= NETIF_F_RXHASH;
454276067459SJose Abreu 
454344770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
454444770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
454556bcd591SJose Abreu 	if (priv->plat->has_xgmac)
45467d9e6c5aSJose Abreu 		ndev->max_mtu = XGMAC_JUMBO_LEN;
454756bcd591SJose Abreu 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
454856bcd591SJose Abreu 		ndev->max_mtu = JUMBO_LEN;
454944770e11SJarod Wilson 	else
455044770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4551a2cd64f3SKweh, Hock Leong 	/* Do not overwrite ndev->max_mtu if plat->maxmtu is greater than
4552a2cd64f3SKweh, Hock Leong 	 * ndev->max_mtu or smaller than ndev->min_mtu, which is an invalid range.
4553a2cd64f3SKweh, Hock Leong 	 */
4554a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4555a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
455644770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
4557a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
4558b618ab45SHeiner Kallweit 		dev_warn(priv->device,
4559a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu having invalid value (%d)\n",
4560a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
456144770e11SJarod Wilson 
45627ac6653aSJeff Kirsher 	if (flow_ctrl)
45637ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
45647ac6653aSJeff Kirsher 
45658fce3331SJose Abreu 	/* Setup channels NAPI */
45668fce3331SJose Abreu 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4567c22a3f48SJoao Pinto 
45688fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
45698fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
45708fce3331SJose Abreu 
45718fce3331SJose Abreu 		ch->priv_data = priv;
45728fce3331SJose Abreu 		ch->index = queue;
45738fce3331SJose Abreu 
45744ccb4585SJose Abreu 		if (queue < priv->plat->rx_queues_to_use) {
45754ccb4585SJose Abreu 			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
45768fce3331SJose Abreu 				       NAPI_POLL_WEIGHT);
4577c22a3f48SJoao Pinto 		}
45784ccb4585SJose Abreu 		if (queue < priv->plat->tx_queues_to_use) {
45794d97972bSFrode Isaksen 			netif_tx_napi_add(ndev, &ch->tx_napi,
45804d97972bSFrode Isaksen 					  stmmac_napi_poll_tx,
45814ccb4585SJose Abreu 					  NAPI_POLL_WEIGHT);
45824ccb4585SJose Abreu 		}
45834ccb4585SJose Abreu 	}
45847ac6653aSJeff Kirsher 
458529555fa3SThierry Reding 	mutex_init(&priv->lock);
45867ac6653aSJeff Kirsher 
4587cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform,
4588cd7201f4SGiuseppe CAVALLARO 	 * the CSR Clock Range selection cannot be changed at run-time
4589cd7201f4SGiuseppe CAVALLARO 	 * and is fixed. Otherwise, the driver will try to set the MDC
4590cd7201f4SGiuseppe CAVALLARO 	 * clock dynamically according to the actual csr clock input.
4592cd7201f4SGiuseppe CAVALLARO 	 */
45935e7f7fc5SBiao Huang 	if (priv->plat->clk_csr >= 0)
4594cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
45955e7f7fc5SBiao Huang 	else
45965e7f7fc5SBiao Huang 		stmmac_clk_csr_set(priv);
4597cd7201f4SGiuseppe CAVALLARO 
4598e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
4599e58bb43fSGiuseppe CAVALLARO 
46003fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
46013fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
46023fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
46034bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
46044bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
46054bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
4606b618ab45SHeiner Kallweit 			dev_err(priv->device,
460738ddc59dSLABBE Corentin 				"%s: MDIO bus (id: %d) registration failed",
46084bfcbd7aSFrancesco Virlinzi 				__func__, priv->plat->bus_id);
46096a81c26fSViresh Kumar 			goto error_mdio_register;
46104bfcbd7aSFrancesco Virlinzi 		}
4611e58bb43fSGiuseppe CAVALLARO 	}
46124bfcbd7aSFrancesco Virlinzi 
461374371272SJose Abreu 	ret = stmmac_phy_setup(priv);
461474371272SJose Abreu 	if (ret) {
461574371272SJose Abreu 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
461674371272SJose Abreu 		goto error_phy_setup;
461774371272SJose Abreu 	}
461874371272SJose Abreu 
461957016590SFlorian Fainelli 	ret = register_netdev(ndev);
4620b2eb09afSFlorian Fainelli 	if (ret) {
4621b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
462257016590SFlorian Fainelli 			__func__, ret);
4623b2eb09afSFlorian Fainelli 		goto error_netdev_register;
4624b2eb09afSFlorian Fainelli 	}
46257ac6653aSJeff Kirsher 
46265f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
46278d72ab11SGreg Kroah-Hartman 	stmmac_init_fs(ndev);
46285f2b8b62SThierry Reding #endif
46295f2b8b62SThierry Reding 
463057016590SFlorian Fainelli 	return ret;
46317ac6653aSJeff Kirsher 
46326a81c26fSViresh Kumar error_netdev_register:
463374371272SJose Abreu 	phylink_destroy(priv->phylink);
463474371272SJose Abreu error_phy_setup:
4635b2eb09afSFlorian Fainelli 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4636b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4637b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4638b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
46397ac6653aSJeff Kirsher error_mdio_register:
46408fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
46418fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
4642c22a3f48SJoao Pinto 
46434ccb4585SJose Abreu 		if (queue < priv->plat->rx_queues_to_use)
46444ccb4585SJose Abreu 			netif_napi_del(&ch->rx_napi);
46454ccb4585SJose Abreu 		if (queue < priv->plat->tx_queues_to_use)
46464ccb4585SJose Abreu 			netif_napi_del(&ch->tx_napi);
4647c22a3f48SJoao Pinto 	}
464862866e98SChen-Yu Tsai error_hw_init:
464934877a15SJose Abreu 	destroy_workqueue(priv->wq);
46507ac6653aSJeff Kirsher 
465115ffac73SJoachim Eastwood 	return ret;
46527ac6653aSJeff Kirsher }
4653b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
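
/* Typical usage from a bus glue driver (illustrative sketch only; the real
 * glue code lives in stmmac_platform.c, stmmac_pci.c and the dwmac-*.c
 * files):
 *
 *	struct stmmac_resources res = { 0 };
 *
 *	res.addr = devm_platform_ioremap_resource(pdev, 0);
 *	res.irq = platform_get_irq(pdev, 0);
 *	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &res);
 *
 * where plat_dat is a populated struct plat_stmmacenet_data and error
 * handling has been omitted.
 */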
46547ac6653aSJeff Kirsher 
46557ac6653aSJeff Kirsher /**
46567ac6653aSJeff Kirsher  * stmmac_dvr_remove
4657f4e7bd81SJoachim Eastwood  * @dev: device pointer
46587ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4659bfab27a1SGiuseppe CAVALLARO  * changes the link status and releases the DMA descriptor rings.
46607ac6653aSJeff Kirsher  */
4661f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev)
46627ac6653aSJeff Kirsher {
4663f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
46647ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
46657ac6653aSJeff Kirsher 
466638ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
46677ac6653aSJeff Kirsher 
46685f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
46695f2b8b62SThierry Reding 	stmmac_exit_fs(ndev);
46705f2b8b62SThierry Reding #endif
4671ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
46727ac6653aSJeff Kirsher 
4673c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
46747ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
46757ac6653aSJeff Kirsher 	unregister_netdev(ndev);
467674371272SJose Abreu 	phylink_destroy(priv->phylink);
4677f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
4678f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
4679f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->pclk);
4680f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->stmmac_clk);
46813fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
46823fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
46833fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4684e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
468534877a15SJose Abreu 	destroy_workqueue(priv->wq);
468629555fa3SThierry Reding 	mutex_destroy(&priv->lock);
46877ac6653aSJeff Kirsher 
46887ac6653aSJeff Kirsher 	return 0;
46897ac6653aSJeff Kirsher }
4690b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
46917ac6653aSJeff Kirsher 
4692732fdf0eSGiuseppe CAVALLARO /**
4693732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
4694f4e7bd81SJoachim Eastwood  * @dev: device pointer
4695732fdf0eSGiuseppe CAVALLARO  * Description: this function suspends the device. It is called by the
4696732fdf0eSGiuseppe CAVALLARO  * platform driver to stop the network queues, program the PMT register
4697732fdf0eSGiuseppe CAVALLARO  * (for WoL) and clean up and release the driver resources.
4698732fdf0eSGiuseppe CAVALLARO  */
4699f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
47007ac6653aSJeff Kirsher {
4701f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
47027ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
47037ac6653aSJeff Kirsher 
47047ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
47057ac6653aSJeff Kirsher 		return 0;
47067ac6653aSJeff Kirsher 
470774371272SJose Abreu 	phylink_stop(priv->phylink);
4708102463b1SFrancesco Virlinzi 
470929555fa3SThierry Reding 	mutex_lock(&priv->lock);
47107ac6653aSJeff Kirsher 
47117ac6653aSJeff Kirsher 	netif_device_detach(ndev);
4712c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
47137ac6653aSJeff Kirsher 
4714c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
47157ac6653aSJeff Kirsher 
47167ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
4717ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
4718c24602efSGiuseppe CAVALLARO 
47197ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
472089f7f2cfSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
4721c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, priv->wolopts);
472289f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
472389f7f2cfSSrinivas Kandagatla 	} else {
4724c10d4c82SJose Abreu 		stmmac_mac_set(priv, priv->ioaddr, false);
4725db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
4726ba1377ffSGiuseppe CAVALLARO 		/* Disable clocks when Wake-on-LAN is not used */
4727f573c0b9Sjpinto 		clk_disable(priv->plat->pclk);
4728f573c0b9Sjpinto 		clk_disable(priv->plat->stmmac_clk);
4729ba1377ffSGiuseppe CAVALLARO 	}
473029555fa3SThierry Reding 	mutex_unlock(&priv->lock);
47312d871aa0SVince Bridgers 
4732bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
47337ac6653aSJeff Kirsher 	return 0;
47347ac6653aSJeff Kirsher }
4735b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
47367ac6653aSJeff Kirsher 
4737732fdf0eSGiuseppe CAVALLARO /**
473854139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
473954139cf3SJoao Pinto  * @priv: driver private structure
474054139cf3SJoao Pinto  */
474154139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
474254139cf3SJoao Pinto {
474354139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4744ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
474554139cf3SJoao Pinto 	u32 queue;
474654139cf3SJoao Pinto 
474754139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
474854139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
474954139cf3SJoao Pinto 
475054139cf3SJoao Pinto 		rx_q->cur_rx = 0;
475154139cf3SJoao Pinto 		rx_q->dirty_rx = 0;
475254139cf3SJoao Pinto 	}
475354139cf3SJoao Pinto 
4754ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
4755ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4756ce736788SJoao Pinto 
4757ce736788SJoao Pinto 		tx_q->cur_tx = 0;
4758ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
47598d212a9eSNiklas Cassel 		tx_q->mss = 0;
4760ce736788SJoao Pinto 	}
476154139cf3SJoao Pinto }
476254139cf3SJoao Pinto 
476354139cf3SJoao Pinto /**
4764732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
4765f4e7bd81SJoachim Eastwood  * @dev: device pointer
4766732fdf0eSGiuseppe CAVALLARO  * Description: on resume, this function is invoked to set up the DMA and
4767732fdf0eSGiuseppe CAVALLARO  * CORE in a usable state.
4768732fdf0eSGiuseppe CAVALLARO  */
4769f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
47707ac6653aSJeff Kirsher {
4771f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
47727ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
47737ac6653aSJeff Kirsher 
47747ac6653aSJeff Kirsher 	if (!netif_running(ndev))
47757ac6653aSJeff Kirsher 		return 0;
47767ac6653aSJeff Kirsher 
47777ac6653aSJeff Kirsher 	/* The Power-Down bit in the PMT register is cleared
47787ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
47797ac6653aSJeff Kirsher 	 * is received. Anyway, it's better to manually clear
47807ac6653aSJeff Kirsher 	 * this bit because it can generate problems while resuming
4781ceb69499SGiuseppe CAVALLARO 	 * from other devices (e.g. serial console).
4782ceb69499SGiuseppe CAVALLARO 	 */
4783623997fbSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
478429555fa3SThierry Reding 		mutex_lock(&priv->lock);
4785c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, 0);
478629555fa3SThierry Reding 		mutex_unlock(&priv->lock);
478789f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
4788623997fbSSrinivas Kandagatla 	} else {
4789db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
47908d45e42bSLABBE Corentin 		/* enable the clocks previously disabled */
4791f573c0b9Sjpinto 		clk_enable(priv->plat->stmmac_clk);
4792f573c0b9Sjpinto 		clk_enable(priv->plat->pclk);
4793623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
4794623997fbSSrinivas Kandagatla 		if (priv->mii)
4795623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
4796623997fbSSrinivas Kandagatla 	}
47977ac6653aSJeff Kirsher 
47987ac6653aSJeff Kirsher 	netif_device_attach(ndev);
47997ac6653aSJeff Kirsher 
480029555fa3SThierry Reding 	mutex_lock(&priv->lock);
4801f55d84b0SVincent Palatin 
480254139cf3SJoao Pinto 	stmmac_reset_queues_param(priv);
480354139cf3SJoao Pinto 
4804ae79a639SGiuseppe CAVALLARO 	stmmac_clear_descriptors(priv);
4805ae79a639SGiuseppe CAVALLARO 
4806fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
4807d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
4808ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
48097ac6653aSJeff Kirsher 
4810c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
48117ac6653aSJeff Kirsher 
4812c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
48137ac6653aSJeff Kirsher 
481429555fa3SThierry Reding 	mutex_unlock(&priv->lock);
4815102463b1SFrancesco Virlinzi 
481674371272SJose Abreu 	phylink_start(priv->phylink);
4817102463b1SFrancesco Virlinzi 
48187ac6653aSJeff Kirsher 	return 0;
48197ac6653aSJeff Kirsher }
4820b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
4821ba27ec66SGiuseppe CAVALLARO 
48227ac6653aSJeff Kirsher #ifndef MODULE
48237ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
48247ac6653aSJeff Kirsher {
48257ac6653aSJeff Kirsher 	char *opt;
48267ac6653aSJeff Kirsher 
48277ac6653aSJeff Kirsher 	if (!str || !*str)
48287ac6653aSJeff Kirsher 		return -EINVAL;
48297ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
48307ac6653aSJeff Kirsher 		if (!strncmp(opt, "debug:", 6)) {
4831ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
48327ac6653aSJeff Kirsher 				goto err;
48337ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4834ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
48357ac6653aSJeff Kirsher 				goto err;
48367ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4837ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
48387ac6653aSJeff Kirsher 				goto err;
48397ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "tc:", 3)) {
4840ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
48417ac6653aSJeff Kirsher 				goto err;
48427ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "watchdog:", 9)) {
4843ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
48447ac6653aSJeff Kirsher 				goto err;
48457ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4846ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
48477ac6653aSJeff Kirsher 				goto err;
48487ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "pause:", 6)) {
4849ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
48507ac6653aSJeff Kirsher 				goto err;
4851506f669cSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4852d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
4853d765955dSGiuseppe CAVALLARO 				goto err;
48544a7d666aSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "chain_mode:", 11)) {
48554a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
48564a7d666aSGiuseppe CAVALLARO 				goto err;
48577ac6653aSJeff Kirsher 		}
48587ac6653aSJeff Kirsher 	}
48597ac6653aSJeff Kirsher 	return 0;
48607ac6653aSJeff Kirsher 
48617ac6653aSJeff Kirsher err:
48627ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
48637ac6653aSJeff Kirsher 	return -EINVAL;
48647ac6653aSJeff Kirsher }
48657ac6653aSJeff Kirsher 
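/* Example (illustrative values): booting with
 *	stmmaceth=debug:16,phyaddr:1,eee_timer:2000
 * overrides the corresponding module parameters at boot time.
 */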
48667ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
4867ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
48686fc0d0f2SGiuseppe Cavallaro 
4869466c5ac8SMathieu Olivari static int __init stmmac_init(void)
4870466c5ac8SMathieu Olivari {
4871466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4872466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
48738d72ab11SGreg Kroah-Hartman 	if (!stmmac_fs_dir)
4874466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4875466c5ac8SMathieu Olivari #endif
4876466c5ac8SMathieu Olivari 
4877466c5ac8SMathieu Olivari 	return 0;
4878466c5ac8SMathieu Olivari }
4879466c5ac8SMathieu Olivari 
4880466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
4881466c5ac8SMathieu Olivari {
4882466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4883466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
4884466c5ac8SMathieu Olivari #endif
4885466c5ac8SMathieu Olivari }
4886466c5ac8SMathieu Olivari 
4887466c5ac8SMathieu Olivari module_init(stmmac_init)
4888466c5ac8SMathieu Olivari module_exit(stmmac_exit)
4889466c5ac8SMathieu Olivari 
48906fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
48916fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
48926fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
4893