14fa9c49fSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
27ac6653aSJeff Kirsher /*******************************************************************************
37ac6653aSJeff Kirsher   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
47ac6653aSJeff Kirsher   ST Ethernet IPs are built around a Synopsys IP Core.
57ac6653aSJeff Kirsher 
6286a8372SGiuseppe CAVALLARO 	Copyright(C) 2007-2011 STMicroelectronics Ltd
77ac6653aSJeff Kirsher 
87ac6653aSJeff Kirsher 
97ac6653aSJeff Kirsher   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
107ac6653aSJeff Kirsher 
117ac6653aSJeff Kirsher   Documentation available at:
127ac6653aSJeff Kirsher 	http://www.stlinux.com
137ac6653aSJeff Kirsher   Support available at:
147ac6653aSJeff Kirsher 	https://bugzilla.stlinux.com/
157ac6653aSJeff Kirsher *******************************************************************************/
167ac6653aSJeff Kirsher 
176a81c26fSViresh Kumar #include <linux/clk.h>
187ac6653aSJeff Kirsher #include <linux/kernel.h>
197ac6653aSJeff Kirsher #include <linux/interrupt.h>
207ac6653aSJeff Kirsher #include <linux/ip.h>
217ac6653aSJeff Kirsher #include <linux/tcp.h>
227ac6653aSJeff Kirsher #include <linux/skbuff.h>
237ac6653aSJeff Kirsher #include <linux/ethtool.h>
247ac6653aSJeff Kirsher #include <linux/if_ether.h>
257ac6653aSJeff Kirsher #include <linux/crc32.h>
267ac6653aSJeff Kirsher #include <linux/mii.h>
2701789349SJiri Pirko #include <linux/if.h>
287ac6653aSJeff Kirsher #include <linux/if_vlan.h>
297ac6653aSJeff Kirsher #include <linux/dma-mapping.h>
307ac6653aSJeff Kirsher #include <linux/slab.h>
317ac6653aSJeff Kirsher #include <linux/prefetch.h>
32db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h>
3350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
347ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h>
357ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h>
3650fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
37891434b1SRayagond Kokatanur #include <linux/net_tstamp.h>
38eeef2f6bSJose Abreu #include <linux/phylink.h>
39b7766206SJose Abreu #include <linux/udp.h>
404dbbe8ddSJose Abreu #include <net/pkt_cls.h>
41891434b1SRayagond Kokatanur #include "stmmac_ptp.h"
42286a8372SGiuseppe CAVALLARO #include "stmmac.h"
43c5e4ddbdSChen-Yu Tsai #include <linux/reset.h>
445790cf3cSMathieu Olivari #include <linux/of_mdio.h>
4519d857c9SPhil Reid #include "dwmac1000.h"
467d9e6c5aSJose Abreu #include "dwxgmac2.h"
4742de047dSJose Abreu #include "hwif.h"
487ac6653aSJeff Kirsher 
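/* Note: STMMAC_ALIGN() rounds a buffer size up to the cache line size and
 * then to a 16-byte boundary; TSO_MAX_BUFF_SIZE (just under 16 KiB) is the
 * per-descriptor buffer limit used on the TSO transmit path.
 */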
498d558f02SJose Abreu #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
50f748be53SAlexandre TORGUE #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
517ac6653aSJeff Kirsher 
527ac6653aSJeff Kirsher /* Module parameters */
5332ceabcaSGiuseppe CAVALLARO #define TX_TIMEO	5000
547ac6653aSJeff Kirsher static int watchdog = TX_TIMEO;
55d3757ba4SJoe Perches module_param(watchdog, int, 0644);
5632ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
577ac6653aSJeff Kirsher 
5832ceabcaSGiuseppe CAVALLARO static int debug = -1;
59d3757ba4SJoe Perches module_param(debug, int, 0644);
6032ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
617ac6653aSJeff Kirsher 
6247d1f71fSstephen hemminger static int phyaddr = -1;
63d3757ba4SJoe Perches module_param(phyaddr, int, 0444);
647ac6653aSJeff Kirsher MODULE_PARM_DESC(phyaddr, "Physical device address");
657ac6653aSJeff Kirsher 
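/* Ring watermarks: a quarter of the ring size. The TX threshold is used,
 * for instance, to decide when a stopped TX queue may be woken up again.
 */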
66e3ad57c9SGiuseppe Cavallaro #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
67120e87f9SGiuseppe Cavallaro #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
687ac6653aSJeff Kirsher 
69e9989339SJose Abreu static int flow_ctrl = FLOW_AUTO;
70d3757ba4SJoe Perches module_param(flow_ctrl, int, 0644);
717ac6653aSJeff Kirsher MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
727ac6653aSJeff Kirsher 
737ac6653aSJeff Kirsher static int pause = PAUSE_TIME;
74d3757ba4SJoe Perches module_param(pause, int, 0644);
757ac6653aSJeff Kirsher MODULE_PARM_DESC(pause, "Flow Control Pause Time");
767ac6653aSJeff Kirsher 
777ac6653aSJeff Kirsher #define TC_DEFAULT 64
787ac6653aSJeff Kirsher static int tc = TC_DEFAULT;
79d3757ba4SJoe Perches module_param(tc, int, 0644);
807ac6653aSJeff Kirsher MODULE_PARM_DESC(tc, "DMA threshold control value");
817ac6653aSJeff Kirsher 
82d916701cSGiuseppe CAVALLARO #define	DEFAULT_BUFSIZE	1536
83d916701cSGiuseppe CAVALLARO static int buf_sz = DEFAULT_BUFSIZE;
84d3757ba4SJoe Perches module_param(buf_sz, int, 0644);
857ac6653aSJeff Kirsher MODULE_PARM_DESC(buf_sz, "DMA buffer size");
867ac6653aSJeff Kirsher 
8722ad3838SGiuseppe Cavallaro #define	STMMAC_RX_COPYBREAK	256
8822ad3838SGiuseppe Cavallaro 
897ac6653aSJeff Kirsher static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
907ac6653aSJeff Kirsher 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
917ac6653aSJeff Kirsher 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
927ac6653aSJeff Kirsher 
93d765955dSGiuseppe CAVALLARO #define STMMAC_DEFAULT_LPI_TIMER	1000
94d765955dSGiuseppe CAVALLARO static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
95d3757ba4SJoe Perches module_param(eee_timer, int, 0644);
96d765955dSGiuseppe CAVALLARO MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
97f5351ef7SGiuseppe CAVALLARO #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
98d765955dSGiuseppe CAVALLARO 
9922d3efe5SPavel Machek /* By default the driver uses the ring mode to manage TX and RX descriptors,
10022d3efe5SPavel Machek  * but the user can force the use of the chain mode instead of the ring
1014a7d666aSGiuseppe CAVALLARO  */
1024a7d666aSGiuseppe CAVALLARO static unsigned int chain_mode;
103d3757ba4SJoe Perches module_param(chain_mode, int, 0444);
1044a7d666aSGiuseppe CAVALLARO MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
1054a7d666aSGiuseppe CAVALLARO 
1067ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
1077ac6653aSJeff Kirsher 
10850fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
109481a7d15SJiping Ma static const struct net_device_ops stmmac_netdev_ops;
1108d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev);
111466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev);
112bfab27a1SGiuseppe CAVALLARO #endif
113bfab27a1SGiuseppe CAVALLARO 
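/* Convert a coalescing interval in microseconds into an absolute jiffies
 * deadline, used to arm the TX coalescing timer.
 */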
1149125cdd1SGiuseppe CAVALLARO #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
1159125cdd1SGiuseppe CAVALLARO 
1167ac6653aSJeff Kirsher /**
1177ac6653aSJeff Kirsher  * stmmac_verify_args - verify the driver parameters.
118732fdf0eSGiuseppe CAVALLARO  * Description: it checks the driver parameters and sets a default in case of
119732fdf0eSGiuseppe CAVALLARO  * errors.
1207ac6653aSJeff Kirsher  */
1217ac6653aSJeff Kirsher static void stmmac_verify_args(void)
1227ac6653aSJeff Kirsher {
1237ac6653aSJeff Kirsher 	if (unlikely(watchdog < 0))
1247ac6653aSJeff Kirsher 		watchdog = TX_TIMEO;
125d916701cSGiuseppe CAVALLARO 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
126d916701cSGiuseppe CAVALLARO 		buf_sz = DEFAULT_BUFSIZE;
1277ac6653aSJeff Kirsher 	if (unlikely(flow_ctrl > 1))
1287ac6653aSJeff Kirsher 		flow_ctrl = FLOW_AUTO;
1297ac6653aSJeff Kirsher 	else if (likely(flow_ctrl < 0))
1307ac6653aSJeff Kirsher 		flow_ctrl = FLOW_OFF;
1317ac6653aSJeff Kirsher 	if (unlikely((pause < 0) || (pause > 0xffff)))
1327ac6653aSJeff Kirsher 		pause = PAUSE_TIME;
133d765955dSGiuseppe CAVALLARO 	if (eee_timer < 0)
134d765955dSGiuseppe CAVALLARO 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
1357ac6653aSJeff Kirsher }
1367ac6653aSJeff Kirsher 
13732ceabcaSGiuseppe CAVALLARO /**
138c22a3f48SJoao Pinto  * stmmac_disable_all_queues - Disable all queues
139c22a3f48SJoao Pinto  * @priv: driver private structure
140c22a3f48SJoao Pinto  */
141c22a3f48SJoao Pinto static void stmmac_disable_all_queues(struct stmmac_priv *priv)
142c22a3f48SJoao Pinto {
143c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
1448fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
1458fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
146c22a3f48SJoao Pinto 	u32 queue;
147c22a3f48SJoao Pinto 
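	/* Each channel carries an RX and/or a TX NAPI context, so walk
	 * max(rx, tx) channels and disable whichever NAPIs are present.
	 */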
1488fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
1498fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
150c22a3f48SJoao Pinto 
1514ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
1524ccb4585SJose Abreu 			napi_disable(&ch->rx_napi);
1534ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
1544ccb4585SJose Abreu 			napi_disable(&ch->tx_napi);
155c22a3f48SJoao Pinto 	}
156c22a3f48SJoao Pinto }
157c22a3f48SJoao Pinto 
158c22a3f48SJoao Pinto /**
159c22a3f48SJoao Pinto  * stmmac_enable_all_queues - Enable all queues
160c22a3f48SJoao Pinto  * @priv: driver private structure
161c22a3f48SJoao Pinto  */
162c22a3f48SJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv)
163c22a3f48SJoao Pinto {
164c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
1658fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
1668fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
167c22a3f48SJoao Pinto 	u32 queue;
168c22a3f48SJoao Pinto 
1698fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
1708fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
171c22a3f48SJoao Pinto 
1724ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
1734ccb4585SJose Abreu 			napi_enable(&ch->rx_napi);
1744ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
1754ccb4585SJose Abreu 			napi_enable(&ch->tx_napi);
176c22a3f48SJoao Pinto 	}
177c22a3f48SJoao Pinto }
178c22a3f48SJoao Pinto 
179c22a3f48SJoao Pinto /**
180c22a3f48SJoao Pinto  * stmmac_stop_all_queues - Stop all queues
181c22a3f48SJoao Pinto  * @priv: driver private structure
182c22a3f48SJoao Pinto  */
183c22a3f48SJoao Pinto static void stmmac_stop_all_queues(struct stmmac_priv *priv)
184c22a3f48SJoao Pinto {
185c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
186c22a3f48SJoao Pinto 	u32 queue;
187c22a3f48SJoao Pinto 
188c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
189c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
190c22a3f48SJoao Pinto }
191c22a3f48SJoao Pinto 
192c22a3f48SJoao Pinto /**
193c22a3f48SJoao Pinto  * stmmac_start_all_queues - Start all queues
194c22a3f48SJoao Pinto  * @priv: driver private structure
195c22a3f48SJoao Pinto  */
196c22a3f48SJoao Pinto static void stmmac_start_all_queues(struct stmmac_priv *priv)
197c22a3f48SJoao Pinto {
198c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
199c22a3f48SJoao Pinto 	u32 queue;
200c22a3f48SJoao Pinto 
201c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
202c22a3f48SJoao Pinto 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
203c22a3f48SJoao Pinto }
204c22a3f48SJoao Pinto 
20534877a15SJose Abreu static void stmmac_service_event_schedule(struct stmmac_priv *priv)
20634877a15SJose Abreu {
20734877a15SJose Abreu 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
20834877a15SJose Abreu 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
20934877a15SJose Abreu 		queue_work(priv->wq, &priv->service_task);
21034877a15SJose Abreu }
21134877a15SJose Abreu 
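/* Record a fatal MAC error: take the carrier down, flag that a full reset is
 * required and let the service task perform the actual recovery.
 */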
21234877a15SJose Abreu static void stmmac_global_err(struct stmmac_priv *priv)
21334877a15SJose Abreu {
21434877a15SJose Abreu 	netif_carrier_off(priv->dev);
21534877a15SJose Abreu 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
21634877a15SJose Abreu 	stmmac_service_event_schedule(priv);
21734877a15SJose Abreu }
21834877a15SJose Abreu 
219c22a3f48SJoao Pinto /**
22032ceabcaSGiuseppe CAVALLARO  * stmmac_clk_csr_set - dynamically set the MDC clock
22132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
22232ceabcaSGiuseppe CAVALLARO  * Description: this is to dynamically set the MDC clock according to the csr
22332ceabcaSGiuseppe CAVALLARO  * clock input.
22432ceabcaSGiuseppe CAVALLARO  * Note:
22532ceabcaSGiuseppe CAVALLARO  *	If a specific clk_csr value is passed from the platform
22632ceabcaSGiuseppe CAVALLARO  *	this means that the CSR Clock Range selection cannot be
22732ceabcaSGiuseppe CAVALLARO  *	changed at run-time and it is fixed (as reported in the driver
22832ceabcaSGiuseppe CAVALLARO  *	documentation). Otherwise the driver will try to set the MDC
22932ceabcaSGiuseppe CAVALLARO  *	clock dynamically according to the actual clock input.
23032ceabcaSGiuseppe CAVALLARO  */
231cd7201f4SGiuseppe CAVALLARO static void stmmac_clk_csr_set(struct stmmac_priv *priv)
232cd7201f4SGiuseppe CAVALLARO {
233cd7201f4SGiuseppe CAVALLARO 	u32 clk_rate;
234cd7201f4SGiuseppe CAVALLARO 
235f573c0b9Sjpinto 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
236cd7201f4SGiuseppe CAVALLARO 
237cd7201f4SGiuseppe CAVALLARO 	/* The platform-provided default clk_csr is assumed valid
238ceb69499SGiuseppe CAVALLARO 	 * for all cases except the ones mentioned below.
239ceb69499SGiuseppe CAVALLARO 	 * For values higher than the IEEE 802.3 specified frequency
240ceb69499SGiuseppe CAVALLARO 	 * we cannot estimate the proper divider as the frequency of
241ceb69499SGiuseppe CAVALLARO 	 * clk_csr_i is not known. So we do not change the default
242ceb69499SGiuseppe CAVALLARO 	 * divider.
243ceb69499SGiuseppe CAVALLARO 	 */
244cd7201f4SGiuseppe CAVALLARO 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
245cd7201f4SGiuseppe CAVALLARO 		if (clk_rate < CSR_F_35M)
246cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_20_35M;
247cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
248cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_35_60M;
249cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
250cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_60_100M;
251cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
252cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_100_150M;
253cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
254cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_150_250M;
25519d857c9SPhil Reid 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
256cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_250_300M;
257ceb69499SGiuseppe CAVALLARO 	}
2589f93ac8dSLABBE Corentin 
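	/* Note (assumption): dwmac-sun8i uses its own MDC clock divider
	 * encoding (0-3), selected here directly from the bus clock rate.
	 */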
2599f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i) {
2609f93ac8dSLABBE Corentin 		if (clk_rate > 160000000)
2619f93ac8dSLABBE Corentin 			priv->clk_csr = 0x03;
2629f93ac8dSLABBE Corentin 		else if (clk_rate > 80000000)
2639f93ac8dSLABBE Corentin 			priv->clk_csr = 0x02;
2649f93ac8dSLABBE Corentin 		else if (clk_rate > 40000000)
2659f93ac8dSLABBE Corentin 			priv->clk_csr = 0x01;
2669f93ac8dSLABBE Corentin 		else
2679f93ac8dSLABBE Corentin 			priv->clk_csr = 0;
2689f93ac8dSLABBE Corentin 	}
2697d9e6c5aSJose Abreu 
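	/* Note (assumption): XGMAC cores use a wider divider encoding
	 * (0x0-0x5) to cover the higher CSR clock rates checked below.
	 */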
2707d9e6c5aSJose Abreu 	if (priv->plat->has_xgmac) {
2717d9e6c5aSJose Abreu 		if (clk_rate > 400000000)
2727d9e6c5aSJose Abreu 			priv->clk_csr = 0x5;
2737d9e6c5aSJose Abreu 		else if (clk_rate > 350000000)
2747d9e6c5aSJose Abreu 			priv->clk_csr = 0x4;
2757d9e6c5aSJose Abreu 		else if (clk_rate > 300000000)
2767d9e6c5aSJose Abreu 			priv->clk_csr = 0x3;
2777d9e6c5aSJose Abreu 		else if (clk_rate > 250000000)
2787d9e6c5aSJose Abreu 			priv->clk_csr = 0x2;
2797d9e6c5aSJose Abreu 		else if (clk_rate > 150000000)
2807d9e6c5aSJose Abreu 			priv->clk_csr = 0x1;
2817d9e6c5aSJose Abreu 		else
2827d9e6c5aSJose Abreu 			priv->clk_csr = 0x0;
2837d9e6c5aSJose Abreu 	}
284cd7201f4SGiuseppe CAVALLARO }
285cd7201f4SGiuseppe CAVALLARO 
2867ac6653aSJeff Kirsher static void print_pkt(unsigned char *buf, int len)
2877ac6653aSJeff Kirsher {
288424c4f78SAndy Shevchenko 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
289424c4f78SAndy Shevchenko 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
2907ac6653aSJeff Kirsher }
2917ac6653aSJeff Kirsher 
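/**
 * stmmac_tx_avail - Get the number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 */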
292ce736788SJoao Pinto static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
2937ac6653aSJeff Kirsher {
294ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
295a6a3e026SLABBE Corentin 	u32 avail;
296e3ad57c9SGiuseppe Cavallaro 
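	/* dirty_tx chases cur_tx around the ring; one slot is deliberately
	 * kept unused so a full ring can be told apart from an empty one.
	 */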
297ce736788SJoao Pinto 	if (tx_q->dirty_tx > tx_q->cur_tx)
298ce736788SJoao Pinto 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
299e3ad57c9SGiuseppe Cavallaro 	else
300ce736788SJoao Pinto 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
301e3ad57c9SGiuseppe Cavallaro 
302e3ad57c9SGiuseppe Cavallaro 	return avail;
303e3ad57c9SGiuseppe Cavallaro }
304e3ad57c9SGiuseppe Cavallaro 
30554139cf3SJoao Pinto /**
30654139cf3SJoao Pinto  * stmmac_rx_dirty - Get RX queue dirty
30754139cf3SJoao Pinto  * @priv: driver private structure
30854139cf3SJoao Pinto  * @queue: RX queue index
30954139cf3SJoao Pinto  */
31054139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
311e3ad57c9SGiuseppe Cavallaro {
31254139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
313a6a3e026SLABBE Corentin 	u32 dirty;
314e3ad57c9SGiuseppe Cavallaro 
31554139cf3SJoao Pinto 	if (rx_q->dirty_rx <= rx_q->cur_rx)
31654139cf3SJoao Pinto 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
317e3ad57c9SGiuseppe Cavallaro 	else
31854139cf3SJoao Pinto 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
319e3ad57c9SGiuseppe Cavallaro 
320e3ad57c9SGiuseppe Cavallaro 	return dirty;
3217ac6653aSJeff Kirsher }
3227ac6653aSJeff Kirsher 
32332ceabcaSGiuseppe CAVALLARO /**
324732fdf0eSGiuseppe CAVALLARO  * stmmac_enable_eee_mode - check and enter LPI mode
32532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
326732fdf0eSGiuseppe CAVALLARO  * Description: this function checks that all TX queues are idle and, if so,
327732fdf0eSGiuseppe CAVALLARO  * enters LPI mode (EEE).
32832ceabcaSGiuseppe CAVALLARO  */
329d765955dSGiuseppe CAVALLARO static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
330d765955dSGiuseppe CAVALLARO {
331ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
332ce736788SJoao Pinto 	u32 queue;
333ce736788SJoao Pinto 
334ce736788SJoao Pinto 	/* check if all TX queues have finished their work */
335ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
336ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
337ce736788SJoao Pinto 
338ce736788SJoao Pinto 		if (tx_q->dirty_tx != tx_q->cur_tx)
339ce736788SJoao Pinto 			return; /* still unfinished work */
340ce736788SJoao Pinto 	}
341ce736788SJoao Pinto 
342d765955dSGiuseppe CAVALLARO 	/* Check and enter LPI mode */
343ce736788SJoao Pinto 	if (!priv->tx_path_in_lpi_mode)
344c10d4c82SJose Abreu 		stmmac_set_eee_mode(priv, priv->hw,
345b4b7b772Sjpinto 				priv->plat->en_tx_lpi_clockgating);
346d765955dSGiuseppe CAVALLARO }
347d765955dSGiuseppe CAVALLARO 
34832ceabcaSGiuseppe CAVALLARO /**
349732fdf0eSGiuseppe CAVALLARO  * stmmac_disable_eee_mode - disable and exit from LPI mode
35032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
35132ceabcaSGiuseppe CAVALLARO  * Description: this function exits and disables EEE when the LPI state
35232ceabcaSGiuseppe CAVALLARO  * is active. It is called from the xmit path.
35332ceabcaSGiuseppe CAVALLARO  */
354d765955dSGiuseppe CAVALLARO void stmmac_disable_eee_mode(struct stmmac_priv *priv)
355d765955dSGiuseppe CAVALLARO {
356c10d4c82SJose Abreu 	stmmac_reset_eee_mode(priv, priv->hw);
357d765955dSGiuseppe CAVALLARO 	del_timer_sync(&priv->eee_ctrl_timer);
358d765955dSGiuseppe CAVALLARO 	priv->tx_path_in_lpi_mode = false;
359d765955dSGiuseppe CAVALLARO }
360d765955dSGiuseppe CAVALLARO 
361d765955dSGiuseppe CAVALLARO /**
362732fdf0eSGiuseppe CAVALLARO  * stmmac_eee_ctrl_timer - EEE TX SW timer.
363d765955dSGiuseppe CAVALLARO  * @t: timer_list handle
364d765955dSGiuseppe CAVALLARO  * Description:
36532ceabcaSGiuseppe CAVALLARO  *  if there is no data transfer and we are not already in LPI state,
366d765955dSGiuseppe CAVALLARO  *  then the MAC transmitter can be moved to LPI state.
367d765955dSGiuseppe CAVALLARO  */
368e99e88a9SKees Cook static void stmmac_eee_ctrl_timer(struct timer_list *t)
369d765955dSGiuseppe CAVALLARO {
370e99e88a9SKees Cook 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
371d765955dSGiuseppe CAVALLARO 
372d765955dSGiuseppe CAVALLARO 	stmmac_enable_eee_mode(priv);
373f5351ef7SGiuseppe CAVALLARO 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
374d765955dSGiuseppe CAVALLARO }
375d765955dSGiuseppe CAVALLARO 
376d765955dSGiuseppe CAVALLARO /**
377732fdf0eSGiuseppe CAVALLARO  * stmmac_eee_init - init EEE
37832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
379d765955dSGiuseppe CAVALLARO  * Description:
380732fdf0eSGiuseppe CAVALLARO  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
381732fdf0eSGiuseppe CAVALLARO  *  can also manage EEE, this function enables the LPI state and starts the
382732fdf0eSGiuseppe CAVALLARO  *  related timer.
383d765955dSGiuseppe CAVALLARO  */
384d765955dSGiuseppe CAVALLARO bool stmmac_eee_init(struct stmmac_priv *priv)
385d765955dSGiuseppe CAVALLARO {
38674371272SJose Abreu 	int tx_lpi_timer = priv->tx_lpi_timer;
387879626e3SJerome Brunet 
388f5351ef7SGiuseppe CAVALLARO 	/* Using PCS we cannot deal with the PHY registers at this stage,
389f5351ef7SGiuseppe CAVALLARO 	 * so we do not support extra features like EEE.
390f5351ef7SGiuseppe CAVALLARO 	 */
391a47b9e15SDejin Zheng 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
392a47b9e15SDejin Zheng 	    priv->hw->pcs == STMMAC_PCS_RTBI)
39374371272SJose Abreu 		return false;
394f5351ef7SGiuseppe CAVALLARO 
39574371272SJose Abreu 	/* Check if MAC core supports the EEE feature. */
39674371272SJose Abreu 	if (!priv->dma_cap.eee)
39774371272SJose Abreu 		return false;
398d765955dSGiuseppe CAVALLARO 
39929555fa3SThierry Reding 	mutex_lock(&priv->lock);
40074371272SJose Abreu 
40174371272SJose Abreu 	/* Check if it needs to be deactivated */
402177d935aSJon Hunter 	if (!priv->eee_active) {
403177d935aSJon Hunter 		if (priv->eee_enabled) {
40438ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "disable EEE\n");
40583bf79b6SGiuseppe CAVALLARO 			del_timer_sync(&priv->eee_ctrl_timer);
40674371272SJose Abreu 			stmmac_set_eee_timer(priv, priv->hw, 0, tx_lpi_timer);
407177d935aSJon Hunter 		}
4080867bb97SJon Hunter 		mutex_unlock(&priv->lock);
40974371272SJose Abreu 		return false;
41074371272SJose Abreu 	}
41174371272SJose Abreu 
41274371272SJose Abreu 	if (priv->eee_active && !priv->eee_enabled) {
41374371272SJose Abreu 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
41474371272SJose Abreu 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
41574371272SJose Abreu 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
41683bf79b6SGiuseppe CAVALLARO 				     tx_lpi_timer);
41783bf79b6SGiuseppe CAVALLARO 	}
41874371272SJose Abreu 
41929555fa3SThierry Reding 	mutex_unlock(&priv->lock);
42038ddc59dSLABBE Corentin 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
42174371272SJose Abreu 	return true;
422d765955dSGiuseppe CAVALLARO }
423d765955dSGiuseppe CAVALLARO 
424732fdf0eSGiuseppe CAVALLARO /* stmmac_get_tx_hwtstamp - get HW TX timestamps
42532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
426ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
427891434b1SRayagond Kokatanur  * @skb : the socket buffer
428891434b1SRayagond Kokatanur  * Description :
429891434b1SRayagond Kokatanur  * This function reads the timestamp from the descriptor, passes it to the
430891434b1SRayagond Kokatanur  * stack and also performs some sanity checks.
431891434b1SRayagond Kokatanur  */
432891434b1SRayagond Kokatanur static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
433ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *p, struct sk_buff *skb)
434891434b1SRayagond Kokatanur {
435891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps shhwtstamp;
43625e80cd0SJose Abreu 	bool found = false;
437df103170SNathan Chancellor 	u64 ns = 0;
438891434b1SRayagond Kokatanur 
439891434b1SRayagond Kokatanur 	if (!priv->hwts_tx_en)
440891434b1SRayagond Kokatanur 		return;
441891434b1SRayagond Kokatanur 
442ceb69499SGiuseppe CAVALLARO 	/* exit if skb doesn't support hw tstamp */
44375e4364fSdamuzi000 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
444891434b1SRayagond Kokatanur 		return;
445891434b1SRayagond Kokatanur 
446891434b1SRayagond Kokatanur 	/* check tx tstamp status: try the descriptor first, then the MAC register */
44742de047dSJose Abreu 	if (stmmac_get_tx_timestamp_status(priv, p)) {
44842de047dSJose Abreu 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
44925e80cd0SJose Abreu 		found = true;
45025e80cd0SJose Abreu 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
45125e80cd0SJose Abreu 		found = true;
45225e80cd0SJose Abreu 	}
453891434b1SRayagond Kokatanur 
45425e80cd0SJose Abreu 	if (found) {
455891434b1SRayagond Kokatanur 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
456891434b1SRayagond Kokatanur 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
457ba1ffd74SGiuseppe CAVALLARO 
45833d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
459891434b1SRayagond Kokatanur 		/* pass tstamp to stack */
460891434b1SRayagond Kokatanur 		skb_tstamp_tx(skb, &shhwtstamp);
461ba1ffd74SGiuseppe CAVALLARO 	}
462891434b1SRayagond Kokatanur }
463891434b1SRayagond Kokatanur 
464732fdf0eSGiuseppe CAVALLARO /* stmmac_get_rx_hwtstamp - get HW RX timestamps
46532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
466ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
467ba1ffd74SGiuseppe CAVALLARO  * @np : next descriptor pointer
468891434b1SRayagond Kokatanur  * @skb : the socket buffer
469891434b1SRayagond Kokatanur  * Description :
470891434b1SRayagond Kokatanur  * This function reads the received packet's timestamp from the descriptor
471891434b1SRayagond Kokatanur  * and passes it to the stack. It also performs some sanity checks.
472891434b1SRayagond Kokatanur  */
473ba1ffd74SGiuseppe CAVALLARO static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
474ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *np, struct sk_buff *skb)
475891434b1SRayagond Kokatanur {
476891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
47798870943SJose Abreu 	struct dma_desc *desc = p;
478df103170SNathan Chancellor 	u64 ns = 0;
479891434b1SRayagond Kokatanur 
480891434b1SRayagond Kokatanur 	if (!priv->hwts_rx_en)
481891434b1SRayagond Kokatanur 		return;
482ba1ffd74SGiuseppe CAVALLARO 	/* For GMAC4, the valid timestamp is from CTX next desc. */
4837d9e6c5aSJose Abreu 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
48498870943SJose Abreu 		desc = np;
485891434b1SRayagond Kokatanur 
48698870943SJose Abreu 	/* Check if timestamp is available */
48742de047dSJose Abreu 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
48842de047dSJose Abreu 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
48933d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
490891434b1SRayagond Kokatanur 		shhwtstamp = skb_hwtstamps(skb);
491891434b1SRayagond Kokatanur 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
492891434b1SRayagond Kokatanur 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
493ba1ffd74SGiuseppe CAVALLARO 	} else  {
49433d4c482SMario Molitor 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
495ba1ffd74SGiuseppe CAVALLARO 	}
496891434b1SRayagond Kokatanur }
497891434b1SRayagond Kokatanur 
498891434b1SRayagond Kokatanur /**
499d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_set - control hardware timestamping.
500891434b1SRayagond Kokatanur  *  @dev: device pointer.
5018d45e42bSLABBE Corentin  *  @ifr: An IOCTL-specific structure that can contain a pointer to
502891434b1SRayagond Kokatanur  *  a proprietary structure used to pass information to the driver.
503891434b1SRayagond Kokatanur  *  Description:
504891434b1SRayagond Kokatanur  *  This function configures the MAC to enable/disable both outgoing (TX)
505891434b1SRayagond Kokatanur  *  and incoming (RX) packet timestamping based on user input.
506891434b1SRayagond Kokatanur  *  Return Value:
507891434b1SRayagond Kokatanur  *  0 on success and an appropriate -ve integer on failure.
508891434b1SRayagond Kokatanur  */
509d6228b7cSArtem Panfilov static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
510891434b1SRayagond Kokatanur {
511891434b1SRayagond Kokatanur 	struct stmmac_priv *priv = netdev_priv(dev);
512891434b1SRayagond Kokatanur 	struct hwtstamp_config config;
5130a624155SArnd Bergmann 	struct timespec64 now;
514891434b1SRayagond Kokatanur 	u64 temp = 0;
515891434b1SRayagond Kokatanur 	u32 ptp_v2 = 0;
516891434b1SRayagond Kokatanur 	u32 tstamp_all = 0;
517891434b1SRayagond Kokatanur 	u32 ptp_over_ipv4_udp = 0;
518891434b1SRayagond Kokatanur 	u32 ptp_over_ipv6_udp = 0;
519891434b1SRayagond Kokatanur 	u32 ptp_over_ethernet = 0;
520891434b1SRayagond Kokatanur 	u32 snap_type_sel = 0;
521891434b1SRayagond Kokatanur 	u32 ts_master_en = 0;
522891434b1SRayagond Kokatanur 	u32 ts_event_en = 0;
523df103170SNathan Chancellor 	u32 sec_inc = 0;
524891434b1SRayagond Kokatanur 	u32 value = 0;
5257d9e6c5aSJose Abreu 	bool xmac;
5267d9e6c5aSJose Abreu 
5277d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
528891434b1SRayagond Kokatanur 
529891434b1SRayagond Kokatanur 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
530891434b1SRayagond Kokatanur 		netdev_alert(priv->dev, "No support for HW time stamping\n");
531891434b1SRayagond Kokatanur 		priv->hwts_tx_en = 0;
532891434b1SRayagond Kokatanur 		priv->hwts_rx_en = 0;
533891434b1SRayagond Kokatanur 
534891434b1SRayagond Kokatanur 		return -EOPNOTSUPP;
535891434b1SRayagond Kokatanur 	}
536891434b1SRayagond Kokatanur 
537891434b1SRayagond Kokatanur 	if (copy_from_user(&config, ifr->ifr_data,
538d6228b7cSArtem Panfilov 			   sizeof(config)))
539891434b1SRayagond Kokatanur 		return -EFAULT;
540891434b1SRayagond Kokatanur 
54138ddc59dSLABBE Corentin 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
542891434b1SRayagond Kokatanur 		   __func__, config.flags, config.tx_type, config.rx_filter);
543891434b1SRayagond Kokatanur 
544891434b1SRayagond Kokatanur 	/* reserved for future extensions */
545891434b1SRayagond Kokatanur 	if (config.flags)
546891434b1SRayagond Kokatanur 		return -EINVAL;
547891434b1SRayagond Kokatanur 
5485f3da328SBen Hutchings 	if (config.tx_type != HWTSTAMP_TX_OFF &&
5495f3da328SBen Hutchings 	    config.tx_type != HWTSTAMP_TX_ON)
550891434b1SRayagond Kokatanur 		return -ERANGE;
551891434b1SRayagond Kokatanur 
552891434b1SRayagond Kokatanur 	if (priv->adv_ts) {
553891434b1SRayagond Kokatanur 		switch (config.rx_filter) {
554891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_NONE:
555ceb69499SGiuseppe CAVALLARO 			/* do not time stamp any incoming packet */
556891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_NONE;
557891434b1SRayagond Kokatanur 			break;
558891434b1SRayagond Kokatanur 
559891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
560ceb69499SGiuseppe CAVALLARO 			/* PTP v1, UDP, any kind of event packet */
561891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
5627d8e249fSIlias Apalodimas 			/* 'xmac' hardware can support Sync, Pdelay_Req and
5637d8e249fSIlias Apalodimas 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
5647d8e249fSIlias Apalodimas 			 * This leaves Delay_Req timestamps out.
5657d8e249fSIlias Apalodimas 			 * Enable all events *and* general purpose message
5667d8e249fSIlias Apalodimas 			 * timestamping
5677d8e249fSIlias Apalodimas 			 */
568891434b1SRayagond Kokatanur 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
569891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
570891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
571891434b1SRayagond Kokatanur 			break;
572891434b1SRayagond Kokatanur 
573891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
574ceb69499SGiuseppe CAVALLARO 			/* PTP v1, UDP, Sync packet */
575891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
576891434b1SRayagond Kokatanur 			/* take time stamp for SYNC messages only */
577891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
578891434b1SRayagond Kokatanur 
579891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
580891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
581891434b1SRayagond Kokatanur 			break;
582891434b1SRayagond Kokatanur 
583891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
584ceb69499SGiuseppe CAVALLARO 			/* PTP v1, UDP, Delay_req packet */
585891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
586891434b1SRayagond Kokatanur 			/* take time stamp for Delay_Req messages only */
587891434b1SRayagond Kokatanur 			ts_master_en = PTP_TCR_TSMSTRENA;
588891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
589891434b1SRayagond Kokatanur 
590891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
591891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
592891434b1SRayagond Kokatanur 			break;
593891434b1SRayagond Kokatanur 
594891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
595ceb69499SGiuseppe CAVALLARO 			/* PTP v2, UDP, any kind of event packet */
596891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
597891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
598891434b1SRayagond Kokatanur 			/* take time stamp for all event messages */
599891434b1SRayagond Kokatanur 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
600891434b1SRayagond Kokatanur 
601891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
602891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
603891434b1SRayagond Kokatanur 			break;
604891434b1SRayagond Kokatanur 
605891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
606ceb69499SGiuseppe CAVALLARO 			/* PTP v2, UDP, Sync packet */
607891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
608891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
609891434b1SRayagond Kokatanur 			/* take time stamp for SYNC messages only */
610891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
611891434b1SRayagond Kokatanur 
612891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
613891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
614891434b1SRayagond Kokatanur 			break;
615891434b1SRayagond Kokatanur 
616891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
617ceb69499SGiuseppe CAVALLARO 			/* PTP v2, UDP, Delay_req packet */
618891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
619891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
620891434b1SRayagond Kokatanur 			/* take time stamp for Delay_Req messages only */
621891434b1SRayagond Kokatanur 			ts_master_en = PTP_TCR_TSMSTRENA;
622891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
623891434b1SRayagond Kokatanur 
624891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
625891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
626891434b1SRayagond Kokatanur 			break;
627891434b1SRayagond Kokatanur 
628891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
629ceb69499SGiuseppe CAVALLARO 			/* PTP v2/802.AS1 any layer, any kind of event packet */
630891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
631891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
632891434b1SRayagond Kokatanur 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
63314f34733SJose Abreu 			ts_event_en = PTP_TCR_TSEVNTENA;
634891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
635891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
636891434b1SRayagond Kokatanur 			ptp_over_ethernet = PTP_TCR_TSIPENA;
637891434b1SRayagond Kokatanur 			break;
638891434b1SRayagond Kokatanur 
639891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
640ceb69499SGiuseppe CAVALLARO 			/* PTP v2/802.AS1, any layer, Sync packet */
641891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
642891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
643891434b1SRayagond Kokatanur 			/* take time stamp for SYNC messages only */
644891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
645891434b1SRayagond Kokatanur 
646891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
647891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
648891434b1SRayagond Kokatanur 			ptp_over_ethernet = PTP_TCR_TSIPENA;
649891434b1SRayagond Kokatanur 			break;
650891434b1SRayagond Kokatanur 
651891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
652ceb69499SGiuseppe CAVALLARO 			/* PTP v2/802.AS1, any layer, Delay_req packet */
653891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
654891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
655891434b1SRayagond Kokatanur 			/* take time stamp for Delay_Req messages only */
656891434b1SRayagond Kokatanur 			ts_master_en = PTP_TCR_TSMSTRENA;
657891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
658891434b1SRayagond Kokatanur 
659891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
660891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
661891434b1SRayagond Kokatanur 			ptp_over_ethernet = PTP_TCR_TSIPENA;
662891434b1SRayagond Kokatanur 			break;
663891434b1SRayagond Kokatanur 
664e3412575SMiroslav Lichvar 		case HWTSTAMP_FILTER_NTP_ALL:
665891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_ALL:
666ceb69499SGiuseppe CAVALLARO 			/* time stamp any incoming packet */
667891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_ALL;
668891434b1SRayagond Kokatanur 			tstamp_all = PTP_TCR_TSENALL;
669891434b1SRayagond Kokatanur 			break;
670891434b1SRayagond Kokatanur 
671891434b1SRayagond Kokatanur 		default:
672891434b1SRayagond Kokatanur 			return -ERANGE;
673891434b1SRayagond Kokatanur 		}
674891434b1SRayagond Kokatanur 	} else {
675891434b1SRayagond Kokatanur 		switch (config.rx_filter) {
676891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_NONE:
677891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_NONE;
678891434b1SRayagond Kokatanur 			break;
679891434b1SRayagond Kokatanur 		default:
680891434b1SRayagond Kokatanur 			/* PTP v1, UDP, any kind of event packet */
681891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
682891434b1SRayagond Kokatanur 			break;
683891434b1SRayagond Kokatanur 		}
684891434b1SRayagond Kokatanur 	}
685891434b1SRayagond Kokatanur 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
6865f3da328SBen Hutchings 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
687891434b1SRayagond Kokatanur 
688891434b1SRayagond Kokatanur 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
689cc4c9001SJose Abreu 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
690891434b1SRayagond Kokatanur 	else {
691891434b1SRayagond Kokatanur 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
692891434b1SRayagond Kokatanur 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
693891434b1SRayagond Kokatanur 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
694891434b1SRayagond Kokatanur 			 ts_master_en | snap_type_sel);
695cc4c9001SJose Abreu 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
696891434b1SRayagond Kokatanur 
697891434b1SRayagond Kokatanur 		/* program Sub Second Increment reg */
698cc4c9001SJose Abreu 		stmmac_config_sub_second_increment(priv,
699f573c0b9Sjpinto 				priv->ptpaddr, priv->plat->clk_ptp_rate,
7007d9e6c5aSJose Abreu 				xmac, &sec_inc);
70119d857c9SPhil Reid 		temp = div_u64(1000000000ULL, sec_inc);
702891434b1SRayagond Kokatanur 
7039a8a02c9SJose Abreu 		/* Store sub second increment and flags for later use */
7049a8a02c9SJose Abreu 		priv->sub_second_inc = sec_inc;
7059a8a02c9SJose Abreu 		priv->systime_flags = value;
7069a8a02c9SJose Abreu 
707891434b1SRayagond Kokatanur 		/* calculate the default addend value:
708891434b1SRayagond Kokatanur 		 * the formula is:
709891434b1SRayagond Kokatanur 		 * addend = (2^32)/freq_div_ratio;
71019d857c9SPhil Reid 		 * where freq_div_ratio = 1e9ns/sec_inc
711891434b1SRayagond Kokatanur 		 */
71219d857c9SPhil Reid 		temp = (u64)(temp << 32);
713f573c0b9Sjpinto 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
714cc4c9001SJose Abreu 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
715891434b1SRayagond Kokatanur 
716891434b1SRayagond Kokatanur 		/* initialize system time */
7170a624155SArnd Bergmann 		ktime_get_real_ts64(&now);
7180a624155SArnd Bergmann 
7190a624155SArnd Bergmann 		/* lower 32 bits of tv_sec are safe until y2106 */
720cc4c9001SJose Abreu 		stmmac_init_systime(priv, priv->ptpaddr,
721cc4c9001SJose Abreu 				(u32)now.tv_sec, now.tv_nsec);
722891434b1SRayagond Kokatanur 	}
723891434b1SRayagond Kokatanur 
724d6228b7cSArtem Panfilov 	memcpy(&priv->tstamp_config, &config, sizeof(config));
725d6228b7cSArtem Panfilov 
726891434b1SRayagond Kokatanur 	return copy_to_user(ifr->ifr_data, &config,
727d6228b7cSArtem Panfilov 			    sizeof(config)) ? -EFAULT : 0;
728d6228b7cSArtem Panfilov }
729d6228b7cSArtem Panfilov 
730d6228b7cSArtem Panfilov /**
731d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_get - read hardware timestamping.
732d6228b7cSArtem Panfilov  *  @dev: device pointer.
733d6228b7cSArtem Panfilov  *  @ifr: An IOCTL-specific structure that can contain a pointer to
734d6228b7cSArtem Panfilov  *  a proprietary structure used to pass information to the driver.
735d6228b7cSArtem Panfilov  *  Description:
736d6228b7cSArtem Panfilov  *  This function obtains the current hardware timestamping settings
737d6228b7cSArtem Panfilov  *  as requested.
738d6228b7cSArtem Panfilov  */
739d6228b7cSArtem Panfilov static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
740d6228b7cSArtem Panfilov {
741d6228b7cSArtem Panfilov 	struct stmmac_priv *priv = netdev_priv(dev);
742d6228b7cSArtem Panfilov 	struct hwtstamp_config *config = &priv->tstamp_config;
743d6228b7cSArtem Panfilov 
744d6228b7cSArtem Panfilov 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
745d6228b7cSArtem Panfilov 		return -EOPNOTSUPP;
746d6228b7cSArtem Panfilov 
747d6228b7cSArtem Panfilov 	return copy_to_user(ifr->ifr_data, config,
748d6228b7cSArtem Panfilov 			    sizeof(*config)) ? -EFAULT : 0;
749891434b1SRayagond Kokatanur }
750891434b1SRayagond Kokatanur 
75132ceabcaSGiuseppe CAVALLARO /**
752732fdf0eSGiuseppe CAVALLARO  * stmmac_init_ptp - init PTP
75332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
754732fdf0eSGiuseppe CAVALLARO  * Description: this is to verify whether the HW supports PTPv1 or PTPv2.
75532ceabcaSGiuseppe CAVALLARO  * This is done by looking at the HW cap. register.
756732fdf0eSGiuseppe CAVALLARO  * This function also registers the ptp driver.
75732ceabcaSGiuseppe CAVALLARO  */
75892ba6888SRayagond Kokatanur static int stmmac_init_ptp(struct stmmac_priv *priv)
759891434b1SRayagond Kokatanur {
7607d9e6c5aSJose Abreu 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
7617d9e6c5aSJose Abreu 
76292ba6888SRayagond Kokatanur 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
76392ba6888SRayagond Kokatanur 		return -EOPNOTSUPP;
76492ba6888SRayagond Kokatanur 
765891434b1SRayagond Kokatanur 	priv->adv_ts = 0;
7667d9e6c5aSJose Abreu 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
7677d9e6c5aSJose Abreu 	if (xmac && priv->dma_cap.atime_stamp)
768be9b3174SGiuseppe CAVALLARO 		priv->adv_ts = 1;
769be9b3174SGiuseppe CAVALLARO 	/* Dwmac 3.x core with extend_desc can support adv_ts */
770be9b3174SGiuseppe CAVALLARO 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
771891434b1SRayagond Kokatanur 		priv->adv_ts = 1;
7727cd01399SVince Bridgers 
773be9b3174SGiuseppe CAVALLARO 	if (priv->dma_cap.time_stamp)
774be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
7757cd01399SVince Bridgers 
776be9b3174SGiuseppe CAVALLARO 	if (priv->adv_ts)
777be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev,
778be9b3174SGiuseppe CAVALLARO 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
779891434b1SRayagond Kokatanur 
780891434b1SRayagond Kokatanur 	priv->hwts_tx_en = 0;
781891434b1SRayagond Kokatanur 	priv->hwts_rx_en = 0;
78292ba6888SRayagond Kokatanur 
783c30a70d3SGiuseppe CAVALLARO 	stmmac_ptp_register(priv);
784c30a70d3SGiuseppe CAVALLARO 
785c30a70d3SGiuseppe CAVALLARO 	return 0;
78692ba6888SRayagond Kokatanur }
78792ba6888SRayagond Kokatanur 
78892ba6888SRayagond Kokatanur static void stmmac_release_ptp(struct stmmac_priv *priv)
78992ba6888SRayagond Kokatanur {
790f573c0b9Sjpinto 	if (priv->plat->clk_ptp_ref)
791f573c0b9Sjpinto 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
79292ba6888SRayagond Kokatanur 	stmmac_ptp_unregister(priv);
793891434b1SRayagond Kokatanur }
794891434b1SRayagond Kokatanur 
7957ac6653aSJeff Kirsher /**
79629feff39SJoao Pinto  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
79729feff39SJoao Pinto  *  @priv: driver private structure
79829feff39SJoao Pinto  *  Description: It is used for configuring the flow control in all queues
79929feff39SJoao Pinto  */
80029feff39SJoao Pinto static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
80129feff39SJoao Pinto {
80229feff39SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
80329feff39SJoao Pinto 
804c10d4c82SJose Abreu 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
80529feff39SJoao Pinto 			priv->pause, tx_cnt);
80629feff39SJoao Pinto }
80729feff39SJoao Pinto 
808eeef2f6bSJose Abreu static void stmmac_validate(struct phylink_config *config,
809eeef2f6bSJose Abreu 			    unsigned long *supported,
810eeef2f6bSJose Abreu 			    struct phylink_link_state *state)
811eeef2f6bSJose Abreu {
812eeef2f6bSJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
8135b0d7d7dSJose Abreu 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
814eeef2f6bSJose Abreu 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
815eeef2f6bSJose Abreu 	int tx_cnt = priv->plat->tx_queues_to_use;
816eeef2f6bSJose Abreu 	int max_speed = priv->plat->max_speed;
817eeef2f6bSJose Abreu 
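	/* Build the full set of modes this MAC can support in 'mac_supported',
	 * collect the modes to strip (speed caps, half-duplex limits) in
	 * 'mask', then apply both to 'supported' and 'advertising' below.
	 */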
8185b0d7d7dSJose Abreu 	phylink_set(mac_supported, 10baseT_Half);
8195b0d7d7dSJose Abreu 	phylink_set(mac_supported, 10baseT_Full);
8205b0d7d7dSJose Abreu 	phylink_set(mac_supported, 100baseT_Half);
8215b0d7d7dSJose Abreu 	phylink_set(mac_supported, 100baseT_Full);
822df7699c7SJose Abreu 	phylink_set(mac_supported, 1000baseT_Half);
823df7699c7SJose Abreu 	phylink_set(mac_supported, 1000baseT_Full);
824df7699c7SJose Abreu 	phylink_set(mac_supported, 1000baseKX_Full);
8255b0d7d7dSJose Abreu 
8265b0d7d7dSJose Abreu 	phylink_set(mac_supported, Autoneg);
8275b0d7d7dSJose Abreu 	phylink_set(mac_supported, Pause);
8285b0d7d7dSJose Abreu 	phylink_set(mac_supported, Asym_Pause);
8295b0d7d7dSJose Abreu 	phylink_set_port_modes(mac_supported);
8305b0d7d7dSJose Abreu 
831eeef2f6bSJose Abreu 	/* Strip the 1G modes if the max speed is capped below 1000 Mb/s */
832eeef2f6bSJose Abreu 	if ((max_speed > 0) && (max_speed < 1000)) {
833eeef2f6bSJose Abreu 		phylink_set(mask, 1000baseT_Full);
834eeef2f6bSJose Abreu 		phylink_set(mask, 1000baseX_Full);
8355b0d7d7dSJose Abreu 	} else if (priv->plat->has_xgmac) {
836d9da2c87SJose Abreu 		if (!max_speed || (max_speed >= 2500)) {
8375b0d7d7dSJose Abreu 			phylink_set(mac_supported, 2500baseT_Full);
838d9da2c87SJose Abreu 			phylink_set(mac_supported, 2500baseX_Full);
839d9da2c87SJose Abreu 		}
840d9da2c87SJose Abreu 		if (!max_speed || (max_speed >= 5000)) {
8415b0d7d7dSJose Abreu 			phylink_set(mac_supported, 5000baseT_Full);
842d9da2c87SJose Abreu 		}
843d9da2c87SJose Abreu 		if (!max_speed || (max_speed >= 10000)) {
8445b0d7d7dSJose Abreu 			phylink_set(mac_supported, 10000baseSR_Full);
8455b0d7d7dSJose Abreu 			phylink_set(mac_supported, 10000baseLR_Full);
8465b0d7d7dSJose Abreu 			phylink_set(mac_supported, 10000baseER_Full);
8475b0d7d7dSJose Abreu 			phylink_set(mac_supported, 10000baseLRM_Full);
8485b0d7d7dSJose Abreu 			phylink_set(mac_supported, 10000baseT_Full);
8495b0d7d7dSJose Abreu 			phylink_set(mac_supported, 10000baseKX4_Full);
8505b0d7d7dSJose Abreu 			phylink_set(mac_supported, 10000baseKR_Full);
851eeef2f6bSJose Abreu 		}
852d9da2c87SJose Abreu 	}
853eeef2f6bSJose Abreu 
854eeef2f6bSJose Abreu 	/* Half-duplex can only work with a single queue */
855eeef2f6bSJose Abreu 	if (tx_cnt > 1) {
856eeef2f6bSJose Abreu 		phylink_set(mask, 10baseT_Half);
857eeef2f6bSJose Abreu 		phylink_set(mask, 100baseT_Half);
858eeef2f6bSJose Abreu 		phylink_set(mask, 1000baseT_Half);
859eeef2f6bSJose Abreu 	}
860eeef2f6bSJose Abreu 
861422829f9SJose Abreu 	linkmode_and(supported, supported, mac_supported);
862422829f9SJose Abreu 	linkmode_andnot(supported, supported, mask);
863422829f9SJose Abreu 
864422829f9SJose Abreu 	linkmode_and(state->advertising, state->advertising, mac_supported);
865422829f9SJose Abreu 	linkmode_andnot(state->advertising, state->advertising, mask);
866f213bbe8SJose Abreu 
867f213bbe8SJose Abreu 	/* If PCS is supported, check which modes it supports. */
868f213bbe8SJose Abreu 	stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
869eeef2f6bSJose Abreu }
870eeef2f6bSJose Abreu 
871d46b7e4fSRussell King static void stmmac_mac_pcs_get_state(struct phylink_config *config,
872eeef2f6bSJose Abreu 				     struct phylink_link_state *state)
873eeef2f6bSJose Abreu {
874f213bbe8SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
875f213bbe8SJose Abreu 
876d46b7e4fSRussell King 	state->link = 0;
877f213bbe8SJose Abreu 	stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
878eeef2f6bSJose Abreu }
879eeef2f6bSJose Abreu 
88074371272SJose Abreu static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
88174371272SJose Abreu 			      const struct phylink_link_state *state)
8829ad372fcSJose Abreu {
883f213bbe8SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
884f213bbe8SJose Abreu 
885f213bbe8SJose Abreu 	stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
8869ad372fcSJose Abreu }
8879ad372fcSJose Abreu 
888eeef2f6bSJose Abreu static void stmmac_mac_an_restart(struct phylink_config *config)
889eeef2f6bSJose Abreu {
890eeef2f6bSJose Abreu 	/* Not Supported */
891eeef2f6bSJose Abreu }
892eeef2f6bSJose Abreu 
89374371272SJose Abreu static void stmmac_mac_link_down(struct phylink_config *config,
89474371272SJose Abreu 				 unsigned int mode, phy_interface_t interface)
8959ad372fcSJose Abreu {
89674371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
8979ad372fcSJose Abreu 
8989ad372fcSJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
89974371272SJose Abreu 	priv->eee_active = false;
90074371272SJose Abreu 	stmmac_eee_init(priv);
90174371272SJose Abreu 	stmmac_set_eee_pls(priv, priv->hw, false);
9029ad372fcSJose Abreu }
9039ad372fcSJose Abreu 
90474371272SJose Abreu static void stmmac_mac_link_up(struct phylink_config *config,
90591a208f2SRussell King 			       struct phy_device *phy,
90674371272SJose Abreu 			       unsigned int mode, phy_interface_t interface,
90791a208f2SRussell King 			       int speed, int duplex,
90891a208f2SRussell King 			       bool tx_pause, bool rx_pause)
9099ad372fcSJose Abreu {
91074371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
91146f69dedSJose Abreu 	u32 ctrl;
91246f69dedSJose Abreu 
913f213bbe8SJose Abreu 	stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
914f213bbe8SJose Abreu 
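	/* Reprogram the MAC_CTRL_REG speed bits for the negotiated link;
	 * USXGMII links use the XGMII speed encodings.
	 */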
91546f69dedSJose Abreu 	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
91646f69dedSJose Abreu 	ctrl &= ~priv->hw->link.speed_mask;
91746f69dedSJose Abreu 
91846f69dedSJose Abreu 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
91946f69dedSJose Abreu 		switch (speed) {
92046f69dedSJose Abreu 		case SPEED_10000:
92146f69dedSJose Abreu 			ctrl |= priv->hw->link.xgmii.speed10000;
92246f69dedSJose Abreu 			break;
92346f69dedSJose Abreu 		case SPEED_5000:
92446f69dedSJose Abreu 			ctrl |= priv->hw->link.xgmii.speed5000;
92546f69dedSJose Abreu 			break;
92646f69dedSJose Abreu 		case SPEED_2500:
92746f69dedSJose Abreu 			ctrl |= priv->hw->link.xgmii.speed2500;
92846f69dedSJose Abreu 			break;
92946f69dedSJose Abreu 		default:
93046f69dedSJose Abreu 			return;
93146f69dedSJose Abreu 		}
93246f69dedSJose Abreu 	} else {
93346f69dedSJose Abreu 		switch (speed) {
93446f69dedSJose Abreu 		case SPEED_2500:
93546f69dedSJose Abreu 			ctrl |= priv->hw->link.speed2500;
93646f69dedSJose Abreu 			break;
93746f69dedSJose Abreu 		case SPEED_1000:
93846f69dedSJose Abreu 			ctrl |= priv->hw->link.speed1000;
93946f69dedSJose Abreu 			break;
94046f69dedSJose Abreu 		case SPEED_100:
94146f69dedSJose Abreu 			ctrl |= priv->hw->link.speed100;
94246f69dedSJose Abreu 			break;
94346f69dedSJose Abreu 		case SPEED_10:
94446f69dedSJose Abreu 			ctrl |= priv->hw->link.speed10;
94546f69dedSJose Abreu 			break;
94646f69dedSJose Abreu 		default:
94746f69dedSJose Abreu 			return;
94846f69dedSJose Abreu 		}
94946f69dedSJose Abreu 	}
95046f69dedSJose Abreu 
95146f69dedSJose Abreu 	priv->speed = speed;
95246f69dedSJose Abreu 
95346f69dedSJose Abreu 	if (priv->plat->fix_mac_speed)
95446f69dedSJose Abreu 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
95546f69dedSJose Abreu 
95646f69dedSJose Abreu 	if (!duplex)
95746f69dedSJose Abreu 		ctrl &= ~priv->hw->link.duplex;
95846f69dedSJose Abreu 	else
95946f69dedSJose Abreu 		ctrl |= priv->hw->link.duplex;
96046f69dedSJose Abreu 
96146f69dedSJose Abreu 	/* Flow Control operation */
96246f69dedSJose Abreu 	if (tx_pause && rx_pause)
96346f69dedSJose Abreu 		stmmac_mac_flow_ctrl(priv, duplex);
96446f69dedSJose Abreu 
96546f69dedSJose Abreu 	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
9669ad372fcSJose Abreu 
9679ad372fcSJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
9685b111770SJose Abreu 	if (phy && priv->dma_cap.eee) {
96974371272SJose Abreu 		priv->eee_active = phy_init_eee(phy, 1) >= 0;
97074371272SJose Abreu 		priv->eee_enabled = stmmac_eee_init(priv);
97174371272SJose Abreu 		stmmac_set_eee_pls(priv, priv->hw, true);
97274371272SJose Abreu 	}
9739ad372fcSJose Abreu }
9749ad372fcSJose Abreu 
97574371272SJose Abreu static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
976eeef2f6bSJose Abreu 	.validate = stmmac_validate,
977d46b7e4fSRussell King 	.mac_pcs_get_state = stmmac_mac_pcs_get_state,
97874371272SJose Abreu 	.mac_config = stmmac_mac_config,
979eeef2f6bSJose Abreu 	.mac_an_restart = stmmac_mac_an_restart,
98074371272SJose Abreu 	.mac_link_down = stmmac_mac_link_down,
98174371272SJose Abreu 	.mac_link_up = stmmac_mac_link_up,
982eeef2f6bSJose Abreu };
983eeef2f6bSJose Abreu 
98429feff39SJoao Pinto /**
985732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
98632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
98732ceabcaSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the Physical Coding
98832ceabcaSGiuseppe CAVALLARO  * Sublayer (PCS), an interface that can be used when the MAC is
98932ceabcaSGiuseppe CAVALLARO  * configured for the TBI, RTBI, or SGMII PHY interface.
99032ceabcaSGiuseppe CAVALLARO  */
991e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
992e58bb43fSGiuseppe CAVALLARO {
993e58bb43fSGiuseppe CAVALLARO 	int interface = priv->plat->interface;
994e58bb43fSGiuseppe CAVALLARO 
995e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
9960d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
9970d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
9980d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
9990d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
100038ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
10013fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
10020d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
100338ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
10043fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
1005e58bb43fSGiuseppe CAVALLARO 		}
1006e58bb43fSGiuseppe CAVALLARO 	}
1007e58bb43fSGiuseppe CAVALLARO }
1008e58bb43fSGiuseppe CAVALLARO 
10097ac6653aSJeff Kirsher /**
10107ac6653aSJeff Kirsher  * stmmac_init_phy - PHY initialization
10117ac6653aSJeff Kirsher  * @dev: net device structure
10127ac6653aSJeff Kirsher  * Description: it initializes the driver's PHY state and attaches the PHY
10137ac6653aSJeff Kirsher  * to the MAC driver.
10147ac6653aSJeff Kirsher  *  Return value:
10157ac6653aSJeff Kirsher  *  0 on success or a negative error code on failure
10167ac6653aSJeff Kirsher  */
10177ac6653aSJeff Kirsher static int stmmac_init_phy(struct net_device *dev)
10187ac6653aSJeff Kirsher {
10197ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
102074371272SJose Abreu 	struct device_node *node;
102174371272SJose Abreu 	int ret;
10227ac6653aSJeff Kirsher 
10234838a540SJose Abreu 	node = priv->plat->phylink_node;
102474371272SJose Abreu 
102542e87024SJose Abreu 	if (node)
102674371272SJose Abreu 		ret = phylink_of_phy_connect(priv->phylink, node, 0);
102742e87024SJose Abreu 
102842e87024SJose Abreu 	/* Some DT bindings do not set-up the PHY handle. Let's try to
102942e87024SJose Abreu 	 * manually parse it
103042e87024SJose Abreu 	 */
103142e87024SJose Abreu 	if (!node || ret) {
103274371272SJose Abreu 		int addr = priv->plat->phy_addr;
103374371272SJose Abreu 		struct phy_device *phydev;
1034f142af2eSSrinivas Kandagatla 
103574371272SJose Abreu 		phydev = mdiobus_get_phy(priv->mii, addr);
103674371272SJose Abreu 		if (!phydev) {
103774371272SJose Abreu 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
10387ac6653aSJeff Kirsher 			return -ENODEV;
10397ac6653aSJeff Kirsher 		}
10408e99fc5fSGiuseppe Cavallaro 
104174371272SJose Abreu 		ret = phylink_connect_phy(priv->phylink, phydev);
104274371272SJose Abreu 	}
1043c51e424dSFlorian Fainelli 
104474371272SJose Abreu 	return ret;
104574371272SJose Abreu }
104674371272SJose Abreu 
104774371272SJose Abreu static int stmmac_phy_setup(struct stmmac_priv *priv)
104874371272SJose Abreu {
1049c63d1e5cSArnd Bergmann 	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
10500060c878SAlexandru Ardelean 	int mode = priv->plat->phy_interface;
105174371272SJose Abreu 	struct phylink *phylink;
105274371272SJose Abreu 
105374371272SJose Abreu 	priv->phylink_config.dev = &priv->dev->dev;
105474371272SJose Abreu 	priv->phylink_config.type = PHYLINK_NETDEV;
1055f213bbe8SJose Abreu 	priv->phylink_config.pcs_poll = true;
105674371272SJose Abreu 
10578dc6051cSJose Abreu 	if (!fwnode)
10588dc6051cSJose Abreu 		fwnode = dev_fwnode(priv->device);
10598dc6051cSJose Abreu 
1060c63d1e5cSArnd Bergmann 	phylink = phylink_create(&priv->phylink_config, fwnode,
106174371272SJose Abreu 				 mode, &stmmac_phylink_mac_ops);
106274371272SJose Abreu 	if (IS_ERR(phylink))
106374371272SJose Abreu 		return PTR_ERR(phylink);
106474371272SJose Abreu 
106574371272SJose Abreu 	priv->phylink = phylink;
10667ac6653aSJeff Kirsher 	return 0;
10677ac6653aSJeff Kirsher }
10687ac6653aSJeff Kirsher 
106971fedb01SJoao Pinto static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1070c24602efSGiuseppe CAVALLARO {
107154139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
107271fedb01SJoao Pinto 	void *head_rx;
107354139cf3SJoao Pinto 	u32 queue;
107454139cf3SJoao Pinto 
107554139cf3SJoao Pinto 	/* Display RX rings */
107654139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
107754139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
107854139cf3SJoao Pinto 
107954139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1080d0225e7dSAlexandre TORGUE 
108171fedb01SJoao Pinto 		if (priv->extend_desc)
108254139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
108371fedb01SJoao Pinto 		else
108454139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
108571fedb01SJoao Pinto 
108671fedb01SJoao Pinto 		/* Display RX ring */
108742de047dSJose Abreu 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
10885bacd778SLABBE Corentin 	}
108954139cf3SJoao Pinto }
1090d0225e7dSAlexandre TORGUE 
109171fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv)
109271fedb01SJoao Pinto {
1093ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
109471fedb01SJoao Pinto 	void *head_tx;
1095ce736788SJoao Pinto 	u32 queue;
1096ce736788SJoao Pinto 
1097ce736788SJoao Pinto 	/* Display TX rings */
1098ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1099ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1100ce736788SJoao Pinto 
1101ce736788SJoao Pinto 		pr_info("\tTX Queue %d rings\n", queue);
110271fedb01SJoao Pinto 
110371fedb01SJoao Pinto 		if (priv->extend_desc)
1104ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
1105579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1106579a25a8SJose Abreu 			head_tx = (void *)tx_q->dma_entx;
110771fedb01SJoao Pinto 		else
1108ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
110971fedb01SJoao Pinto 
111042de047dSJose Abreu 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1111c24602efSGiuseppe CAVALLARO 	}
1112ce736788SJoao Pinto }
1113c24602efSGiuseppe CAVALLARO 
111471fedb01SJoao Pinto static void stmmac_display_rings(struct stmmac_priv *priv)
111571fedb01SJoao Pinto {
111671fedb01SJoao Pinto 	/* Display RX ring */
111771fedb01SJoao Pinto 	stmmac_display_rx_rings(priv);
111871fedb01SJoao Pinto 
111971fedb01SJoao Pinto 	/* Display TX ring */
112071fedb01SJoao Pinto 	stmmac_display_tx_rings(priv);
112171fedb01SJoao Pinto }
112271fedb01SJoao Pinto 
1123286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1124286a8372SGiuseppe CAVALLARO {
1125286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1126286a8372SGiuseppe CAVALLARO 
1127b2f3a481SJose Abreu 	if (mtu >= BUF_SIZE_8KiB)
1128b2f3a481SJose Abreu 		ret = BUF_SIZE_16KiB;
1129b2f3a481SJose Abreu 	else if (mtu >= BUF_SIZE_4KiB)
1130286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1131286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1132286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1133d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1134286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1135286a8372SGiuseppe CAVALLARO 	else
1136d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1137286a8372SGiuseppe CAVALLARO 
1138286a8372SGiuseppe CAVALLARO 	return ret;
1139286a8372SGiuseppe CAVALLARO }
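/* A worked example of the mapping above (a sketch, assuming the usual
 * DEFAULT_BUFSIZE/BUF_SIZE_xKiB values of 1536/2048/4096/8192/16384 bytes
 * from the driver headers):
 *
 *	stmmac_set_bfsize(1500, 0)  ->  DEFAULT_BUFSIZE   (standard MTU)
 *	stmmac_set_bfsize(3000, 0)  ->  BUF_SIZE_4KiB
 *	stmmac_set_bfsize(9000, 0)  ->  BUF_SIZE_16KiB    (jumbo frame)
 *
 * i.e. the DMA buffer is bumped to the smallest bucket that can still hold
 * a full frame for the requested MTU.
 */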
1140286a8372SGiuseppe CAVALLARO 
114132ceabcaSGiuseppe CAVALLARO /**
114271fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
114332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
114454139cf3SJoao Pinto  * @queue: RX queue index
114571fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors,
114632ceabcaSGiuseppe CAVALLARO  * whether basic or extended descriptors are used.
114732ceabcaSGiuseppe CAVALLARO  */
114854139cf3SJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1149c24602efSGiuseppe CAVALLARO {
115054139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
11515bacd778SLABBE Corentin 	int i;
1152c24602efSGiuseppe CAVALLARO 
115371fedb01SJoao Pinto 	/* Clear the RX descriptors */
11545bacd778SLABBE Corentin 	for (i = 0; i < DMA_RX_SIZE; i++)
11555bacd778SLABBE Corentin 		if (priv->extend_desc)
115642de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
11575bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1158583e6361SAaro Koskinen 					(i == DMA_RX_SIZE - 1),
1159583e6361SAaro Koskinen 					priv->dma_buf_sz);
11605bacd778SLABBE Corentin 		else
116142de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
11625bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1163583e6361SAaro Koskinen 					(i == DMA_RX_SIZE - 1),
1164583e6361SAaro Koskinen 					priv->dma_buf_sz);
116571fedb01SJoao Pinto }
116671fedb01SJoao Pinto 
116771fedb01SJoao Pinto /**
116871fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
116971fedb01SJoao Pinto  * @priv: driver private structure
1170ce736788SJoao Pinto  * @queue: TX queue index.
117171fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors,
117271fedb01SJoao Pinto  * whether basic or extended descriptors are used.
117371fedb01SJoao Pinto  */
1174ce736788SJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
117571fedb01SJoao Pinto {
1176ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
117771fedb01SJoao Pinto 	int i;
117871fedb01SJoao Pinto 
117971fedb01SJoao Pinto 	/* Clear the TX descriptors */
1180579a25a8SJose Abreu 	for (i = 0; i < DMA_TX_SIZE; i++) {
1181579a25a8SJose Abreu 		int last = (i == (DMA_TX_SIZE - 1));
1182579a25a8SJose Abreu 		struct dma_desc *p;
1183579a25a8SJose Abreu 
11845bacd778SLABBE Corentin 		if (priv->extend_desc)
1185579a25a8SJose Abreu 			p = &tx_q->dma_etx[i].basic;
1186579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1187579a25a8SJose Abreu 			p = &tx_q->dma_entx[i].basic;
11885bacd778SLABBE Corentin 		else
1189579a25a8SJose Abreu 			p = &tx_q->dma_tx[i];
1190579a25a8SJose Abreu 
1191579a25a8SJose Abreu 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1192579a25a8SJose Abreu 	}
1193c24602efSGiuseppe CAVALLARO }
1194c24602efSGiuseppe CAVALLARO 
1195732fdf0eSGiuseppe CAVALLARO /**
119671fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
119771fedb01SJoao Pinto  * @priv: driver private structure
119871fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors,
119971fedb01SJoao Pinto  * whether basic or extended descriptors are used.
120071fedb01SJoao Pinto  */
120171fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv)
120271fedb01SJoao Pinto {
120354139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1204ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
120554139cf3SJoao Pinto 	u32 queue;
120654139cf3SJoao Pinto 
120771fedb01SJoao Pinto 	/* Clear the RX descriptors */
120854139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
120954139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
121071fedb01SJoao Pinto 
121171fedb01SJoao Pinto 	/* Clear the TX descriptors */
1212ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1213ce736788SJoao Pinto 		stmmac_clear_tx_descriptors(priv, queue);
121471fedb01SJoao Pinto }
121571fedb01SJoao Pinto 
121671fedb01SJoao Pinto /**
1217732fdf0eSGiuseppe CAVALLARO  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1218732fdf0eSGiuseppe CAVALLARO  * @priv: driver private structure
1219732fdf0eSGiuseppe CAVALLARO  * @p: descriptor pointer
1220732fdf0eSGiuseppe CAVALLARO  * @i: descriptor index
122154139cf3SJoao Pinto  * @flags: gfp flag
122254139cf3SJoao Pinto  * @queue: RX queue index
1223732fdf0eSGiuseppe CAVALLARO  * Description: this function is called to allocate a receive buffer, perform
1224732fdf0eSGiuseppe CAVALLARO  * the DMA mapping and init the descriptor.
1225732fdf0eSGiuseppe CAVALLARO  */
1226c24602efSGiuseppe CAVALLARO static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
122754139cf3SJoao Pinto 				  int i, gfp_t flags, u32 queue)
1228c24602efSGiuseppe CAVALLARO {
122954139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
12302af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1231c24602efSGiuseppe CAVALLARO 
12322af6106aSJose Abreu 	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
12332af6106aSJose Abreu 	if (!buf->page)
123456329137SBartlomiej Zolnierkiewicz 		return -ENOMEM;
1235c24602efSGiuseppe CAVALLARO 
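	/* Split Header note (descriptive only): when priv->sph is set, a
	 * second page is taken from the pool so the HW can place the packet
	 * header in the first buffer and the payload in buf->sec_page, as
	 * programmed via stmmac_set_desc_sec_addr() below.
	 */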
123667afd6d1SJose Abreu 	if (priv->sph) {
123767afd6d1SJose Abreu 		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
123867afd6d1SJose Abreu 		if (!buf->sec_page)
123967afd6d1SJose Abreu 			return -ENOMEM;
124067afd6d1SJose Abreu 
124167afd6d1SJose Abreu 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
124267afd6d1SJose Abreu 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
124367afd6d1SJose Abreu 	} else {
124467afd6d1SJose Abreu 		buf->sec_page = NULL;
124567afd6d1SJose Abreu 	}
124667afd6d1SJose Abreu 
12472af6106aSJose Abreu 	buf->addr = page_pool_get_dma_addr(buf->page);
12482af6106aSJose Abreu 	stmmac_set_desc_addr(priv, p, buf->addr);
12492c520b1cSJose Abreu 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
12502c520b1cSJose Abreu 		stmmac_init_desc3(priv, p);
1251c24602efSGiuseppe CAVALLARO 
1252c24602efSGiuseppe CAVALLARO 	return 0;
1253c24602efSGiuseppe CAVALLARO }
1254c24602efSGiuseppe CAVALLARO 
125571fedb01SJoao Pinto /**
125671fedb01SJoao Pinto  * stmmac_free_rx_buffer - free an RX dma buffer
125771fedb01SJoao Pinto  * @priv: private structure
125854139cf3SJoao Pinto  * @queue: RX queue index
125971fedb01SJoao Pinto  * @i: buffer index.
126071fedb01SJoao Pinto  */
126154139cf3SJoao Pinto static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
126256329137SBartlomiej Zolnierkiewicz {
126354139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
12642af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
126554139cf3SJoao Pinto 
12662af6106aSJose Abreu 	if (buf->page)
1267458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
12682af6106aSJose Abreu 	buf->page = NULL;
126967afd6d1SJose Abreu 
127067afd6d1SJose Abreu 	if (buf->sec_page)
1271458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
127267afd6d1SJose Abreu 	buf->sec_page = NULL;
127356329137SBartlomiej Zolnierkiewicz }
127456329137SBartlomiej Zolnierkiewicz 
12757ac6653aSJeff Kirsher /**
127671fedb01SJoao Pinto  * stmmac_free_tx_buffer - free a TX dma buffer
127771fedb01SJoao Pinto  * @priv: private structure
1278ce736788SJoao Pinto  * @queue: TX queue index
127971fedb01SJoao Pinto  * @i: buffer index.
128071fedb01SJoao Pinto  */
1281ce736788SJoao Pinto static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
128271fedb01SJoao Pinto {
1283ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1284ce736788SJoao Pinto 
1285ce736788SJoao Pinto 	if (tx_q->tx_skbuff_dma[i].buf) {
1286ce736788SJoao Pinto 		if (tx_q->tx_skbuff_dma[i].map_as_page)
128771fedb01SJoao Pinto 			dma_unmap_page(priv->device,
1288ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].buf,
1289ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].len,
129071fedb01SJoao Pinto 				       DMA_TO_DEVICE);
129171fedb01SJoao Pinto 		else
129271fedb01SJoao Pinto 			dma_unmap_single(priv->device,
1293ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].buf,
1294ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].len,
129571fedb01SJoao Pinto 					 DMA_TO_DEVICE);
129671fedb01SJoao Pinto 	}
129771fedb01SJoao Pinto 
1298ce736788SJoao Pinto 	if (tx_q->tx_skbuff[i]) {
1299ce736788SJoao Pinto 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1300ce736788SJoao Pinto 		tx_q->tx_skbuff[i] = NULL;
1301ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].buf = 0;
1302ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].map_as_page = false;
130371fedb01SJoao Pinto 	}
130471fedb01SJoao Pinto }
130571fedb01SJoao Pinto 
130671fedb01SJoao Pinto /**
130771fedb01SJoao Pinto  * init_dma_rx_desc_rings - init the RX descriptor rings
13087ac6653aSJeff Kirsher  * @dev: net device structure
13095bacd778SLABBE Corentin  * @flags: gfp flag.
131071fedb01SJoao Pinto  * Description: this function initializes the DMA RX descriptors
13115bacd778SLABBE Corentin  * and allocates the socket buffers. It supports the chained and ring
1312286a8372SGiuseppe CAVALLARO  * modes.
13137ac6653aSJeff Kirsher  */
131471fedb01SJoao Pinto static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
13157ac6653aSJeff Kirsher {
13167ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
131754139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
13185bacd778SLABBE Corentin 	int ret = -ENOMEM;
13191d3028f4SColin Ian King 	int queue;
132054139cf3SJoao Pinto 	int i;
13217ac6653aSJeff Kirsher 
132254139cf3SJoao Pinto 	/* RX INITIALIZATION */
13235bacd778SLABBE Corentin 	netif_dbg(priv, probe, priv->dev,
13245bacd778SLABBE Corentin 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
13255bacd778SLABBE Corentin 
132654139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
132754139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
132854139cf3SJoao Pinto 
132954139cf3SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
133054139cf3SJoao Pinto 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
133154139cf3SJoao Pinto 			  (u32)rx_q->dma_rx_phy);
133254139cf3SJoao Pinto 
1333cbcf0999SJose Abreu 		stmmac_clear_rx_descriptors(priv, queue);
1334cbcf0999SJose Abreu 
13355bacd778SLABBE Corentin 		for (i = 0; i < DMA_RX_SIZE; i++) {
13365bacd778SLABBE Corentin 			struct dma_desc *p;
13375bacd778SLABBE Corentin 
133854139cf3SJoao Pinto 			if (priv->extend_desc)
133954139cf3SJoao Pinto 				p = &((rx_q->dma_erx + i)->basic);
134054139cf3SJoao Pinto 			else
134154139cf3SJoao Pinto 				p = rx_q->dma_rx + i;
134254139cf3SJoao Pinto 
134354139cf3SJoao Pinto 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
134454139cf3SJoao Pinto 						     queue);
13455bacd778SLABBE Corentin 			if (ret)
13465bacd778SLABBE Corentin 				goto err_init_rx_buffers;
13475bacd778SLABBE Corentin 		}
134854139cf3SJoao Pinto 
134954139cf3SJoao Pinto 		rx_q->cur_rx = 0;
135054139cf3SJoao Pinto 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
135154139cf3SJoao Pinto 
1352c24602efSGiuseppe CAVALLARO 		/* Setup the chained descriptor addresses */
1353c24602efSGiuseppe CAVALLARO 		if (priv->mode == STMMAC_CHAIN_MODE) {
135471fedb01SJoao Pinto 			if (priv->extend_desc)
13552c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_erx,
13562c520b1cSJose Abreu 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
135771fedb01SJoao Pinto 			else
13582c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_rx,
13592c520b1cSJose Abreu 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
136071fedb01SJoao Pinto 		}
136154139cf3SJoao Pinto 	}
136254139cf3SJoao Pinto 
136371fedb01SJoao Pinto 	return 0;
136454139cf3SJoao Pinto 
136571fedb01SJoao Pinto err_init_rx_buffers:
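	/* Unwind in reverse order: free the buffers of the partially filled
	 * queue first (i is where the failure happened), then every fully
	 * initialized queue before it.
	 */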
136654139cf3SJoao Pinto 	while (queue >= 0) {
136771fedb01SJoao Pinto 		while (--i >= 0)
136854139cf3SJoao Pinto 			stmmac_free_rx_buffer(priv, queue, i);
136954139cf3SJoao Pinto 
137054139cf3SJoao Pinto 		if (queue == 0)
137154139cf3SJoao Pinto 			break;
137254139cf3SJoao Pinto 
137354139cf3SJoao Pinto 		i = DMA_RX_SIZE;
137454139cf3SJoao Pinto 		queue--;
137554139cf3SJoao Pinto 	}
137654139cf3SJoao Pinto 
137771fedb01SJoao Pinto 	return ret;
137871fedb01SJoao Pinto }
137971fedb01SJoao Pinto 
138071fedb01SJoao Pinto /**
138171fedb01SJoao Pinto  * init_dma_tx_desc_rings - init the TX descriptor rings
138271fedb01SJoao Pinto  * @dev: net device structure.
138371fedb01SJoao Pinto  * Description: this function initializes the DMA TX descriptors
138471fedb01SJoao Pinto  * and resets the per-entry buffer bookkeeping. It supports the chained
138571fedb01SJoao Pinto  * and ring modes.
138671fedb01SJoao Pinto  */
138771fedb01SJoao Pinto static int init_dma_tx_desc_rings(struct net_device *dev)
138871fedb01SJoao Pinto {
138971fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
1390ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1391ce736788SJoao Pinto 	u32 queue;
139271fedb01SJoao Pinto 	int i;
139371fedb01SJoao Pinto 
1394ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1395ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1396ce736788SJoao Pinto 
139771fedb01SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
1398ce736788SJoao Pinto 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1399ce736788SJoao Pinto 			 (u32)tx_q->dma_tx_phy);
140071fedb01SJoao Pinto 
140171fedb01SJoao Pinto 		/* Setup the chained descriptor addresses */
140271fedb01SJoao Pinto 		if (priv->mode == STMMAC_CHAIN_MODE) {
140371fedb01SJoao Pinto 			if (priv->extend_desc)
14042c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_etx,
14052c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1406579a25a8SJose Abreu 			else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
14072c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_tx,
14082c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1409c24602efSGiuseppe CAVALLARO 		}
1410286a8372SGiuseppe CAVALLARO 
1411e3ad57c9SGiuseppe Cavallaro 		for (i = 0; i < DMA_TX_SIZE; i++) {
1412c24602efSGiuseppe CAVALLARO 			struct dma_desc *p;
1413c24602efSGiuseppe CAVALLARO 			if (priv->extend_desc)
1414ce736788SJoao Pinto 				p = &((tx_q->dma_etx + i)->basic);
1415579a25a8SJose Abreu 			else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1416579a25a8SJose Abreu 				p = &((tx_q->dma_entx + i)->basic);
1417c24602efSGiuseppe CAVALLARO 			else
1418ce736788SJoao Pinto 				p = tx_q->dma_tx + i;
1419f748be53SAlexandre TORGUE 
142044c67f85SJose Abreu 			stmmac_clear_desc(priv, p);
1421f748be53SAlexandre TORGUE 
1422ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].buf = 0;
1423ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1424ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].len = 0;
1425ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].last_segment = false;
1426ce736788SJoao Pinto 			tx_q->tx_skbuff[i] = NULL;
14274a7d666aSGiuseppe CAVALLARO 		}
1428c24602efSGiuseppe CAVALLARO 
1429ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
1430ce736788SJoao Pinto 		tx_q->cur_tx = 0;
14318d212a9eSNiklas Cassel 		tx_q->mss = 0;
1432ce736788SJoao Pinto 
1433c22a3f48SJoao Pinto 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1434c22a3f48SJoao Pinto 	}
14357ac6653aSJeff Kirsher 
143671fedb01SJoao Pinto 	return 0;
143771fedb01SJoao Pinto }
143871fedb01SJoao Pinto 
143971fedb01SJoao Pinto /**
144071fedb01SJoao Pinto  * init_dma_desc_rings - init the RX/TX descriptor rings
144171fedb01SJoao Pinto  * @dev: net device structure
144271fedb01SJoao Pinto  * @flags: gfp flag.
144371fedb01SJoao Pinto  * Description: this function initializes the DMA RX/TX descriptors
144471fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
144571fedb01SJoao Pinto  * modes.
144671fedb01SJoao Pinto  */
144771fedb01SJoao Pinto static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
144871fedb01SJoao Pinto {
144971fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
145071fedb01SJoao Pinto 	int ret;
145171fedb01SJoao Pinto 
145271fedb01SJoao Pinto 	ret = init_dma_rx_desc_rings(dev, flags);
145371fedb01SJoao Pinto 	if (ret)
145471fedb01SJoao Pinto 		return ret;
145571fedb01SJoao Pinto 
145671fedb01SJoao Pinto 	ret = init_dma_tx_desc_rings(dev);
145771fedb01SJoao Pinto 
14585bacd778SLABBE Corentin 	stmmac_clear_descriptors(priv);
14597ac6653aSJeff Kirsher 
1460c24602efSGiuseppe CAVALLARO 	if (netif_msg_hw(priv))
1461c24602efSGiuseppe CAVALLARO 		stmmac_display_rings(priv);
146256329137SBartlomiej Zolnierkiewicz 
146356329137SBartlomiej Zolnierkiewicz 	return ret;
14647ac6653aSJeff Kirsher }
14657ac6653aSJeff Kirsher 
146671fedb01SJoao Pinto /**
146771fedb01SJoao Pinto  * dma_free_rx_skbufs - free RX dma buffers
146871fedb01SJoao Pinto  * @priv: private structure
146954139cf3SJoao Pinto  * @queue: RX queue index
147071fedb01SJoao Pinto  */
147154139cf3SJoao Pinto static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
14727ac6653aSJeff Kirsher {
14737ac6653aSJeff Kirsher 	int i;
14747ac6653aSJeff Kirsher 
1475e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_RX_SIZE; i++)
147654139cf3SJoao Pinto 		stmmac_free_rx_buffer(priv, queue, i);
14777ac6653aSJeff Kirsher }
14787ac6653aSJeff Kirsher 
147971fedb01SJoao Pinto /**
148071fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
148171fedb01SJoao Pinto  * @priv: private structure
1482ce736788SJoao Pinto  * @queue: TX queue index
148371fedb01SJoao Pinto  */
1484ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
14857ac6653aSJeff Kirsher {
14867ac6653aSJeff Kirsher 	int i;
14877ac6653aSJeff Kirsher 
148871fedb01SJoao Pinto 	for (i = 0; i < DMA_TX_SIZE; i++)
1489ce736788SJoao Pinto 		stmmac_free_tx_buffer(priv, queue, i);
14907ac6653aSJeff Kirsher }
14917ac6653aSJeff Kirsher 
1492732fdf0eSGiuseppe CAVALLARO /**
149354139cf3SJoao Pinto  * free_dma_rx_desc_resources - free RX dma desc resources
149454139cf3SJoao Pinto  * @priv: private structure
149554139cf3SJoao Pinto  */
149654139cf3SJoao Pinto static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
149754139cf3SJoao Pinto {
149854139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
149954139cf3SJoao Pinto 	u32 queue;
150054139cf3SJoao Pinto 
150154139cf3SJoao Pinto 	/* Free RX queue resources */
150254139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
150354139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
150454139cf3SJoao Pinto 
150554139cf3SJoao Pinto 		/* Release the DMA RX socket buffers */
150654139cf3SJoao Pinto 		dma_free_rx_skbufs(priv, queue);
150754139cf3SJoao Pinto 
150854139cf3SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
150954139cf3SJoao Pinto 		if (!priv->extend_desc)
151054139cf3SJoao Pinto 			dma_free_coherent(priv->device,
151154139cf3SJoao Pinto 					  DMA_RX_SIZE * sizeof(struct dma_desc),
151254139cf3SJoao Pinto 					  rx_q->dma_rx, rx_q->dma_rx_phy);
151354139cf3SJoao Pinto 		else
151454139cf3SJoao Pinto 			dma_free_coherent(priv->device, DMA_RX_SIZE *
151554139cf3SJoao Pinto 					  sizeof(struct dma_extended_desc),
151654139cf3SJoao Pinto 					  rx_q->dma_erx, rx_q->dma_rx_phy);
151754139cf3SJoao Pinto 
15182af6106aSJose Abreu 		kfree(rx_q->buf_pool);
1519c3f812ceSJonathan Lemon 		if (rx_q->page_pool)
15202af6106aSJose Abreu 			page_pool_destroy(rx_q->page_pool);
15212af6106aSJose Abreu 	}
152254139cf3SJoao Pinto }
152354139cf3SJoao Pinto 
152454139cf3SJoao Pinto /**
1525ce736788SJoao Pinto  * free_dma_tx_desc_resources - free TX dma desc resources
1526ce736788SJoao Pinto  * @priv: private structure
1527ce736788SJoao Pinto  */
1528ce736788SJoao Pinto static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1529ce736788SJoao Pinto {
1530ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
153162242260SChristophe Jaillet 	u32 queue;
1532ce736788SJoao Pinto 
1533ce736788SJoao Pinto 	/* Free TX queue resources */
1534ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1535ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1536579a25a8SJose Abreu 		size_t size;
1537579a25a8SJose Abreu 		void *addr;
1538ce736788SJoao Pinto 
1539ce736788SJoao Pinto 		/* Release the DMA TX socket buffers */
1540ce736788SJoao Pinto 		dma_free_tx_skbufs(priv, queue);
1541ce736788SJoao Pinto 
1542579a25a8SJose Abreu 		if (priv->extend_desc) {
1543579a25a8SJose Abreu 			size = sizeof(struct dma_extended_desc);
1544579a25a8SJose Abreu 			addr = tx_q->dma_etx;
1545579a25a8SJose Abreu 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1546579a25a8SJose Abreu 			size = sizeof(struct dma_edesc);
1547579a25a8SJose Abreu 			addr = tx_q->dma_entx;
1548579a25a8SJose Abreu 		} else {
1549579a25a8SJose Abreu 			size = sizeof(struct dma_desc);
1550579a25a8SJose Abreu 			addr = tx_q->dma_tx;
1551579a25a8SJose Abreu 		}
1552579a25a8SJose Abreu 
1553579a25a8SJose Abreu 		size *= DMA_TX_SIZE;
1554579a25a8SJose Abreu 
1555579a25a8SJose Abreu 		dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1556ce736788SJoao Pinto 
1557ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff_dma);
1558ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff);
1559ce736788SJoao Pinto 	}
1560ce736788SJoao Pinto }
1561ce736788SJoao Pinto 
1562ce736788SJoao Pinto /**
156371fedb01SJoao Pinto  * alloc_dma_rx_desc_resources - alloc RX resources.
1564732fdf0eSGiuseppe CAVALLARO  * @priv: private structure
1565732fdf0eSGiuseppe CAVALLARO  * Description: according to which descriptor type is in use (extended or
1566732fdf0eSGiuseppe CAVALLARO  * basic), this function allocates the resources for the RX path. It
1567732fdf0eSGiuseppe CAVALLARO  * pre-allocates the RX buffers from a per-queue page pool in order to
1568732fdf0eSGiuseppe CAVALLARO  * allow a zero-copy mechanism.
1569732fdf0eSGiuseppe CAVALLARO  */
157071fedb01SJoao Pinto static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
157109f8d696SSrinivas Kandagatla {
157254139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
15735bacd778SLABBE Corentin 	int ret = -ENOMEM;
157454139cf3SJoao Pinto 	u32 queue;
157509f8d696SSrinivas Kandagatla 
157654139cf3SJoao Pinto 	/* RX queues buffers and DMA */
157754139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
157854139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
15792af6106aSJose Abreu 		struct page_pool_params pp_params = { 0 };
15804f28bd95SThierry Reding 		unsigned int num_pages;
158154139cf3SJoao Pinto 
158254139cf3SJoao Pinto 		rx_q->queue_index = queue;
158354139cf3SJoao Pinto 		rx_q->priv_data = priv;
158454139cf3SJoao Pinto 
15852af6106aSJose Abreu 		pp_params.flags = PP_FLAG_DMA_MAP;
15862af6106aSJose Abreu 		pp_params.pool_size = DMA_RX_SIZE;
15874f28bd95SThierry Reding 		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
15884f28bd95SThierry Reding 		pp_params.order = ilog2(num_pages);
15892af6106aSJose Abreu 		pp_params.nid = dev_to_node(priv->device);
15902af6106aSJose Abreu 		pp_params.dev = priv->device;
15912af6106aSJose Abreu 		pp_params.dma_dir = DMA_FROM_DEVICE;
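		/* Example (a sketch): with a 4 KiB PAGE_SIZE and a dma_buf_sz
		 * of 8 KiB, num_pages is 2 and pp_params.order is 1, so each
		 * pool allocation is a single order-1 (8 KiB) page backing one
		 * RX buffer.
		 */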
15925bacd778SLABBE Corentin 
15932af6106aSJose Abreu 		rx_q->page_pool = page_pool_create(&pp_params);
15942af6106aSJose Abreu 		if (IS_ERR(rx_q->page_pool)) {
15952af6106aSJose Abreu 			ret = PTR_ERR(rx_q->page_pool);
15962af6106aSJose Abreu 			rx_q->page_pool = NULL;
15972af6106aSJose Abreu 			goto err_dma;
15982af6106aSJose Abreu 		}
15992af6106aSJose Abreu 
1600ec5e5ce1SJose Abreu 		rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
16015bacd778SLABBE Corentin 					 GFP_KERNEL);
16022af6106aSJose Abreu 		if (!rx_q->buf_pool)
160354139cf3SJoao Pinto 			goto err_dma;
16045bacd778SLABBE Corentin 
16055bacd778SLABBE Corentin 		if (priv->extend_desc) {
1606750afb08SLuis Chamberlain 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1607750afb08SLuis Chamberlain 							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
160854139cf3SJoao Pinto 							   &rx_q->dma_rx_phy,
16095bacd778SLABBE Corentin 							   GFP_KERNEL);
161054139cf3SJoao Pinto 			if (!rx_q->dma_erx)
16115bacd778SLABBE Corentin 				goto err_dma;
16125bacd778SLABBE Corentin 
161371fedb01SJoao Pinto 		} else {
1614750afb08SLuis Chamberlain 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1615750afb08SLuis Chamberlain 							  DMA_RX_SIZE * sizeof(struct dma_desc),
161654139cf3SJoao Pinto 							  &rx_q->dma_rx_phy,
161771fedb01SJoao Pinto 							  GFP_KERNEL);
161854139cf3SJoao Pinto 			if (!rx_q->dma_rx)
161971fedb01SJoao Pinto 				goto err_dma;
162071fedb01SJoao Pinto 		}
162154139cf3SJoao Pinto 	}
162271fedb01SJoao Pinto 
162371fedb01SJoao Pinto 	return 0;
162471fedb01SJoao Pinto 
162571fedb01SJoao Pinto err_dma:
162654139cf3SJoao Pinto 	free_dma_rx_desc_resources(priv);
162754139cf3SJoao Pinto 
162871fedb01SJoao Pinto 	return ret;
162971fedb01SJoao Pinto }
163071fedb01SJoao Pinto 
163171fedb01SJoao Pinto /**
163271fedb01SJoao Pinto  * alloc_dma_tx_desc_resources - alloc TX resources.
163371fedb01SJoao Pinto  * @priv: private structure
163471fedb01SJoao Pinto  * Description: according to which descriptor type is in use (extended,
163571fedb01SJoao Pinto  * enhanced for TBS, or basic), this function allocates the descriptor
163671fedb01SJoao Pinto  * rings and the per-entry bookkeeping arrays (tx_skbuff, tx_skbuff_dma)
163771fedb01SJoao Pinto  * for the TX path.
163871fedb01SJoao Pinto  */
163971fedb01SJoao Pinto static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
164071fedb01SJoao Pinto {
1641ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
164271fedb01SJoao Pinto 	int ret = -ENOMEM;
1643ce736788SJoao Pinto 	u32 queue;
164471fedb01SJoao Pinto 
1645ce736788SJoao Pinto 	/* TX queues buffers and DMA */
1646ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1647ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1648579a25a8SJose Abreu 		size_t size;
1649579a25a8SJose Abreu 		void *addr;
1650ce736788SJoao Pinto 
1651ce736788SJoao Pinto 		tx_q->queue_index = queue;
1652ce736788SJoao Pinto 		tx_q->priv_data = priv;
1653ce736788SJoao Pinto 
1654ec5e5ce1SJose Abreu 		tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
1655ce736788SJoao Pinto 					      sizeof(*tx_q->tx_skbuff_dma),
165671fedb01SJoao Pinto 					      GFP_KERNEL);
1657ce736788SJoao Pinto 		if (!tx_q->tx_skbuff_dma)
165862242260SChristophe Jaillet 			goto err_dma;
165971fedb01SJoao Pinto 
1660ec5e5ce1SJose Abreu 		tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
1661ce736788SJoao Pinto 					  sizeof(struct sk_buff *),
166271fedb01SJoao Pinto 					  GFP_KERNEL);
1663ce736788SJoao Pinto 		if (!tx_q->tx_skbuff)
166462242260SChristophe Jaillet 			goto err_dma;
166571fedb01SJoao Pinto 
1666579a25a8SJose Abreu 		if (priv->extend_desc)
1667579a25a8SJose Abreu 			size = sizeof(struct dma_extended_desc);
1668579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1669579a25a8SJose Abreu 			size = sizeof(struct dma_edesc);
1670579a25a8SJose Abreu 		else
1671579a25a8SJose Abreu 			size = sizeof(struct dma_desc);
1672579a25a8SJose Abreu 
1673579a25a8SJose Abreu 		size *= DMA_TX_SIZE;
1674579a25a8SJose Abreu 
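		/* Descriptive note: one coherent allocation backs the whole TX
		 * ring; its element size depends on the descriptor layout in
		 * use - extended (dma_extended_desc), enhanced for TBS
		 * (dma_edesc) or basic (dma_desc).
		 */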
1675579a25a8SJose Abreu 		addr = dma_alloc_coherent(priv->device, size,
1676579a25a8SJose Abreu 					  &tx_q->dma_tx_phy, GFP_KERNEL);
1677579a25a8SJose Abreu 		if (!addr)
167862242260SChristophe Jaillet 			goto err_dma;
1679579a25a8SJose Abreu 
1680579a25a8SJose Abreu 		if (priv->extend_desc)
1681579a25a8SJose Abreu 			tx_q->dma_etx = addr;
1682579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1683579a25a8SJose Abreu 			tx_q->dma_entx = addr;
1684579a25a8SJose Abreu 		else
1685579a25a8SJose Abreu 			tx_q->dma_tx = addr;
16865bacd778SLABBE Corentin 	}
16875bacd778SLABBE Corentin 
16885bacd778SLABBE Corentin 	return 0;
16895bacd778SLABBE Corentin 
169062242260SChristophe Jaillet err_dma:
1691ce736788SJoao Pinto 	free_dma_tx_desc_resources(priv);
169209f8d696SSrinivas Kandagatla 	return ret;
16935bacd778SLABBE Corentin }
169409f8d696SSrinivas Kandagatla 
169571fedb01SJoao Pinto /**
169671fedb01SJoao Pinto  * alloc_dma_desc_resources - alloc TX/RX resources.
169771fedb01SJoao Pinto  * @priv: private structure
169871fedb01SJoao Pinto  * Description: according to which descriptor type is in use (extended or
169971fedb01SJoao Pinto  * basic), this function allocates the resources for the TX and RX paths.
170071fedb01SJoao Pinto  * In case of reception, for example, it pre-allocates the RX buffers in
170171fedb01SJoao Pinto  * order to allow a zero-copy mechanism.
170271fedb01SJoao Pinto  */
170371fedb01SJoao Pinto static int alloc_dma_desc_resources(struct stmmac_priv *priv)
17045bacd778SLABBE Corentin {
170554139cf3SJoao Pinto 	/* RX Allocation */
170671fedb01SJoao Pinto 	int ret = alloc_dma_rx_desc_resources(priv);
170771fedb01SJoao Pinto 
170871fedb01SJoao Pinto 	if (ret)
170971fedb01SJoao Pinto 		return ret;
171071fedb01SJoao Pinto 
171171fedb01SJoao Pinto 	ret = alloc_dma_tx_desc_resources(priv);
171271fedb01SJoao Pinto 
171371fedb01SJoao Pinto 	return ret;
171471fedb01SJoao Pinto }
171571fedb01SJoao Pinto 
171671fedb01SJoao Pinto /**
171771fedb01SJoao Pinto  * free_dma_desc_resources - free dma desc resources
171871fedb01SJoao Pinto  * @priv: private structure
171971fedb01SJoao Pinto  */
172071fedb01SJoao Pinto static void free_dma_desc_resources(struct stmmac_priv *priv)
172171fedb01SJoao Pinto {
172271fedb01SJoao Pinto 	/* Release the DMA RX socket buffers */
172371fedb01SJoao Pinto 	free_dma_rx_desc_resources(priv);
172471fedb01SJoao Pinto 
172571fedb01SJoao Pinto 	/* Release the DMA TX socket buffers */
172671fedb01SJoao Pinto 	free_dma_tx_desc_resources(priv);
172771fedb01SJoao Pinto }
172871fedb01SJoao Pinto 
172971fedb01SJoao Pinto /**
17309eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
17319eb12474Sjpinto  *  @priv: driver private structure
17329eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
17339eb12474Sjpinto  */
17349eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
17359eb12474Sjpinto {
17364f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
17374f6046f5SJoao Pinto 	int queue;
17384f6046f5SJoao Pinto 	u8 mode;
17399eb12474Sjpinto 
17404f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
17414f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1742c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
17434f6046f5SJoao Pinto 	}
17449eb12474Sjpinto }
17459eb12474Sjpinto 
17469eb12474Sjpinto /**
1747ae4f0d46SJoao Pinto  * stmmac_start_rx_dma - start RX DMA channel
1748ae4f0d46SJoao Pinto  * @priv: driver private structure
1749ae4f0d46SJoao Pinto  * @chan: RX channel index
1750ae4f0d46SJoao Pinto  * Description:
1751ae4f0d46SJoao Pinto  * This starts a RX DMA channel
1752ae4f0d46SJoao Pinto  */
1753ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1754ae4f0d46SJoao Pinto {
1755ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1756a4e887faSJose Abreu 	stmmac_start_rx(priv, priv->ioaddr, chan);
1757ae4f0d46SJoao Pinto }
1758ae4f0d46SJoao Pinto 
1759ae4f0d46SJoao Pinto /**
1760ae4f0d46SJoao Pinto  * stmmac_start_tx_dma - start TX DMA channel
1761ae4f0d46SJoao Pinto  * @priv: driver private structure
1762ae4f0d46SJoao Pinto  * @chan: TX channel index
1763ae4f0d46SJoao Pinto  * Description:
1764ae4f0d46SJoao Pinto  * This starts a TX DMA channel
1765ae4f0d46SJoao Pinto  */
1766ae4f0d46SJoao Pinto static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1767ae4f0d46SJoao Pinto {
1768ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1769a4e887faSJose Abreu 	stmmac_start_tx(priv, priv->ioaddr, chan);
1770ae4f0d46SJoao Pinto }
1771ae4f0d46SJoao Pinto 
1772ae4f0d46SJoao Pinto /**
1773ae4f0d46SJoao Pinto  * stmmac_stop_rx_dma - stop RX DMA channel
1774ae4f0d46SJoao Pinto  * @priv: driver private structure
1775ae4f0d46SJoao Pinto  * @chan: RX channel index
1776ae4f0d46SJoao Pinto  * Description:
1777ae4f0d46SJoao Pinto  * This stops a RX DMA channel
1778ae4f0d46SJoao Pinto  */
1779ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1780ae4f0d46SJoao Pinto {
1781ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1782a4e887faSJose Abreu 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1783ae4f0d46SJoao Pinto }
1784ae4f0d46SJoao Pinto 
1785ae4f0d46SJoao Pinto /**
1786ae4f0d46SJoao Pinto  * stmmac_stop_tx_dma - stop TX DMA channel
1787ae4f0d46SJoao Pinto  * @priv: driver private structure
1788ae4f0d46SJoao Pinto  * @chan: TX channel index
1789ae4f0d46SJoao Pinto  * Description:
1790ae4f0d46SJoao Pinto  * This stops a TX DMA channel
1791ae4f0d46SJoao Pinto  */
1792ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1793ae4f0d46SJoao Pinto {
1794ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1795a4e887faSJose Abreu 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1796ae4f0d46SJoao Pinto }
1797ae4f0d46SJoao Pinto 
1798ae4f0d46SJoao Pinto /**
1799ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
1800ae4f0d46SJoao Pinto  * @priv: driver private structure
1801ae4f0d46SJoao Pinto  * Description:
1802ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
1803ae4f0d46SJoao Pinto  */
1804ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
1805ae4f0d46SJoao Pinto {
1806ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1807ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1808ae4f0d46SJoao Pinto 	u32 chan = 0;
1809ae4f0d46SJoao Pinto 
1810ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1811ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
1812ae4f0d46SJoao Pinto 
1813ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1814ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
1815ae4f0d46SJoao Pinto }
1816ae4f0d46SJoao Pinto 
1817ae4f0d46SJoao Pinto /**
1818ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1819ae4f0d46SJoao Pinto  * @priv: driver private structure
1820ae4f0d46SJoao Pinto  * Description:
1821ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
1822ae4f0d46SJoao Pinto  */
1823ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1824ae4f0d46SJoao Pinto {
1825ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1826ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1827ae4f0d46SJoao Pinto 	u32 chan = 0;
1828ae4f0d46SJoao Pinto 
1829ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1830ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
1831ae4f0d46SJoao Pinto 
1832ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1833ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
1834ae4f0d46SJoao Pinto }
1835ae4f0d46SJoao Pinto 
1836ae4f0d46SJoao Pinto /**
18377ac6653aSJeff Kirsher  *  stmmac_dma_operation_mode - HW DMA operation mode
183832ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
1839732fdf0eSGiuseppe CAVALLARO  *  Description: it is used for configuring the DMA operation mode register in
1840732fdf0eSGiuseppe CAVALLARO  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
18417ac6653aSJeff Kirsher  */
18427ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
18437ac6653aSJeff Kirsher {
18446deee222SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
18456deee222SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1846f88203a2SVince Bridgers 	int rxfifosz = priv->plat->rx_fifo_size;
184752a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
18486deee222SJoao Pinto 	u32 txmode = 0;
18496deee222SJoao Pinto 	u32 rxmode = 0;
18506deee222SJoao Pinto 	u32 chan = 0;
1851a0daae13SJose Abreu 	u8 qmode = 0;
1852f88203a2SVince Bridgers 
185311fbf811SThierry Reding 	if (rxfifosz == 0)
185411fbf811SThierry Reding 		rxfifosz = priv->dma_cap.rx_fifo_size;
185552a76235SJose Abreu 	if (txfifosz == 0)
185652a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
185752a76235SJose Abreu 
185852a76235SJose Abreu 	/* Adjust for real per queue fifo size */
185952a76235SJose Abreu 	rxfifosz /= rx_channels_count;
186052a76235SJose Abreu 	txfifosz /= tx_channels_count;
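	/* Example (a sketch): a 16 KiB RX FIFO shared by 4 RX queues leaves
	 * 4 KiB per queue for the stmmac_dma_rx_mode() calls below; the TX
	 * FIFO is split the same way across the TX channels.
	 */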
186111fbf811SThierry Reding 
18626deee222SJoao Pinto 	if (priv->plat->force_thresh_dma_mode) {
18636deee222SJoao Pinto 		txmode = tc;
18646deee222SJoao Pinto 		rxmode = tc;
18656deee222SJoao Pinto 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
18667ac6653aSJeff Kirsher 		/*
18677ac6653aSJeff Kirsher 		 * In case of GMAC, SF mode can be enabled
18687ac6653aSJeff Kirsher 		 * to perform the TX COE in HW. This depends on:
18697ac6653aSJeff Kirsher 		 * 1) TX COE if actually supported
18707ac6653aSJeff Kirsher 		 * 2) There is no bugged Jumbo frame support
18717ac6653aSJeff Kirsher 		 *    that needs to not insert csum in the TDES.
18727ac6653aSJeff Kirsher 		 */
18736deee222SJoao Pinto 		txmode = SF_DMA_MODE;
18746deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
1875b2dec116SSonic Zhang 		priv->xstats.threshold = SF_DMA_MODE;
18766deee222SJoao Pinto 	} else {
18776deee222SJoao Pinto 		txmode = tc;
18786deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
18796deee222SJoao Pinto 	}
18806deee222SJoao Pinto 
18816deee222SJoao Pinto 	/* configure all channels */
1882a0daae13SJose Abreu 	for (chan = 0; chan < rx_channels_count; chan++) {
1883a0daae13SJose Abreu 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
18846deee222SJoao Pinto 
1885a4e887faSJose Abreu 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1886a0daae13SJose Abreu 				rxfifosz, qmode);
18874205c88eSJose Abreu 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
18884205c88eSJose Abreu 				chan);
1889a0daae13SJose Abreu 	}
1890a0daae13SJose Abreu 
1891a0daae13SJose Abreu 	for (chan = 0; chan < tx_channels_count; chan++) {
1892a0daae13SJose Abreu 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1893a0daae13SJose Abreu 
1894a4e887faSJose Abreu 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1895a0daae13SJose Abreu 				txfifosz, qmode);
1896a0daae13SJose Abreu 	}
18977ac6653aSJeff Kirsher }
18987ac6653aSJeff Kirsher 
18997ac6653aSJeff Kirsher /**
1900732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_clean - to manage the transmission completion
190132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
 * @budget: napi budget limiting how many descriptors this call may reclaim
1902ce736788SJoao Pinto  * @queue: TX queue index
1903732fdf0eSGiuseppe CAVALLARO  * Description: it reclaims the transmit resources after transmission completes.
19047ac6653aSJeff Kirsher  */
19058fce3331SJose Abreu static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
19067ac6653aSJeff Kirsher {
1907ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
190838979574SBeniamino Galvani 	unsigned int bytes_compl = 0, pkts_compl = 0;
19098fce3331SJose Abreu 	unsigned int entry, count = 0;
19107ac6653aSJeff Kirsher 
19118fce3331SJose Abreu 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1912a9097a96SGiuseppe CAVALLARO 
19139125cdd1SGiuseppe CAVALLARO 	priv->xstats.tx_clean++;
19149125cdd1SGiuseppe CAVALLARO 
19158d5f4b07SBernd Edlinger 	entry = tx_q->dirty_tx;
19168fce3331SJose Abreu 	while ((entry != tx_q->cur_tx) && (count < budget)) {
1917ce736788SJoao Pinto 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1918c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
1919c363b658SFabrice Gasnier 		int status;
1920c24602efSGiuseppe CAVALLARO 
1921c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
1922ce736788SJoao Pinto 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1923579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1924579a25a8SJose Abreu 			p = &tx_q->dma_entx[entry].basic;
1925c24602efSGiuseppe CAVALLARO 		else
1926ce736788SJoao Pinto 			p = tx_q->dma_tx + entry;
19277ac6653aSJeff Kirsher 
192842de047dSJose Abreu 		status = stmmac_tx_status(priv, &priv->dev->stats,
192942de047dSJose Abreu 				&priv->xstats, p, priv->ioaddr);
1930c363b658SFabrice Gasnier 		/* Check if the descriptor is owned by the DMA */
1931c363b658SFabrice Gasnier 		if (unlikely(status & tx_dma_own))
1932c363b658SFabrice Gasnier 			break;
1933c363b658SFabrice Gasnier 
19348fce3331SJose Abreu 		count++;
19358fce3331SJose Abreu 
1936a6b25da5SNiklas Cassel 		/* Make sure descriptor fields are read after reading
1937a6b25da5SNiklas Cassel 		 * the own bit.
1938a6b25da5SNiklas Cassel 		 */
1939a6b25da5SNiklas Cassel 		dma_rmb();
1940a6b25da5SNiklas Cassel 
1941c363b658SFabrice Gasnier 		/* Just consider the last segment and ...*/
1942c363b658SFabrice Gasnier 		if (likely(!(status & tx_not_ls))) {
1943c363b658SFabrice Gasnier 			/* ... verify the status error condition */
1944c363b658SFabrice Gasnier 			if (unlikely(status & tx_err)) {
1945c363b658SFabrice Gasnier 				priv->dev->stats.tx_errors++;
1946c363b658SFabrice Gasnier 			} else {
19477ac6653aSJeff Kirsher 				priv->dev->stats.tx_packets++;
19487ac6653aSJeff Kirsher 				priv->xstats.tx_pkt_n++;
1949c363b658SFabrice Gasnier 			}
1950ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_tx_hwtstamp(priv, p, skb);
19517ac6653aSJeff Kirsher 		}
19527ac6653aSJeff Kirsher 
1953ce736788SJoao Pinto 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1954ce736788SJoao Pinto 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1955362b37beSGiuseppe CAVALLARO 				dma_unmap_page(priv->device,
1956ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].buf,
1957ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].len,
19587ac6653aSJeff Kirsher 					       DMA_TO_DEVICE);
1959362b37beSGiuseppe CAVALLARO 			else
1960362b37beSGiuseppe CAVALLARO 				dma_unmap_single(priv->device,
1961ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].buf,
1962ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].len,
1963362b37beSGiuseppe CAVALLARO 						 DMA_TO_DEVICE);
1964ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].buf = 0;
1965ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].len = 0;
1966ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1967cf32deecSRayagond Kokatanur 		}
1968f748be53SAlexandre TORGUE 
19692c520b1cSJose Abreu 		stmmac_clean_desc3(priv, tx_q, p);
1970f748be53SAlexandre TORGUE 
1971ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1972ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
19737ac6653aSJeff Kirsher 
19747ac6653aSJeff Kirsher 		if (likely(skb != NULL)) {
197538979574SBeniamino Galvani 			pkts_compl++;
197638979574SBeniamino Galvani 			bytes_compl += skb->len;
19777c565c33SEric W. Biederman 			dev_consume_skb_any(skb);
1978ce736788SJoao Pinto 			tx_q->tx_skbuff[entry] = NULL;
19797ac6653aSJeff Kirsher 		}
19807ac6653aSJeff Kirsher 
198142de047dSJose Abreu 		stmmac_release_tx_desc(priv, p, priv->mode);
19827ac6653aSJeff Kirsher 
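		/* STMMAC_GET_ENTRY() advances the index modulo the ring size,
		 * so entry wraps back to 0 after the last descriptor.
		 */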
1983e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
19847ac6653aSJeff Kirsher 	}
1985ce736788SJoao Pinto 	tx_q->dirty_tx = entry;
198638979574SBeniamino Galvani 
1987c22a3f48SJoao Pinto 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1988c22a3f48SJoao Pinto 				  pkts_compl, bytes_compl);
198938979574SBeniamino Galvani 
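	/* Descriptive note: the queue is only woken once more than
	 * STMMAC_TX_THRESH descriptors are free again, so the queue state is
	 * not bounced on every single reclaimed packet.
	 */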
1990c22a3f48SJoao Pinto 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1991c22a3f48SJoao Pinto 								queue))) &&
1992c22a3f48SJoao Pinto 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1993c22a3f48SJoao Pinto 
1994b3e51069SLABBE Corentin 		netif_dbg(priv, tx_done, priv->dev,
1995b3e51069SLABBE Corentin 			  "%s: restart transmit\n", __func__);
1996c22a3f48SJoao Pinto 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
19977ac6653aSJeff Kirsher 	}
1998d765955dSGiuseppe CAVALLARO 
1999d765955dSGiuseppe CAVALLARO 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
2000d765955dSGiuseppe CAVALLARO 		stmmac_enable_eee_mode(priv);
2001f5351ef7SGiuseppe CAVALLARO 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
2002d765955dSGiuseppe CAVALLARO 	}
20038fce3331SJose Abreu 
20044ccb4585SJose Abreu 	/* We still have pending packets, let's call for a new scheduling */
20054ccb4585SJose Abreu 	if (tx_q->dirty_tx != tx_q->cur_tx)
20063755b21bSJose Abreu 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
20074ccb4585SJose Abreu 
20088fce3331SJose Abreu 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
20098fce3331SJose Abreu 
20108fce3331SJose Abreu 	return count;
20117ac6653aSJeff Kirsher }
20127ac6653aSJeff Kirsher 
20137ac6653aSJeff Kirsher /**
2014732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
201532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
20165bacd778SLABBE Corentin  * @chan: channel index
20177ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
2018732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
20197ac6653aSJeff Kirsher  */
20205bacd778SLABBE Corentin static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
20217ac6653aSJeff Kirsher {
2022ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2023ce736788SJoao Pinto 
2024c22a3f48SJoao Pinto 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
20257ac6653aSJeff Kirsher 
2026ae4f0d46SJoao Pinto 	stmmac_stop_tx_dma(priv, chan);
2027ce736788SJoao Pinto 	dma_free_tx_skbufs(priv, chan);
2028579a25a8SJose Abreu 	stmmac_clear_tx_descriptors(priv, chan);
2029ce736788SJoao Pinto 	tx_q->dirty_tx = 0;
2030ce736788SJoao Pinto 	tx_q->cur_tx = 0;
20318d212a9eSNiklas Cassel 	tx_q->mss = 0;
2032c22a3f48SJoao Pinto 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2033f421031eSJongsung Kim 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2034f421031eSJongsung Kim 			    tx_q->dma_tx_phy, chan);
2035ae4f0d46SJoao Pinto 	stmmac_start_tx_dma(priv, chan);
20367ac6653aSJeff Kirsher 
20377ac6653aSJeff Kirsher 	priv->dev->stats.tx_errors++;
2038c22a3f48SJoao Pinto 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
20397ac6653aSJeff Kirsher }
20407ac6653aSJeff Kirsher 
204132ceabcaSGiuseppe CAVALLARO /**
20426deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
20436deee222SJoao Pinto  *  @priv: driver private structure
20446deee222SJoao Pinto  *  @txmode: TX operating mode
20456deee222SJoao Pinto  *  @rxmode: RX operating mode
20466deee222SJoao Pinto  *  @chan: channel index
20476deee222SJoao Pinto  *  Description: it is used for configuring the DMA operation mode at
20486deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
20496deee222SJoao Pinto  *  mode.
20506deee222SJoao Pinto  */
20516deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
20526deee222SJoao Pinto 					  u32 rxmode, u32 chan)
20536deee222SJoao Pinto {
2054a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2055a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
205652a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
205752a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
20586deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
205952a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
20606deee222SJoao Pinto 
20616deee222SJoao Pinto 	if (rxfifosz == 0)
20626deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
206352a76235SJose Abreu 	if (txfifosz == 0)
206452a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
206552a76235SJose Abreu 
206652a76235SJose Abreu 	/* Adjust for real per queue fifo size */
206752a76235SJose Abreu 	rxfifosz /= rx_channels_count;
206852a76235SJose Abreu 	txfifosz /= tx_channels_count;
20696deee222SJoao Pinto 
2070ab0204e3SJose Abreu 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2071ab0204e3SJose Abreu 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
20726deee222SJoao Pinto }
20736deee222SJoao Pinto 
20748bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
20758bf993a5SJose Abreu {
207663a550fcSJose Abreu 	int ret;
20778bf993a5SJose Abreu 
2078c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
20798bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2080c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
20818bf993a5SJose Abreu 		stmmac_global_err(priv);
2082c10d4c82SJose Abreu 		return true;
2083c10d4c82SJose Abreu 	}
2084c10d4c82SJose Abreu 
2085c10d4c82SJose Abreu 	return false;
20868bf993a5SJose Abreu }
20878bf993a5SJose Abreu 
20888fce3331SJose Abreu static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
20898fce3331SJose Abreu {
20908fce3331SJose Abreu 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
20918fce3331SJose Abreu 						 &priv->xstats, chan);
20928fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[chan];
2093021bd5e3SJose Abreu 	unsigned long flags;
20948fce3331SJose Abreu 
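	/* Descriptive note: this is the usual NAPI hand-off. The per-channel
	 * DMA interrupt is masked under ch->lock before the matching NAPI
	 * instance is scheduled, so further events for this channel are
	 * handled from the poll routine, which re-enables the interrupt once
	 * it has done its work.
	 */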
20954ccb4585SJose Abreu 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
20963ba07debSJose Abreu 		if (napi_schedule_prep(&ch->rx_napi)) {
2097021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2098021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2099021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
21003ba07debSJose Abreu 			__napi_schedule_irqoff(&ch->rx_napi);
21013ba07debSJose Abreu 		}
21024ccb4585SJose Abreu 	}
21034ccb4585SJose Abreu 
2104021bd5e3SJose Abreu 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2105021bd5e3SJose Abreu 		if (napi_schedule_prep(&ch->tx_napi)) {
2106021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2107021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2108021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
2109021bd5e3SJose Abreu 			__napi_schedule_irqoff(&ch->tx_napi);
2110021bd5e3SJose Abreu 		}
2111021bd5e3SJose Abreu 	}
21128fce3331SJose Abreu 
21138fce3331SJose Abreu 	return status;
21148fce3331SJose Abreu }
21158fce3331SJose Abreu 
21166deee222SJoao Pinto /**
2117732fdf0eSGiuseppe CAVALLARO  * stmmac_dma_interrupt - DMA ISR
211832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
211932ceabcaSGiuseppe CAVALLARO  * Description: this is the DMA ISR. It is called by the main ISR.
2120732fdf0eSGiuseppe CAVALLARO  * It calls the dwmac DMA routine and schedules the poll method in case
2121732fdf0eSGiuseppe CAVALLARO  * some work can be done.
212232ceabcaSGiuseppe CAVALLARO  */
21237ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv)
21247ac6653aSJeff Kirsher {
2125d62a107aSJoao Pinto 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
21265a6a0445SNiklas Cassel 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
21275a6a0445SNiklas Cassel 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
21285a6a0445SNiklas Cassel 				tx_channel_count : rx_channel_count;
2129d62a107aSJoao Pinto 	u32 chan;
21308ac60ffbSKees Cook 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
21318ac60ffbSKees Cook 
21328ac60ffbSKees Cook 	/* Make sure we never check beyond our status buffer. */
21338ac60ffbSKees Cook 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
21348ac60ffbSKees Cook 		channels_to_check = ARRAY_SIZE(status);
213568e5cfafSJoao Pinto 
21365a6a0445SNiklas Cassel 	for (chan = 0; chan < channels_to_check; chan++)
21378fce3331SJose Abreu 		status[chan] = stmmac_napi_check(priv, chan);
2138d62a107aSJoao Pinto 
21395a6a0445SNiklas Cassel 	for (chan = 0; chan < tx_channel_count; chan++) {
21405a6a0445SNiklas Cassel 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
21417ac6653aSJeff Kirsher 			/* Try to bump up the dma threshold on this failure */
2142b2dec116SSonic Zhang 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2143b2dec116SSonic Zhang 			    (tc <= 256)) {
21447ac6653aSJeff Kirsher 				tc += 64;
2145c405abe2SSonic Zhang 				if (priv->plat->force_thresh_dma_mode)
2146d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2147d62a107aSJoao Pinto 								      tc,
2148d62a107aSJoao Pinto 								      tc,
2149d62a107aSJoao Pinto 								      chan);
2150c405abe2SSonic Zhang 				else
2151d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2152d62a107aSJoao Pinto 								    tc,
2153d62a107aSJoao Pinto 								    SF_DMA_MODE,
2154d62a107aSJoao Pinto 								    chan);
21557ac6653aSJeff Kirsher 				priv->xstats.threshold = tc;
21567ac6653aSJeff Kirsher 			}
21575a6a0445SNiklas Cassel 		} else if (unlikely(status[chan] == tx_hard_error)) {
21584e593262SJoao Pinto 			stmmac_tx_err(priv, chan);
21597ac6653aSJeff Kirsher 		}
2160d62a107aSJoao Pinto 	}
2161d62a107aSJoao Pinto }
21627ac6653aSJeff Kirsher 
216332ceabcaSGiuseppe CAVALLARO /**
216432ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
216532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
216632ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq since the counters are managed in SW.
216732ceabcaSGiuseppe CAVALLARO  */
21681c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
21691c901a46SGiuseppe CAVALLARO {
21701c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
21711c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
21721c901a46SGiuseppe CAVALLARO 
21733b1dd2c5SJose Abreu 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
21744f795b25SGiuseppe CAVALLARO 
21754f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
21763b1dd2c5SJose Abreu 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
21771c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
21784f795b25SGiuseppe CAVALLARO 	} else
217938ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
21801c901a46SGiuseppe CAVALLARO }
21811c901a46SGiuseppe CAVALLARO 
2182732fdf0eSGiuseppe CAVALLARO /**
2183732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
218432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
218519e30c14SGiuseppe CAVALLARO  * Description:
218619e30c14SGiuseppe CAVALLARO  *  new GMAC chip generations have a register to indicate the
2187e7434821SGiuseppe CAVALLARO  *  presence of the optional features/functions.
218819e30c14SGiuseppe CAVALLARO  *  This can also be used to override the value passed through the
218919e30c14SGiuseppe CAVALLARO  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2190e7434821SGiuseppe CAVALLARO  */
2191e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2192e7434821SGiuseppe CAVALLARO {
2193a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2194e7434821SGiuseppe CAVALLARO }
2195e7434821SGiuseppe CAVALLARO 
219632ceabcaSGiuseppe CAVALLARO /**
2197732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
219832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
219932ceabcaSGiuseppe CAVALLARO  * Description:
220032ceabcaSGiuseppe CAVALLARO  * it verifies that the MAC address is valid; in case it is not, it
220132ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
220232ceabcaSGiuseppe CAVALLARO  */
2203bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2204bfab27a1SGiuseppe CAVALLARO {
2205bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2206c10d4c82SJose Abreu 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2207bfab27a1SGiuseppe CAVALLARO 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2208f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
2209af649352SJisheng Zhang 		dev_info(priv->device, "device MAC address %pM\n",
2210bfab27a1SGiuseppe CAVALLARO 			 priv->dev->dev_addr);
2211bfab27a1SGiuseppe CAVALLARO 	}
2212c88460b7SHans de Goede }
2213bfab27a1SGiuseppe CAVALLARO 
221432ceabcaSGiuseppe CAVALLARO /**
2215732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
221632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
221732ceabcaSGiuseppe CAVALLARO  * Description:
221832ceabcaSGiuseppe CAVALLARO  * It inits the DMA by invoking the specific MAC/GMAC callback.
221932ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
222032ceabcaSGiuseppe CAVALLARO  * if these are not passed, a default is kept for the MAC or GMAC.
222132ceabcaSGiuseppe CAVALLARO  */
22220f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv)
22230f1f88a8SGiuseppe CAVALLARO {
222447f2a9ceSJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
222547f2a9ceSJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
222624aaed0cSJose Abreu 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
222754139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q;
2228ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
222947f2a9ceSJoao Pinto 	u32 chan = 0;
2230c24602efSGiuseppe CAVALLARO 	int atds = 0;
2231495db273SGiuseppe Cavallaro 	int ret = 0;
22320f1f88a8SGiuseppe CAVALLARO 
2233a332e2faSNiklas Cassel 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2234a332e2faSNiklas Cassel 		dev_err(priv->device, "Invalid DMA configuration\n");
223589ab75bfSNiklas Cassel 		return -EINVAL;
22360f1f88a8SGiuseppe CAVALLARO 	}
22370f1f88a8SGiuseppe CAVALLARO 
2238c24602efSGiuseppe CAVALLARO 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2239c24602efSGiuseppe CAVALLARO 		atds = 1;
2240c24602efSGiuseppe CAVALLARO 
2241a4e887faSJose Abreu 	ret = stmmac_reset(priv, priv->ioaddr);
2242495db273SGiuseppe Cavallaro 	if (ret) {
2243495db273SGiuseppe Cavallaro 		dev_err(priv->device, "Failed to reset the dma\n");
2244495db273SGiuseppe Cavallaro 		return ret;
2245495db273SGiuseppe Cavallaro 	}
2246495db273SGiuseppe Cavallaro 
22477d9e6c5aSJose Abreu 	/* DMA Configuration */
22487d9e6c5aSJose Abreu 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
22497d9e6c5aSJose Abreu 
22507d9e6c5aSJose Abreu 	if (priv->plat->axi)
22517d9e6c5aSJose Abreu 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
22527d9e6c5aSJose Abreu 
2253af8f3fb7SWeifeng Voon 	/* DMA CSR Channel configuration */
2254af8f3fb7SWeifeng Voon 	for (chan = 0; chan < dma_csr_ch; chan++)
2255af8f3fb7SWeifeng Voon 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2256af8f3fb7SWeifeng Voon 
225747f2a9ceSJoao Pinto 	/* DMA RX Channel Configuration */
225847f2a9ceSJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++) {
225954139cf3SJoao Pinto 		rx_q = &priv->rx_queue[chan];
226054139cf3SJoao Pinto 
226124aaed0cSJose Abreu 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
226224aaed0cSJose Abreu 				    rx_q->dma_rx_phy, chan);
226347f2a9ceSJoao Pinto 
226454139cf3SJoao Pinto 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2265f748be53SAlexandre TORGUE 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2266a4e887faSJose Abreu 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2267a4e887faSJose Abreu 				       rx_q->rx_tail_addr, chan);
226847f2a9ceSJoao Pinto 	}
226947f2a9ceSJoao Pinto 
227047f2a9ceSJoao Pinto 	/* DMA TX Channel Configuration */
227147f2a9ceSJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++) {
2272ce736788SJoao Pinto 		tx_q = &priv->tx_queue[chan];
2273ce736788SJoao Pinto 
227424aaed0cSJose Abreu 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
227524aaed0cSJose Abreu 				    tx_q->dma_tx_phy, chan);
2276f748be53SAlexandre TORGUE 
22770431100bSJose Abreu 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2278a4e887faSJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2279a4e887faSJose Abreu 				       tx_q->tx_tail_addr, chan);
228047f2a9ceSJoao Pinto 	}
228124aaed0cSJose Abreu 
2282495db273SGiuseppe Cavallaro 	return ret;
22830f1f88a8SGiuseppe CAVALLARO }
22840f1f88a8SGiuseppe CAVALLARO 
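/* (Re)arm the per-queue TX coalescing timer with the configured interval */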
22858fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
22868fce3331SJose Abreu {
22878fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
22888fce3331SJose Abreu 
22898fce3331SJose Abreu 	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
22908fce3331SJose Abreu }
22918fce3331SJose Abreu 
2292bfab27a1SGiuseppe CAVALLARO /**
2293732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
22949125cdd1SGiuseppe CAVALLARO  * @t: timer_list pointer
22959125cdd1SGiuseppe CAVALLARO  * Description:
22969125cdd1SGiuseppe CAVALLARO  * This is the timer handler that schedules the TX NAPI poll to run stmmac_tx_clean.
22979125cdd1SGiuseppe CAVALLARO  */
2298e99e88a9SKees Cook static void stmmac_tx_timer(struct timer_list *t)
22999125cdd1SGiuseppe CAVALLARO {
23008fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
23018fce3331SJose Abreu 	struct stmmac_priv *priv = tx_q->priv_data;
23028fce3331SJose Abreu 	struct stmmac_channel *ch;
23039125cdd1SGiuseppe CAVALLARO 
23048fce3331SJose Abreu 	ch = &priv->channel[tx_q->queue_index];
23058fce3331SJose Abreu 
2306021bd5e3SJose Abreu 	if (likely(napi_schedule_prep(&ch->tx_napi))) {
2307021bd5e3SJose Abreu 		unsigned long flags;
2308021bd5e3SJose Abreu 
2309021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
2310021bd5e3SJose Abreu 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2311021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
23124ccb4585SJose Abreu 		__napi_schedule(&ch->tx_napi);
2313021bd5e3SJose Abreu 	}
23149125cdd1SGiuseppe CAVALLARO }
23159125cdd1SGiuseppe CAVALLARO 
23169125cdd1SGiuseppe CAVALLARO /**
2317d429b66eSJose Abreu  * stmmac_init_coalesce - init mitigation options.
231832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
23199125cdd1SGiuseppe CAVALLARO  * Description:
2320d429b66eSJose Abreu  * This inits the coalesce parameters: i.e. timer rate,
23219125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
23229125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
23239125cdd1SGiuseppe CAVALLARO  */
2324d429b66eSJose Abreu static void stmmac_init_coalesce(struct stmmac_priv *priv)
23259125cdd1SGiuseppe CAVALLARO {
23268fce3331SJose Abreu 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
23278fce3331SJose Abreu 	u32 chan;
23288fce3331SJose Abreu 
23299125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
23309125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2331d429b66eSJose Abreu 	priv->rx_coal_frames = STMMAC_RX_FRAMES;
23328fce3331SJose Abreu 
23338fce3331SJose Abreu 	for (chan = 0; chan < tx_channel_count; chan++) {
23348fce3331SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
23358fce3331SJose Abreu 
23368fce3331SJose Abreu 		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
23378fce3331SJose Abreu 	}
23389125cdd1SGiuseppe CAVALLARO }
23399125cdd1SGiuseppe CAVALLARO 
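/* Program the TX and RX descriptor ring lengths for all DMA channels */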
23404854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
23414854ab99SJoao Pinto {
23424854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
23434854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
23444854ab99SJoao Pinto 	u32 chan;
23454854ab99SJoao Pinto 
23464854ab99SJoao Pinto 	/* set TX ring length */
23474854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2348a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
23494854ab99SJoao Pinto 				(DMA_TX_SIZE - 1), chan);
23504854ab99SJoao Pinto 
23514854ab99SJoao Pinto 	/* set RX ring length */
23524854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2353a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
23544854ab99SJoao Pinto 				(DMA_RX_SIZE - 1), chan);
23554854ab99SJoao Pinto }
23564854ab99SJoao Pinto 
23579125cdd1SGiuseppe CAVALLARO /**
23586a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
23596a3a7193SJoao Pinto  *  @priv: driver private structure
23606a3a7193SJoao Pinto  *  Description: It is used for setting the TX queue weights
23616a3a7193SJoao Pinto  */
23626a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
23636a3a7193SJoao Pinto {
23646a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
23656a3a7193SJoao Pinto 	u32 weight;
23666a3a7193SJoao Pinto 	u32 queue;
23676a3a7193SJoao Pinto 
23686a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
23696a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
2370c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
23716a3a7193SJoao Pinto 	}
23726a3a7193SJoao Pinto }
23736a3a7193SJoao Pinto 
23746a3a7193SJoao Pinto /**
237519d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
237619d91873SJoao Pinto  *  @priv: driver private structure
237719d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
237819d91873SJoao Pinto  */
237919d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
238019d91873SJoao Pinto {
238119d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
238219d91873SJoao Pinto 	u32 mode_to_use;
238319d91873SJoao Pinto 	u32 queue;
238419d91873SJoao Pinto 
238544781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
238644781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
238719d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
238819d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
238919d91873SJoao Pinto 			continue;
239019d91873SJoao Pinto 
2391c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
239219d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
239319d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
239419d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
239519d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
239619d91873SJoao Pinto 				queue);
239719d91873SJoao Pinto 	}
239819d91873SJoao Pinto }
239919d91873SJoao Pinto 
240019d91873SJoao Pinto /**
2401d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2402d43042f4SJoao Pinto  *  @priv: driver private structure
2403d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
2404d43042f4SJoao Pinto  */
2405d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2406d43042f4SJoao Pinto {
2407d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2408d43042f4SJoao Pinto 	u32 queue;
2409d43042f4SJoao Pinto 	u32 chan;
2410d43042f4SJoao Pinto 
2411d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2412d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
2413c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2414d43042f4SJoao Pinto 	}
2415d43042f4SJoao Pinto }
2416d43042f4SJoao Pinto 
2417d43042f4SJoao Pinto /**
2418a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2419a8f5102aSJoao Pinto  *  @priv: driver private structure
2420a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
2421a8f5102aSJoao Pinto  */
2422a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2423a8f5102aSJoao Pinto {
2424a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2425a8f5102aSJoao Pinto 	u32 queue;
2426a8f5102aSJoao Pinto 	u32 prio;
2427a8f5102aSJoao Pinto 
2428a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2429a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2430a8f5102aSJoao Pinto 			continue;
2431a8f5102aSJoao Pinto 
2432a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
2433c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2434a8f5102aSJoao Pinto 	}
2435a8f5102aSJoao Pinto }
2436a8f5102aSJoao Pinto 
2437a8f5102aSJoao Pinto /**
2438a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2439a8f5102aSJoao Pinto  *  @priv: driver private structure
2440a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
2441a8f5102aSJoao Pinto  */
2442a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2443a8f5102aSJoao Pinto {
2444a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2445a8f5102aSJoao Pinto 	u32 queue;
2446a8f5102aSJoao Pinto 	u32 prio;
2447a8f5102aSJoao Pinto 
2448a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
2449a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2450a8f5102aSJoao Pinto 			continue;
2451a8f5102aSJoao Pinto 
2452a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
2453c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2454a8f5102aSJoao Pinto 	}
2455a8f5102aSJoao Pinto }
2456a8f5102aSJoao Pinto 
2457a8f5102aSJoao Pinto /**
2458abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2459abe80fdcSJoao Pinto  *  @priv: driver private structure
2460abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
2461abe80fdcSJoao Pinto  */
2462abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2463abe80fdcSJoao Pinto {
2464abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2465abe80fdcSJoao Pinto 	u32 queue;
2466abe80fdcSJoao Pinto 	u8 packet;
2467abe80fdcSJoao Pinto 
2468abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2469abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
2470abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2471abe80fdcSJoao Pinto 			continue;
2472abe80fdcSJoao Pinto 
2473abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2474c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2475abe80fdcSJoao Pinto 	}
2476abe80fdcSJoao Pinto }
2477abe80fdcSJoao Pinto 
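/* Program RSS only when both the HW capability and the platform allow it;
 * the feature is then enabled or disabled based on NETIF_F_RXHASH before
 * the hash configuration is written for all RX queues in use.
 */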
247876067459SJose Abreu static void stmmac_mac_config_rss(struct stmmac_priv *priv)
247976067459SJose Abreu {
248076067459SJose Abreu 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
248176067459SJose Abreu 		priv->rss.enable = false;
248276067459SJose Abreu 		return;
248376067459SJose Abreu 	}
248476067459SJose Abreu 
248576067459SJose Abreu 	if (priv->dev->features & NETIF_F_RXHASH)
248676067459SJose Abreu 		priv->rss.enable = true;
248776067459SJose Abreu 	else
248876067459SJose Abreu 		priv->rss.enable = false;
248976067459SJose Abreu 
249076067459SJose Abreu 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
249176067459SJose Abreu 			     priv->plat->rx_queues_to_use);
249276067459SJose Abreu }
249376067459SJose Abreu 
2494abe80fdcSJoao Pinto /**
2495d0a9c9f9SJoao Pinto  *  stmmac_mtl_configuration - Configure MTL
2496d0a9c9f9SJoao Pinto  *  @priv: driver private structure
2497d0a9c9f9SJoao Pinto  *  Description: It is used for configuring MTL
2498d0a9c9f9SJoao Pinto  */
2499d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2500d0a9c9f9SJoao Pinto {
2501d0a9c9f9SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2502d0a9c9f9SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2503d0a9c9f9SJoao Pinto 
2504c10d4c82SJose Abreu 	if (tx_queues_count > 1)
25056a3a7193SJoao Pinto 		stmmac_set_tx_queue_weight(priv);
25066a3a7193SJoao Pinto 
2507d0a9c9f9SJoao Pinto 	/* Configure MTL RX algorithms */
2508c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2509c10d4c82SJose Abreu 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2510d0a9c9f9SJoao Pinto 				priv->plat->rx_sched_algorithm);
2511d0a9c9f9SJoao Pinto 
2512d0a9c9f9SJoao Pinto 	/* Configure MTL TX algorithms */
2513c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2514c10d4c82SJose Abreu 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2515d0a9c9f9SJoao Pinto 				priv->plat->tx_sched_algorithm);
2516d0a9c9f9SJoao Pinto 
251719d91873SJoao Pinto 	/* Configure CBS in AVB TX queues */
2518c10d4c82SJose Abreu 	if (tx_queues_count > 1)
251919d91873SJoao Pinto 		stmmac_configure_cbs(priv);
252019d91873SJoao Pinto 
2521d43042f4SJoao Pinto 	/* Map RX MTL to DMA channels */
2522d43042f4SJoao Pinto 	stmmac_rx_queue_dma_chan_map(priv);
2523d43042f4SJoao Pinto 
2524d0a9c9f9SJoao Pinto 	/* Enable MAC RX Queues */
2525d0a9c9f9SJoao Pinto 	stmmac_mac_enable_rx_queues(priv);
25266deee222SJoao Pinto 
2527a8f5102aSJoao Pinto 	/* Set RX priorities */
2528c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2529a8f5102aSJoao Pinto 		stmmac_mac_config_rx_queues_prio(priv);
2530a8f5102aSJoao Pinto 
2531a8f5102aSJoao Pinto 	/* Set TX priorities */
2532c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2533a8f5102aSJoao Pinto 		stmmac_mac_config_tx_queues_prio(priv);
2534abe80fdcSJoao Pinto 
2535abe80fdcSJoao Pinto 	/* Set RX routing */
2536c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2537abe80fdcSJoao Pinto 		stmmac_mac_config_rx_queues_routing(priv);
253876067459SJose Abreu 
253976067459SJose Abreu 	/* Receive Side Scaling */
254076067459SJose Abreu 	if (rx_queues_count > 1)
254176067459SJose Abreu 		stmmac_mac_config_rss(priv);
2542d0a9c9f9SJoao Pinto }
2543d0a9c9f9SJoao Pinto 
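/* Enable the HW safety features when the capability register reports
 * support for them (dma_cap.asp), otherwise just log their absence.
 */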
25448bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
25458bf993a5SJose Abreu {
2546c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
25478bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
2548c10d4c82SJose Abreu 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
25498bf993a5SJose Abreu 	} else {
25508bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
25518bf993a5SJose Abreu 	}
25528bf993a5SJose Abreu }
25538bf993a5SJose Abreu 
2554d0a9c9f9SJoao Pinto /**
2555732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_setup - setup mac in a usable state.
2556523f11b5SSrinivas Kandagatla  *  @dev : pointer to the device structure.
 *  @init_ptp: initialize the PTP reference clock and timestamping if true
2557523f11b5SSrinivas Kandagatla  *  Description:
2558732fdf0eSGiuseppe CAVALLARO  *  this is the main function to setup the HW in a usable state: the
2559732fdf0eSGiuseppe CAVALLARO  *  dma engine is reset, the core registers are configured (e.g. AXI,
2560732fdf0eSGiuseppe CAVALLARO  *  Checksum features, timers) and the DMA is ready to start receiving and
2561732fdf0eSGiuseppe CAVALLARO  *  transmitting.
2562523f11b5SSrinivas Kandagatla  *  Return value:
2563523f11b5SSrinivas Kandagatla  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2564523f11b5SSrinivas Kandagatla  *  file on failure.
2565523f11b5SSrinivas Kandagatla  */
2566fe131929SHuacai Chen static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2567523f11b5SSrinivas Kandagatla {
2568523f11b5SSrinivas Kandagatla 	struct stmmac_priv *priv = netdev_priv(dev);
25693c55d4d0SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2570146617b8SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2571146617b8SJoao Pinto 	u32 chan;
2572523f11b5SSrinivas Kandagatla 	int ret;
2573523f11b5SSrinivas Kandagatla 
2574523f11b5SSrinivas Kandagatla 	/* DMA initialization and SW reset */
2575523f11b5SSrinivas Kandagatla 	ret = stmmac_init_dma_engine(priv);
2576523f11b5SSrinivas Kandagatla 	if (ret < 0) {
257738ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
257838ddc59dSLABBE Corentin 			   __func__);
2579523f11b5SSrinivas Kandagatla 		return ret;
2580523f11b5SSrinivas Kandagatla 	}
2581523f11b5SSrinivas Kandagatla 
2582523f11b5SSrinivas Kandagatla 	/* Copy the MAC addr into the HW  */
2583c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2584523f11b5SSrinivas Kandagatla 
258502e57b9dSGiuseppe CAVALLARO 	/* PS and related bits will be programmed according to the speed */
258602e57b9dSGiuseppe CAVALLARO 	if (priv->hw->pcs) {
258702e57b9dSGiuseppe CAVALLARO 		int speed = priv->plat->mac_port_sel_speed;
258802e57b9dSGiuseppe CAVALLARO 
258902e57b9dSGiuseppe CAVALLARO 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
259002e57b9dSGiuseppe CAVALLARO 		    (speed == SPEED_1000)) {
259102e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = speed;
259202e57b9dSGiuseppe CAVALLARO 		} else {
259302e57b9dSGiuseppe CAVALLARO 			dev_warn(priv->device, "invalid port speed\n");
259402e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = 0;
259502e57b9dSGiuseppe CAVALLARO 		}
259602e57b9dSGiuseppe CAVALLARO 	}
259702e57b9dSGiuseppe CAVALLARO 
2598523f11b5SSrinivas Kandagatla 	/* Initialize the MAC Core */
2599c10d4c82SJose Abreu 	stmmac_core_init(priv, priv->hw, dev);
2600523f11b5SSrinivas Kandagatla 
2601d0a9c9f9SJoao Pinto 	/* Initialize MTL*/
2602d0a9c9f9SJoao Pinto 	stmmac_mtl_configuration(priv);
26039eb12474Sjpinto 
26048bf993a5SJose Abreu 	/* Initialize Safety Features */
26058bf993a5SJose Abreu 	stmmac_safety_feat_configuration(priv);
26068bf993a5SJose Abreu 
2607c10d4c82SJose Abreu 	ret = stmmac_rx_ipc(priv, priv->hw);
2608978aded4SGiuseppe CAVALLARO 	if (!ret) {
260938ddc59dSLABBE Corentin 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2610978aded4SGiuseppe CAVALLARO 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2611d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
2612978aded4SGiuseppe CAVALLARO 	}
2613978aded4SGiuseppe CAVALLARO 
2614523f11b5SSrinivas Kandagatla 	/* Enable the MAC Rx/Tx */
2615c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
2616523f11b5SSrinivas Kandagatla 
2617b4f0a661SJoao Pinto 	/* Set the HW DMA mode and the COE */
2618b4f0a661SJoao Pinto 	stmmac_dma_operation_mode(priv);
2619b4f0a661SJoao Pinto 
2620523f11b5SSrinivas Kandagatla 	stmmac_mmc_setup(priv);
2621523f11b5SSrinivas Kandagatla 
2622fe131929SHuacai Chen 	if (init_ptp) {
26230ad2be79SThierry Reding 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
26240ad2be79SThierry Reding 		if (ret < 0)
26250ad2be79SThierry Reding 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
26260ad2be79SThierry Reding 
2627523f11b5SSrinivas Kandagatla 		ret = stmmac_init_ptp(priv);
2628722eef28SHeiner Kallweit 		if (ret == -EOPNOTSUPP)
2629722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2630722eef28SHeiner Kallweit 		else if (ret)
2631722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP init failed\n");
2632fe131929SHuacai Chen 	}
2633523f11b5SSrinivas Kandagatla 
2634523f11b5SSrinivas Kandagatla 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2635523f11b5SSrinivas Kandagatla 
2636a4e887faSJose Abreu 	if (priv->use_riwt) {
26374e4337ccSJose Abreu 		if (!priv->rx_riwt)
26384e4337ccSJose Abreu 			priv->rx_riwt = DEF_DMA_RIWT;
26394e4337ccSJose Abreu 
26404e4337ccSJose Abreu 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
2641523f11b5SSrinivas Kandagatla 	}
2642523f11b5SSrinivas Kandagatla 
2643c10d4c82SJose Abreu 	if (priv->hw->pcs)
2644c9ad4c10SBen Dooks (Codethink) 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
2645523f11b5SSrinivas Kandagatla 
26464854ab99SJoao Pinto 	/* set TX and RX rings length */
26474854ab99SJoao Pinto 	stmmac_set_rings_length(priv);
26484854ab99SJoao Pinto 
2649f748be53SAlexandre TORGUE 	/* Enable TSO */
2650146617b8SJoao Pinto 	if (priv->tso) {
2651146617b8SJoao Pinto 		for (chan = 0; chan < tx_cnt; chan++)
2652a4e887faSJose Abreu 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2653146617b8SJoao Pinto 	}
2654f748be53SAlexandre TORGUE 
265567afd6d1SJose Abreu 	/* Enable Split Header */
265667afd6d1SJose Abreu 	if (priv->sph && priv->hw->rx_csum) {
265767afd6d1SJose Abreu 		for (chan = 0; chan < rx_cnt; chan++)
265867afd6d1SJose Abreu 			stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
265967afd6d1SJose Abreu 	}
266067afd6d1SJose Abreu 
266130d93227SJose Abreu 	/* VLAN Tag Insertion */
266230d93227SJose Abreu 	if (priv->dma_cap.vlins)
266330d93227SJose Abreu 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
266430d93227SJose Abreu 
2665579a25a8SJose Abreu 	/* TBS */
2666579a25a8SJose Abreu 	for (chan = 0; chan < tx_cnt; chan++) {
2667579a25a8SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2668579a25a8SJose Abreu 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2669579a25a8SJose Abreu 
2670579a25a8SJose Abreu 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2671579a25a8SJose Abreu 	}
2672579a25a8SJose Abreu 
26737d9e6c5aSJose Abreu 	/* Start the ball rolling... */
26747d9e6c5aSJose Abreu 	stmmac_start_all_dma(priv);
26757d9e6c5aSJose Abreu 
2676523f11b5SSrinivas Kandagatla 	return 0;
2677523f11b5SSrinivas Kandagatla }
2678523f11b5SSrinivas Kandagatla 
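/* Undo the PTP reference clock setup done in stmmac_hw_setup() */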
2679c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev)
2680c66f6c37SThierry Reding {
2681c66f6c37SThierry Reding 	struct stmmac_priv *priv = netdev_priv(dev);
2682c66f6c37SThierry Reding 
2683c66f6c37SThierry Reding 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2684c66f6c37SThierry Reding }
2685c66f6c37SThierry Reding 
2686523f11b5SSrinivas Kandagatla /**
26877ac6653aSJeff Kirsher  *  stmmac_open - open entry point of the driver
26887ac6653aSJeff Kirsher  *  @dev : pointer to the device structure.
26897ac6653aSJeff Kirsher  *  Description:
26907ac6653aSJeff Kirsher  *  This function is the open entry point of the driver.
26917ac6653aSJeff Kirsher  *  Return value:
26927ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
26937ac6653aSJeff Kirsher  *  file on failure.
26947ac6653aSJeff Kirsher  */
26957ac6653aSJeff Kirsher static int stmmac_open(struct net_device *dev)
26967ac6653aSJeff Kirsher {
26977ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
26985d626c87SJose Abreu 	int bfsize = 0;
26998fce3331SJose Abreu 	u32 chan;
27007ac6653aSJeff Kirsher 	int ret;
27017ac6653aSJeff Kirsher 
2702a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
2703f213bbe8SJose Abreu 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
2704f213bbe8SJose Abreu 	    priv->hw->xpcs == NULL) {
27057ac6653aSJeff Kirsher 		ret = stmmac_init_phy(dev);
2706e58bb43fSGiuseppe CAVALLARO 		if (ret) {
270738ddc59dSLABBE Corentin 			netdev_err(priv->dev,
270838ddc59dSLABBE Corentin 				   "%s: Cannot attach to PHY (error: %d)\n",
2709e58bb43fSGiuseppe CAVALLARO 				   __func__, ret);
271089df20d9SHans de Goede 			return ret;
27117ac6653aSJeff Kirsher 		}
2712e58bb43fSGiuseppe CAVALLARO 	}
27137ac6653aSJeff Kirsher 
2714523f11b5SSrinivas Kandagatla 	/* Extra statistics */
2715523f11b5SSrinivas Kandagatla 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2716523f11b5SSrinivas Kandagatla 	priv->xstats.threshold = tc;
2717523f11b5SSrinivas Kandagatla 
27185d626c87SJose Abreu 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
27195d626c87SJose Abreu 	if (bfsize < 0)
27205d626c87SJose Abreu 		bfsize = 0;
27215d626c87SJose Abreu 
27225d626c87SJose Abreu 	if (bfsize < BUF_SIZE_16KiB)
27235d626c87SJose Abreu 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
27245d626c87SJose Abreu 
27255d626c87SJose Abreu 	priv->dma_buf_sz = bfsize;
27265d626c87SJose Abreu 	buf_sz = bfsize;
27275d626c87SJose Abreu 
272822ad3838SGiuseppe Cavallaro 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
272956329137SBartlomiej Zolnierkiewicz 
2730579a25a8SJose Abreu 	/* Earlier check for TBS */
2731579a25a8SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
2732579a25a8SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2733579a25a8SJose Abreu 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
2734579a25a8SJose Abreu 
2735579a25a8SJose Abreu 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
2736579a25a8SJose Abreu 		if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
2737579a25a8SJose Abreu 			tx_q->tbs &= ~STMMAC_TBS_AVAIL;
2738579a25a8SJose Abreu 	}
2739579a25a8SJose Abreu 
27405bacd778SLABBE Corentin 	ret = alloc_dma_desc_resources(priv);
27415bacd778SLABBE Corentin 	if (ret < 0) {
27425bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
27435bacd778SLABBE Corentin 			   __func__);
27445bacd778SLABBE Corentin 		goto dma_desc_error;
27455bacd778SLABBE Corentin 	}
27465bacd778SLABBE Corentin 
27475bacd778SLABBE Corentin 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
27485bacd778SLABBE Corentin 	if (ret < 0) {
27495bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
27505bacd778SLABBE Corentin 			   __func__);
27515bacd778SLABBE Corentin 		goto init_error;
27525bacd778SLABBE Corentin 	}
27535bacd778SLABBE Corentin 
2754fe131929SHuacai Chen 	ret = stmmac_hw_setup(dev, true);
275556329137SBartlomiej Zolnierkiewicz 	if (ret < 0) {
275638ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2757c9324d18SGiuseppe CAVALLARO 		goto init_error;
27587ac6653aSJeff Kirsher 	}
27597ac6653aSJeff Kirsher 
2760d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
2761777da230SGiuseppe CAVALLARO 
276274371272SJose Abreu 	phylink_start(priv->phylink);
27637ac6653aSJeff Kirsher 
27647ac6653aSJeff Kirsher 	/* Request the IRQ lines */
27657ac6653aSJeff Kirsher 	ret = request_irq(dev->irq, stmmac_interrupt,
27667ac6653aSJeff Kirsher 			  IRQF_SHARED, dev->name, dev);
27677ac6653aSJeff Kirsher 	if (unlikely(ret < 0)) {
276838ddc59dSLABBE Corentin 		netdev_err(priv->dev,
276938ddc59dSLABBE Corentin 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
27707ac6653aSJeff Kirsher 			   __func__, dev->irq, ret);
27716c1e5abeSThierry Reding 		goto irq_error;
27727ac6653aSJeff Kirsher 	}
27737ac6653aSJeff Kirsher 
27747a13f8f5SFrancesco Virlinzi 	/* Request the Wake IRQ in case of another line is used for WoL */
27757a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq) {
27767a13f8f5SFrancesco Virlinzi 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
27777a13f8f5SFrancesco Virlinzi 				  IRQF_SHARED, dev->name, dev);
27787a13f8f5SFrancesco Virlinzi 		if (unlikely(ret < 0)) {
277938ddc59dSLABBE Corentin 			netdev_err(priv->dev,
278038ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2781ceb69499SGiuseppe CAVALLARO 				   __func__, priv->wol_irq, ret);
2782c9324d18SGiuseppe CAVALLARO 			goto wolirq_error;
27837a13f8f5SFrancesco Virlinzi 		}
27847a13f8f5SFrancesco Virlinzi 	}
27857a13f8f5SFrancesco Virlinzi 
2786d765955dSGiuseppe CAVALLARO 	/* Request the IRQ lines */
2787d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0) {
2788d765955dSGiuseppe CAVALLARO 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2789d765955dSGiuseppe CAVALLARO 				  dev->name, dev);
2790d765955dSGiuseppe CAVALLARO 		if (unlikely(ret < 0)) {
279138ddc59dSLABBE Corentin 			netdev_err(priv->dev,
279238ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2793d765955dSGiuseppe CAVALLARO 				   __func__, priv->lpi_irq, ret);
2794c9324d18SGiuseppe CAVALLARO 			goto lpiirq_error;
2795d765955dSGiuseppe CAVALLARO 		}
2796d765955dSGiuseppe CAVALLARO 	}
2797d765955dSGiuseppe CAVALLARO 
2798c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
2799c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
28007ac6653aSJeff Kirsher 
28017ac6653aSJeff Kirsher 	return 0;
28027ac6653aSJeff Kirsher 
2803c9324d18SGiuseppe CAVALLARO lpiirq_error:
2804d765955dSGiuseppe CAVALLARO 	if (priv->wol_irq != dev->irq)
2805d765955dSGiuseppe CAVALLARO 		free_irq(priv->wol_irq, dev);
2806c9324d18SGiuseppe CAVALLARO wolirq_error:
28077a13f8f5SFrancesco Virlinzi 	free_irq(dev->irq, dev);
28086c1e5abeSThierry Reding irq_error:
280974371272SJose Abreu 	phylink_stop(priv->phylink);
28107a13f8f5SFrancesco Virlinzi 
28118fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
28128fce3331SJose Abreu 		del_timer_sync(&priv->tx_queue[chan].txtimer);
28138fce3331SJose Abreu 
2814c66f6c37SThierry Reding 	stmmac_hw_teardown(dev);
2815c9324d18SGiuseppe CAVALLARO init_error:
2816c9324d18SGiuseppe CAVALLARO 	free_dma_desc_resources(priv);
28175bacd778SLABBE Corentin dma_desc_error:
281874371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
28197ac6653aSJeff Kirsher 	return ret;
28207ac6653aSJeff Kirsher }
28217ac6653aSJeff Kirsher 
28227ac6653aSJeff Kirsher /**
28237ac6653aSJeff Kirsher  *  stmmac_release - close entry point of the driver
28247ac6653aSJeff Kirsher  *  @dev : device pointer.
28257ac6653aSJeff Kirsher  *  Description:
28267ac6653aSJeff Kirsher  *  This is the stop entry point of the driver.
28277ac6653aSJeff Kirsher  */
28287ac6653aSJeff Kirsher static int stmmac_release(struct net_device *dev)
28297ac6653aSJeff Kirsher {
28307ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
28318fce3331SJose Abreu 	u32 chan;
28327ac6653aSJeff Kirsher 
2833d765955dSGiuseppe CAVALLARO 	if (priv->eee_enabled)
2834d765955dSGiuseppe CAVALLARO 		del_timer_sync(&priv->eee_ctrl_timer);
2835d765955dSGiuseppe CAVALLARO 
28367ac6653aSJeff Kirsher 	/* Stop and disconnect the PHY */
283774371272SJose Abreu 	phylink_stop(priv->phylink);
283874371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
28397ac6653aSJeff Kirsher 
2840c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
28417ac6653aSJeff Kirsher 
2842c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
28437ac6653aSJeff Kirsher 
28448fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
28458fce3331SJose Abreu 		del_timer_sync(&priv->tx_queue[chan].txtimer);
28469125cdd1SGiuseppe CAVALLARO 
28477ac6653aSJeff Kirsher 	/* Free the IRQ lines */
28487ac6653aSJeff Kirsher 	free_irq(dev->irq, dev);
28497a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq)
28507a13f8f5SFrancesco Virlinzi 		free_irq(priv->wol_irq, dev);
2851d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0)
2852d765955dSGiuseppe CAVALLARO 		free_irq(priv->lpi_irq, dev);
28537ac6653aSJeff Kirsher 
28547ac6653aSJeff Kirsher 	/* Stop TX/RX DMA and clear the descriptors */
2855ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
28567ac6653aSJeff Kirsher 
28577ac6653aSJeff Kirsher 	/* Release and free the Rx/Tx resources */
28587ac6653aSJeff Kirsher 	free_dma_desc_resources(priv);
28597ac6653aSJeff Kirsher 
28607ac6653aSJeff Kirsher 	/* Disable the MAC Rx/Tx */
2861c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
28627ac6653aSJeff Kirsher 
28637ac6653aSJeff Kirsher 	netif_carrier_off(dev);
28647ac6653aSJeff Kirsher 
286592ba6888SRayagond Kokatanur 	stmmac_release_ptp(priv);
286692ba6888SRayagond Kokatanur 
28677ac6653aSJeff Kirsher 	return 0;
28687ac6653aSJeff Kirsher }
28697ac6653aSJeff Kirsher 
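/* If the HW supports VLAN insertion and the skb carries a VLAN tag, consume
 * one TX descriptor to program the tag (using the inner tag fields for
 * 802.1AD tagged frames). Returns true when a descriptor was used this way.
 */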
287030d93227SJose Abreu static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
287130d93227SJose Abreu 			       struct stmmac_tx_queue *tx_q)
287230d93227SJose Abreu {
287330d93227SJose Abreu 	u16 tag = 0x0, inner_tag = 0x0;
287430d93227SJose Abreu 	u32 inner_type = 0x0;
287530d93227SJose Abreu 	struct dma_desc *p;
287630d93227SJose Abreu 
287730d93227SJose Abreu 	if (!priv->dma_cap.vlins)
287830d93227SJose Abreu 		return false;
287930d93227SJose Abreu 	if (!skb_vlan_tag_present(skb))
288030d93227SJose Abreu 		return false;
288130d93227SJose Abreu 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
288230d93227SJose Abreu 		inner_tag = skb_vlan_tag_get(skb);
288330d93227SJose Abreu 		inner_type = STMMAC_VLAN_INSERT;
288430d93227SJose Abreu 	}
288530d93227SJose Abreu 
288630d93227SJose Abreu 	tag = skb_vlan_tag_get(skb);
288730d93227SJose Abreu 
2888579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
2889579a25a8SJose Abreu 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
2890579a25a8SJose Abreu 	else
2891579a25a8SJose Abreu 		p = &tx_q->dma_tx[tx_q->cur_tx];
2892579a25a8SJose Abreu 
289330d93227SJose Abreu 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
289430d93227SJose Abreu 		return false;
289530d93227SJose Abreu 
289630d93227SJose Abreu 	stmmac_set_tx_owner(priv, p);
289730d93227SJose Abreu 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
289830d93227SJose Abreu 	return true;
289930d93227SJose Abreu }
290030d93227SJose Abreu 
29017ac6653aSJeff Kirsher /**
2902f748be53SAlexandre TORGUE  *  stmmac_tso_allocator - fill descriptors for the TSO payload
2903f748be53SAlexandre TORGUE  *  @priv: driver private structure
2904f748be53SAlexandre TORGUE  *  @des: buffer start address
2905f748be53SAlexandre TORGUE  *  @total_len: total length to fill in descriptors
2906f748be53SAlexandre TORGUE  *  @last_segment: condition for the last descriptor
2907ce736788SJoao Pinto  *  @queue: TX queue index
2908f748be53SAlexandre TORGUE  *  Description:
2909f748be53SAlexandre TORGUE  *  This function fills the descriptors and requests new descriptors
2910f748be53SAlexandre TORGUE  *  according to the buffer length to fill
2911f748be53SAlexandre TORGUE  */
2912a993db88SJose Abreu static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
2913ce736788SJoao Pinto 				 int total_len, bool last_segment, u32 queue)
2914f748be53SAlexandre TORGUE {
2915ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2916f748be53SAlexandre TORGUE 	struct dma_desc *desc;
29175bacd778SLABBE Corentin 	u32 buff_size;
2918ce736788SJoao Pinto 	int tmp_len;
2919f748be53SAlexandre TORGUE 
2920f748be53SAlexandre TORGUE 	tmp_len = total_len;
2921f748be53SAlexandre TORGUE 
2922f748be53SAlexandre TORGUE 	while (tmp_len > 0) {
2923a993db88SJose Abreu 		dma_addr_t curr_addr;
2924a993db88SJose Abreu 
2925ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2926b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2927579a25a8SJose Abreu 
2928579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
2929579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
2930579a25a8SJose Abreu 		else
2931579a25a8SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
2932f748be53SAlexandre TORGUE 
2933a993db88SJose Abreu 		curr_addr = des + (total_len - tmp_len);
2934a993db88SJose Abreu 		if (priv->dma_cap.addr64 <= 32)
2935a993db88SJose Abreu 			desc->des0 = cpu_to_le32(curr_addr);
2936a993db88SJose Abreu 		else
2937a993db88SJose Abreu 			stmmac_set_desc_addr(priv, desc, curr_addr);
2938a993db88SJose Abreu 
2939f748be53SAlexandre TORGUE 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2940f748be53SAlexandre TORGUE 			    TSO_MAX_BUFF_SIZE : tmp_len;
2941f748be53SAlexandre TORGUE 
294242de047dSJose Abreu 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2943f748be53SAlexandre TORGUE 				0, 1,
2944426849e6SNiklas Cassel 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2945f748be53SAlexandre TORGUE 				0, 0);
2946f748be53SAlexandre TORGUE 
2947f748be53SAlexandre TORGUE 		tmp_len -= TSO_MAX_BUFF_SIZE;
2948f748be53SAlexandre TORGUE 	}
2949f748be53SAlexandre TORGUE }
2950f748be53SAlexandre TORGUE 
2951f748be53SAlexandre TORGUE /**
2952f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2953f748be53SAlexandre TORGUE  *  @skb : the socket buffer
2954f748be53SAlexandre TORGUE  *  @dev : device pointer
2955f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
2956f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
2957f748be53SAlexandre TORGUE  *  The diagram below shows the ring programming in case of TSO frames:
2958f748be53SAlexandre TORGUE  *
2959f748be53SAlexandre TORGUE  *  First Descriptor
2960f748be53SAlexandre TORGUE  *   --------
2961f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
2962f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
2963f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
2964f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2965f748be53SAlexandre TORGUE  *   --------
2966f748be53SAlexandre TORGUE  *	|
2967f748be53SAlexandre TORGUE  *     ...
2968f748be53SAlexandre TORGUE  *	|
2969f748be53SAlexandre TORGUE  *   --------
2970f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2971f748be53SAlexandre TORGUE  *   | DES1 | --|
2972f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
2973f748be53SAlexandre TORGUE  *   | DES3 |
2974f748be53SAlexandre TORGUE  *   --------
2975f748be53SAlexandre TORGUE  *
2976f748be53SAlexandre TORGUE  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only programmed when the MSS changes.
2977f748be53SAlexandre TORGUE  */
2978f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2979f748be53SAlexandre TORGUE {
2980ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
2981f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
2982579a25a8SJose Abreu 	int desc_size, tmp_pay_len = 0, first_tx;
2983f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
2984ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
2985c2837423SJose Abreu 	unsigned int first_entry, tx_packets;
2986ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
2987c2837423SJose Abreu 	bool has_vlan, set_ic;
2988579a25a8SJose Abreu 	u8 proto_hdr_len, hdr;
2989ce736788SJoao Pinto 	u32 pay_len, mss;
2990a993db88SJose Abreu 	dma_addr_t des;
2991f748be53SAlexandre TORGUE 	int i;
2992f748be53SAlexandre TORGUE 
2993ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
2994c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
2995ce736788SJoao Pinto 
2996f748be53SAlexandre TORGUE 	/* Compute header lengths */
2997b7766206SJose Abreu 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2998b7766206SJose Abreu 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
2999b7766206SJose Abreu 		hdr = sizeof(struct udphdr);
3000b7766206SJose Abreu 	} else {
3001f748be53SAlexandre TORGUE 		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3002b7766206SJose Abreu 		hdr = tcp_hdrlen(skb);
3003b7766206SJose Abreu 	}
3004f748be53SAlexandre TORGUE 
3005f748be53SAlexandre TORGUE 	/* Desc availability based on threshold should be enough safe */
3006ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
3007f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3008c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3009c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3010c22a3f48SJoao Pinto 								queue));
3011f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
301238ddc59dSLABBE Corentin 			netdev_err(priv->dev,
301338ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
301438ddc59dSLABBE Corentin 				   __func__);
3015f748be53SAlexandre TORGUE 		}
3016f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
3017f748be53SAlexandre TORGUE 	}
3018f748be53SAlexandre TORGUE 
3019f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3020f748be53SAlexandre TORGUE 
3021f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
3022f748be53SAlexandre TORGUE 
3023f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
30248d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
3025579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3026579a25a8SJose Abreu 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3027579a25a8SJose Abreu 		else
3028579a25a8SJose Abreu 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3029579a25a8SJose Abreu 
303042de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
30318d212a9eSNiklas Cassel 		tx_q->mss = mss;
3032ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3033b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3034f748be53SAlexandre TORGUE 	}
3035f748be53SAlexandre TORGUE 
3036f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
3037b7766206SJose Abreu 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3038b7766206SJose Abreu 			__func__, hdr, proto_hdr_len, pay_len, mss);
3039f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3040f748be53SAlexandre TORGUE 			skb->data_len);
3041f748be53SAlexandre TORGUE 	}
3042f748be53SAlexandre TORGUE 
304330d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
304430d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
304530d93227SJose Abreu 
3046ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
3047b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3048f748be53SAlexandre TORGUE 
3049579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3050579a25a8SJose Abreu 		desc = &tx_q->dma_entx[first_entry].basic;
3051579a25a8SJose Abreu 	else
3052579a25a8SJose Abreu 		desc = &tx_q->dma_tx[first_entry];
3053f748be53SAlexandre TORGUE 	first = desc;
3054f748be53SAlexandre TORGUE 
305530d93227SJose Abreu 	if (has_vlan)
305630d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
305730d93227SJose Abreu 
3058f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
3059f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
3060f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
3061f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
3062f748be53SAlexandre TORGUE 		goto dma_map_err;
3063f748be53SAlexandre TORGUE 
3064ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
3065ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
3066f748be53SAlexandre TORGUE 
3067a993db88SJose Abreu 	if (priv->dma_cap.addr64 <= 32) {
3068f8be0d78SMichael Weiser 		first->des0 = cpu_to_le32(des);
3069f748be53SAlexandre TORGUE 
3070f748be53SAlexandre TORGUE 		/* Fill start of payload in buff2 of first descriptor */
3071f748be53SAlexandre TORGUE 		if (pay_len)
3072f8be0d78SMichael Weiser 			first->des1 = cpu_to_le32(des + proto_hdr_len);
3073f748be53SAlexandre TORGUE 
3074f748be53SAlexandre TORGUE 		/* If needed take extra descriptors to fill the remaining payload */
3075f748be53SAlexandre TORGUE 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3076a993db88SJose Abreu 	} else {
3077a993db88SJose Abreu 		stmmac_set_desc_addr(priv, first, des);
3078a993db88SJose Abreu 		tmp_pay_len = pay_len;
307934c15202Syuqi jin 		des += proto_hdr_len;
3080b2f07199SJose Abreu 		pay_len = 0;
3081a993db88SJose Abreu 	}
3082f748be53SAlexandre TORGUE 
3083ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
3084f748be53SAlexandre TORGUE 
3085f748be53SAlexandre TORGUE 	/* Prepare fragments */
3086f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
3087f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3088f748be53SAlexandre TORGUE 
3089f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
3090f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
3091f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
3092937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
3093937071c1SThierry Reding 			goto dma_map_err;
3094f748be53SAlexandre TORGUE 
3095f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3096ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
3097f748be53SAlexandre TORGUE 
3098ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3099ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3100ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3101f748be53SAlexandre TORGUE 	}
3102f748be53SAlexandre TORGUE 
3103ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3104f748be53SAlexandre TORGUE 
310505cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
310605cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
310705cf0d1bSNiklas Cassel 
31087df4a3a7SJose Abreu 	/* Manage tx mitigation */
3109c2837423SJose Abreu 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
3110c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
3111c2837423SJose Abreu 
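	/* Decide whether to request an Interrupt-on-Completion for this frame:
	 * always for HW timestamped packets, otherwise based on the TX
	 * coalescing frame counter.
	 */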
3112c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3113c2837423SJose Abreu 		set_ic = true;
3114c2837423SJose Abreu 	else if (!priv->tx_coal_frames)
3115c2837423SJose Abreu 		set_ic = false;
3116c2837423SJose Abreu 	else if (tx_packets > priv->tx_coal_frames)
3117c2837423SJose Abreu 		set_ic = true;
3118c2837423SJose Abreu 	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3119c2837423SJose Abreu 		set_ic = true;
3120c2837423SJose Abreu 	else
3121c2837423SJose Abreu 		set_ic = false;
3122c2837423SJose Abreu 
3123c2837423SJose Abreu 	if (set_ic) {
3124579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3125579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3126579a25a8SJose Abreu 		else
31277df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
3128579a25a8SJose Abreu 
31297df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
31307df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
31317df4a3a7SJose Abreu 		priv->xstats.tx_set_ic_bit++;
31327df4a3a7SJose Abreu 	}
31337df4a3a7SJose Abreu 
313405cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
313505cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
313605cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
313705cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
313805cf0d1bSNiklas Cassel 	 */
3139ce736788SJoao Pinto 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3140f748be53SAlexandre TORGUE 
3141ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3142b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
314338ddc59dSLABBE Corentin 			  __func__);
3144c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3145f748be53SAlexandre TORGUE 	}
3146f748be53SAlexandre TORGUE 
3147f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
3148f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
3149f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
3150f748be53SAlexandre TORGUE 
31518000ddc0SJose Abreu 	if (priv->sarc_type)
31528000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
31538000ddc0SJose Abreu 
3154f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
3155f748be53SAlexandre TORGUE 
3156f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3157f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
3158f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
3159f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
316042de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
3161f748be53SAlexandre TORGUE 	}
3162f748be53SAlexandre TORGUE 
3163f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
316442de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
3165f748be53SAlexandre TORGUE 			proto_hdr_len,
3166f748be53SAlexandre TORGUE 			pay_len,
3167ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3168b7766206SJose Abreu 			hdr / 4, (skb->len - proto_hdr_len));
3169f748be53SAlexandre TORGUE 
3170f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
317115d2ee42SNiklas Cassel 	if (mss_desc) {
317215d2ee42SNiklas Cassel 		/* Make sure that the first descriptor has been completely
317315d2ee42SNiklas Cassel 		 * written, including its OWN bit. This is because the MSS
317415d2ee42SNiklas Cassel 		 * context descriptor precedes the first descriptor, so the
317515d2ee42SNiklas Cassel 		 * MSS descriptor's OWN bit must be the last thing written.
317615d2ee42SNiklas Cassel 		 */
317715d2ee42SNiklas Cassel 		dma_wmb();
317842de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
317915d2ee42SNiklas Cassel 	}
3180f748be53SAlexandre TORGUE 
3181f748be53SAlexandre TORGUE 	/* The OWN bit must be the last thing set when preparing the
3182f748be53SAlexandre TORGUE 	 * descriptor; a barrier is then needed to make sure that
3183f748be53SAlexandre TORGUE 	 * everything is coherent before granting the DMA engine.
3184f748be53SAlexandre TORGUE 	 */
318595eb930aSNiklas Cassel 	wmb();
3186f748be53SAlexandre TORGUE 
3187f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
3188f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3189ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3190ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
3191f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
3192f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
3193f748be53SAlexandre TORGUE 	}
3194f748be53SAlexandre TORGUE 
3195c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3196f748be53SAlexandre TORGUE 
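	/* Compute the new tail pointer using the descriptor size actually
	 * laid out in this ring (enhanced descriptors when TBS is available).
	 */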
3197579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3198579a25a8SJose Abreu 		desc_size = sizeof(struct dma_edesc);
3199579a25a8SJose Abreu 	else
3200579a25a8SJose Abreu 		desc_size = sizeof(struct dma_desc);
3201579a25a8SJose Abreu 
3202579a25a8SJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3203a4e887faSJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
32044772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
3205f748be53SAlexandre TORGUE 
3206f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3207f748be53SAlexandre TORGUE 
3208f748be53SAlexandre TORGUE dma_map_err:
3209f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
3210f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
3211f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
3212f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3213f748be53SAlexandre TORGUE }
3214f748be53SAlexandre TORGUE 
3215f748be53SAlexandre TORGUE /**
3216732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
32177ac6653aSJeff Kirsher  *  @skb : the socket buffer
32187ac6653aSJeff Kirsher  *  @dev : device pointer
321932ceabcaSGiuseppe CAVALLARO  *  Description : this is the tx entry point of the driver.
322032ceabcaSGiuseppe CAVALLARO  *  It programs the chain or the ring and supports oversized frames
322132ceabcaSGiuseppe CAVALLARO  *  and the SG feature.
32227ac6653aSJeff Kirsher  */
32237ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
32247ac6653aSJeff Kirsher {
3225c2837423SJose Abreu 	unsigned int first_entry, tx_packets, enh_desc;
32267ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
32270e80bdc9SGiuseppe Cavallaro 	unsigned int nopaged_len = skb_headlen(skb);
32284a7d666aSGiuseppe CAVALLARO 	int i, csum_insertion = 0, is_jumbo = 0;
3229ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
32307ac6653aSJeff Kirsher 	int nfrags = skb_shinfo(skb)->nr_frags;
3231b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
3232579a25a8SJose Abreu 	struct dma_edesc *tbs_desc = NULL;
3233579a25a8SJose Abreu 	int entry, desc_size, first_tx;
32347ac6653aSJeff Kirsher 	struct dma_desc *desc, *first;
3235ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
3236c2837423SJose Abreu 	bool has_vlan, set_ic;
3237a993db88SJose Abreu 	dma_addr_t des;
3238f748be53SAlexandre TORGUE 
3239ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
3240c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
3241ce736788SJoao Pinto 
3242e2cd682dSJose Abreu 	if (priv->tx_path_in_lpi_mode)
3243e2cd682dSJose Abreu 		stmmac_disable_eee_mode(priv);
3244e2cd682dSJose Abreu 
3245f748be53SAlexandre TORGUE 	/* Manage oversized TCP frames for GMAC4 device */
3246f748be53SAlexandre TORGUE 	if (skb_is_gso(skb) && priv->tso) {
3247b7766206SJose Abreu 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3248b7766206SJose Abreu 			return stmmac_tso_xmit(skb, dev);
3249b7766206SJose Abreu 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
3250f748be53SAlexandre TORGUE 			return stmmac_tso_xmit(skb, dev);
3251f748be53SAlexandre TORGUE 	}
32527ac6653aSJeff Kirsher 
3253ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3254c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3255c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3256c22a3f48SJoao Pinto 								queue));
32577ac6653aSJeff Kirsher 			/* This is a hard error, log it. */
325838ddc59dSLABBE Corentin 			netdev_err(priv->dev,
325938ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
326038ddc59dSLABBE Corentin 				   __func__);
32617ac6653aSJeff Kirsher 		}
32627ac6653aSJeff Kirsher 		return NETDEV_TX_BUSY;
32637ac6653aSJeff Kirsher 	}
32647ac6653aSJeff Kirsher 
326530d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
326630d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
326730d93227SJose Abreu 
3268ce736788SJoao Pinto 	entry = tx_q->cur_tx;
32690e80bdc9SGiuseppe Cavallaro 	first_entry = entry;
3270b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
32717ac6653aSJeff Kirsher 
32727ac6653aSJeff Kirsher 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
32737ac6653aSJeff Kirsher 
32740e80bdc9SGiuseppe Cavallaro 	if (likely(priv->extend_desc))
3275ce736788SJoao Pinto 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3276579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3277579a25a8SJose Abreu 		desc = &tx_q->dma_entx[entry].basic;
3278c24602efSGiuseppe CAVALLARO 	else
3279ce736788SJoao Pinto 		desc = tx_q->dma_tx + entry;
3280c24602efSGiuseppe CAVALLARO 
32817ac6653aSJeff Kirsher 	first = desc;
32827ac6653aSJeff Kirsher 
328330d93227SJose Abreu 	if (has_vlan)
328430d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
328530d93227SJose Abreu 
32860e80bdc9SGiuseppe Cavallaro 	enh_desc = priv->plat->enh_desc;
32874a7d666aSGiuseppe CAVALLARO 	/* To program the descriptors according to the size of the frame */
328829896a67SGiuseppe CAVALLARO 	if (enh_desc)
32892c520b1cSJose Abreu 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
329029896a67SGiuseppe CAVALLARO 
329163a550fcSJose Abreu 	if (unlikely(is_jumbo)) {
32922c520b1cSJose Abreu 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
329363a550fcSJose Abreu 		if (unlikely(entry < 0) && (entry != -EINVAL))
3294362b37beSGiuseppe CAVALLARO 			goto dma_map_err;
329529896a67SGiuseppe CAVALLARO 	}
32967ac6653aSJeff Kirsher 
32977ac6653aSJeff Kirsher 	for (i = 0; i < nfrags; i++) {
32989e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
32999e903e08SEric Dumazet 		int len = skb_frag_size(frag);
3300be434d50SGiuseppe Cavallaro 		bool last_segment = (i == (nfrags - 1));
33017ac6653aSJeff Kirsher 
3302e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3303b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[entry]);
3304e3ad57c9SGiuseppe Cavallaro 
33050e80bdc9SGiuseppe Cavallaro 		if (likely(priv->extend_desc))
3306ce736788SJoao Pinto 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3307579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3308579a25a8SJose Abreu 			desc = &tx_q->dma_entx[entry].basic;
3309c24602efSGiuseppe CAVALLARO 		else
3310ce736788SJoao Pinto 			desc = tx_q->dma_tx + entry;
33117ac6653aSJeff Kirsher 
3312f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3313f722380dSIan Campbell 				       DMA_TO_DEVICE);
3314f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
3315362b37beSGiuseppe CAVALLARO 			goto dma_map_err; /* should reuse desc w/o issues */
3316362b37beSGiuseppe CAVALLARO 
3317ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].buf = des;
33186844171dSJose Abreu 
33196844171dSJose Abreu 		stmmac_set_desc_addr(priv, desc, des);
3320f748be53SAlexandre TORGUE 
3321ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3322ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].len = len;
3323ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
33240e80bdc9SGiuseppe Cavallaro 
33250e80bdc9SGiuseppe Cavallaro 		/* Prepare the descriptor and set the own bit too */
332642de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
332742de047dSJose Abreu 				priv->mode, 1, last_segment, skb->len);
33287ac6653aSJeff Kirsher 	}
33297ac6653aSJeff Kirsher 
333005cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
333105cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[entry] = skb;
3332e3ad57c9SGiuseppe Cavallaro 
33337df4a3a7SJose Abreu 	/* According to the coalesce parameter the IC bit for the latest
33347df4a3a7SJose Abreu 	 * segment is reset and the timer re-started to clean the tx status.
33357df4a3a7SJose Abreu 	 * This approach takes care of the fragments: desc is the first
33367df4a3a7SJose Abreu 	 * element in case of no SG.
33377df4a3a7SJose Abreu 	 */
3338c2837423SJose Abreu 	tx_packets = (entry + 1) - first_tx;
3339c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
3340c2837423SJose Abreu 
3341c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3342c2837423SJose Abreu 		set_ic = true;
3343c2837423SJose Abreu 	else if (!priv->tx_coal_frames)
3344c2837423SJose Abreu 		set_ic = false;
3345c2837423SJose Abreu 	else if (tx_packets > priv->tx_coal_frames)
3346c2837423SJose Abreu 		set_ic = true;
3347c2837423SJose Abreu 	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3348c2837423SJose Abreu 		set_ic = true;
3349c2837423SJose Abreu 	else
3350c2837423SJose Abreu 		set_ic = false;
3351c2837423SJose Abreu 
3352c2837423SJose Abreu 	if (set_ic) {
33537df4a3a7SJose Abreu 		if (likely(priv->extend_desc))
33547df4a3a7SJose Abreu 			desc = &tx_q->dma_etx[entry].basic;
3355579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3356579a25a8SJose Abreu 			desc = &tx_q->dma_entx[entry].basic;
33577df4a3a7SJose Abreu 		else
33587df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[entry];
33597df4a3a7SJose Abreu 
33607df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
33617df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
33627df4a3a7SJose Abreu 		priv->xstats.tx_set_ic_bit++;
33637df4a3a7SJose Abreu 	}
33647df4a3a7SJose Abreu 
336505cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
336605cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
336705cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
336805cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
336905cf0d1bSNiklas Cassel 	 */
337005cf0d1bSNiklas Cassel 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3371ce736788SJoao Pinto 	tx_q->cur_tx = entry;
33727ac6653aSJeff Kirsher 
33737ac6653aSJeff Kirsher 	if (netif_msg_pktdata(priv)) {
337438ddc59dSLABBE Corentin 		netdev_dbg(priv->dev,
337538ddc59dSLABBE Corentin 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3376ce736788SJoao Pinto 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
33770e80bdc9SGiuseppe Cavallaro 			   entry, first, nfrags);
337883d7af64SGiuseppe CAVALLARO 
337938ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
33807ac6653aSJeff Kirsher 		print_pkt(skb->data, skb->len);
33817ac6653aSJeff Kirsher 	}
33820e80bdc9SGiuseppe Cavallaro 
3383ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3384b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
3385b3e51069SLABBE Corentin 			  __func__);
3386c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
33877ac6653aSJeff Kirsher 	}
33887ac6653aSJeff Kirsher 
33897ac6653aSJeff Kirsher 	dev->stats.tx_bytes += skb->len;
33907ac6653aSJeff Kirsher 
33918000ddc0SJose Abreu 	if (priv->sarc_type)
33928000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
33938000ddc0SJose Abreu 
33940e80bdc9SGiuseppe Cavallaro 	skb_tx_timestamp(skb);
33950e80bdc9SGiuseppe Cavallaro 
33960e80bdc9SGiuseppe Cavallaro 	/* Ready to fill the first descriptor and set the OWN bit w/o any
33970e80bdc9SGiuseppe Cavallaro 	 * problems because all the descriptors are actually ready to be
33980e80bdc9SGiuseppe Cavallaro 	 * passed to the DMA engine.
33990e80bdc9SGiuseppe Cavallaro 	 */
34000e80bdc9SGiuseppe Cavallaro 	if (likely(!is_jumbo)) {
34010e80bdc9SGiuseppe Cavallaro 		bool last_segment = (nfrags == 0);
34020e80bdc9SGiuseppe Cavallaro 
3403f748be53SAlexandre TORGUE 		des = dma_map_single(priv->device, skb->data,
34040e80bdc9SGiuseppe Cavallaro 				     nopaged_len, DMA_TO_DEVICE);
3405f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
34060e80bdc9SGiuseppe Cavallaro 			goto dma_map_err;
34070e80bdc9SGiuseppe Cavallaro 
3408ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].buf = des;
34096844171dSJose Abreu 
34106844171dSJose Abreu 		stmmac_set_desc_addr(priv, first, des);
3411f748be53SAlexandre TORGUE 
3412ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3413ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
34140e80bdc9SGiuseppe Cavallaro 
3415891434b1SRayagond Kokatanur 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3416891434b1SRayagond Kokatanur 			     priv->hwts_tx_en)) {
3417891434b1SRayagond Kokatanur 			/* declare that device is doing timestamping */
3418891434b1SRayagond Kokatanur 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
341942de047dSJose Abreu 			stmmac_enable_tx_timestamp(priv, first);
3420891434b1SRayagond Kokatanur 		}
3421891434b1SRayagond Kokatanur 
34220e80bdc9SGiuseppe Cavallaro 		/* Prepare the first descriptor setting the OWN bit too */
342342de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3424579a25a8SJose Abreu 				csum_insertion, priv->mode, 0, last_segment,
342542de047dSJose Abreu 				skb->len);
342680acbed9SAaro Koskinen 	}
34270e80bdc9SGiuseppe Cavallaro 
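	/* When TBS is enabled for this queue, program the requested launch
	 * time (taken from skb->tstamp) into the enhanced descriptor.
	 */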
3428579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_EN) {
3429579a25a8SJose Abreu 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
3430579a25a8SJose Abreu 
3431579a25a8SJose Abreu 		tbs_desc = &tx_q->dma_entx[first_entry];
3432579a25a8SJose Abreu 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
3433579a25a8SJose Abreu 	}
3434579a25a8SJose Abreu 
3435579a25a8SJose Abreu 	stmmac_set_tx_owner(priv, first);
3436579a25a8SJose Abreu 
34370e80bdc9SGiuseppe Cavallaro 	/* The OWN bit must be the last thing set when preparing the
34380e80bdc9SGiuseppe Cavallaro 	 * descriptor; a barrier is then needed to make sure that
34390e80bdc9SGiuseppe Cavallaro 	 * everything is coherent before granting the DMA engine.
34400e80bdc9SGiuseppe Cavallaro 	 */
344195eb930aSNiklas Cassel 	wmb();
34427ac6653aSJeff Kirsher 
3443c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3444f748be53SAlexandre TORGUE 
3445a4e887faSJose Abreu 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
34468fce3331SJose Abreu 
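	/* The ring may hold extended, enhanced (TBS) or normal descriptors;
	 * use the matching descriptor size to compute the new tail pointer.
	 */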
3447579a25a8SJose Abreu 	if (likely(priv->extend_desc))
3448579a25a8SJose Abreu 		desc_size = sizeof(struct dma_extended_desc);
3449579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3450579a25a8SJose Abreu 		desc_size = sizeof(struct dma_edesc);
3451579a25a8SJose Abreu 	else
3452579a25a8SJose Abreu 		desc_size = sizeof(struct dma_desc);
3453579a25a8SJose Abreu 
3454579a25a8SJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3455f1565c60SJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
34564772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
34577ac6653aSJeff Kirsher 
3458362b37beSGiuseppe CAVALLARO 	return NETDEV_TX_OK;
3459a9097a96SGiuseppe CAVALLARO 
3460362b37beSGiuseppe CAVALLARO dma_map_err:
346138ddc59dSLABBE Corentin 	netdev_err(priv->dev, "Tx DMA map failed\n");
3462362b37beSGiuseppe CAVALLARO 	dev_kfree_skb(skb);
3463362b37beSGiuseppe CAVALLARO 	priv->dev->stats.tx_dropped++;
34647ac6653aSJeff Kirsher 	return NETDEV_TX_OK;
34657ac6653aSJeff Kirsher }
34667ac6653aSJeff Kirsher 
3467b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3468b9381985SVince Bridgers {
3469ab188e8fSElad Nachman 	struct vlan_ethhdr *veth;
3470ab188e8fSElad Nachman 	__be16 vlan_proto;
3471b9381985SVince Bridgers 	u16 vlanid;
3472b9381985SVince Bridgers 
3473ab188e8fSElad Nachman 	veth = (struct vlan_ethhdr *)skb->data;
3474ab188e8fSElad Nachman 	vlan_proto = veth->h_vlan_proto;
3475ab188e8fSElad Nachman 
3476ab188e8fSElad Nachman 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3477ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3478ab188e8fSElad Nachman 	    (vlan_proto == htons(ETH_P_8021AD) &&
3479ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3480b9381985SVince Bridgers 		/* pop the vlan tag */
3481ab188e8fSElad Nachman 		vlanid = ntohs(veth->h_vlan_TCI);
3482ab188e8fSElad Nachman 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3483b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
3484ab188e8fSElad Nachman 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3485b9381985SVince Bridgers 	}
3486b9381985SVince Bridgers }
3487b9381985SVince Bridgers 
3488b9381985SVince Bridgers 
348954139cf3SJoao Pinto static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3490120e87f9SGiuseppe Cavallaro {
349154139cf3SJoao Pinto 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3492120e87f9SGiuseppe Cavallaro 		return 0;
3493120e87f9SGiuseppe Cavallaro 
3494120e87f9SGiuseppe Cavallaro 	return 1;
3495120e87f9SGiuseppe Cavallaro }
3496120e87f9SGiuseppe Cavallaro 
349732ceabcaSGiuseppe CAVALLARO /**
3498732fdf0eSGiuseppe CAVALLARO  * stmmac_rx_refill - refill the used RX buffers
349932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
350054139cf3SJoao Pinto  * @queue: RX queue index
350132ceabcaSGiuseppe CAVALLARO  * Description : this refills the RX ring buffers (page pool pages)
350232ceabcaSGiuseppe CAVALLARO  * consumed by the zero-copy reception process.
350332ceabcaSGiuseppe CAVALLARO  */
350454139cf3SJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
35057ac6653aSJeff Kirsher {
350654139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
35073caa61c2SJose Abreu 	int len, dirty = stmmac_rx_dirty(priv, queue);
350854139cf3SJoao Pinto 	unsigned int entry = rx_q->dirty_rx;
350954139cf3SJoao Pinto 
35103caa61c2SJose Abreu 	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
35113caa61c2SJose Abreu 
3512e3ad57c9SGiuseppe Cavallaro 	while (dirty-- > 0) {
35132af6106aSJose Abreu 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3514c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
3515d429b66eSJose Abreu 		bool use_rx_wd;
3516c24602efSGiuseppe CAVALLARO 
3517c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
351854139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3519c24602efSGiuseppe CAVALLARO 		else
352054139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3521c24602efSGiuseppe CAVALLARO 
35222af6106aSJose Abreu 		if (!buf->page) {
35232af6106aSJose Abreu 			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
35242af6106aSJose Abreu 			if (!buf->page)
35257ac6653aSJeff Kirsher 				break;
3526120e87f9SGiuseppe Cavallaro 		}
35277ac6653aSJeff Kirsher 
352867afd6d1SJose Abreu 		if (priv->sph && !buf->sec_page) {
352967afd6d1SJose Abreu 			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
353067afd6d1SJose Abreu 			if (!buf->sec_page)
353167afd6d1SJose Abreu 				break;
353267afd6d1SJose Abreu 
353367afd6d1SJose Abreu 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
353467afd6d1SJose Abreu 
353567afd6d1SJose Abreu 			dma_sync_single_for_device(priv->device, buf->sec_addr,
353667afd6d1SJose Abreu 						   len, DMA_FROM_DEVICE);
353767afd6d1SJose Abreu 		}
353867afd6d1SJose Abreu 
35392af6106aSJose Abreu 		buf->addr = page_pool_get_dma_addr(buf->page);
35403caa61c2SJose Abreu 
35413caa61c2SJose Abreu 		/* Sync whole allocation to device. This will invalidate old
35423caa61c2SJose Abreu 		 * data.
35433caa61c2SJose Abreu 		 */
35443caa61c2SJose Abreu 		dma_sync_single_for_device(priv->device, buf->addr, len,
35453caa61c2SJose Abreu 					   DMA_FROM_DEVICE);
35463caa61c2SJose Abreu 
35472af6106aSJose Abreu 		stmmac_set_desc_addr(priv, p, buf->addr);
354867afd6d1SJose Abreu 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
35492c520b1cSJose Abreu 		stmmac_refill_desc3(priv, rx_q, p);
3550286a8372SGiuseppe CAVALLARO 
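		/* Track how many descriptors have been refilled and decide,
		 * together with the RIWT setting, whether the RX watchdog
		 * should be used for this descriptor.
		 */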
3551d429b66eSJose Abreu 		rx_q->rx_count_frames++;
35526fa9d691SJose Abreu 		rx_q->rx_count_frames += priv->rx_coal_frames;
35536fa9d691SJose Abreu 		if (rx_q->rx_count_frames > priv->rx_coal_frames)
35546fa9d691SJose Abreu 			rx_q->rx_count_frames = 0;
355509146abeSJose Abreu 
355609146abeSJose Abreu 		use_rx_wd = !priv->rx_coal_frames;
355709146abeSJose Abreu 		use_rx_wd |= rx_q->rx_count_frames > 0;
355809146abeSJose Abreu 		if (!priv->use_riwt)
355909146abeSJose Abreu 			use_rx_wd = false;
3560d429b66eSJose Abreu 
3561ad688cdbSPavel Machek 		dma_wmb();
35622af6106aSJose Abreu 		stmmac_set_rx_owner(priv, p, use_rx_wd);
3563e3ad57c9SGiuseppe Cavallaro 
3564e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
35657ac6653aSJeff Kirsher 	}
356654139cf3SJoao Pinto 	rx_q->dirty_rx = entry;
3567858a31ffSJose Abreu 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3568858a31ffSJose Abreu 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
35694523a561SBiao Huang 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
35707ac6653aSJeff Kirsher }
35717ac6653aSJeff Kirsher 
357288ebe2cfSJose Abreu static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
357388ebe2cfSJose Abreu 				       struct dma_desc *p,
357488ebe2cfSJose Abreu 				       int status, unsigned int len)
357588ebe2cfSJose Abreu {
357688ebe2cfSJose Abreu 	int ret, coe = priv->hw->rx_csum;
357788ebe2cfSJose Abreu 	unsigned int plen = 0, hlen = 0;
357888ebe2cfSJose Abreu 
357988ebe2cfSJose Abreu 	/* Not first descriptor, buffer is always zero */
358088ebe2cfSJose Abreu 	if (priv->sph && len)
358188ebe2cfSJose Abreu 		return 0;
358288ebe2cfSJose Abreu 
358388ebe2cfSJose Abreu 	/* First descriptor, get split header length */
358488ebe2cfSJose Abreu 	ret = stmmac_get_rx_header_len(priv, p, &hlen);
358588ebe2cfSJose Abreu 	if (priv->sph && hlen) {
358688ebe2cfSJose Abreu 		priv->xstats.rx_split_hdr_pkt_n++;
358788ebe2cfSJose Abreu 		return hlen;
358888ebe2cfSJose Abreu 	}
358988ebe2cfSJose Abreu 
359088ebe2cfSJose Abreu 	/* First descriptor, not last descriptor and not split header */
359188ebe2cfSJose Abreu 	if (status & rx_not_ls)
359288ebe2cfSJose Abreu 		return priv->dma_buf_sz;
359388ebe2cfSJose Abreu 
359488ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
359588ebe2cfSJose Abreu 
359688ebe2cfSJose Abreu 	/* First descriptor and last descriptor and not split header */
359788ebe2cfSJose Abreu 	return min_t(unsigned int, priv->dma_buf_sz, plen);
359888ebe2cfSJose Abreu }
359988ebe2cfSJose Abreu 
360088ebe2cfSJose Abreu static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
360188ebe2cfSJose Abreu 				       struct dma_desc *p,
360288ebe2cfSJose Abreu 				       int status, unsigned int len)
360388ebe2cfSJose Abreu {
360488ebe2cfSJose Abreu 	int coe = priv->hw->rx_csum;
360588ebe2cfSJose Abreu 	unsigned int plen = 0;
360688ebe2cfSJose Abreu 
360788ebe2cfSJose Abreu 	/* Not split header, buffer is not available */
360888ebe2cfSJose Abreu 	if (!priv->sph)
360988ebe2cfSJose Abreu 		return 0;
361088ebe2cfSJose Abreu 
361188ebe2cfSJose Abreu 	/* Not last descriptor */
361288ebe2cfSJose Abreu 	if (status & rx_not_ls)
361388ebe2cfSJose Abreu 		return priv->dma_buf_sz;
361488ebe2cfSJose Abreu 
361588ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
361688ebe2cfSJose Abreu 
361788ebe2cfSJose Abreu 	/* Last descriptor */
361888ebe2cfSJose Abreu 	return plen - len;
361988ebe2cfSJose Abreu }
362088ebe2cfSJose Abreu 
362132ceabcaSGiuseppe CAVALLARO /**
3622732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
362332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
362454139cf3SJoao Pinto  * @limit: napi budget
362554139cf3SJoao Pinto  * @queue: RX queue index.
362632ceabcaSGiuseppe CAVALLARO  * Description : this is the function called by the napi poll method.
362732ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
362832ceabcaSGiuseppe CAVALLARO  */
362954139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
36307ac6653aSJeff Kirsher {
363154139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
36328fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[queue];
3633ec222003SJose Abreu 	unsigned int count = 0, error = 0, len = 0;
3634ec222003SJose Abreu 	int status = 0, coe = priv->hw->rx_csum;
363507b39753SAaro Koskinen 	unsigned int next_entry = rx_q->cur_rx;
3636ec222003SJose Abreu 	struct sk_buff *skb = NULL;
36377ac6653aSJeff Kirsher 
363883d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
3639d0225e7dSAlexandre TORGUE 		void *rx_head;
3640d0225e7dSAlexandre TORGUE 
364138ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3642c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
364354139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
3644c24602efSGiuseppe CAVALLARO 		else
364554139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
3646d0225e7dSAlexandre TORGUE 
364742de047dSJose Abreu 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
36487ac6653aSJeff Kirsher 	}
3649c24602efSGiuseppe CAVALLARO 	while (count < limit) {
365088ebe2cfSJose Abreu 		unsigned int buf1_len = 0, buf2_len = 0;
3651ec222003SJose Abreu 		enum pkt_hash_types hash_type;
36522af6106aSJose Abreu 		struct stmmac_rx_buffer *buf;
36532af6106aSJose Abreu 		struct dma_desc *np, *p;
3654ec222003SJose Abreu 		int entry;
3655ec222003SJose Abreu 		u32 hash;
36567ac6653aSJeff Kirsher 
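		/* On the first iteration, restore any state saved by the
		 * previous poll (a frame that spanned the NAPI budget).
		 */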
3657ec222003SJose Abreu 		if (!count && rx_q->state_saved) {
3658ec222003SJose Abreu 			skb = rx_q->state.skb;
3659ec222003SJose Abreu 			error = rx_q->state.error;
3660ec222003SJose Abreu 			len = rx_q->state.len;
3661ec222003SJose Abreu 		} else {
3662ec222003SJose Abreu 			rx_q->state_saved = false;
3663ec222003SJose Abreu 			skb = NULL;
3664ec222003SJose Abreu 			error = 0;
3665ec222003SJose Abreu 			len = 0;
3666ec222003SJose Abreu 		}
3667ec222003SJose Abreu 
3668ec222003SJose Abreu 		if (count >= limit)
3669ec222003SJose Abreu 			break;
3670ec222003SJose Abreu 
3671ec222003SJose Abreu read_again:
367288ebe2cfSJose Abreu 		buf1_len = 0;
367388ebe2cfSJose Abreu 		buf2_len = 0;
367407b39753SAaro Koskinen 		entry = next_entry;
36752af6106aSJose Abreu 		buf = &rx_q->buf_pool[entry];
367607b39753SAaro Koskinen 
3677c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
367854139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3679c24602efSGiuseppe CAVALLARO 		else
368054139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3681c24602efSGiuseppe CAVALLARO 
3682c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
368342de047dSJose Abreu 		status = stmmac_rx_status(priv, &priv->dev->stats,
3684c1fa3212SFabrice Gasnier 				&priv->xstats, p);
3685c1fa3212SFabrice Gasnier 		/* check if managed by the DMA otherwise go ahead */
3686c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
36877ac6653aSJeff Kirsher 			break;
36887ac6653aSJeff Kirsher 
368954139cf3SJoao Pinto 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
369054139cf3SJoao Pinto 		next_entry = rx_q->cur_rx;
3691e3ad57c9SGiuseppe Cavallaro 
3692c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
369354139cf3SJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3694c24602efSGiuseppe CAVALLARO 		else
369554139cf3SJoao Pinto 			np = rx_q->dma_rx + next_entry;
3696ba1ffd74SGiuseppe CAVALLARO 
3697ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
36987ac6653aSJeff Kirsher 
369942de047dSJose Abreu 		if (priv->extend_desc)
370042de047dSJose Abreu 			stmmac_rx_extended_status(priv, &priv->dev->stats,
370142de047dSJose Abreu 					&priv->xstats, rx_q->dma_erx + entry);
3702891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
37032af6106aSJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
37042af6106aSJose Abreu 			buf->page = NULL;
3705ec222003SJose Abreu 			error = 1;
37060b273ca4SJose Abreu 			if (!priv->hwts_rx_en)
37070b273ca4SJose Abreu 				priv->dev->stats.rx_errors++;
3708ec222003SJose Abreu 		}
3709f748be53SAlexandre TORGUE 
3710ec222003SJose Abreu 		if (unlikely(error && (status & rx_not_ls)))
3711ec222003SJose Abreu 			goto read_again;
3712ec222003SJose Abreu 		if (unlikely(error)) {
3713ec222003SJose Abreu 			dev_kfree_skb(skb);
371488ebe2cfSJose Abreu 			skb = NULL;
3715cda4985aSJose Abreu 			count++;
371607b39753SAaro Koskinen 			continue;
3717e527c4a7SGiuseppe CAVALLARO 		}
3718e527c4a7SGiuseppe CAVALLARO 
3719ec222003SJose Abreu 		/* Buffer is good. Go on. */
3720ec222003SJose Abreu 
372188ebe2cfSJose Abreu 		prefetch(page_address(buf->page));
372288ebe2cfSJose Abreu 		if (buf->sec_page)
372388ebe2cfSJose Abreu 			prefetch(page_address(buf->sec_page));
372488ebe2cfSJose Abreu 
372588ebe2cfSJose Abreu 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
372688ebe2cfSJose Abreu 		len += buf1_len;
372788ebe2cfSJose Abreu 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
372888ebe2cfSJose Abreu 		len += buf2_len;
3729ec222003SJose Abreu 
37307ac6653aSJeff Kirsher 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3731ceb69499SGiuseppe CAVALLARO 		 * Type frames (LLC/LLC-SNAP)
3732565020aaSJose Abreu 		 *
3733565020aaSJose Abreu 		 * llc_snap is never checked in GMAC >= 4, so this ACS
3734565020aaSJose Abreu 		 * feature is always disabled and packets need to be
3735565020aaSJose Abreu 		 * stripped manually.
3736ceb69499SGiuseppe CAVALLARO 		 */
373793b5dce4SJose Abreu 		if (likely(!(status & rx_not_ls)) &&
373893b5dce4SJose Abreu 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
373993b5dce4SJose Abreu 		     unlikely(status != llc_snap))) {
374088ebe2cfSJose Abreu 			if (buf2_len)
374188ebe2cfSJose Abreu 				buf2_len -= ETH_FCS_LEN;
374288ebe2cfSJose Abreu 			else
374388ebe2cfSJose Abreu 				buf1_len -= ETH_FCS_LEN;
374488ebe2cfSJose Abreu 
3745ec222003SJose Abreu 			len -= ETH_FCS_LEN;
374683d7af64SGiuseppe CAVALLARO 		}
374722ad3838SGiuseppe Cavallaro 
3748ec222003SJose Abreu 		if (!skb) {
374988ebe2cfSJose Abreu 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
3750ec222003SJose Abreu 			if (!skb) {
375122ad3838SGiuseppe Cavallaro 				priv->dev->stats.rx_dropped++;
3752cda4985aSJose Abreu 				count++;
375388ebe2cfSJose Abreu 				goto drain_data;
375422ad3838SGiuseppe Cavallaro 			}
375522ad3838SGiuseppe Cavallaro 
375688ebe2cfSJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
375788ebe2cfSJose Abreu 						buf1_len, DMA_FROM_DEVICE);
37582af6106aSJose Abreu 			skb_copy_to_linear_data(skb, page_address(buf->page),
375988ebe2cfSJose Abreu 						buf1_len);
376088ebe2cfSJose Abreu 			skb_put(skb, buf1_len);
376122ad3838SGiuseppe Cavallaro 
3762ec222003SJose Abreu 			/* Data payload copied into SKB, page ready for recycle */
3763ec222003SJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
3764ec222003SJose Abreu 			buf->page = NULL;
376588ebe2cfSJose Abreu 		} else if (buf1_len) {
3766ec222003SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
376788ebe2cfSJose Abreu 						buf1_len, DMA_FROM_DEVICE);
3768ec222003SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
376988ebe2cfSJose Abreu 					buf->page, 0, buf1_len,
3770ec222003SJose Abreu 					priv->dma_buf_sz);
3771ec222003SJose Abreu 
3772ec222003SJose Abreu 			/* Data payload appended into SKB */
3773ec222003SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->page);
3774ec222003SJose Abreu 			buf->page = NULL;
37757ac6653aSJeff Kirsher 		}
377683d7af64SGiuseppe CAVALLARO 
377788ebe2cfSJose Abreu 		if (buf2_len) {
377867afd6d1SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
377988ebe2cfSJose Abreu 						buf2_len, DMA_FROM_DEVICE);
378067afd6d1SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
378188ebe2cfSJose Abreu 					buf->sec_page, 0, buf2_len,
378267afd6d1SJose Abreu 					priv->dma_buf_sz);
378367afd6d1SJose Abreu 
378467afd6d1SJose Abreu 			/* Data payload appended into SKB */
378567afd6d1SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->sec_page);
378667afd6d1SJose Abreu 			buf->sec_page = NULL;
378767afd6d1SJose Abreu 		}
378867afd6d1SJose Abreu 
378988ebe2cfSJose Abreu drain_data:
3790ec222003SJose Abreu 		if (likely(status & rx_not_ls))
3791ec222003SJose Abreu 			goto read_again;
379288ebe2cfSJose Abreu 		if (!skb)
379388ebe2cfSJose Abreu 			continue;
3794ec222003SJose Abreu 
3795ec222003SJose Abreu 		/* Got entire packet into SKB. Finish it. */
3796ec222003SJose Abreu 
3797ba1ffd74SGiuseppe CAVALLARO 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
3798b9381985SVince Bridgers 		stmmac_rx_vlan(priv->dev, skb);
37997ac6653aSJeff Kirsher 		skb->protocol = eth_type_trans(skb, priv->dev);
38007ac6653aSJeff Kirsher 
3801ceb69499SGiuseppe CAVALLARO 		if (unlikely(!coe))
38027ac6653aSJeff Kirsher 			skb_checksum_none_assert(skb);
380362a2ab93SGiuseppe CAVALLARO 		else
38047ac6653aSJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
380562a2ab93SGiuseppe CAVALLARO 
380676067459SJose Abreu 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
380776067459SJose Abreu 			skb_set_hash(skb, hash, hash_type);
380876067459SJose Abreu 
380976067459SJose Abreu 		skb_record_rx_queue(skb, queue);
38104ccb4585SJose Abreu 		napi_gro_receive(&ch->rx_napi, skb);
381188ebe2cfSJose Abreu 		skb = NULL;
38127ac6653aSJeff Kirsher 
38137ac6653aSJeff Kirsher 		priv->dev->stats.rx_packets++;
3814ec222003SJose Abreu 		priv->dev->stats.rx_bytes += len;
3815cda4985aSJose Abreu 		count++;
38167ac6653aSJeff Kirsher 	}
3817ec222003SJose Abreu 
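	/* The current frame was not fully received within this poll: save
	 * the partially built SKB so that the next poll can resume it.
	 */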
381888ebe2cfSJose Abreu 	if (status & rx_not_ls || skb) {
3819ec222003SJose Abreu 		rx_q->state_saved = true;
3820ec222003SJose Abreu 		rx_q->state.skb = skb;
3821ec222003SJose Abreu 		rx_q->state.error = error;
3822ec222003SJose Abreu 		rx_q->state.len = len;
38237ac6653aSJeff Kirsher 	}
38247ac6653aSJeff Kirsher 
382554139cf3SJoao Pinto 	stmmac_rx_refill(priv, queue);
38267ac6653aSJeff Kirsher 
38277ac6653aSJeff Kirsher 	priv->xstats.rx_pkt_n += count;
38287ac6653aSJeff Kirsher 
38297ac6653aSJeff Kirsher 	return count;
38307ac6653aSJeff Kirsher }
38317ac6653aSJeff Kirsher 
38324ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
38337ac6653aSJeff Kirsher {
38348fce3331SJose Abreu 	struct stmmac_channel *ch =
38354ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, rx_napi);
38368fce3331SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
38378fce3331SJose Abreu 	u32 chan = ch->index;
38384ccb4585SJose Abreu 	int work_done;
38397ac6653aSJeff Kirsher 
38409125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
3841ce736788SJoao Pinto 
38424ccb4585SJose Abreu 	work_done = stmmac_rx(priv, budget, chan);
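	/* All RX work done within the budget: complete NAPI and re-enable
	 * this channel's DMA RX interrupt.
	 */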
3843021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
3844021bd5e3SJose Abreu 		unsigned long flags;
3845021bd5e3SJose Abreu 
3846021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
3847021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
3848021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
3849021bd5e3SJose Abreu 	}
3850021bd5e3SJose Abreu 
38514ccb4585SJose Abreu 	return work_done;
38524ccb4585SJose Abreu }
3853ce736788SJoao Pinto 
38544ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
38554ccb4585SJose Abreu {
38564ccb4585SJose Abreu 	struct stmmac_channel *ch =
38574ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, tx_napi);
38584ccb4585SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
38594ccb4585SJose Abreu 	u32 chan = ch->index;
38604ccb4585SJose Abreu 	int work_done;
38614ccb4585SJose Abreu 
38624ccb4585SJose Abreu 	priv->xstats.napi_poll++;
38634ccb4585SJose Abreu 
38644ccb4585SJose Abreu 	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3865fa0be0a4SJose Abreu 	work_done = min(work_done, budget);
38668fce3331SJose Abreu 
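	/* All TX work done within the budget: complete NAPI and re-enable
	 * this channel's DMA TX interrupt.
	 */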
3867021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
3868021bd5e3SJose Abreu 		unsigned long flags;
38694ccb4585SJose Abreu 
3870021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
3871021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
3872021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
3873fa0be0a4SJose Abreu 	}
38748fce3331SJose Abreu 
38757ac6653aSJeff Kirsher 	return work_done;
38767ac6653aSJeff Kirsher }
38777ac6653aSJeff Kirsher 
38787ac6653aSJeff Kirsher /**
38797ac6653aSJeff Kirsher  *  stmmac_tx_timeout
38807ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
 *  @txqueue : the index of the hanging transmit queue
38817ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
38827284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
38837ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
38847ac6653aSJeff Kirsher  *   in order to transmit a new packet.
38857ac6653aSJeff Kirsher  */
38860290bd29SMichael S. Tsirkin static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
38877ac6653aSJeff Kirsher {
38887ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
38897ac6653aSJeff Kirsher 
389034877a15SJose Abreu 	stmmac_global_err(priv);
38917ac6653aSJeff Kirsher }
38927ac6653aSJeff Kirsher 
38937ac6653aSJeff Kirsher /**
389401789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
38957ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
38967ac6653aSJeff Kirsher  *  Description:
38977ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
38987ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
38997ac6653aSJeff Kirsher  *  Return value:
39007ac6653aSJeff Kirsher  *  void.
39017ac6653aSJeff Kirsher  */
390201789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
39037ac6653aSJeff Kirsher {
39047ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
39057ac6653aSJeff Kirsher 
3906c10d4c82SJose Abreu 	stmmac_set_filter(priv, priv->hw, dev);
39077ac6653aSJeff Kirsher }
39087ac6653aSJeff Kirsher 
39097ac6653aSJeff Kirsher /**
39107ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
39117ac6653aSJeff Kirsher  *  @dev : device pointer.
39127ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
39137ac6653aSJeff Kirsher  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
39147ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
39157ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
39167ac6653aSJeff Kirsher  *  Return value:
39177ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
39187ac6653aSJeff Kirsher  *  file on failure.
39197ac6653aSJeff Kirsher  */
39207ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
39217ac6653aSJeff Kirsher {
392238ddc59dSLABBE Corentin 	struct stmmac_priv *priv = netdev_priv(dev);
3923eaf4fac4SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
3924eaf4fac4SJose Abreu 
3925eaf4fac4SJose Abreu 	if (txfifosz == 0)
3926eaf4fac4SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
3927eaf4fac4SJose Abreu 
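	/* The TX FIFO is shared between queues; validate the new MTU against
	 * the per-queue share.
	 */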
3928eaf4fac4SJose Abreu 	txfifosz /= priv->plat->tx_queues_to_use;
392938ddc59dSLABBE Corentin 
39307ac6653aSJeff Kirsher 	if (netif_running(dev)) {
393138ddc59dSLABBE Corentin 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
39327ac6653aSJeff Kirsher 		return -EBUSY;
39337ac6653aSJeff Kirsher 	}
39347ac6653aSJeff Kirsher 
3935eaf4fac4SJose Abreu 	new_mtu = STMMAC_ALIGN(new_mtu);
3936eaf4fac4SJose Abreu 
3937eaf4fac4SJose Abreu 	/* If condition true, FIFO is too small or MTU too large */
3938eaf4fac4SJose Abreu 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
3939eaf4fac4SJose Abreu 		return -EINVAL;
3940eaf4fac4SJose Abreu 
39417ac6653aSJeff Kirsher 	dev->mtu = new_mtu;
3942f748be53SAlexandre TORGUE 
39437ac6653aSJeff Kirsher 	netdev_update_features(dev);
39447ac6653aSJeff Kirsher 
39457ac6653aSJeff Kirsher 	return 0;
39467ac6653aSJeff Kirsher }
39477ac6653aSJeff Kirsher 
3948c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
3949c8f44affSMichał Mirosław 					     netdev_features_t features)
39507ac6653aSJeff Kirsher {
39517ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
39527ac6653aSJeff Kirsher 
395338912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
39547ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
3955d2afb5bdSGiuseppe CAVALLARO 
39567ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
3957a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
39587ac6653aSJeff Kirsher 
39597ac6653aSJeff Kirsher 	/* Some GMAC devices have buggy Jumbo frame support that
39607ac6653aSJeff Kirsher 	 * needs the Tx COE disabled for oversized frames
39617ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
3962ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and do not use SF.
3963ceb69499SGiuseppe CAVALLARO 	 */
39647ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3965a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
39667ac6653aSJeff Kirsher 
3967f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
3968f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3969f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
3970f748be53SAlexandre TORGUE 			priv->tso = true;
3971f748be53SAlexandre TORGUE 		else
3972f748be53SAlexandre TORGUE 			priv->tso = false;
3973f748be53SAlexandre TORGUE 	}
3974f748be53SAlexandre TORGUE 
39757ac6653aSJeff Kirsher 	return features;
39767ac6653aSJeff Kirsher }
39777ac6653aSJeff Kirsher 
3978d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
3979d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
3980d2afb5bdSGiuseppe CAVALLARO {
3981d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
398267afd6d1SJose Abreu 	bool sph_en;
398367afd6d1SJose Abreu 	u32 chan;
3984d2afb5bdSGiuseppe CAVALLARO 
3985d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type in case checksum offload is supported */
3986d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
3987d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
3988d2afb5bdSGiuseppe CAVALLARO 	else
3989d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
3990d2afb5bdSGiuseppe CAVALLARO 	/* No check needed because rx_coe has already been set and any
3991d2afb5bdSGiuseppe CAVALLARO 	 * inconsistency will be fixed up by stmmac_rx_ipc() below.
3992d2afb5bdSGiuseppe CAVALLARO 	 */
3993c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
3994d2afb5bdSGiuseppe CAVALLARO 
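	/* Split Header (SPH) is only usable together with RX checksum
	 * offload; reprogram every RX channel to match the new features.
	 */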
399567afd6d1SJose Abreu 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
399667afd6d1SJose Abreu 	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
399767afd6d1SJose Abreu 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
399867afd6d1SJose Abreu 
3999d2afb5bdSGiuseppe CAVALLARO 	return 0;
4000d2afb5bdSGiuseppe CAVALLARO }
4001d2afb5bdSGiuseppe CAVALLARO 
400232ceabcaSGiuseppe CAVALLARO /**
400332ceabcaSGiuseppe CAVALLARO  *  stmmac_interrupt - main ISR
400432ceabcaSGiuseppe CAVALLARO  *  @irq: interrupt number.
400532ceabcaSGiuseppe CAVALLARO  *  @dev_id: to pass the net device pointer.
400632ceabcaSGiuseppe CAVALLARO  *  Description: this is the main driver interrupt service routine.
4007732fdf0eSGiuseppe CAVALLARO  *  It can call:
4008732fdf0eSGiuseppe CAVALLARO  *  o DMA service routine (to manage incoming frame reception and transmission
4009732fdf0eSGiuseppe CAVALLARO  *    status)
4010732fdf0eSGiuseppe CAVALLARO  *  o Core interrupts to manage: remote wake-up, management counter, LPI
401132ceabcaSGiuseppe CAVALLARO  *    interrupts.
401232ceabcaSGiuseppe CAVALLARO  */
40137ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
40147ac6653aSJeff Kirsher {
40157ac6653aSJeff Kirsher 	struct net_device *dev = (struct net_device *)dev_id;
40167ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
40177bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
40187bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
40197bac4e1eSJoao Pinto 	u32 queues_count;
40207bac4e1eSJoao Pinto 	u32 queue;
40217d9e6c5aSJose Abreu 	bool xmac;
40227bac4e1eSJoao Pinto 
40237d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
40247bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
40257ac6653aSJeff Kirsher 
402689f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
402789f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
402889f7f2cfSSrinivas Kandagatla 
40297ac6653aSJeff Kirsher 	if (unlikely(!dev)) {
403038ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
40317ac6653aSJeff Kirsher 		return IRQ_NONE;
40327ac6653aSJeff Kirsher 	}
40337ac6653aSJeff Kirsher 
403434877a15SJose Abreu 	/* Check if adapter is up */
403534877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
403634877a15SJose Abreu 		return IRQ_HANDLED;
40378bf993a5SJose Abreu 	/* Check if a fatal error happened */
40388bf993a5SJose Abreu 	if (stmmac_safety_feat_interrupt(priv))
40398bf993a5SJose Abreu 		return IRQ_HANDLED;
404034877a15SJose Abreu 
40417ac6653aSJeff Kirsher 	/* To handle the GMAC's own interrupts */
40427d9e6c5aSJose Abreu 	if ((priv->plat->has_gmac) || xmac) {
4043c10d4c82SJose Abreu 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
404461fac60aSJose Abreu 		int mtl_status;
40458f71a88dSJoao Pinto 
4046d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
4047d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
40480982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
4049d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
40500982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
4051d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
40527bac4e1eSJoao Pinto 		}
40537bac4e1eSJoao Pinto 
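		/* Service the per-queue MTL interrupts; an RX FIFO overflow
		 * is handled by reprogramming the queue's RX tail pointer.
		 */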
40547bac4e1eSJoao Pinto 		for (queue = 0; queue < queues_count; queue++) {
405561fac60aSJose Abreu 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
405654139cf3SJoao Pinto 
405761fac60aSJose Abreu 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
405861fac60aSJose Abreu 								queue);
405961fac60aSJose Abreu 			if (mtl_status != -EINVAL)
406061fac60aSJose Abreu 				status |= mtl_status;
40617bac4e1eSJoao Pinto 
4062a4e887faSJose Abreu 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
406361fac60aSJose Abreu 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
406454139cf3SJoao Pinto 						       rx_q->rx_tail_addr,
40657bac4e1eSJoao Pinto 						       queue);
40667bac4e1eSJoao Pinto 		}
406770523e63SGiuseppe CAVALLARO 
406870523e63SGiuseppe CAVALLARO 		/* PCS link status */
40693fe5cadbSGiuseppe CAVALLARO 		if (priv->hw->pcs) {
407070523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
407170523e63SGiuseppe CAVALLARO 				netif_carrier_on(dev);
407270523e63SGiuseppe CAVALLARO 			else
407370523e63SGiuseppe CAVALLARO 				netif_carrier_off(dev);
407470523e63SGiuseppe CAVALLARO 		}
4075d765955dSGiuseppe CAVALLARO 	}
4076d765955dSGiuseppe CAVALLARO 
4077d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
40787ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
40797ac6653aSJeff Kirsher 
40807ac6653aSJeff Kirsher 	return IRQ_HANDLED;
40817ac6653aSJeff Kirsher }
40827ac6653aSJeff Kirsher 
40837ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
40847ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools
4085ceb69499SGiuseppe CAVALLARO  * to allow network I/O with interrupts disabled.
4086ceb69499SGiuseppe CAVALLARO  */
40877ac6653aSJeff Kirsher static void stmmac_poll_controller(struct net_device *dev)
40887ac6653aSJeff Kirsher {
40897ac6653aSJeff Kirsher 	disable_irq(dev->irq);
40907ac6653aSJeff Kirsher 	stmmac_interrupt(dev->irq, dev);
40917ac6653aSJeff Kirsher 	enable_irq(dev->irq);
40927ac6653aSJeff Kirsher }
40937ac6653aSJeff Kirsher #endif
40947ac6653aSJeff Kirsher 
40957ac6653aSJeff Kirsher /**
40967ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
40977ac6653aSJeff Kirsher  *  @dev: Device pointer.
40987ac6653aSJeff Kirsher  *  @rq: An IOCTL-specific structure that can contain a pointer to
40997ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
41007ac6653aSJeff Kirsher  *  @cmd: IOCTL command
41017ac6653aSJeff Kirsher  *  Description:
410232ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
41037ac6653aSJeff Kirsher  */
41047ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
41057ac6653aSJeff Kirsher {
410674371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(dev);
4107891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
41087ac6653aSJeff Kirsher 
41097ac6653aSJeff Kirsher 	if (!netif_running(dev))
41107ac6653aSJeff Kirsher 		return -EINVAL;
41117ac6653aSJeff Kirsher 
4112891434b1SRayagond Kokatanur 	switch (cmd) {
4113891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
4114891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
4115891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
411674371272SJose Abreu 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
4117891434b1SRayagond Kokatanur 		break;
4118891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
4119d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_set(dev, rq);
4120d6228b7cSArtem Panfilov 		break;
4121d6228b7cSArtem Panfilov 	case SIOCGHWTSTAMP:
4122d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_get(dev, rq);
4123891434b1SRayagond Kokatanur 		break;
4124891434b1SRayagond Kokatanur 	default:
4125891434b1SRayagond Kokatanur 		break;
4126891434b1SRayagond Kokatanur 	}
41277ac6653aSJeff Kirsher 
41287ac6653aSJeff Kirsher 	return ret;
41297ac6653aSJeff Kirsher }
41307ac6653aSJeff Kirsher 
41314dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
41324dbbe8ddSJose Abreu 				    void *cb_priv)
41334dbbe8ddSJose Abreu {
41344dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
41354dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
41364dbbe8ddSJose Abreu 
4137425eabddSJose Abreu 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4138425eabddSJose Abreu 		return ret;
4139425eabddSJose Abreu 
41404dbbe8ddSJose Abreu 	stmmac_disable_all_queues(priv);
41414dbbe8ddSJose Abreu 
41424dbbe8ddSJose Abreu 	switch (type) {
41434dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
41444dbbe8ddSJose Abreu 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
41454dbbe8ddSJose Abreu 		break;
4146425eabddSJose Abreu 	case TC_SETUP_CLSFLOWER:
4147425eabddSJose Abreu 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
4148425eabddSJose Abreu 		break;
41494dbbe8ddSJose Abreu 	default:
41504dbbe8ddSJose Abreu 		break;
41514dbbe8ddSJose Abreu 	}
41524dbbe8ddSJose Abreu 
41534dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
41544dbbe8ddSJose Abreu 	return ret;
41554dbbe8ddSJose Abreu }
41564dbbe8ddSJose Abreu 
4157955bcb6eSPablo Neira Ayuso static LIST_HEAD(stmmac_block_cb_list);
4158955bcb6eSPablo Neira Ayuso 
41594dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
41604dbbe8ddSJose Abreu 			   void *type_data)
41614dbbe8ddSJose Abreu {
41624dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
41634dbbe8ddSJose Abreu 
41644dbbe8ddSJose Abreu 	switch (type) {
41654dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
4166955bcb6eSPablo Neira Ayuso 		return flow_block_cb_setup_simple(type_data,
4167955bcb6eSPablo Neira Ayuso 						  &stmmac_block_cb_list,
41684e95bc26SPablo Neira Ayuso 						  stmmac_setup_tc_block_cb,
41694e95bc26SPablo Neira Ayuso 						  priv, priv, true);
41701f705bc6SJose Abreu 	case TC_SETUP_QDISC_CBS:
41711f705bc6SJose Abreu 		return stmmac_tc_setup_cbs(priv, priv, type_data);
4172b60189e0SJose Abreu 	case TC_SETUP_QDISC_TAPRIO:
4173b60189e0SJose Abreu 		return stmmac_tc_setup_taprio(priv, priv, type_data);
4174430b383cSJose Abreu 	case TC_SETUP_QDISC_ETF:
4175430b383cSJose Abreu 		return stmmac_tc_setup_etf(priv, priv, type_data);
41764dbbe8ddSJose Abreu 	default:
41774dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
41784dbbe8ddSJose Abreu 	}
41794dbbe8ddSJose Abreu }
41804dbbe8ddSJose Abreu 
41814993e5b3SJose Abreu static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
41824993e5b3SJose Abreu 			       struct net_device *sb_dev)
41834993e5b3SJose Abreu {
4184b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
4185b7766206SJose Abreu 
4186b7766206SJose Abreu 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
41874993e5b3SJose Abreu 		/*
4188b7766206SJose Abreu 		 * There is no way to determine the number of TSO/USO
41894993e5b3SJose Abreu 		 * capable Queues. Let's always use Queue 0
4190b7766206SJose Abreu 		 * because if TSO/USO is supported then at least this
41914993e5b3SJose Abreu 		 * one will be capable.
41924993e5b3SJose Abreu 		 */
41934993e5b3SJose Abreu 		return 0;
41944993e5b3SJose Abreu 	}
41954993e5b3SJose Abreu 
41964993e5b3SJose Abreu 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
41974993e5b3SJose Abreu }
41984993e5b3SJose Abreu 
4199a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4200a830405eSBhadram Varka {
4201a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
4202a830405eSBhadram Varka 	int ret = 0;
4203a830405eSBhadram Varka 
4204a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
4205a830405eSBhadram Varka 	if (ret)
4206a830405eSBhadram Varka 		return ret;
4207a830405eSBhadram Varka 
4208c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4209a830405eSBhadram Varka 
4210a830405eSBhadram Varka 	return ret;
4211a830405eSBhadram Varka }
4212a830405eSBhadram Varka 
421350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
42147ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
42157ac29055SGiuseppe CAVALLARO 
4216c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
4217c24602efSGiuseppe CAVALLARO 			       struct seq_file *seq)
42187ac29055SGiuseppe CAVALLARO {
42197ac29055SGiuseppe CAVALLARO 	int i;
4220c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4221c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
42227ac29055SGiuseppe CAVALLARO 
4223c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
4224c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
4225c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4226c24602efSGiuseppe CAVALLARO 				   i, (unsigned int)virt_to_phys(ep),
4227f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
4228f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
4229f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
4230f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
4231c24602efSGiuseppe CAVALLARO 			ep++;
4232c24602efSGiuseppe CAVALLARO 		} else {
4233c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
423466c25f6eSNiklas Cassel 				   i, (unsigned int)virt_to_phys(p),
4235f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4236f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4237c24602efSGiuseppe CAVALLARO 			p++;
4238c24602efSGiuseppe CAVALLARO 		}
42397ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
42407ac29055SGiuseppe CAVALLARO 	}
4241c24602efSGiuseppe CAVALLARO }
42427ac29055SGiuseppe CAVALLARO 
4243fb0d9c63SYangtao Li static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4244c24602efSGiuseppe CAVALLARO {
4245c24602efSGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
4246c24602efSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
424754139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
4248ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
424954139cf3SJoao Pinto 	u32 queue;
425054139cf3SJoao Pinto 
42515f2b8b62SThierry Reding 	if ((dev->flags & IFF_UP) == 0)
42525f2b8b62SThierry Reding 		return 0;
42535f2b8b62SThierry Reding 
425454139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
425554139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
425654139cf3SJoao Pinto 
425754139cf3SJoao Pinto 		seq_printf(seq, "RX Queue %d:\n", queue);
42587ac29055SGiuseppe CAVALLARO 
4259c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc) {
426054139cf3SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
426154139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_erx,
426254139cf3SJoao Pinto 					   DMA_RX_SIZE, 1, seq);
426354139cf3SJoao Pinto 		} else {
426454139cf3SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
426554139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_rx,
426654139cf3SJoao Pinto 					   DMA_RX_SIZE, 0, seq);
426754139cf3SJoao Pinto 		}
426854139cf3SJoao Pinto 	}
426954139cf3SJoao Pinto 
4270ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
4271ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4272ce736788SJoao Pinto 
4273ce736788SJoao Pinto 		seq_printf(seq, "TX Queue %d:\n", queue);
4274ce736788SJoao Pinto 
427554139cf3SJoao Pinto 		if (priv->extend_desc) {
4276ce736788SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
4277ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_etx,
4278ce736788SJoao Pinto 					   DMA_TX_SIZE, 1, seq);
4279579a25a8SJose Abreu 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
4280ce736788SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
4281ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_tx,
4282ce736788SJoao Pinto 					   DMA_TX_SIZE, 0, seq);
4283ce736788SJoao Pinto 		}
42847ac29055SGiuseppe CAVALLARO 	}
42857ac29055SGiuseppe CAVALLARO 
42867ac29055SGiuseppe CAVALLARO 	return 0;
42877ac29055SGiuseppe CAVALLARO }
4288fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
42897ac29055SGiuseppe CAVALLARO 
4290fb0d9c63SYangtao Li static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4291e7434821SGiuseppe CAVALLARO {
4292e7434821SGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
4293e7434821SGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
4294e7434821SGiuseppe CAVALLARO 
429519e30c14SGiuseppe CAVALLARO 	if (!priv->hw_cap_support) {
4296e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "DMA HW features not supported\n");
4297e7434821SGiuseppe CAVALLARO 		return 0;
4298e7434821SGiuseppe CAVALLARO 	}
4299e7434821SGiuseppe CAVALLARO 
4300e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
4301e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tDMA HW features\n");
4302e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
4303e7434821SGiuseppe CAVALLARO 
430422d3efe5SPavel Machek 	seq_printf(seq, "\t10/100 Mbps: %s\n",
4305e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
430622d3efe5SPavel Machek 	seq_printf(seq, "\t1000 Mbps: %s\n",
4307e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
430822d3efe5SPavel Machek 	seq_printf(seq, "\tHalf duplex: %s\n",
4309e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
4310e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tHash Filter: %s\n",
4311e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
4312e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4313e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
43148d45e42bSLABBE Corentin 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4315e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pcs) ? "Y" : "N");
4316e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4317e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
4318e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
4319e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4320e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
4321e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4322e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRMON module: %s\n",
4323e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rmon) ? "Y" : "N");
4324e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4325e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
4326e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4327e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
432822d3efe5SPavel Machek 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4329e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.eee) ? "Y" : "N");
4330e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4331e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4332e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
4333f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4334f748be53SAlexandre TORGUE 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4335f748be53SAlexandre TORGUE 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
4336f748be53SAlexandre TORGUE 	} else {
4337e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4338e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4339e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4340e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4341f748be53SAlexandre TORGUE 	}
4342e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4343e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4344e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4345e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_rx_channel);
4346e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4347e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_tx_channel);
43487d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
43497d0b447aSJose Abreu 		   priv->dma_cap.number_rx_queues);
43507d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
43517d0b447aSJose Abreu 		   priv->dma_cap.number_tx_queues);
4352e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4353e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
43547d0b447aSJose Abreu 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
43557d0b447aSJose Abreu 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
43567d0b447aSJose Abreu 	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
43577d0b447aSJose Abreu 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
43587d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
43597d0b447aSJose Abreu 		   priv->dma_cap.pps_out_num);
43607d0b447aSJose Abreu 	seq_printf(seq, "\tSafety Features: %s\n",
43617d0b447aSJose Abreu 		   priv->dma_cap.asp ? "Y" : "N");
43627d0b447aSJose Abreu 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
43637d0b447aSJose Abreu 		   priv->dma_cap.frpsel ? "Y" : "N");
43647d0b447aSJose Abreu 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
43657d0b447aSJose Abreu 		   priv->dma_cap.addr64);
43667d0b447aSJose Abreu 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
43677d0b447aSJose Abreu 		   priv->dma_cap.rssen ? "Y" : "N");
43687d0b447aSJose Abreu 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
43697d0b447aSJose Abreu 		   priv->dma_cap.vlhash ? "Y" : "N");
43707d0b447aSJose Abreu 	seq_printf(seq, "\tSplit Header: %s\n",
43717d0b447aSJose Abreu 		   priv->dma_cap.sphen ? "Y" : "N");
43727d0b447aSJose Abreu 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
43737d0b447aSJose Abreu 		   priv->dma_cap.vlins ? "Y" : "N");
43747d0b447aSJose Abreu 	seq_printf(seq, "\tDouble VLAN: %s\n",
43757d0b447aSJose Abreu 		   priv->dma_cap.dvlan ? "Y" : "N");
43767d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
43777d0b447aSJose Abreu 		   priv->dma_cap.l3l4fnum);
43787d0b447aSJose Abreu 	seq_printf(seq, "\tARP Offloading: %s\n",
43797d0b447aSJose Abreu 		   priv->dma_cap.arpoffsel ? "Y" : "N");
438044e65475SJose Abreu 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
438144e65475SJose Abreu 		   priv->dma_cap.estsel ? "Y" : "N");
438244e65475SJose Abreu 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
438344e65475SJose Abreu 		   priv->dma_cap.fpesel ? "Y" : "N");
438444e65475SJose Abreu 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
438544e65475SJose Abreu 		   priv->dma_cap.tbssel ? "Y" : "N");
4386e7434821SGiuseppe CAVALLARO 	return 0;
4387e7434821SGiuseppe CAVALLARO }
4388fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4389e7434821SGiuseppe CAVALLARO 
4390481a7d15SJiping Ma /* Use network device events to rename debugfs file entries.
4391481a7d15SJiping Ma  */
4392481a7d15SJiping Ma static int stmmac_device_event(struct notifier_block *unused,
4393481a7d15SJiping Ma 			       unsigned long event, void *ptr)
4394481a7d15SJiping Ma {
4395481a7d15SJiping Ma 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4396481a7d15SJiping Ma 	struct stmmac_priv *priv = netdev_priv(dev);
4397481a7d15SJiping Ma 
4398481a7d15SJiping Ma 	if (dev->netdev_ops != &stmmac_netdev_ops)
4399481a7d15SJiping Ma 		goto done;
4400481a7d15SJiping Ma 
4401481a7d15SJiping Ma 	switch (event) {
4402481a7d15SJiping Ma 	case NETDEV_CHANGENAME:
4403481a7d15SJiping Ma 		if (priv->dbgfs_dir)
4404481a7d15SJiping Ma 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
4405481a7d15SJiping Ma 							 priv->dbgfs_dir,
4406481a7d15SJiping Ma 							 stmmac_fs_dir,
4407481a7d15SJiping Ma 							 dev->name);
4408481a7d15SJiping Ma 		break;
4409481a7d15SJiping Ma 	}
4410481a7d15SJiping Ma done:
4411481a7d15SJiping Ma 	return NOTIFY_DONE;
4412481a7d15SJiping Ma }
4413481a7d15SJiping Ma 
4414481a7d15SJiping Ma static struct notifier_block stmmac_notifier = {
4415481a7d15SJiping Ma 	.notifier_call = stmmac_device_event,
4416481a7d15SJiping Ma };
4417481a7d15SJiping Ma 
44188d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev)
44197ac29055SGiuseppe CAVALLARO {
4420466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
44217ac29055SGiuseppe CAVALLARO 
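	/* Taking the RTNL lock here serializes the creation of the per-device
	 * directory with the NETDEV_CHANGENAME notifier above, which renames
	 * it when the interface name changes.
	 */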
4422474a31e1SAaro Koskinen 	rtnl_lock();
4423474a31e1SAaro Koskinen 
4424466c5ac8SMathieu Olivari 	/* Create per netdev entries */
4425466c5ac8SMathieu Olivari 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4426466c5ac8SMathieu Olivari 
44277ac29055SGiuseppe CAVALLARO 	/* Entry to report DMA RX/TX rings */
44288d72ab11SGreg Kroah-Hartman 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
44297ac29055SGiuseppe CAVALLARO 			    &stmmac_rings_status_fops);
44307ac29055SGiuseppe CAVALLARO 
4431e7434821SGiuseppe CAVALLARO 	/* Entry to report the DMA HW features */
44328d72ab11SGreg Kroah-Hartman 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
44338d72ab11SGreg Kroah-Hartman 			    &stmmac_dma_cap_fops);
4434481a7d15SJiping Ma 
4435474a31e1SAaro Koskinen 	rtnl_unlock();
44367ac29055SGiuseppe CAVALLARO }
44377ac29055SGiuseppe CAVALLARO 
4438466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev)
44397ac29055SGiuseppe CAVALLARO {
4440466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
4441466c5ac8SMathieu Olivari 
4442466c5ac8SMathieu Olivari 	debugfs_remove_recursive(priv->dbgfs_dir);
44437ac29055SGiuseppe CAVALLARO }
444450fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
44457ac29055SGiuseppe CAVALLARO 
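/* Bit-serial CRC-32 (reflected polynomial 0xEDB88320) over the 12 bits
 * of the VLAN ID, LSB first. stmmac_vlan_update() below bit-reverses the
 * complemented result and keeps the top 4 bits as the index into the
 * 16-bin VLAN hash filter.
 */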
44463cd1cfcbSJose Abreu static u32 stmmac_vid_crc32_le(__le16 vid_le)
44473cd1cfcbSJose Abreu {
44483cd1cfcbSJose Abreu 	unsigned char *data = (unsigned char *)&vid_le;
44493cd1cfcbSJose Abreu 	unsigned char data_byte = 0;
44503cd1cfcbSJose Abreu 	u32 crc = ~0x0;
44513cd1cfcbSJose Abreu 	u32 temp = 0;
44523cd1cfcbSJose Abreu 	int i, bits;
44533cd1cfcbSJose Abreu 
44543cd1cfcbSJose Abreu 	bits = get_bitmask_order(VLAN_VID_MASK);
44553cd1cfcbSJose Abreu 	for (i = 0; i < bits; i++) {
44563cd1cfcbSJose Abreu 		if ((i % 8) == 0)
44573cd1cfcbSJose Abreu 			data_byte = data[i / 8];
44583cd1cfcbSJose Abreu 
44593cd1cfcbSJose Abreu 		temp = ((crc & 1) ^ data_byte) & 1;
44603cd1cfcbSJose Abreu 		crc >>= 1;
44613cd1cfcbSJose Abreu 		data_byte >>= 1;
44623cd1cfcbSJose Abreu 
44633cd1cfcbSJose Abreu 		if (temp)
44643cd1cfcbSJose Abreu 			crc ^= 0xedb88320;
44653cd1cfcbSJose Abreu 	}
44663cd1cfcbSJose Abreu 
44673cd1cfcbSJose Abreu 	return crc;
44683cd1cfcbSJose Abreu }
44693cd1cfcbSJose Abreu 
44703cd1cfcbSJose Abreu static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
44713cd1cfcbSJose Abreu {
44723cd1cfcbSJose Abreu 	u32 crc, hash = 0;
4473a24cae70SJose Abreu 	__le16 pmatch = 0;
4474c7ab0b80SJose Abreu 	int count = 0;
4475c7ab0b80SJose Abreu 	u16 vid = 0;
44763cd1cfcbSJose Abreu 
44773cd1cfcbSJose Abreu 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
44783cd1cfcbSJose Abreu 		__le16 vid_le = cpu_to_le16(vid);
44793cd1cfcbSJose Abreu 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
44803cd1cfcbSJose Abreu 		hash |= (1 << crc);
4481c7ab0b80SJose Abreu 		count++;
44823cd1cfcbSJose Abreu 	}
44833cd1cfcbSJose Abreu 
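	/* Without VLAN hash filtering support, fall back to a single
	 * perfect-match entry: VID 0 always passes the filter, so at most
	 * one other VID can be programmed.
	 */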
4484c7ab0b80SJose Abreu 	if (!priv->dma_cap.vlhash) {
4485c7ab0b80SJose Abreu 		if (count > 2) /* VID = 0 always passes filter */
4486c7ab0b80SJose Abreu 			return -EOPNOTSUPP;
4487c7ab0b80SJose Abreu 
4488a24cae70SJose Abreu 		pmatch = cpu_to_le16(vid);
4489c7ab0b80SJose Abreu 		hash = 0;
4490c7ab0b80SJose Abreu 	}
4491c7ab0b80SJose Abreu 
4492a24cae70SJose Abreu 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
44933cd1cfcbSJose Abreu }
44943cd1cfcbSJose Abreu 
44953cd1cfcbSJose Abreu static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
44963cd1cfcbSJose Abreu {
44973cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
44983cd1cfcbSJose Abreu 	bool is_double = false;
44993cd1cfcbSJose Abreu 	int ret;
45003cd1cfcbSJose Abreu 
45013cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
45023cd1cfcbSJose Abreu 		is_double = true;
45033cd1cfcbSJose Abreu 
45043cd1cfcbSJose Abreu 	set_bit(vid, priv->active_vlans);
45053cd1cfcbSJose Abreu 	ret = stmmac_vlan_update(priv, is_double);
45063cd1cfcbSJose Abreu 	if (ret) {
45073cd1cfcbSJose Abreu 		clear_bit(vid, priv->active_vlans);
45083cd1cfcbSJose Abreu 		return ret;
45093cd1cfcbSJose Abreu 	}
45103cd1cfcbSJose Abreu 
45113cd1cfcbSJose Abreu 	return ret;
45123cd1cfcbSJose Abreu }
45133cd1cfcbSJose Abreu 
45143cd1cfcbSJose Abreu static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
45153cd1cfcbSJose Abreu {
45163cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
45173cd1cfcbSJose Abreu 	bool is_double = false;
45183cd1cfcbSJose Abreu 
45193cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
45203cd1cfcbSJose Abreu 		is_double = true;
45213cd1cfcbSJose Abreu 
45223cd1cfcbSJose Abreu 	clear_bit(vid, priv->active_vlans);
45233cd1cfcbSJose Abreu 	return stmmac_vlan_update(priv, is_double);
45243cd1cfcbSJose Abreu }
45253cd1cfcbSJose Abreu 
45267ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = {
45277ac6653aSJeff Kirsher 	.ndo_open = stmmac_open,
45287ac6653aSJeff Kirsher 	.ndo_start_xmit = stmmac_xmit,
45297ac6653aSJeff Kirsher 	.ndo_stop = stmmac_release,
45307ac6653aSJeff Kirsher 	.ndo_change_mtu = stmmac_change_mtu,
45317ac6653aSJeff Kirsher 	.ndo_fix_features = stmmac_fix_features,
4532d2afb5bdSGiuseppe CAVALLARO 	.ndo_set_features = stmmac_set_features,
453301789349SJiri Pirko 	.ndo_set_rx_mode = stmmac_set_rx_mode,
45347ac6653aSJeff Kirsher 	.ndo_tx_timeout = stmmac_tx_timeout,
45357ac6653aSJeff Kirsher 	.ndo_do_ioctl = stmmac_ioctl,
45364dbbe8ddSJose Abreu 	.ndo_setup_tc = stmmac_setup_tc,
45374993e5b3SJose Abreu 	.ndo_select_queue = stmmac_select_queue,
45387ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
45397ac6653aSJeff Kirsher 	.ndo_poll_controller = stmmac_poll_controller,
45407ac6653aSJeff Kirsher #endif
4541a830405eSBhadram Varka 	.ndo_set_mac_address = stmmac_set_mac_address,
45423cd1cfcbSJose Abreu 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
45433cd1cfcbSJose Abreu 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
45447ac6653aSJeff Kirsher };
45457ac6653aSJeff Kirsher 
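/* Restart the interface from the service workqueue once a reset has been
 * requested (STMMAC_RESET_REQUESTED, e.g. after a TX timeout): close and
 * re-open the device under the RTNL lock.
 */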
454634877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv)
454734877a15SJose Abreu {
454834877a15SJose Abreu 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
454934877a15SJose Abreu 		return;
455034877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
455134877a15SJose Abreu 		return;
455234877a15SJose Abreu 
455334877a15SJose Abreu 	netdev_err(priv->dev, "Reset adapter.\n");
455434877a15SJose Abreu 
455534877a15SJose Abreu 	rtnl_lock();
455634877a15SJose Abreu 	netif_trans_update(priv->dev);
455734877a15SJose Abreu 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
455834877a15SJose Abreu 		usleep_range(1000, 2000);
455934877a15SJose Abreu 
456034877a15SJose Abreu 	set_bit(STMMAC_DOWN, &priv->state);
456134877a15SJose Abreu 	dev_close(priv->dev);
456200f54e68SPetr Machata 	dev_open(priv->dev, NULL);
456334877a15SJose Abreu 	clear_bit(STMMAC_DOWN, &priv->state);
456434877a15SJose Abreu 	clear_bit(STMMAC_RESETING, &priv->state);
456534877a15SJose Abreu 	rtnl_unlock();
456634877a15SJose Abreu }
456734877a15SJose Abreu 
456834877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
456934877a15SJose Abreu {
457034877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
457134877a15SJose Abreu 			service_task);
457234877a15SJose Abreu 
457334877a15SJose Abreu 	stmmac_reset_subtask(priv);
457434877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
457534877a15SJose Abreu }
457634877a15SJose Abreu 
45777ac6653aSJeff Kirsher /**
4578cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
457932ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
4580732fdf0eSGiuseppe CAVALLARO  *  Description: this function configures the MAC device according to
4581732fdf0eSGiuseppe CAVALLARO  *  some platform parameters or the HW capability register. It prepares the
4582732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain mode and to set up either enhanced or
4583732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
4584cf3f047bSGiuseppe CAVALLARO  */
4585cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
4586cf3f047bSGiuseppe CAVALLARO {
45875f0456b4SJose Abreu 	int ret;
4588cf3f047bSGiuseppe CAVALLARO 
45899f93ac8dSLABBE Corentin 	/* dwmac-sun8i only works in chain mode */
45909f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i)
45919f93ac8dSLABBE Corentin 		chain_mode = 1;
45925f0456b4SJose Abreu 	priv->chain_mode = chain_mode;
45939f93ac8dSLABBE Corentin 
45945f0456b4SJose Abreu 	/* Initialize HW Interface */
45955f0456b4SJose Abreu 	ret = stmmac_hwif_init(priv);
45965f0456b4SJose Abreu 	if (ret)
45975f0456b4SJose Abreu 		return ret;
45984a7d666aSGiuseppe CAVALLARO 
4599cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capabilities (GMAC cores newer than 3.50a) */
4600cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4601cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
460238ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
4603cf3f047bSGiuseppe CAVALLARO 
4604cf3f047bSGiuseppe CAVALLARO 		/* We can override some GMAC/DMA configuration fields
4605cf3f047bSGiuseppe CAVALLARO 		 * passed through the platform data (e.g. enh_desc,
4606cf3f047bSGiuseppe CAVALLARO 		 * tx_coe) with the values from the HW capability
4607cf3f047bSGiuseppe CAVALLARO 		 * register (if supported).
4608cf3f047bSGiuseppe CAVALLARO 		 */
4609cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4610cf3f047bSGiuseppe CAVALLARO 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
46113fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
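		/* Derive the number of multicast filter bins from the HW
		 * capability field: 32 << hash_tb_sz bins.
		 */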
4612b8ef7020SBiao Huang 		if (priv->dma_cap.hash_tb_sz) {
4613b8ef7020SBiao Huang 			priv->hw->multicast_filter_bins =
4614b8ef7020SBiao Huang 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
4615b8ef7020SBiao Huang 			priv->hw->mcast_bits_log2 =
4616b8ef7020SBiao Huang 					ilog2(priv->hw->multicast_filter_bins);
4617b8ef7020SBiao Huang 		}
461838912bdbSDeepak SIKRI 
4619a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
4620a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
4621a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
4622a8df35d4SEzequiel Garcia 		else
462338912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4624a8df35d4SEzequiel Garcia 
4625f748be53SAlexandre TORGUE 		/* For GMAC4, rx_coe comes from the HW capability register. */
4626f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
462738912bdbSDeepak SIKRI 
462838912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
462938912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
463038912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
463138912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
463238912bdbSDeepak SIKRI 
463338ddc59dSLABBE Corentin 	} else {
463438ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
463538ddc59dSLABBE Corentin 	}
4636cf3f047bSGiuseppe CAVALLARO 
4637d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
4638d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
463938ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4640f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
464138ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4642d2afb5bdSGiuseppe CAVALLARO 	}
4643cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
464438ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
4645cf3f047bSGiuseppe CAVALLARO 
4646cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
464738ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4648cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
4649cf3f047bSGiuseppe CAVALLARO 	}
4650cf3f047bSGiuseppe CAVALLARO 
4651f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
465238ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
4653f748be53SAlexandre TORGUE 
46547cfde0afSJose Abreu 	/* Run HW quirks, if any */
46557cfde0afSJose Abreu 	if (priv->hwif_quirks) {
46567cfde0afSJose Abreu 		ret = priv->hwif_quirks(priv);
46577cfde0afSJose Abreu 		if (ret)
46587cfde0afSJose Abreu 			return ret;
46597cfde0afSJose Abreu 	}
46607cfde0afSJose Abreu 
46613b509466SJose Abreu 	/* Rx Watchdog is available in cores newer than 3.40.
46623b509466SJose Abreu 	 * In some cases, for example on buggy HW, this feature
46633b509466SJose Abreu 	 * has to be disabled; this can be done by passing the
46643b509466SJose Abreu 	 * riwt_off field from the platform.
46653b509466SJose Abreu 	 */
46663b509466SJose Abreu 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
46673b509466SJose Abreu 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
46683b509466SJose Abreu 		priv->use_riwt = 1;
46693b509466SJose Abreu 		dev_info(priv->device,
46703b509466SJose Abreu 			 "Enable RX Mitigation via HW Watchdog Timer\n");
46713b509466SJose Abreu 	}
46723b509466SJose Abreu 
4673c24602efSGiuseppe CAVALLARO 	return 0;
4674cf3f047bSGiuseppe CAVALLARO }
4675cf3f047bSGiuseppe CAVALLARO 
4676cf3f047bSGiuseppe CAVALLARO /**
4677bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
4678bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
4679ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
4680e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
4681bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function used to
4682bfab27a1SGiuseppe CAVALLARO  * call alloc_etherdev and allocate the private structure.
46839afec6efSAndy Shevchenko  * Return:
468415ffac73SJoachim Eastwood  * 0 on success, otherwise a negative errno.
46857ac6653aSJeff Kirsher  */
468615ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
4687cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
4688e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
46897ac6653aSJeff Kirsher {
4690bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
4691bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
469276067459SJose Abreu 	u32 queue, rxq, maxq;
469376067459SJose Abreu 	int i, ret = 0;
46947ac6653aSJeff Kirsher 
46959737070cSJisheng Zhang 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
46969737070cSJisheng Zhang 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
469741de8d4cSJoe Perches 	if (!ndev)
469815ffac73SJoachim Eastwood 		return -ENOMEM;
46997ac6653aSJeff Kirsher 
4700bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
47017ac6653aSJeff Kirsher 
4702bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
4703bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
4704bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
4705bfab27a1SGiuseppe CAVALLARO 
4706bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
4707cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
4708cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
4709e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
4710e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
4711e56788cfSJoachim Eastwood 
4712e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
4713e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
4714e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
4715e56788cfSJoachim Eastwood 
4716a51645f7SPetr Štetiar 	if (!IS_ERR_OR_NULL(res->mac))
4717e56788cfSJoachim Eastwood 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4718bfab27a1SGiuseppe CAVALLARO 
4719a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
4720803f8fc4SJoachim Eastwood 
4721cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
4722cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
4723cf3f047bSGiuseppe CAVALLARO 
472434877a15SJose Abreu 	/* Allocate workqueue */
472534877a15SJose Abreu 	priv->wq = create_singlethread_workqueue("stmmac_wq");
472634877a15SJose Abreu 	if (!priv->wq) {
472734877a15SJose Abreu 		dev_err(priv->device, "failed to create workqueue\n");
47289737070cSJisheng Zhang 		return -ENOMEM;
472934877a15SJose Abreu 	}
473034877a15SJose Abreu 
473134877a15SJose Abreu 	INIT_WORK(&priv->service_task, stmmac_service_task);
473234877a15SJose Abreu 
4733cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied. XXX CRS XXX:
4734ceb69499SGiuseppe CAVALLARO 	 * this needs to support multiple instances.
4735ceb69499SGiuseppe CAVALLARO 	 */
4736cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
4737cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
4738cf3f047bSGiuseppe CAVALLARO 
473990f522a2SEugeniy Paltsev 	if (priv->plat->stmmac_rst) {
474090f522a2SEugeniy Paltsev 		ret = reset_control_assert(priv->plat->stmmac_rst);
4741f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
474290f522a2SEugeniy Paltsev 		/* Some reset controllers have only a reset callback instead
474390f522a2SEugeniy Paltsev 		 * of an assert + deassert callback pair.
474490f522a2SEugeniy Paltsev 		 */
474590f522a2SEugeniy Paltsev 		if (ret == -ENOTSUPP)
474690f522a2SEugeniy Paltsev 			reset_control_reset(priv->plat->stmmac_rst);
474790f522a2SEugeniy Paltsev 	}
4748c5e4ddbdSChen-Yu Tsai 
4749cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
4750c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
4751c24602efSGiuseppe CAVALLARO 	if (ret)
475262866e98SChen-Yu Tsai 		goto error_hw_init;
4753cf3f047bSGiuseppe CAVALLARO 
4754b561af36SVinod Koul 	stmmac_check_ether_addr(priv);
4755b561af36SVinod Koul 
4756c22a3f48SJoao Pinto 	/* Configure real RX and TX queues */
4757c02b7a91SJoao Pinto 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4758c02b7a91SJoao Pinto 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4759c22a3f48SJoao Pinto 
4760cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
4761cf3f047bSGiuseppe CAVALLARO 
4762cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4763cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
4764f748be53SAlexandre TORGUE 
47654dbbe8ddSJose Abreu 	ret = stmmac_tc_init(priv, priv);
47664dbbe8ddSJose Abreu 	if (!ret) {
47674dbbe8ddSJose Abreu 		ndev->hw_features |= NETIF_F_HW_TC;
47684dbbe8ddSJose Abreu 	}
47694dbbe8ddSJose Abreu 
4770f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
47719edfa7daSNiklas Cassel 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4772b7766206SJose Abreu 		if (priv->plat->has_gmac4)
4773b7766206SJose Abreu 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
4774f748be53SAlexandre TORGUE 		priv->tso = true;
477538ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
4776f748be53SAlexandre TORGUE 	}
4777a993db88SJose Abreu 
477867afd6d1SJose Abreu 	if (priv->dma_cap.sphen) {
477967afd6d1SJose Abreu 		ndev->hw_features |= NETIF_F_GRO;
478067afd6d1SJose Abreu 		priv->sph = true;
478167afd6d1SJose Abreu 		dev_info(priv->device, "SPH feature enabled\n");
478267afd6d1SJose Abreu 	}
478367afd6d1SJose Abreu 
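	/* Use the DMA addressing width reported by the HW capability
	 * register; if that mask cannot be set, fall back to 32-bit DMA.
	 */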
4784a993db88SJose Abreu 	if (priv->dma_cap.addr64) {
4785a993db88SJose Abreu 		ret = dma_set_mask_and_coherent(device,
4786a993db88SJose Abreu 				DMA_BIT_MASK(priv->dma_cap.addr64));
4787a993db88SJose Abreu 		if (!ret) {
4788a993db88SJose Abreu 			dev_info(priv->device, "Using %d bits DMA width\n",
4789a993db88SJose Abreu 				 priv->dma_cap.addr64);
4790968a2978SThierry Reding 
4791968a2978SThierry Reding 			/*
4792968a2978SThierry Reding 			 * If more than 32 bits can be addressed, make sure to
4793968a2978SThierry Reding 			 * enable enhanced addressing mode.
4794968a2978SThierry Reding 			 */
4795968a2978SThierry Reding 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
4796968a2978SThierry Reding 				priv->plat->dma_cfg->eame = true;
4797a993db88SJose Abreu 		} else {
4798a993db88SJose Abreu 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
4799a993db88SJose Abreu 			if (ret) {
4800a993db88SJose Abreu 				dev_err(priv->device, "Failed to set DMA Mask\n");
4801a993db88SJose Abreu 				goto error_hw_init;
4802a993db88SJose Abreu 			}
4803a993db88SJose Abreu 
4804a993db88SJose Abreu 			priv->dma_cap.addr64 = 32;
4805a993db88SJose Abreu 		}
4806a993db88SJose Abreu 	}
4807a993db88SJose Abreu 
4808bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4809bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
48107ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
48117ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
4812ab188e8fSElad Nachman 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
48133cd1cfcbSJose Abreu 	if (priv->dma_cap.vlhash) {
48143cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
48153cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
48163cd1cfcbSJose Abreu 	}
481730d93227SJose Abreu 	if (priv->dma_cap.vlins) {
481830d93227SJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
481930d93227SJose Abreu 		if (priv->dma_cap.dvlan)
482030d93227SJose Abreu 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
482130d93227SJose Abreu 	}
48227ac6653aSJeff Kirsher #endif
48237ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
48247ac6653aSJeff Kirsher 
482576067459SJose Abreu 	/* Initialize RSS */
482676067459SJose Abreu 	rxq = priv->plat->rx_queues_to_use;
482776067459SJose Abreu 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
482876067459SJose Abreu 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
482976067459SJose Abreu 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
483076067459SJose Abreu 
483176067459SJose Abreu 	if (priv->dma_cap.rssen && priv->plat->rss_en)
483276067459SJose Abreu 		ndev->features |= NETIF_F_RXHASH;
483376067459SJose Abreu 
483444770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
483544770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
483656bcd591SJose Abreu 	if (priv->plat->has_xgmac)
48377d9e6c5aSJose Abreu 		ndev->max_mtu = XGMAC_JUMBO_LEN;
483856bcd591SJose Abreu 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
483956bcd591SJose Abreu 		ndev->max_mtu = JUMBO_LEN;
484044770e11SJarod Wilson 	else
484144770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4842a2cd64f3SKweh, Hock Leong 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4843a2cd64f3SKweh, Hock Leong 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4844a2cd64f3SKweh, Hock Leong 	 */
4845a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4846a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
484744770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
4848a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
4849b618ab45SHeiner Kallweit 		dev_warn(priv->device,
4850a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu having invalid value (%d)\n",
4851a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
485244770e11SJarod Wilson 
48537ac6653aSJeff Kirsher 	if (flow_ctrl)
48547ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
48557ac6653aSJeff Kirsher 
48568fce3331SJose Abreu 	/* Setup channels NAPI */
48578fce3331SJose Abreu 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4858c22a3f48SJoao Pinto 
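	/* A channel groups the RX and TX queues that share the same index;
	 * each direction gets its own NAPI instance.
	 */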
48598fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
48608fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
48618fce3331SJose Abreu 
4862021bd5e3SJose Abreu 		spin_lock_init(&ch->lock);
48638fce3331SJose Abreu 		ch->priv_data = priv;
48648fce3331SJose Abreu 		ch->index = queue;
48658fce3331SJose Abreu 
48664ccb4585SJose Abreu 		if (queue < priv->plat->rx_queues_to_use) {
48674ccb4585SJose Abreu 			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
48688fce3331SJose Abreu 				       NAPI_POLL_WEIGHT);
4869c22a3f48SJoao Pinto 		}
48704ccb4585SJose Abreu 		if (queue < priv->plat->tx_queues_to_use) {
48714d97972bSFrode Isaksen 			netif_tx_napi_add(ndev, &ch->tx_napi,
48724d97972bSFrode Isaksen 					  stmmac_napi_poll_tx,
48734ccb4585SJose Abreu 					  NAPI_POLL_WEIGHT);
48744ccb4585SJose Abreu 		}
48754ccb4585SJose Abreu 	}
48767ac6653aSJeff Kirsher 
487729555fa3SThierry Reding 	mutex_init(&priv->lock);
48787ac6653aSJeff Kirsher 
4879cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform,
4880cd7201f4SGiuseppe CAVALLARO 	 * the CSR Clock Range selection cannot be changed at run-time
4881cd7201f4SGiuseppe CAVALLARO 	 * and it is fixed. Otherwise the driver will try to set the
4882cd7201f4SGiuseppe CAVALLARO 	 * MDC clock dynamically according to the actual CSR clock
4883cd7201f4SGiuseppe CAVALLARO 	 * input.
4884cd7201f4SGiuseppe CAVALLARO 	 */
48855e7f7fc5SBiao Huang 	if (priv->plat->clk_csr >= 0)
4886cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
48875e7f7fc5SBiao Huang 	else
48885e7f7fc5SBiao Huang 		stmmac_clk_csr_set(priv);
4889cd7201f4SGiuseppe CAVALLARO 
4890e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
4891e58bb43fSGiuseppe CAVALLARO 
4892a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
48933fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
48944bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
48954bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
48964bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
4897b618ab45SHeiner Kallweit 			dev_err(priv->device,
489838ddc59dSLABBE Corentin 				"%s: MDIO bus (id: %d) registration failed",
48994bfcbd7aSFrancesco Virlinzi 				__func__, priv->plat->bus_id);
49006a81c26fSViresh Kumar 			goto error_mdio_register;
49014bfcbd7aSFrancesco Virlinzi 		}
4902e58bb43fSGiuseppe CAVALLARO 	}
49034bfcbd7aSFrancesco Virlinzi 
490474371272SJose Abreu 	ret = stmmac_phy_setup(priv);
490574371272SJose Abreu 	if (ret) {
490674371272SJose Abreu 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
490774371272SJose Abreu 		goto error_phy_setup;
490874371272SJose Abreu 	}
490974371272SJose Abreu 
491057016590SFlorian Fainelli 	ret = register_netdev(ndev);
4911b2eb09afSFlorian Fainelli 	if (ret) {
4912b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
491357016590SFlorian Fainelli 			__func__, ret);
4914b2eb09afSFlorian Fainelli 		goto error_netdev_register;
4915b2eb09afSFlorian Fainelli 	}
49167ac6653aSJeff Kirsher 
49175f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
49188d72ab11SGreg Kroah-Hartman 	stmmac_init_fs(ndev);
49195f2b8b62SThierry Reding #endif
49205f2b8b62SThierry Reding 
492157016590SFlorian Fainelli 	return ret;
49227ac6653aSJeff Kirsher 
49236a81c26fSViresh Kumar error_netdev_register:
492474371272SJose Abreu 	phylink_destroy(priv->phylink);
492574371272SJose Abreu error_phy_setup:
4926a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
4927b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4928b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
49297ac6653aSJeff Kirsher error_mdio_register:
49308fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
49318fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
4932c22a3f48SJoao Pinto 
49334ccb4585SJose Abreu 		if (queue < priv->plat->rx_queues_to_use)
49344ccb4585SJose Abreu 			netif_napi_del(&ch->rx_napi);
49354ccb4585SJose Abreu 		if (queue < priv->plat->tx_queues_to_use)
49364ccb4585SJose Abreu 			netif_napi_del(&ch->tx_napi);
4937c22a3f48SJoao Pinto 	}
493862866e98SChen-Yu Tsai error_hw_init:
493934877a15SJose Abreu 	destroy_workqueue(priv->wq);
49407ac6653aSJeff Kirsher 
494115ffac73SJoachim Eastwood 	return ret;
49427ac6653aSJeff Kirsher }
4943b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
49447ac6653aSJeff Kirsher 
49457ac6653aSJeff Kirsher /**
49467ac6653aSJeff Kirsher  * stmmac_dvr_remove
4947f4e7bd81SJoachim Eastwood  * @dev: device pointer
49487ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4949bfab27a1SGiuseppe CAVALLARO  * changes the link status and releases the DMA descriptor rings.
49507ac6653aSJeff Kirsher  */
4951f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev)
49527ac6653aSJeff Kirsher {
4953f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
49547ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
49557ac6653aSJeff Kirsher 
495638ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver", __func__);
49577ac6653aSJeff Kirsher 
4958ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
49597ac6653aSJeff Kirsher 
4960c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
49617ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
49627ac6653aSJeff Kirsher 	unregister_netdev(ndev);
4963474a31e1SAaro Koskinen #ifdef CONFIG_DEBUG_FS
4964474a31e1SAaro Koskinen 	stmmac_exit_fs(ndev);
4965474a31e1SAaro Koskinen #endif
496674371272SJose Abreu 	phylink_destroy(priv->phylink);
4967f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
4968f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
4969f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->pclk);
4970f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->stmmac_clk);
4971a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
49723fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4973e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
497434877a15SJose Abreu 	destroy_workqueue(priv->wq);
497529555fa3SThierry Reding 	mutex_destroy(&priv->lock);
49767ac6653aSJeff Kirsher 
49777ac6653aSJeff Kirsher 	return 0;
49787ac6653aSJeff Kirsher }
4979b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
49807ac6653aSJeff Kirsher 
4981732fdf0eSGiuseppe CAVALLARO /**
4982732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
4983f4e7bd81SJoachim Eastwood  * @dev: device pointer
4984732fdf0eSGiuseppe CAVALLARO  * Description: this function suspends the device; it is called by the
4985732fdf0eSGiuseppe CAVALLARO  * platform driver to stop the network queues, program the PMT register
4986732fdf0eSGiuseppe CAVALLARO  * (for WoL) and clean up and release the driver resources.
4987732fdf0eSGiuseppe CAVALLARO  */
4988f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
49897ac6653aSJeff Kirsher {
4990f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
49917ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
499214b41a29SNicolin Chen 	u32 chan;
49937ac6653aSJeff Kirsher 
49947ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
49957ac6653aSJeff Kirsher 		return 0;
49967ac6653aSJeff Kirsher 
49973e2bf04fSJose Abreu 	phylink_mac_change(priv->phylink, false);
49987ac6653aSJeff Kirsher 
4999134cc4ceSThierry Reding 	mutex_lock(&priv->lock);
500019e13cb2SJose Abreu 
50017ac6653aSJeff Kirsher 	netif_device_detach(ndev);
5002c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
50037ac6653aSJeff Kirsher 
5004c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
50057ac6653aSJeff Kirsher 
500614b41a29SNicolin Chen 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
500714b41a29SNicolin Chen 		del_timer_sync(&priv->tx_queue[chan].txtimer);
500814b41a29SNicolin Chen 
50097ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
5010ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
5011c24602efSGiuseppe CAVALLARO 
50127ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
501389f7f2cfSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
5014c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, priv->wolopts);
501589f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
501689f7f2cfSSrinivas Kandagatla 	} else {
5017134cc4ceSThierry Reding 		mutex_unlock(&priv->lock);
50183e2bf04fSJose Abreu 		rtnl_lock();
50193e2bf04fSJose Abreu 		phylink_stop(priv->phylink);
50203e2bf04fSJose Abreu 		rtnl_unlock();
5021134cc4ceSThierry Reding 		mutex_lock(&priv->lock);
50223e2bf04fSJose Abreu 
5023c10d4c82SJose Abreu 		stmmac_mac_set(priv, priv->ioaddr, false);
5024db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
5025ba1377ffSGiuseppe CAVALLARO 		/* Disable the clocks when PMT wake-up is not in use */
5026e497c20eSBiao Huang 		if (priv->plat->clk_ptp_ref)
5027e497c20eSBiao Huang 			clk_disable_unprepare(priv->plat->clk_ptp_ref);
5028e497c20eSBiao Huang 		clk_disable_unprepare(priv->plat->pclk);
5029e497c20eSBiao Huang 		clk_disable_unprepare(priv->plat->stmmac_clk);
5030ba1377ffSGiuseppe CAVALLARO 	}
503129555fa3SThierry Reding 	mutex_unlock(&priv->lock);
50322d871aa0SVince Bridgers 
5033bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
50347ac6653aSJeff Kirsher 	return 0;
50357ac6653aSJeff Kirsher }
5036b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
50377ac6653aSJeff Kirsher 
5038732fdf0eSGiuseppe CAVALLARO /**
503954139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
504054139cf3SJoao Pinto  * @priv: driver private structure
504154139cf3SJoao Pinto  */
504254139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
504354139cf3SJoao Pinto {
504454139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5045ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
504654139cf3SJoao Pinto 	u32 queue;
504754139cf3SJoao Pinto 
504854139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
504954139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
505054139cf3SJoao Pinto 
505154139cf3SJoao Pinto 		rx_q->cur_rx = 0;
505254139cf3SJoao Pinto 		rx_q->dirty_rx = 0;
505354139cf3SJoao Pinto 	}
505454139cf3SJoao Pinto 
5055ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
5056ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5057ce736788SJoao Pinto 
5058ce736788SJoao Pinto 		tx_q->cur_tx = 0;
5059ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
50608d212a9eSNiklas Cassel 		tx_q->mss = 0;
5061ce736788SJoao Pinto 	}
506254139cf3SJoao Pinto }
506354139cf3SJoao Pinto 
506454139cf3SJoao Pinto /**
5065732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
5066f4e7bd81SJoachim Eastwood  * @dev: device pointer
5067732fdf0eSGiuseppe CAVALLARO  * Description: on resume this function is invoked to set up the DMA and the
5068732fdf0eSGiuseppe CAVALLARO  * core in a usable state.
5069732fdf0eSGiuseppe CAVALLARO  */
5070f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
50717ac6653aSJeff Kirsher {
5072f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
50737ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
50747ac6653aSJeff Kirsher 
50757ac6653aSJeff Kirsher 	if (!netif_running(ndev))
50767ac6653aSJeff Kirsher 		return 0;
50777ac6653aSJeff Kirsher 
50787ac6653aSJeff Kirsher 	/* The Power-Down bit in the PMT register is cleared
50797ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
50807ac6653aSJeff Kirsher 	 * is received. It is still better to clear this bit manually
50817ac6653aSJeff Kirsher 	 * because it can cause problems while resuming
5082ceb69499SGiuseppe CAVALLARO 	 * from other devices (e.g. serial console).
5083ceb69499SGiuseppe CAVALLARO 	 */
5084623997fbSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
508529555fa3SThierry Reding 		mutex_lock(&priv->lock);
5086c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, 0);
508729555fa3SThierry Reding 		mutex_unlock(&priv->lock);
508889f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
5089623997fbSSrinivas Kandagatla 	} else {
5090db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
50918d45e42bSLABBE Corentin 		/* enable the clk previously disabled */
5092e497c20eSBiao Huang 		clk_prepare_enable(priv->plat->stmmac_clk);
5093e497c20eSBiao Huang 		clk_prepare_enable(priv->plat->pclk);
5094e497c20eSBiao Huang 		if (priv->plat->clk_ptp_ref)
5095e497c20eSBiao Huang 			clk_prepare_enable(priv->plat->clk_ptp_ref);
5096623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
5097623997fbSSrinivas Kandagatla 		if (priv->mii)
5098623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
5099623997fbSSrinivas Kandagatla 	}
51007ac6653aSJeff Kirsher 
51017ac6653aSJeff Kirsher 	netif_device_attach(ndev);
51027ac6653aSJeff Kirsher 
510329555fa3SThierry Reding 	mutex_lock(&priv->lock);
5104f55d84b0SVincent Palatin 
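	/* Restart from a clean state: reset the ring indices, clear the
	 * descriptors and reprogram the MAC/DMA before re-enabling the
	 * queues.
	 */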
510554139cf3SJoao Pinto 	stmmac_reset_queues_param(priv);
510654139cf3SJoao Pinto 
5107ae79a639SGiuseppe CAVALLARO 	stmmac_clear_descriptors(priv);
5108ae79a639SGiuseppe CAVALLARO 
5109fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
5110d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
5111ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
51127ac6653aSJeff Kirsher 
5113c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
51147ac6653aSJeff Kirsher 
5115c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
51167ac6653aSJeff Kirsher 
5117134cc4ceSThierry Reding 	mutex_unlock(&priv->lock);
5118134cc4ceSThierry Reding 
51193e2bf04fSJose Abreu 	if (!device_may_wakeup(priv->device)) {
512019e13cb2SJose Abreu 		rtnl_lock();
512174371272SJose Abreu 		phylink_start(priv->phylink);
512219e13cb2SJose Abreu 		rtnl_unlock();
51233e2bf04fSJose Abreu 	}
512419e13cb2SJose Abreu 
51253e2bf04fSJose Abreu 	phylink_mac_change(priv->phylink, true);
5126102463b1SFrancesco Virlinzi 
51277ac6653aSJeff Kirsher 	return 0;
51287ac6653aSJeff Kirsher }
5129b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
5130ba27ec66SGiuseppe CAVALLARO 
51317ac6653aSJeff Kirsher #ifndef MODULE
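/* Parse the "stmmaceth=" kernel command line option: a comma-separated
 * list of "key:value" pairs, e.g.
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000
 * The supported keys are the ones matched with strncmp() below.
 */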
51327ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
51337ac6653aSJeff Kirsher {
51347ac6653aSJeff Kirsher 	char *opt;
51357ac6653aSJeff Kirsher 
51367ac6653aSJeff Kirsher 	if (!str || !*str)
51377ac6653aSJeff Kirsher 		return -EINVAL;
51387ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
51397ac6653aSJeff Kirsher 		if (!strncmp(opt, "debug:", 6)) {
5140ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
51417ac6653aSJeff Kirsher 				goto err;
51427ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "phyaddr:", 8)) {
5143ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
51447ac6653aSJeff Kirsher 				goto err;
51457ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "buf_sz:", 7)) {
5146ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
51477ac6653aSJeff Kirsher 				goto err;
51487ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "tc:", 3)) {
5149ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
51507ac6653aSJeff Kirsher 				goto err;
51517ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "watchdog:", 9)) {
5152ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
51537ac6653aSJeff Kirsher 				goto err;
51547ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
5155ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
51567ac6653aSJeff Kirsher 				goto err;
51577ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "pause:", 6)) {
5158ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
51597ac6653aSJeff Kirsher 				goto err;
5160506f669cSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "eee_timer:", 10)) {
5161d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
5162d765955dSGiuseppe CAVALLARO 				goto err;
51634a7d666aSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "chain_mode:", 11)) {
51644a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
51654a7d666aSGiuseppe CAVALLARO 				goto err;
51667ac6653aSJeff Kirsher 		}
51677ac6653aSJeff Kirsher 	}
51687ac6653aSJeff Kirsher 	return 0;
51697ac6653aSJeff Kirsher 
51707ac6653aSJeff Kirsher err:
51717ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion", __func__);
51727ac6653aSJeff Kirsher 	return -EINVAL;
51737ac6653aSJeff Kirsher }
51747ac6653aSJeff Kirsher 
51757ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
5176ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
51776fc0d0f2SGiuseppe Cavallaro 
5178466c5ac8SMathieu Olivari static int __init stmmac_init(void)
5179466c5ac8SMathieu Olivari {
5180466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
5181466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
51828d72ab11SGreg Kroah-Hartman 	if (!stmmac_fs_dir)
5183466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
5184474a31e1SAaro Koskinen 	register_netdevice_notifier(&stmmac_notifier);
5185466c5ac8SMathieu Olivari #endif
5186466c5ac8SMathieu Olivari 
5187466c5ac8SMathieu Olivari 	return 0;
5188466c5ac8SMathieu Olivari }
5189466c5ac8SMathieu Olivari 
5190466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
5191466c5ac8SMathieu Olivari {
5192466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
5193474a31e1SAaro Koskinen 	unregister_netdevice_notifier(&stmmac_notifier);
5194466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
5195466c5ac8SMathieu Olivari #endif
5196466c5ac8SMathieu Olivari }
5197466c5ac8SMathieu Olivari 
5198466c5ac8SMathieu Olivari module_init(stmmac_init)
5199466c5ac8SMathieu Olivari module_exit(stmmac_exit)
5200466c5ac8SMathieu Olivari 
52016fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
52026fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
52036fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
5204