14fa9c49fSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
27ac6653aSJeff Kirsher /*******************************************************************************
37ac6653aSJeff Kirsher   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
47ac6653aSJeff Kirsher   ST Ethernet IPs are built around a Synopsys IP Core.
57ac6653aSJeff Kirsher 
6286a8372SGiuseppe CAVALLARO 	Copyright(C) 2007-2011 STMicroelectronics Ltd
77ac6653aSJeff Kirsher 
87ac6653aSJeff Kirsher 
97ac6653aSJeff Kirsher   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
107ac6653aSJeff Kirsher 
117ac6653aSJeff Kirsher   Documentation available at:
127ac6653aSJeff Kirsher 	http://www.stlinux.com
137ac6653aSJeff Kirsher   Support available at:
147ac6653aSJeff Kirsher 	https://bugzilla.stlinux.com/
157ac6653aSJeff Kirsher *******************************************************************************/
167ac6653aSJeff Kirsher 
176a81c26fSViresh Kumar #include <linux/clk.h>
187ac6653aSJeff Kirsher #include <linux/kernel.h>
197ac6653aSJeff Kirsher #include <linux/interrupt.h>
207ac6653aSJeff Kirsher #include <linux/ip.h>
217ac6653aSJeff Kirsher #include <linux/tcp.h>
227ac6653aSJeff Kirsher #include <linux/skbuff.h>
237ac6653aSJeff Kirsher #include <linux/ethtool.h>
247ac6653aSJeff Kirsher #include <linux/if_ether.h>
257ac6653aSJeff Kirsher #include <linux/crc32.h>
267ac6653aSJeff Kirsher #include <linux/mii.h>
2701789349SJiri Pirko #include <linux/if.h>
287ac6653aSJeff Kirsher #include <linux/if_vlan.h>
297ac6653aSJeff Kirsher #include <linux/dma-mapping.h>
307ac6653aSJeff Kirsher #include <linux/slab.h>
317ac6653aSJeff Kirsher #include <linux/prefetch.h>
32db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h>
3350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
347ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h>
357ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h>
3650fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
37891434b1SRayagond Kokatanur #include <linux/net_tstamp.h>
38eeef2f6bSJose Abreu #include <linux/phylink.h>
39b7766206SJose Abreu #include <linux/udp.h>
404dbbe8ddSJose Abreu #include <net/pkt_cls.h>
41891434b1SRayagond Kokatanur #include "stmmac_ptp.h"
42286a8372SGiuseppe CAVALLARO #include "stmmac.h"
43c5e4ddbdSChen-Yu Tsai #include <linux/reset.h>
445790cf3cSMathieu Olivari #include <linux/of_mdio.h>
4519d857c9SPhil Reid #include "dwmac1000.h"
467d9e6c5aSJose Abreu #include "dwxgmac2.h"
4742de047dSJose Abreu #include "hwif.h"
487ac6653aSJeff Kirsher 
#define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000	/* default TX watchdog timeout, msec */
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;		/* -1 selects default_msg_level below */
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;	/* -1 means auto-detect the PHY address */
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

/* TX/RX servicing thresholds: one quarter of the descriptor ring size */
#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536	/* fits a standard 1500-byte MTU frame */
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
/* Convert an LPI timeout in msec into an absolute jiffies deadline */
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
/* NOTE(review): variable is unsigned int but registered as int — confirm
 * this passes the kernel's module_param type check.
 */
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

/* Convert a coalescing interval in usec into an absolute jiffies deadline */
#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
1149125cdd1SGiuseppe CAVALLARO 
1157ac6653aSJeff Kirsher /**
1167ac6653aSJeff Kirsher  * stmmac_verify_args - verify the driver parameters.
117732fdf0eSGiuseppe CAVALLARO  * Description: it checks the driver parameters and set a default in case of
118732fdf0eSGiuseppe CAVALLARO  * errors.
1197ac6653aSJeff Kirsher  */
1207ac6653aSJeff Kirsher static void stmmac_verify_args(void)
1217ac6653aSJeff Kirsher {
1227ac6653aSJeff Kirsher 	if (unlikely(watchdog < 0))
1237ac6653aSJeff Kirsher 		watchdog = TX_TIMEO;
124d916701cSGiuseppe CAVALLARO 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
125d916701cSGiuseppe CAVALLARO 		buf_sz = DEFAULT_BUFSIZE;
1267ac6653aSJeff Kirsher 	if (unlikely(flow_ctrl > 1))
1277ac6653aSJeff Kirsher 		flow_ctrl = FLOW_AUTO;
1287ac6653aSJeff Kirsher 	else if (likely(flow_ctrl < 0))
1297ac6653aSJeff Kirsher 		flow_ctrl = FLOW_OFF;
1307ac6653aSJeff Kirsher 	if (unlikely((pause < 0) || (pause > 0xffff)))
1317ac6653aSJeff Kirsher 		pause = PAUSE_TIME;
132d765955dSGiuseppe CAVALLARO 	if (eee_timer < 0)
133d765955dSGiuseppe CAVALLARO 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
1347ac6653aSJeff Kirsher }
1357ac6653aSJeff Kirsher 
13632ceabcaSGiuseppe CAVALLARO /**
137c22a3f48SJoao Pinto  * stmmac_disable_all_queues - Disable all queues
138c22a3f48SJoao Pinto  * @priv: driver private structure
139c22a3f48SJoao Pinto  */
140c22a3f48SJoao Pinto static void stmmac_disable_all_queues(struct stmmac_priv *priv)
141c22a3f48SJoao Pinto {
142c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
1438fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
1448fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
145c22a3f48SJoao Pinto 	u32 queue;
146c22a3f48SJoao Pinto 
1478fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
1488fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
149c22a3f48SJoao Pinto 
1504ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
1514ccb4585SJose Abreu 			napi_disable(&ch->rx_napi);
1524ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
1534ccb4585SJose Abreu 			napi_disable(&ch->tx_napi);
154c22a3f48SJoao Pinto 	}
155c22a3f48SJoao Pinto }
156c22a3f48SJoao Pinto 
157c22a3f48SJoao Pinto /**
158c22a3f48SJoao Pinto  * stmmac_enable_all_queues - Enable all queues
159c22a3f48SJoao Pinto  * @priv: driver private structure
160c22a3f48SJoao Pinto  */
161c22a3f48SJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162c22a3f48SJoao Pinto {
163c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
1648fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
1658fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
166c22a3f48SJoao Pinto 	u32 queue;
167c22a3f48SJoao Pinto 
1688fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
1698fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
170c22a3f48SJoao Pinto 
1714ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
1724ccb4585SJose Abreu 			napi_enable(&ch->rx_napi);
1734ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
1744ccb4585SJose Abreu 			napi_enable(&ch->tx_napi);
175c22a3f48SJoao Pinto 	}
176c22a3f48SJoao Pinto }
177c22a3f48SJoao Pinto 
178c22a3f48SJoao Pinto /**
179c22a3f48SJoao Pinto  * stmmac_stop_all_queues - Stop all queues
180c22a3f48SJoao Pinto  * @priv: driver private structure
181c22a3f48SJoao Pinto  */
182c22a3f48SJoao Pinto static void stmmac_stop_all_queues(struct stmmac_priv *priv)
183c22a3f48SJoao Pinto {
184c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
185c22a3f48SJoao Pinto 	u32 queue;
186c22a3f48SJoao Pinto 
187c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
188c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
189c22a3f48SJoao Pinto }
190c22a3f48SJoao Pinto 
191c22a3f48SJoao Pinto /**
192c22a3f48SJoao Pinto  * stmmac_start_all_queues - Start all queues
193c22a3f48SJoao Pinto  * @priv: driver private structure
194c22a3f48SJoao Pinto  */
195c22a3f48SJoao Pinto static void stmmac_start_all_queues(struct stmmac_priv *priv)
196c22a3f48SJoao Pinto {
197c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
198c22a3f48SJoao Pinto 	u32 queue;
199c22a3f48SJoao Pinto 
200c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
201c22a3f48SJoao Pinto 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
202c22a3f48SJoao Pinto }
203c22a3f48SJoao Pinto 
/* Queue the deferred service task on priv->wq. The STMMAC_SERVICE_SCHED
 * bit is set with test_and_set_bit() so concurrent callers cannot
 * double-queue the work, and nothing is scheduled while the interface
 * is flagged STMMAC_DOWN. (The bit is presumably cleared by the service
 * worker itself — not visible in this chunk.)
 */
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}
21034877a15SJose Abreu 
/* React to a global/fatal error: drop the carrier, request a reset via
 * the state bit and kick the service task (which presumably performs
 * the actual reset — the handler is not visible in this chunk).
 */
static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}
21734877a15SJose Abreu 
/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Viceversa the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we can not estimate the proper divider as it is not known
	 * the frequency of clk_csr_i. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	/* The two IP-specific blocks below run after — and override — the
	 * generic table above; they encode the divider as raw register
	 * values rather than the STMMAC_CSR_* constants.
	 */
	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}
284cd7201f4SGiuseppe CAVALLARO 
/* Dump @len bytes starting at @buf to the kernel log at debug level. */
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
2907ac6653aSJeff Kirsher 
291ce736788SJoao Pinto static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
2927ac6653aSJeff Kirsher {
293ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
294a6a3e026SLABBE Corentin 	u32 avail;
295e3ad57c9SGiuseppe Cavallaro 
296ce736788SJoao Pinto 	if (tx_q->dirty_tx > tx_q->cur_tx)
297ce736788SJoao Pinto 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
298e3ad57c9SGiuseppe Cavallaro 	else
299ce736788SJoao Pinto 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
300e3ad57c9SGiuseppe Cavallaro 
301e3ad57c9SGiuseppe Cavallaro 	return avail;
302e3ad57c9SGiuseppe Cavallaro }
303e3ad57c9SGiuseppe Cavallaro 
30454139cf3SJoao Pinto /**
30554139cf3SJoao Pinto  * stmmac_rx_dirty - Get RX queue dirty
30654139cf3SJoao Pinto  * @priv: driver private structure
30754139cf3SJoao Pinto  * @queue: RX queue index
30854139cf3SJoao Pinto  */
30954139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
310e3ad57c9SGiuseppe Cavallaro {
31154139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
312a6a3e026SLABBE Corentin 	u32 dirty;
313e3ad57c9SGiuseppe Cavallaro 
31454139cf3SJoao Pinto 	if (rx_q->dirty_rx <= rx_q->cur_rx)
31554139cf3SJoao Pinto 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
316e3ad57c9SGiuseppe Cavallaro 	else
31754139cf3SJoao Pinto 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
318e3ad57c9SGiuseppe Cavallaro 
319e3ad57c9SGiuseppe Cavallaro 	return dirty;
3207ac6653aSJeff Kirsher }
3217ac6653aSJeff Kirsher 
/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode in case of
 * EEE. LPI entry is only requested once every TX queue is completely
 * drained; if any queue still has descriptors in flight it returns
 * without touching the MAC.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}
346d765955dSGiuseppe CAVALLARO 
/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case of
 * LPI state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	/* Leave LPI in hardware first, then stop the SW timer that would
	 * otherwise try to re-enter it.
	 */
	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}
359d765955dSGiuseppe CAVALLARO 
/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t : timer_list hook embedded in the private structure
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state. The timer re-arms
 *  itself every eee_timer msec so the LPI-entry check keeps running.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}
374d765955dSGiuseppe CAVALLARO 
/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enable the LPI state and start related
 *  timer.
 * Return: true if EEE was (or remains) enabled, false otherwise.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int tx_lpi_timer = priv->tx_lpi_timer;

	/* Using PCS we cannot dial with the phy registers at this stage
	 * so we do not support extra feature like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	/* priv->lock serializes timer setup/teardown against other paths */
	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			del_timer_sync(&priv->eee_ctrl_timer);
			/* LS timer of 0 effectively parks the HW EEE timer */
			stmmac_set_eee_timer(priv, priv->hw, 0, tx_lpi_timer);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	/* eee_active is known true here: arm the SW timer on the
	 * inactive -> active transition only.
	 */
	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     tx_lpi_timer);
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}
423d765955dSGiuseppe CAVALLARO 
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read timestamp from the descriptor & pass it to stack.
 * and also perform some sanity checks. If the descriptor carries no
 * timestamp, it falls back to reading one from the MAC itself.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	/* nothing to do if TX timestamping is disabled */
	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		/* fallback: timestamp captured by the MAC, not the desc */
		found = true;
	}

	if (found) {
		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}
463891434b1SRayagond Kokatanur 
/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read received packet's timestamp from the descriptor
 * and pass it to stack. It also perform some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	/* nothing to do if RX timestamping is disabled */
	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available; note the status is always
	 * checked on @p/@np while the value is read from @desc.
	 */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else  {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}
497891434b1SRayagond Kokatanur 
498891434b1SRayagond Kokatanur /**
499d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_set - control hardware timestamping.
500891434b1SRayagond Kokatanur  *  @dev: device pointer.
5018d45e42bSLABBE Corentin  *  @ifr: An IOCTL specific structure, that can contain a pointer to
502891434b1SRayagond Kokatanur  *  a proprietary structure used to pass information to the driver.
503891434b1SRayagond Kokatanur  *  Description:
504891434b1SRayagond Kokatanur  *  This function configures the MAC to enable/disable both outgoing(TX)
505891434b1SRayagond Kokatanur  *  and incoming(RX) packets time stamping based on user input.
506891434b1SRayagond Kokatanur  *  Return Value:
507891434b1SRayagond Kokatanur  *  0 on success and an appropriate -ve integer on failure.
508891434b1SRayagond Kokatanur  */
509d6228b7cSArtem Panfilov static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
510891434b1SRayagond Kokatanur {
511891434b1SRayagond Kokatanur 	struct stmmac_priv *priv = netdev_priv(dev);
512891434b1SRayagond Kokatanur 	struct hwtstamp_config config;
5130a624155SArnd Bergmann 	struct timespec64 now;
514891434b1SRayagond Kokatanur 	u64 temp = 0;
515891434b1SRayagond Kokatanur 	u32 ptp_v2 = 0;
516891434b1SRayagond Kokatanur 	u32 tstamp_all = 0;
517891434b1SRayagond Kokatanur 	u32 ptp_over_ipv4_udp = 0;
518891434b1SRayagond Kokatanur 	u32 ptp_over_ipv6_udp = 0;
519891434b1SRayagond Kokatanur 	u32 ptp_over_ethernet = 0;
520891434b1SRayagond Kokatanur 	u32 snap_type_sel = 0;
521891434b1SRayagond Kokatanur 	u32 ts_master_en = 0;
522891434b1SRayagond Kokatanur 	u32 ts_event_en = 0;
523df103170SNathan Chancellor 	u32 sec_inc = 0;
524891434b1SRayagond Kokatanur 	u32 value = 0;
5257d9e6c5aSJose Abreu 	bool xmac;
5267d9e6c5aSJose Abreu 
5277d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
528891434b1SRayagond Kokatanur 
529891434b1SRayagond Kokatanur 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
530891434b1SRayagond Kokatanur 		netdev_alert(priv->dev, "No support for HW time stamping\n");
531891434b1SRayagond Kokatanur 		priv->hwts_tx_en = 0;
532891434b1SRayagond Kokatanur 		priv->hwts_rx_en = 0;
533891434b1SRayagond Kokatanur 
534891434b1SRayagond Kokatanur 		return -EOPNOTSUPP;
535891434b1SRayagond Kokatanur 	}
536891434b1SRayagond Kokatanur 
537891434b1SRayagond Kokatanur 	if (copy_from_user(&config, ifr->ifr_data,
538d6228b7cSArtem Panfilov 			   sizeof(config)))
539891434b1SRayagond Kokatanur 		return -EFAULT;
540891434b1SRayagond Kokatanur 
54138ddc59dSLABBE Corentin 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
542891434b1SRayagond Kokatanur 		   __func__, config.flags, config.tx_type, config.rx_filter);
543891434b1SRayagond Kokatanur 
544891434b1SRayagond Kokatanur 	/* reserved for future extensions */
545891434b1SRayagond Kokatanur 	if (config.flags)
546891434b1SRayagond Kokatanur 		return -EINVAL;
547891434b1SRayagond Kokatanur 
5485f3da328SBen Hutchings 	if (config.tx_type != HWTSTAMP_TX_OFF &&
5495f3da328SBen Hutchings 	    config.tx_type != HWTSTAMP_TX_ON)
550891434b1SRayagond Kokatanur 		return -ERANGE;
551891434b1SRayagond Kokatanur 
552891434b1SRayagond Kokatanur 	if (priv->adv_ts) {
553891434b1SRayagond Kokatanur 		switch (config.rx_filter) {
554891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_NONE:
555ceb69499SGiuseppe CAVALLARO 			/* time stamp no incoming packet at all */
556891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_NONE;
557891434b1SRayagond Kokatanur 			break;
558891434b1SRayagond Kokatanur 
559891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
560ceb69499SGiuseppe CAVALLARO 			/* PTP v1, UDP, any kind of event packet */
561891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
5627d8e249fSIlias Apalodimas 			/* 'xmac' hardware can support Sync, Pdelay_Req and
5637d8e249fSIlias Apalodimas 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
5647d8e249fSIlias Apalodimas 			 * This leaves Delay_Req timestamps out.
5657d8e249fSIlias Apalodimas 			 * Enable all events *and* general purpose message
5667d8e249fSIlias Apalodimas 			 * timestamping
5677d8e249fSIlias Apalodimas 			 */
568891434b1SRayagond Kokatanur 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
569891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
570891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
571891434b1SRayagond Kokatanur 			break;
572891434b1SRayagond Kokatanur 
573891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
574ceb69499SGiuseppe CAVALLARO 			/* PTP v1, UDP, Sync packet */
575891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
576891434b1SRayagond Kokatanur 			/* take time stamp for SYNC messages only */
577891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
578891434b1SRayagond Kokatanur 
579891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
580891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
581891434b1SRayagond Kokatanur 			break;
582891434b1SRayagond Kokatanur 
583891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
584ceb69499SGiuseppe CAVALLARO 			/* PTP v1, UDP, Delay_req packet */
585891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
586891434b1SRayagond Kokatanur 			/* take time stamp for Delay_Req messages only */
587891434b1SRayagond Kokatanur 			ts_master_en = PTP_TCR_TSMSTRENA;
588891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
589891434b1SRayagond Kokatanur 
590891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
591891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
592891434b1SRayagond Kokatanur 			break;
593891434b1SRayagond Kokatanur 
594891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
595ceb69499SGiuseppe CAVALLARO 			/* PTP v2, UDP, any kind of event packet */
596891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
597891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
598891434b1SRayagond Kokatanur 			/* take time stamp for all event messages */
599891434b1SRayagond Kokatanur 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
600891434b1SRayagond Kokatanur 
601891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
602891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
603891434b1SRayagond Kokatanur 			break;
604891434b1SRayagond Kokatanur 
605891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
606ceb69499SGiuseppe CAVALLARO 			/* PTP v2, UDP, Sync packet */
607891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
608891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
609891434b1SRayagond Kokatanur 			/* take time stamp for SYNC messages only */
610891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
611891434b1SRayagond Kokatanur 
612891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
613891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
614891434b1SRayagond Kokatanur 			break;
615891434b1SRayagond Kokatanur 
616891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
617ceb69499SGiuseppe CAVALLARO 			/* PTP v2, UDP, Delay_req packet */
618891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
619891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
620891434b1SRayagond Kokatanur 			/* take time stamp for Delay_Req messages only */
621891434b1SRayagond Kokatanur 			ts_master_en = PTP_TCR_TSMSTRENA;
622891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
623891434b1SRayagond Kokatanur 
624891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
625891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
626891434b1SRayagond Kokatanur 			break;
627891434b1SRayagond Kokatanur 
628891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
629ceb69499SGiuseppe CAVALLARO 			/* PTP v2/802.AS1 any layer, any kind of event packet */
630891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
631891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
632891434b1SRayagond Kokatanur 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
63314f34733SJose Abreu 			ts_event_en = PTP_TCR_TSEVNTENA;
634891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
635891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
636891434b1SRayagond Kokatanur 			ptp_over_ethernet = PTP_TCR_TSIPENA;
637891434b1SRayagond Kokatanur 			break;
638891434b1SRayagond Kokatanur 
639891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
640ceb69499SGiuseppe CAVALLARO 			/* PTP v2/802.AS1, any layer, Sync packet */
641891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
642891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
643891434b1SRayagond Kokatanur 			/* take time stamp for SYNC messages only */
644891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
645891434b1SRayagond Kokatanur 
646891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
647891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
648891434b1SRayagond Kokatanur 			ptp_over_ethernet = PTP_TCR_TSIPENA;
649891434b1SRayagond Kokatanur 			break;
650891434b1SRayagond Kokatanur 
651891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
652ceb69499SGiuseppe CAVALLARO 			/* PTP v2/802.AS1, any layer, Delay_req packet */
653891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
654891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
655891434b1SRayagond Kokatanur 			/* take time stamp for Delay_Req messages only */
656891434b1SRayagond Kokatanur 			ts_master_en = PTP_TCR_TSMSTRENA;
657891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
658891434b1SRayagond Kokatanur 
659891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
660891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
661891434b1SRayagond Kokatanur 			ptp_over_ethernet = PTP_TCR_TSIPENA;
662891434b1SRayagond Kokatanur 			break;
663891434b1SRayagond Kokatanur 
664e3412575SMiroslav Lichvar 		case HWTSTAMP_FILTER_NTP_ALL:
665891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_ALL:
666ceb69499SGiuseppe CAVALLARO 			/* time stamp any incoming packet */
667891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_ALL;
668891434b1SRayagond Kokatanur 			tstamp_all = PTP_TCR_TSENALL;
669891434b1SRayagond Kokatanur 			break;
670891434b1SRayagond Kokatanur 
671891434b1SRayagond Kokatanur 		default:
672891434b1SRayagond Kokatanur 			return -ERANGE;
673891434b1SRayagond Kokatanur 		}
674891434b1SRayagond Kokatanur 	} else {
675891434b1SRayagond Kokatanur 		switch (config.rx_filter) {
676891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_NONE:
677891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_NONE;
678891434b1SRayagond Kokatanur 			break;
679891434b1SRayagond Kokatanur 		default:
680891434b1SRayagond Kokatanur 			/* PTP v1, UDP, any kind of event packet */
681891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
682891434b1SRayagond Kokatanur 			break;
683891434b1SRayagond Kokatanur 		}
684891434b1SRayagond Kokatanur 	}
685891434b1SRayagond Kokatanur 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
6865f3da328SBen Hutchings 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
687891434b1SRayagond Kokatanur 
688891434b1SRayagond Kokatanur 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
689cc4c9001SJose Abreu 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
690891434b1SRayagond Kokatanur 	else {
691891434b1SRayagond Kokatanur 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
692891434b1SRayagond Kokatanur 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
693891434b1SRayagond Kokatanur 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
694891434b1SRayagond Kokatanur 			 ts_master_en | snap_type_sel);
695cc4c9001SJose Abreu 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
696891434b1SRayagond Kokatanur 
697891434b1SRayagond Kokatanur 		/* program Sub Second Increment reg */
698cc4c9001SJose Abreu 		stmmac_config_sub_second_increment(priv,
699f573c0b9Sjpinto 				priv->ptpaddr, priv->plat->clk_ptp_rate,
7007d9e6c5aSJose Abreu 				xmac, &sec_inc);
70119d857c9SPhil Reid 		temp = div_u64(1000000000ULL, sec_inc);
702891434b1SRayagond Kokatanur 
7039a8a02c9SJose Abreu 		/* Store sub second increment and flags for later use */
7049a8a02c9SJose Abreu 		priv->sub_second_inc = sec_inc;
7059a8a02c9SJose Abreu 		priv->systime_flags = value;
7069a8a02c9SJose Abreu 
707891434b1SRayagond Kokatanur 		/* calculate default added value:
708891434b1SRayagond Kokatanur 		 * formula is :
709891434b1SRayagond Kokatanur 		 * addend = (2^32)/freq_div_ratio;
71019d857c9SPhil Reid 		 * where, freq_div_ratio = 1e9ns/sec_inc
711891434b1SRayagond Kokatanur 		 */
71219d857c9SPhil Reid 		temp = (u64)(temp << 32);
713f573c0b9Sjpinto 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
714cc4c9001SJose Abreu 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
715891434b1SRayagond Kokatanur 
716891434b1SRayagond Kokatanur 		/* initialize system time */
7170a624155SArnd Bergmann 		ktime_get_real_ts64(&now);
7180a624155SArnd Bergmann 
7190a624155SArnd Bergmann 		/* lower 32 bits of tv_sec are safe until y2106 */
720cc4c9001SJose Abreu 		stmmac_init_systime(priv, priv->ptpaddr,
721cc4c9001SJose Abreu 				(u32)now.tv_sec, now.tv_nsec);
722891434b1SRayagond Kokatanur 	}
723891434b1SRayagond Kokatanur 
724d6228b7cSArtem Panfilov 	memcpy(&priv->tstamp_config, &config, sizeof(config));
725d6228b7cSArtem Panfilov 
726891434b1SRayagond Kokatanur 	return copy_to_user(ifr->ifr_data, &config,
727d6228b7cSArtem Panfilov 			    sizeof(config)) ? -EFAULT : 0;
728d6228b7cSArtem Panfilov }
729d6228b7cSArtem Panfilov 
730d6228b7cSArtem Panfilov /**
731d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_get - read hardware timestamping.
732d6228b7cSArtem Panfilov  *  @dev: device pointer.
733d6228b7cSArtem Panfilov  *  @ifr: An IOCTL specific structure, that can contain a pointer to
734d6228b7cSArtem Panfilov  *  a proprietary structure used to pass information to the driver.
735d6228b7cSArtem Panfilov  *  Description:
736d6228b7cSArtem Panfilov  *  This function obtain the current hardware timestamping settings
737d6228b7cSArtem Panfilov     as requested.
738d6228b7cSArtem Panfilov  */
739d6228b7cSArtem Panfilov static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
740d6228b7cSArtem Panfilov {
741d6228b7cSArtem Panfilov 	struct stmmac_priv *priv = netdev_priv(dev);
742d6228b7cSArtem Panfilov 	struct hwtstamp_config *config = &priv->tstamp_config;
743d6228b7cSArtem Panfilov 
744d6228b7cSArtem Panfilov 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
745d6228b7cSArtem Panfilov 		return -EOPNOTSUPP;
746d6228b7cSArtem Panfilov 
747d6228b7cSArtem Panfilov 	return copy_to_user(ifr->ifr_data, config,
748d6228b7cSArtem Panfilov 			    sizeof(*config)) ? -EFAULT : 0;
749891434b1SRayagond Kokatanur }
750891434b1SRayagond Kokatanur 
75132ceabcaSGiuseppe CAVALLARO /**
752732fdf0eSGiuseppe CAVALLARO  * stmmac_init_ptp - init PTP
75332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
754732fdf0eSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
75532ceabcaSGiuseppe CAVALLARO  * This is done by looking at the HW cap. register.
756732fdf0eSGiuseppe CAVALLARO  * This function also registers the ptp driver.
75732ceabcaSGiuseppe CAVALLARO  */
75892ba6888SRayagond Kokatanur static int stmmac_init_ptp(struct stmmac_priv *priv)
759891434b1SRayagond Kokatanur {
7607d9e6c5aSJose Abreu 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
7617d9e6c5aSJose Abreu 
76292ba6888SRayagond Kokatanur 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
76392ba6888SRayagond Kokatanur 		return -EOPNOTSUPP;
76492ba6888SRayagond Kokatanur 
765891434b1SRayagond Kokatanur 	priv->adv_ts = 0;
7667d9e6c5aSJose Abreu 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
7677d9e6c5aSJose Abreu 	if (xmac && priv->dma_cap.atime_stamp)
768be9b3174SGiuseppe CAVALLARO 		priv->adv_ts = 1;
769be9b3174SGiuseppe CAVALLARO 	/* Dwmac 3.x core with extend_desc can support adv_ts */
770be9b3174SGiuseppe CAVALLARO 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
771891434b1SRayagond Kokatanur 		priv->adv_ts = 1;
7727cd01399SVince Bridgers 
773be9b3174SGiuseppe CAVALLARO 	if (priv->dma_cap.time_stamp)
774be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
7757cd01399SVince Bridgers 
776be9b3174SGiuseppe CAVALLARO 	if (priv->adv_ts)
777be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev,
778be9b3174SGiuseppe CAVALLARO 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
779891434b1SRayagond Kokatanur 
780891434b1SRayagond Kokatanur 	priv->hwts_tx_en = 0;
781891434b1SRayagond Kokatanur 	priv->hwts_rx_en = 0;
78292ba6888SRayagond Kokatanur 
783c30a70d3SGiuseppe CAVALLARO 	stmmac_ptp_register(priv);
784c30a70d3SGiuseppe CAVALLARO 
785c30a70d3SGiuseppe CAVALLARO 	return 0;
78692ba6888SRayagond Kokatanur }
78792ba6888SRayagond Kokatanur 
/* Tear down PTP support: gate the PTP reference clock (when the platform
 * provided one) and unregister the PTP clock driver.
 */
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	/* The reference clock is optional; only disable it if set up */
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}
794891434b1SRayagond Kokatanur 
7957ac6653aSJeff Kirsher /**
79629feff39SJoao Pinto  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
79729feff39SJoao Pinto  *  @priv: driver private structure
79829feff39SJoao Pinto  *  Description: It is used for configuring the flow control in all queues
79929feff39SJoao Pinto  */
80029feff39SJoao Pinto static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
80129feff39SJoao Pinto {
80229feff39SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
80329feff39SJoao Pinto 
804c10d4c82SJose Abreu 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
80529feff39SJoao Pinto 			priv->pause, tx_cnt);
80629feff39SJoao Pinto }
80729feff39SJoao Pinto 
808eeef2f6bSJose Abreu static void stmmac_validate(struct phylink_config *config,
809eeef2f6bSJose Abreu 			    unsigned long *supported,
810eeef2f6bSJose Abreu 			    struct phylink_link_state *state)
811eeef2f6bSJose Abreu {
812eeef2f6bSJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
8135b0d7d7dSJose Abreu 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
814eeef2f6bSJose Abreu 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
815eeef2f6bSJose Abreu 	int tx_cnt = priv->plat->tx_queues_to_use;
816eeef2f6bSJose Abreu 	int max_speed = priv->plat->max_speed;
817eeef2f6bSJose Abreu 
8185b0d7d7dSJose Abreu 	phylink_set(mac_supported, 10baseT_Half);
8195b0d7d7dSJose Abreu 	phylink_set(mac_supported, 10baseT_Full);
8205b0d7d7dSJose Abreu 	phylink_set(mac_supported, 100baseT_Half);
8215b0d7d7dSJose Abreu 	phylink_set(mac_supported, 100baseT_Full);
822df7699c7SJose Abreu 	phylink_set(mac_supported, 1000baseT_Half);
823df7699c7SJose Abreu 	phylink_set(mac_supported, 1000baseT_Full);
824df7699c7SJose Abreu 	phylink_set(mac_supported, 1000baseKX_Full);
8255b0d7d7dSJose Abreu 
8265b0d7d7dSJose Abreu 	phylink_set(mac_supported, Autoneg);
8275b0d7d7dSJose Abreu 	phylink_set(mac_supported, Pause);
8285b0d7d7dSJose Abreu 	phylink_set(mac_supported, Asym_Pause);
8295b0d7d7dSJose Abreu 	phylink_set_port_modes(mac_supported);
8305b0d7d7dSJose Abreu 
831eeef2f6bSJose Abreu 	/* Cut down 1G if asked to */
832eeef2f6bSJose Abreu 	if ((max_speed > 0) && (max_speed < 1000)) {
833eeef2f6bSJose Abreu 		phylink_set(mask, 1000baseT_Full);
834eeef2f6bSJose Abreu 		phylink_set(mask, 1000baseX_Full);
8355b0d7d7dSJose Abreu 	} else if (priv->plat->has_xgmac) {
836d9da2c87SJose Abreu 		if (!max_speed || (max_speed >= 2500)) {
8375b0d7d7dSJose Abreu 			phylink_set(mac_supported, 2500baseT_Full);
838d9da2c87SJose Abreu 			phylink_set(mac_supported, 2500baseX_Full);
839d9da2c87SJose Abreu 		}
840d9da2c87SJose Abreu 		if (!max_speed || (max_speed >= 5000)) {
8415b0d7d7dSJose Abreu 			phylink_set(mac_supported, 5000baseT_Full);
842d9da2c87SJose Abreu 		}
843d9da2c87SJose Abreu 		if (!max_speed || (max_speed >= 10000)) {
8445b0d7d7dSJose Abreu 			phylink_set(mac_supported, 10000baseSR_Full);
8455b0d7d7dSJose Abreu 			phylink_set(mac_supported, 10000baseLR_Full);
8465b0d7d7dSJose Abreu 			phylink_set(mac_supported, 10000baseER_Full);
8475b0d7d7dSJose Abreu 			phylink_set(mac_supported, 10000baseLRM_Full);
8485b0d7d7dSJose Abreu 			phylink_set(mac_supported, 10000baseT_Full);
8495b0d7d7dSJose Abreu 			phylink_set(mac_supported, 10000baseKX4_Full);
8505b0d7d7dSJose Abreu 			phylink_set(mac_supported, 10000baseKR_Full);
851eeef2f6bSJose Abreu 		}
852d9da2c87SJose Abreu 	}
853eeef2f6bSJose Abreu 
854eeef2f6bSJose Abreu 	/* Half-Duplex can only work with single queue */
855eeef2f6bSJose Abreu 	if (tx_cnt > 1) {
856eeef2f6bSJose Abreu 		phylink_set(mask, 10baseT_Half);
857eeef2f6bSJose Abreu 		phylink_set(mask, 100baseT_Half);
858eeef2f6bSJose Abreu 		phylink_set(mask, 1000baseT_Half);
859eeef2f6bSJose Abreu 	}
860eeef2f6bSJose Abreu 
8615b0d7d7dSJose Abreu 	bitmap_and(supported, supported, mac_supported,
8625b0d7d7dSJose Abreu 		   __ETHTOOL_LINK_MODE_MASK_NBITS);
8635b0d7d7dSJose Abreu 	bitmap_andnot(supported, supported, mask,
8645b0d7d7dSJose Abreu 		      __ETHTOOL_LINK_MODE_MASK_NBITS);
8655b0d7d7dSJose Abreu 	bitmap_and(state->advertising, state->advertising, mac_supported,
8665b0d7d7dSJose Abreu 		   __ETHTOOL_LINK_MODE_MASK_NBITS);
867eeef2f6bSJose Abreu 	bitmap_andnot(state->advertising, state->advertising, mask,
868eeef2f6bSJose Abreu 		      __ETHTOOL_LINK_MODE_MASK_NBITS);
869eeef2f6bSJose Abreu }
870eeef2f6bSJose Abreu 
/* phylink callback: reading the link state back from the MAC is not
 * supported by this driver.
 */
static int stmmac_mac_link_state(struct phylink_config *config,
				 struct phylink_link_state *state)
{
	return -EOPNOTSUPP;
}
876eeef2f6bSJose Abreu 
/* phylink callback: program the MAC control register for the resolved
 * link parameters (speed, duplex, pause) carried in @state.
 */
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	/* Read-modify-write: start from the current control register with
	 * the speed selection bits cleared.
	 */
	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
		/* USXGMII uses its own set of speed encodings */
		switch (state->speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			/* Unsupported speed: leave the MAC untouched */
			return;
		}
	} else {
		switch (state->speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			/* Unsupported speed: leave the MAC untouched */
			return;
		}
	}

	priv->speed = state->speed;

	/* Let the platform glue retune clocks for the new speed, if needed */
	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, state->speed);

	if (!state->duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (state->pause)
		stmmac_mac_flow_ctrl(priv, state->duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
}
9359ad372fcSJose Abreu 
/* phylink callback: restarting auto-negotiation from the MAC side is
 * not supported; intentionally a no-op.
 */
static void stmmac_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}
940eeef2f6bSJose Abreu 
/* phylink callback: the link went down. Disable the MAC and tear down
 * the Energy Efficient Ethernet (EEE) state.
 */
static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	/* Drop EEE: mark it inactive, re-run init, and clear the PLS bit */
	priv->eee_active = false;
	stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);
}
9519ad372fcSJose Abreu 
/* phylink callback: the link came up. Enable the MAC and, when both the
 * attached PHY and the DMA capabilities allow it, negotiate EEE.
 */
static void stmmac_mac_link_up(struct phylink_config *config,
			       unsigned int mode, phy_interface_t interface,
			       struct phy_device *phy)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		/* phy_init_eee() >= 0 means the PHY agreed to do EEE */
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		stmmac_set_eee_pls(priv, priv->hw, true);
	}
}
9659ad372fcSJose Abreu 
/* MAC-side operations wired into the phylink framework */
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_link_state = stmmac_mac_link_state,
	.mac_config = stmmac_mac_config,
	.mac_an_restart = stmmac_mac_an_restart,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};
974eeef2f6bSJose Abreu 
97529feff39SJoao Pinto /**
976732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
97732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
97832ceabcaSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PCS.
97932ceabcaSGiuseppe CAVALLARO  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
98032ceabcaSGiuseppe CAVALLARO  * configured for the TBI, RTBI, or SGMII PHY interface.
98132ceabcaSGiuseppe CAVALLARO  */
982e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
983e58bb43fSGiuseppe CAVALLARO {
984e58bb43fSGiuseppe CAVALLARO 	int interface = priv->plat->interface;
985e58bb43fSGiuseppe CAVALLARO 
986e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
9870d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
9880d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
9890d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
9900d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
99138ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
9923fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
9930d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
99438ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
9953fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
996e58bb43fSGiuseppe CAVALLARO 		}
997e58bb43fSGiuseppe CAVALLARO 	}
998e58bb43fSGiuseppe CAVALLARO }
999e58bb43fSGiuseppe CAVALLARO 
10007ac6653aSJeff Kirsher /**
10017ac6653aSJeff Kirsher  * stmmac_init_phy - PHY initialization
10027ac6653aSJeff Kirsher  * @dev: net device structure
10037ac6653aSJeff Kirsher  * Description: it initializes the driver's PHY state, and attaches the PHY
10047ac6653aSJeff Kirsher  * to the mac driver.
10057ac6653aSJeff Kirsher  *  Return value:
10067ac6653aSJeff Kirsher  *  0 on success
10077ac6653aSJeff Kirsher  */
10087ac6653aSJeff Kirsher static int stmmac_init_phy(struct net_device *dev)
10097ac6653aSJeff Kirsher {
10107ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
101174371272SJose Abreu 	struct device_node *node;
101274371272SJose Abreu 	int ret;
10137ac6653aSJeff Kirsher 
10144838a540SJose Abreu 	node = priv->plat->phylink_node;
101574371272SJose Abreu 
101642e87024SJose Abreu 	if (node)
101774371272SJose Abreu 		ret = phylink_of_phy_connect(priv->phylink, node, 0);
101842e87024SJose Abreu 
101942e87024SJose Abreu 	/* Some DT bindings do not set-up the PHY handle. Let's try to
102042e87024SJose Abreu 	 * manually parse it
102142e87024SJose Abreu 	 */
102242e87024SJose Abreu 	if (!node || ret) {
102374371272SJose Abreu 		int addr = priv->plat->phy_addr;
102474371272SJose Abreu 		struct phy_device *phydev;
1025f142af2eSSrinivas Kandagatla 
102674371272SJose Abreu 		phydev = mdiobus_get_phy(priv->mii, addr);
102774371272SJose Abreu 		if (!phydev) {
102874371272SJose Abreu 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
10297ac6653aSJeff Kirsher 			return -ENODEV;
10307ac6653aSJeff Kirsher 		}
10318e99fc5fSGiuseppe Cavallaro 
103274371272SJose Abreu 		ret = phylink_connect_phy(priv->phylink, phydev);
103374371272SJose Abreu 	}
1034c51e424dSFlorian Fainelli 
103574371272SJose Abreu 	return ret;
103674371272SJose Abreu }
103774371272SJose Abreu 
103874371272SJose Abreu static int stmmac_phy_setup(struct stmmac_priv *priv)
103974371272SJose Abreu {
1040c63d1e5cSArnd Bergmann 	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
10410060c878SAlexandru Ardelean 	int mode = priv->plat->phy_interface;
104274371272SJose Abreu 	struct phylink *phylink;
104374371272SJose Abreu 
104474371272SJose Abreu 	priv->phylink_config.dev = &priv->dev->dev;
104574371272SJose Abreu 	priv->phylink_config.type = PHYLINK_NETDEV;
104674371272SJose Abreu 
1047c63d1e5cSArnd Bergmann 	phylink = phylink_create(&priv->phylink_config, fwnode,
104874371272SJose Abreu 				 mode, &stmmac_phylink_mac_ops);
104974371272SJose Abreu 	if (IS_ERR(phylink))
105074371272SJose Abreu 		return PTR_ERR(phylink);
105174371272SJose Abreu 
105274371272SJose Abreu 	priv->phylink = phylink;
10537ac6653aSJeff Kirsher 	return 0;
10547ac6653aSJeff Kirsher }
10557ac6653aSJeff Kirsher 
105671fedb01SJoao Pinto static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1057c24602efSGiuseppe CAVALLARO {
105854139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
105971fedb01SJoao Pinto 	void *head_rx;
106054139cf3SJoao Pinto 	u32 queue;
106154139cf3SJoao Pinto 
106254139cf3SJoao Pinto 	/* Display RX rings */
106354139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
106454139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
106554139cf3SJoao Pinto 
106654139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1067d0225e7dSAlexandre TORGUE 
106871fedb01SJoao Pinto 		if (priv->extend_desc)
106954139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
107071fedb01SJoao Pinto 		else
107154139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
107271fedb01SJoao Pinto 
107371fedb01SJoao Pinto 		/* Display RX ring */
107442de047dSJose Abreu 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
10755bacd778SLABBE Corentin 	}
107654139cf3SJoao Pinto }
1077d0225e7dSAlexandre TORGUE 
107871fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv)
107971fedb01SJoao Pinto {
1080ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
108171fedb01SJoao Pinto 	void *head_tx;
1082ce736788SJoao Pinto 	u32 queue;
1083ce736788SJoao Pinto 
1084ce736788SJoao Pinto 	/* Display TX rings */
1085ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1086ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1087ce736788SJoao Pinto 
1088ce736788SJoao Pinto 		pr_info("\tTX Queue %d rings\n", queue);
108971fedb01SJoao Pinto 
109071fedb01SJoao Pinto 		if (priv->extend_desc)
1091ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
109271fedb01SJoao Pinto 		else
1093ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
109471fedb01SJoao Pinto 
109542de047dSJose Abreu 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1096c24602efSGiuseppe CAVALLARO 	}
1097ce736788SJoao Pinto }
1098c24602efSGiuseppe CAVALLARO 
/* Dump both the RX and TX descriptor rings of every queue */
static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}
110771fedb01SJoao Pinto 
1108286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1109286a8372SGiuseppe CAVALLARO {
1110286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1111286a8372SGiuseppe CAVALLARO 
1112286a8372SGiuseppe CAVALLARO 	if (mtu >= BUF_SIZE_4KiB)
1113286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1114286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1115286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1116d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1117286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1118286a8372SGiuseppe CAVALLARO 	else
1119d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1120286a8372SGiuseppe CAVALLARO 
1121286a8372SGiuseppe CAVALLARO 	return ret;
1122286a8372SGiuseppe CAVALLARO }
1123286a8372SGiuseppe CAVALLARO 
112432ceabcaSGiuseppe CAVALLARO /**
112571fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
112632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
112754139cf3SJoao Pinto  * @queue: RX queue index
112871fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors
112932ceabcaSGiuseppe CAVALLARO  * in case of both basic and extended descriptors are used.
113032ceabcaSGiuseppe CAVALLARO  */
113154139cf3SJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1132c24602efSGiuseppe CAVALLARO {
113354139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
11345bacd778SLABBE Corentin 	int i;
1135c24602efSGiuseppe CAVALLARO 
113671fedb01SJoao Pinto 	/* Clear the RX descriptors */
11375bacd778SLABBE Corentin 	for (i = 0; i < DMA_RX_SIZE; i++)
11385bacd778SLABBE Corentin 		if (priv->extend_desc)
113942de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
11405bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1141583e6361SAaro Koskinen 					(i == DMA_RX_SIZE - 1),
1142583e6361SAaro Koskinen 					priv->dma_buf_sz);
11435bacd778SLABBE Corentin 		else
114442de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
11455bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1146583e6361SAaro Koskinen 					(i == DMA_RX_SIZE - 1),
1147583e6361SAaro Koskinen 					priv->dma_buf_sz);
114871fedb01SJoao Pinto }
114971fedb01SJoao Pinto 
115071fedb01SJoao Pinto /**
115171fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
115271fedb01SJoao Pinto  * @priv: driver private structure
1153ce736788SJoao Pinto  * @queue: TX queue index.
115471fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors
115571fedb01SJoao Pinto  * in case of both basic and extended descriptors are used.
115671fedb01SJoao Pinto  */
1157ce736788SJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
115871fedb01SJoao Pinto {
1159ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
116071fedb01SJoao Pinto 	int i;
116171fedb01SJoao Pinto 
116271fedb01SJoao Pinto 	/* Clear the TX descriptors */
11635bacd778SLABBE Corentin 	for (i = 0; i < DMA_TX_SIZE; i++)
11645bacd778SLABBE Corentin 		if (priv->extend_desc)
116542de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
116642de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
11675bacd778SLABBE Corentin 		else
116842de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
116942de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1170c24602efSGiuseppe CAVALLARO }
1171c24602efSGiuseppe CAVALLARO 
1172732fdf0eSGiuseppe CAVALLARO /**
117371fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
117471fedb01SJoao Pinto  * @priv: driver private structure
117571fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors
117671fedb01SJoao Pinto  * in case of both basic and extended descriptors are used.
117771fedb01SJoao Pinto  */
117871fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv)
117971fedb01SJoao Pinto {
118054139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1181ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
118254139cf3SJoao Pinto 	u32 queue;
118354139cf3SJoao Pinto 
118471fedb01SJoao Pinto 	/* Clear the RX descriptors */
118554139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
118654139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
118771fedb01SJoao Pinto 
118871fedb01SJoao Pinto 	/* Clear the TX descriptors */
1189ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1190ce736788SJoao Pinto 		stmmac_clear_tx_descriptors(priv, queue);
119171fedb01SJoao Pinto }
119271fedb01SJoao Pinto 
119371fedb01SJoao Pinto /**
1194732fdf0eSGiuseppe CAVALLARO  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1195732fdf0eSGiuseppe CAVALLARO  * @priv: driver private structure
1196732fdf0eSGiuseppe CAVALLARO  * @p: descriptor pointer
1197732fdf0eSGiuseppe CAVALLARO  * @i: descriptor index
119854139cf3SJoao Pinto  * @flags: gfp flag
119954139cf3SJoao Pinto  * @queue: RX queue index
1200732fdf0eSGiuseppe CAVALLARO  * Description: this function is called to allocate a receive buffer, perform
1201732fdf0eSGiuseppe CAVALLARO  * the DMA mapping and init the descriptor.
1202732fdf0eSGiuseppe CAVALLARO  */
1203c24602efSGiuseppe CAVALLARO static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
120454139cf3SJoao Pinto 				  int i, gfp_t flags, u32 queue)
1205c24602efSGiuseppe CAVALLARO {
120654139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
12072af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1208c24602efSGiuseppe CAVALLARO 
12092af6106aSJose Abreu 	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
12102af6106aSJose Abreu 	if (!buf->page)
121156329137SBartlomiej Zolnierkiewicz 		return -ENOMEM;
1212c24602efSGiuseppe CAVALLARO 
121367afd6d1SJose Abreu 	if (priv->sph) {
121467afd6d1SJose Abreu 		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
121567afd6d1SJose Abreu 		if (!buf->sec_page)
121667afd6d1SJose Abreu 			return -ENOMEM;
121767afd6d1SJose Abreu 
121867afd6d1SJose Abreu 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
121967afd6d1SJose Abreu 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
122067afd6d1SJose Abreu 	} else {
122167afd6d1SJose Abreu 		buf->sec_page = NULL;
122267afd6d1SJose Abreu 	}
122367afd6d1SJose Abreu 
12242af6106aSJose Abreu 	buf->addr = page_pool_get_dma_addr(buf->page);
12252af6106aSJose Abreu 	stmmac_set_desc_addr(priv, p, buf->addr);
12262c520b1cSJose Abreu 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
12272c520b1cSJose Abreu 		stmmac_init_desc3(priv, p);
1228c24602efSGiuseppe CAVALLARO 
1229c24602efSGiuseppe CAVALLARO 	return 0;
1230c24602efSGiuseppe CAVALLARO }
1231c24602efSGiuseppe CAVALLARO 
123271fedb01SJoao Pinto /**
123371fedb01SJoao Pinto  * stmmac_free_rx_buffer - free RX dma buffers
123471fedb01SJoao Pinto  * @priv: private structure
123554139cf3SJoao Pinto  * @queue: RX queue index
123671fedb01SJoao Pinto  * @i: buffer index.
123771fedb01SJoao Pinto  */
123854139cf3SJoao Pinto static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
123956329137SBartlomiej Zolnierkiewicz {
124054139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
12412af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
124254139cf3SJoao Pinto 
12432af6106aSJose Abreu 	if (buf->page)
12442af6106aSJose Abreu 		page_pool_put_page(rx_q->page_pool, buf->page, false);
12452af6106aSJose Abreu 	buf->page = NULL;
124667afd6d1SJose Abreu 
124767afd6d1SJose Abreu 	if (buf->sec_page)
124867afd6d1SJose Abreu 		page_pool_put_page(rx_q->page_pool, buf->sec_page, false);
124967afd6d1SJose Abreu 	buf->sec_page = NULL;
125056329137SBartlomiej Zolnierkiewicz }
125156329137SBartlomiej Zolnierkiewicz 
/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	/* Unmap using the same style (page vs. single) recorded at map time */
	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	/* Release the skb (safe from any context) and reset slot state */
	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}
128271fedb01SJoao Pinto 
/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 * Return: 0 on success, -ENOMEM if an RX buffer allocation fails (all
 * buffers allocated so far are released on the error path).
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	int bfsize = 0;
	int queue;
	int i;

	/* Pick the DMA buffer size: a 16KiB layout if the MTU requires it,
	 * otherwise fall back to an MTU-derived size.
	 */
	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
	if (bfsize < 0)
		bfsize = 0;

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_rx_phy=0x%08x\n", __func__,
			  (u32)rx_q->dma_rx_phy);

		stmmac_clear_rx_descriptors(priv, queue);

		/* Allocate and attach one page-pool buffer per descriptor */
		for (i = 0; i < DMA_RX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			ret = stmmac_init_rx_buffers(priv, p, i, flags,
						     queue);
			if (ret)
				goto err_init_rx_buffers;
		}

		rx_q->cur_rx = 0;
		/* i == DMA_RX_SIZE here, so dirty_rx starts at 0 (wrapped) */
		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, rx_q->dma_erx,
						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
			else
				stmmac_mode_init(priv, rx_q->dma_rx,
						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
		}
	}

	buf_sz = bfsize;

	return 0;

err_init_rx_buffers:
	/* Unwind: free the buffers of the failing queue up to index i,
	 * then every buffer of all fully-initialized lower queues.
	 */
	while (queue >= 0) {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

		i = DMA_RX_SIZE;
		queue--;
	}

	return ret;
}
136871fedb01SJoao Pinto 
/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 * Return: always 0 (no allocations are performed here).
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;
	int i;

	for (queue = 0; queue < tx_queue_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_tx_phy=0x%08x\n", __func__,
			 (u32)tx_q->dma_tx_phy);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, tx_q->dma_etx,
						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
			else
				stmmac_mode_init(priv, tx_q->dma_tx,
						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
		}

		/* Clear each descriptor and reset the per-slot bookkeeping */
		for (i = 0; i < DMA_TX_SIZE; i++) {
			struct dma_desc *p;
			if (priv->extend_desc)
				p = &((tx_q->dma_etx + i)->basic);
			else
				p = tx_q->dma_tx + i;

			stmmac_clear_desc(priv, p);

			tx_q->tx_skbuff_dma[i].buf = 0;
			tx_q->tx_skbuff_dma[i].map_as_page = false;
			tx_q->tx_skbuff_dma[i].len = 0;
			tx_q->tx_skbuff_dma[i].last_segment = false;
			tx_q->tx_skbuff[i] = NULL;
		}

		/* Ring indices and TSO MSS state start from scratch */
		tx_q->dirty_tx = 0;
		tx_q->cur_tx = 0;
		tx_q->mss = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	return 0;
}
142571fedb01SJoao Pinto 
/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 * Return: 0 on success, a negative errno if RX ring init fails.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	/* NOTE(review): the TX result is not checked before the clear/display
	 * below; init_dma_tx_desc_rings currently always returns 0.
	 */
	ret = init_dma_tx_desc_rings(dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}
14527ac6653aSJeff Kirsher 
145371fedb01SJoao Pinto /**
145471fedb01SJoao Pinto  * dma_free_rx_skbufs - free RX dma buffers
145571fedb01SJoao Pinto  * @priv: private structure
145654139cf3SJoao Pinto  * @queue: RX queue index
145771fedb01SJoao Pinto  */
145854139cf3SJoao Pinto static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
14597ac6653aSJeff Kirsher {
14607ac6653aSJeff Kirsher 	int i;
14617ac6653aSJeff Kirsher 
1462e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_RX_SIZE; i++)
146354139cf3SJoao Pinto 		stmmac_free_rx_buffer(priv, queue, i);
14647ac6653aSJeff Kirsher }
14657ac6653aSJeff Kirsher 
146671fedb01SJoao Pinto /**
146771fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
146871fedb01SJoao Pinto  * @priv: private structure
1469ce736788SJoao Pinto  * @queue: TX queue index
147071fedb01SJoao Pinto  */
1471ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
14727ac6653aSJeff Kirsher {
14737ac6653aSJeff Kirsher 	int i;
14747ac6653aSJeff Kirsher 
147571fedb01SJoao Pinto 	for (i = 0; i < DMA_TX_SIZE; i++)
1476ce736788SJoao Pinto 		stmmac_free_tx_buffer(priv, queue, i);
14777ac6653aSJeff Kirsher }
14787ac6653aSJeff Kirsher 
/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 */
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	/* Free RX queue resources */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		/* Release the DMA RX socket buffers */
		dma_free_rx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_RX_SIZE * sizeof(struct dma_desc),
					  rx_q->dma_rx, rx_q->dma_rx_phy);
		else
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  rx_q->dma_erx, rx_q->dma_rx_phy);

		/* Drop the buffer bookkeeping array, then shut down and
		 * destroy the page pool (pool may be NULL if allocation of
		 * this queue's resources failed part-way).
		 */
		kfree(rx_q->buf_pool);
		if (rx_q->page_pool) {
			page_pool_request_shutdown(rx_q->page_pool);
			page_pool_destroy(rx_q->page_pool);
		}
	}
}
151254139cf3SJoao Pinto 
151354139cf3SJoao Pinto /**
1514ce736788SJoao Pinto  * free_dma_tx_desc_resources - free TX dma desc resources
1515ce736788SJoao Pinto  * @priv: private structure
1516ce736788SJoao Pinto  */
1517ce736788SJoao Pinto static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1518ce736788SJoao Pinto {
1519ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
152062242260SChristophe Jaillet 	u32 queue;
1521ce736788SJoao Pinto 
1522ce736788SJoao Pinto 	/* Free TX queue resources */
1523ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1524ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1525ce736788SJoao Pinto 
1526ce736788SJoao Pinto 		/* Release the DMA TX socket buffers */
1527ce736788SJoao Pinto 		dma_free_tx_skbufs(priv, queue);
1528ce736788SJoao Pinto 
1529ce736788SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
1530ce736788SJoao Pinto 		if (!priv->extend_desc)
1531ce736788SJoao Pinto 			dma_free_coherent(priv->device,
1532ce736788SJoao Pinto 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1533ce736788SJoao Pinto 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1534ce736788SJoao Pinto 		else
1535ce736788SJoao Pinto 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1536ce736788SJoao Pinto 					  sizeof(struct dma_extended_desc),
1537ce736788SJoao Pinto 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1538ce736788SJoao Pinto 
1539ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff_dma);
1540ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff);
1541ce736788SJoao Pinto 	}
1542ce736788SJoao Pinto }
1543ce736788SJoao Pinto 
/**
 * alloc_dma_rx_desc_resources - alloc RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 * Return: 0 on success, a negative errno on failure (everything allocated
 * so far is released via free_dma_rx_desc_resources()).
 */
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
		struct page_pool_params pp_params = { 0 };
		unsigned int num_pages;

		rx_q->queue_index = queue;
		rx_q->priv_data = priv;

		/* One page pool per queue; the pool handles DMA mapping
		 * (PP_FLAG_DMA_MAP) and the order is chosen so one pool
		 * entry covers dma_buf_sz bytes.
		 */
		pp_params.flags = PP_FLAG_DMA_MAP;
		pp_params.pool_size = DMA_RX_SIZE;
		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
		pp_params.order = ilog2(num_pages);
		pp_params.nid = dev_to_node(priv->device);
		pp_params.dev = priv->device;
		pp_params.dma_dir = DMA_FROM_DEVICE;

		rx_q->page_pool = page_pool_create(&pp_params);
		if (IS_ERR(rx_q->page_pool)) {
			/* NULL the pool so the unwind path can detect it */
			ret = PTR_ERR(rx_q->page_pool);
			rx_q->page_pool = NULL;
			goto err_dma;
		}

		rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
					 GFP_KERNEL);
		if (!rx_q->buf_pool)
			goto err_dma;

		/* Coherent descriptor ring: extended or basic layout */
		if (priv->extend_desc) {
			rx_q->dma_erx = dma_alloc_coherent(priv->device,
							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
							   &rx_q->dma_rx_phy,
							   GFP_KERNEL);
			if (!rx_q->dma_erx)
				goto err_dma;

		} else {
			rx_q->dma_rx = dma_alloc_coherent(priv->device,
							  DMA_RX_SIZE * sizeof(struct dma_desc),
							  &rx_q->dma_rx_phy,
							  GFP_KERNEL);
			if (!rx_q->dma_rx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	/* Releases all queues, including partially-initialized ones */
	free_dma_rx_desc_resources(priv);

	return ret;
}
161271fedb01SJoao Pinto 
/**
 * alloc_dma_tx_desc_resources - alloc TX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 * Return: 0 on success, -ENOMEM on failure (everything allocated so far is
 * released via free_dma_tx_desc_resources()).
 */
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* TX queues buffers and DMA */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->queue_index = queue;
		tx_q->priv_data = priv;

		/* Per-slot DMA bookkeeping array */
		tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
					      sizeof(*tx_q->tx_skbuff_dma),
					      GFP_KERNEL);
		if (!tx_q->tx_skbuff_dma)
			goto err_dma;

		/* Per-slot skb pointer array */
		tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
					  sizeof(struct sk_buff *),
					  GFP_KERNEL);
		if (!tx_q->tx_skbuff)
			goto err_dma;

		/* Coherent descriptor ring: extended or basic layout */
		if (priv->extend_desc) {
			tx_q->dma_etx = dma_alloc_coherent(priv->device,
							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
							   &tx_q->dma_tx_phy,
							   GFP_KERNEL);
			if (!tx_q->dma_etx)
				goto err_dma;
		} else {
			tx_q->dma_tx = dma_alloc_coherent(priv->device,
							  DMA_TX_SIZE * sizeof(struct dma_desc),
							  &tx_q->dma_tx_phy,
							  GFP_KERNEL);
			if (!tx_q->dma_tx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	/* Releases all queues, including partially-initialized ones */
	free_dma_tx_desc_resources(priv);

	return ret;
}
167009f8d696SSrinivas Kandagatla 
/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	int ret;

	/* RX side first; bail out early if it fails */
	ret = alloc_dma_rx_desc_resources(priv);
	if (ret)
		return ret;

	return alloc_dma_tx_desc_resources(priv);
}
169171fedb01SJoao Pinto 
/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 * Description: releases everything set up by alloc_dma_desc_resources().
 */
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA RX socket buffers */
	free_dma_rx_desc_resources(priv);

	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv);
}
170471fedb01SJoao Pinto 
170571fedb01SJoao Pinto /**
17069eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
17079eb12474Sjpinto  *  @priv: driver private structure
17089eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
17099eb12474Sjpinto  */
17109eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
17119eb12474Sjpinto {
17124f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
17134f6046f5SJoao Pinto 	int queue;
17144f6046f5SJoao Pinto 	u8 mode;
17159eb12474Sjpinto 
17164f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
17174f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1718c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
17194f6046f5SJoao Pinto 	}
17209eb12474Sjpinto }
17219eb12474Sjpinto 
/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel by delegating to the stmmac_start_rx HW op.
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	stmmac_start_rx(priv, priv->ioaddr, chan);
}
1734ae4f0d46SJoao Pinto 
/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel by delegating to the stmmac_start_tx HW op.
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}
1747ae4f0d46SJoao Pinto 
/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel by delegating to the stmmac_stop_rx HW op.
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}
1760ae4f0d46SJoao Pinto 
/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel by delegating to the stmmac_stop_tx HW op.
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}
1773ae4f0d46SJoao Pinto 
1774ae4f0d46SJoao Pinto /**
1775ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
1776ae4f0d46SJoao Pinto  * @priv: driver private structure
1777ae4f0d46SJoao Pinto  * Description:
1778ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
1779ae4f0d46SJoao Pinto  */
1780ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
1781ae4f0d46SJoao Pinto {
1782ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1783ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1784ae4f0d46SJoao Pinto 	u32 chan = 0;
1785ae4f0d46SJoao Pinto 
1786ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1787ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
1788ae4f0d46SJoao Pinto 
1789ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1790ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
1791ae4f0d46SJoao Pinto }
1792ae4f0d46SJoao Pinto 
1793ae4f0d46SJoao Pinto /**
1794ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1795ae4f0d46SJoao Pinto  * @priv: driver private structure
1796ae4f0d46SJoao Pinto  * Description:
1797ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
1798ae4f0d46SJoao Pinto  */
1799ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1800ae4f0d46SJoao Pinto {
1801ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1802ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1803ae4f0d46SJoao Pinto 	u32 chan = 0;
1804ae4f0d46SJoao Pinto 
1805ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1806ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
1807ae4f0d46SJoao Pinto 
1808ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1809ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
1810ae4f0d46SJoao Pinto }
1811ae4f0d46SJoao Pinto 
/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;
	u8 qmode = 0;

	/* Fall back to the HW capability values when the platform does not
	 * provide explicit FIFO sizes.
	 */
	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	/* NOTE(review): assumes rx/tx queue counts are non-zero — confirm
	 * this is guaranteed by the platform setup code.
	 */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	for (chan = 0; chan < rx_channels_count; chan++) {
		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				rxfifosz, qmode);
		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
				chan);
	}

	for (chan = 0; chan < tx_channels_count; chan++) {
		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				txfifosz, qmode);
	}
}
18747ac6653aSJeff Kirsher 
/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @budget: maximum number of descriptors to reclaim in this call
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 * Runs under the netdev TX queue lock. Returns the number of descriptors
 * actually cleaned (at most @budget).
 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, count = 0;

	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	priv->xstats.tx_clean++;

	/* Walk the ring from the oldest unreclaimed descriptor */
	entry = tx_q->dirty_tx;
	while ((entry != tx_q->cur_tx) && (count < budget)) {
		struct sk_buff *skb = tx_q->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			p = tx_q->dma_tx + entry;

		status = stmmac_tx_status(priv, &priv->dev->stats,
				&priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		/* Release the DMA mapping that was set up at xmit time */
		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			tx_q->tx_skbuff[entry] = NULL;
		}

		stmmac_release_tx_desc(priv, p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	}
	tx_q->dirty_tx = entry;

	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	/* Wake the queue if we freed enough room for another frame */
	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}

	/* We still have pending packets, let's call for a new scheduling */
	if (tx_q->dirty_tx != tx_q->cur_tx)
		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));

	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

	return count;
}
19867ac6653aSJeff Kirsher 
/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors. The queue is stopped, the DMA channel
 * halted, the ring re-initialized, and then both are restarted.
 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
	int i;

	/* Quiesce the software queue before tearing the ring down */
	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, chan);
	/* Re-initialize every descriptor; the last one gets the ring-end
	 * marker.
	 */
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
					priv->mode, (i == DMA_TX_SIZE - 1));
		else
			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
					priv->mode, (i == DMA_TX_SIZE - 1));
	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;
	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
	stmmac_start_tx_dma(priv, chan);

	priv->dev->stats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}
20197ac6653aSJeff Kirsher 
202032ceabcaSGiuseppe CAVALLARO /**
20216deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
20226deee222SJoao Pinto  *  @priv: driver private structure
20236deee222SJoao Pinto  *  @txmode: TX operating mode
20246deee222SJoao Pinto  *  @rxmode: RX operating mode
20256deee222SJoao Pinto  *  @chan: channel index
20266deee222SJoao Pinto  *  Description: it is used for configuring of the DMA operation mode in
20276deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
20286deee222SJoao Pinto  *  mode.
20296deee222SJoao Pinto  */
20306deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
20316deee222SJoao Pinto 					  u32 rxmode, u32 chan)
20326deee222SJoao Pinto {
2033a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2034a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
203552a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
203652a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
20376deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
203852a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
20396deee222SJoao Pinto 
20406deee222SJoao Pinto 	if (rxfifosz == 0)
20416deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
204252a76235SJose Abreu 	if (txfifosz == 0)
204352a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
204452a76235SJose Abreu 
204552a76235SJose Abreu 	/* Adjust for real per queue fifo size */
204652a76235SJose Abreu 	rxfifosz /= rx_channels_count;
204752a76235SJose Abreu 	txfifosz /= tx_channels_count;
20486deee222SJoao Pinto 
2049ab0204e3SJose Abreu 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2050ab0204e3SJose Abreu 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
20516deee222SJoao Pinto }
20526deee222SJoao Pinto 
20538bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
20548bf993a5SJose Abreu {
205563a550fcSJose Abreu 	int ret;
20568bf993a5SJose Abreu 
2057c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
20588bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2059c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
20608bf993a5SJose Abreu 		stmmac_global_err(priv);
2061c10d4c82SJose Abreu 		return true;
2062c10d4c82SJose Abreu 	}
2063c10d4c82SJose Abreu 
2064c10d4c82SJose Abreu 	return false;
20658bf993a5SJose Abreu }
20668bf993a5SJose Abreu 
/**
 * stmmac_napi_check - schedule NAPI from a DMA channel interrupt
 * @priv: driver private structure
 * @chan: DMA channel index
 * Description: reads the channel DMA interrupt status and schedules the
 * RX and/or TX NAPI contexts as appropriate.
 * Returns the (possibly augmented) interrupt status for the channel.
 */
static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
{
	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
						 &priv->xstats, chan);
	struct stmmac_channel *ch = &priv->channel[chan];

	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
		if (napi_schedule_prep(&ch->rx_napi)) {
			/* Mask the channel IRQ while RX NAPI runs and
			 * piggy-back a TX pass on the same event.
			 */
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
			__napi_schedule_irqoff(&ch->rx_napi);
			status |= handle_tx;
		}
	}

	/* TX NAPI is scheduled only for channels that back a TX queue */
	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use))
		napi_schedule_irqoff(&ch->tx_napi);

	return status;
}
20868fce3331SJose Abreu 
/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedule poll method in case of some
 * work can be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 channels_to_check = tx_channel_count > rx_channel_count ?
				tx_channel_count : rx_channel_count;
	u32 chan;
	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];

	/* Make sure we never check beyond our status buffer. */
	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
		channels_to_check = ARRAY_SIZE(status);

	/* Collect per-channel status and schedule NAPI where needed */
	for (chan = 0; chan < channels_to_check; chan++)
		status[chan] = stmmac_napi_check(priv, chan);

	for (chan = 0; chan < tx_channel_count; chan++) {
		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
			/* Try to bump up the dma threshold on this failure */
			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
			    (tc <= 256)) {
				tc += 64;
				if (priv->plat->force_thresh_dma_mode)
					stmmac_set_dma_operation_mode(priv,
								      tc,
								      tc,
								      chan);
				else
					stmmac_set_dma_operation_mode(priv,
								    tc,
								    SF_DMA_MODE,
								    chan);
				priv->xstats.threshold = tc;
			}
		} else if (unlikely(status[chan] == tx_hard_error)) {
			/* Unrecoverable TX error: reset the ring */
			stmmac_tx_err(priv, chan);
		}
	}
}
21337ac6653aSJeff Kirsher 
213432ceabcaSGiuseppe CAVALLARO /**
213532ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
213632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
213732ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
213832ceabcaSGiuseppe CAVALLARO  */
21391c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
21401c901a46SGiuseppe CAVALLARO {
21411c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
21421c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
21431c901a46SGiuseppe CAVALLARO 
21443b1dd2c5SJose Abreu 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
21454f795b25SGiuseppe CAVALLARO 
21464f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
21473b1dd2c5SJose Abreu 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
21481c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
21494f795b25SGiuseppe CAVALLARO 	} else
215038ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
21511c901a46SGiuseppe CAVALLARO }
21521c901a46SGiuseppe CAVALLARO 
2153732fdf0eSGiuseppe CAVALLARO /**
2154732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
215532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
215619e30c14SGiuseppe CAVALLARO  * Description:
215719e30c14SGiuseppe CAVALLARO  *  new GMAC chip generations have a new register to indicate the
2158e7434821SGiuseppe CAVALLARO  *  presence of the optional feature/functions.
215919e30c14SGiuseppe CAVALLARO  *  This can be also used to override the value passed through the
216019e30c14SGiuseppe CAVALLARO  *  platform and necessary for old MAC10/100 and GMAC chips.
2161e7434821SGiuseppe CAVALLARO  */
2162e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2163e7434821SGiuseppe CAVALLARO {
2164a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2165e7434821SGiuseppe CAVALLARO }
2166e7434821SGiuseppe CAVALLARO 
216732ceabcaSGiuseppe CAVALLARO /**
2168732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
216932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
217032ceabcaSGiuseppe CAVALLARO  * Description:
217132ceabcaSGiuseppe CAVALLARO  * it is to verify if the MAC address is valid, in case of failures it
217232ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
217332ceabcaSGiuseppe CAVALLARO  */
2174bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2175bfab27a1SGiuseppe CAVALLARO {
2176bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2177c10d4c82SJose Abreu 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2178bfab27a1SGiuseppe CAVALLARO 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2179f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
2180af649352SJisheng Zhang 		dev_info(priv->device, "device MAC address %pM\n",
2181bfab27a1SGiuseppe CAVALLARO 			 priv->dev->dev_addr);
2182bfab27a1SGiuseppe CAVALLARO 	}
2183c88460b7SHans de Goede }
2184bfab27a1SGiuseppe CAVALLARO 
/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * in case of these are not passed a default is kept for the MAC or GMAC.
 * Returns 0 on success, a negative errno otherwise.
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 chan = 0;
	int atds = 0;	/* presumably "alternate descriptor size" — confirm */
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	/* Extended descriptors in ring mode need the alternate size flag */
	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	ret = stmmac_reset(priv, priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++)
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		rx_q = &priv->rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		/* Tail pointer is set one past the last RX descriptor */
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			    (DMA_RX_SIZE * sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		tx_q = &priv->tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		/* TX tail starts at the ring base (nothing queued yet) */
		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);
	}

	return ret;
}
22550f1f88a8SGiuseppe CAVALLARO 
22568fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
22578fce3331SJose Abreu {
22588fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
22598fce3331SJose Abreu 
22608fce3331SJose Abreu 	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
22618fce3331SJose Abreu }
22628fce3331SJose Abreu 
2263bfab27a1SGiuseppe CAVALLARO /**
2264732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
22659125cdd1SGiuseppe CAVALLARO  * @data: data pointer
22669125cdd1SGiuseppe CAVALLARO  * Description:
22679125cdd1SGiuseppe CAVALLARO  * This is the timer handler to directly invoke the stmmac_tx_clean.
22689125cdd1SGiuseppe CAVALLARO  */
2269e99e88a9SKees Cook static void stmmac_tx_timer(struct timer_list *t)
22709125cdd1SGiuseppe CAVALLARO {
22718fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
22728fce3331SJose Abreu 	struct stmmac_priv *priv = tx_q->priv_data;
22738fce3331SJose Abreu 	struct stmmac_channel *ch;
22749125cdd1SGiuseppe CAVALLARO 
22758fce3331SJose Abreu 	ch = &priv->channel[tx_q->queue_index];
22768fce3331SJose Abreu 
22774ccb4585SJose Abreu 	/*
22784ccb4585SJose Abreu 	 * If NAPI is already running we can miss some events. Let's rearm
22794ccb4585SJose Abreu 	 * the timer and try again.
22804ccb4585SJose Abreu 	 */
22814ccb4585SJose Abreu 	if (likely(napi_schedule_prep(&ch->tx_napi)))
22824ccb4585SJose Abreu 		__napi_schedule(&ch->tx_napi);
22834ccb4585SJose Abreu 	else
22844ccb4585SJose Abreu 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
22859125cdd1SGiuseppe CAVALLARO }
22869125cdd1SGiuseppe CAVALLARO 
22879125cdd1SGiuseppe CAVALLARO /**
2288d429b66eSJose Abreu  * stmmac_init_coalesce - init mitigation options.
228932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
22909125cdd1SGiuseppe CAVALLARO  * Description:
2291d429b66eSJose Abreu  * This inits the coalesce parameters: i.e. timer rate,
22929125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
22939125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
22949125cdd1SGiuseppe CAVALLARO  */
2295d429b66eSJose Abreu static void stmmac_init_coalesce(struct stmmac_priv *priv)
22969125cdd1SGiuseppe CAVALLARO {
22978fce3331SJose Abreu 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
22988fce3331SJose Abreu 	u32 chan;
22998fce3331SJose Abreu 
23009125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
23019125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2302d429b66eSJose Abreu 	priv->rx_coal_frames = STMMAC_RX_FRAMES;
23038fce3331SJose Abreu 
23048fce3331SJose Abreu 	for (chan = 0; chan < tx_channel_count; chan++) {
23058fce3331SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
23068fce3331SJose Abreu 
23078fce3331SJose Abreu 		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
23088fce3331SJose Abreu 	}
23099125cdd1SGiuseppe CAVALLARO }
23109125cdd1SGiuseppe CAVALLARO 
23114854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
23124854ab99SJoao Pinto {
23134854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
23144854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
23154854ab99SJoao Pinto 	u32 chan;
23164854ab99SJoao Pinto 
23174854ab99SJoao Pinto 	/* set TX ring length */
23184854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2319a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
23204854ab99SJoao Pinto 				(DMA_TX_SIZE - 1), chan);
23214854ab99SJoao Pinto 
23224854ab99SJoao Pinto 	/* set RX ring length */
23234854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2324a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
23254854ab99SJoao Pinto 				(DMA_RX_SIZE - 1), chan);
23264854ab99SJoao Pinto }
23274854ab99SJoao Pinto 
23289125cdd1SGiuseppe CAVALLARO /**
23296a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
23306a3a7193SJoao Pinto  *  @priv: driver private structure
23316a3a7193SJoao Pinto  *  Description: It is used for setting TX queues weight
23326a3a7193SJoao Pinto  */
23336a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
23346a3a7193SJoao Pinto {
23356a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
23366a3a7193SJoao Pinto 	u32 weight;
23376a3a7193SJoao Pinto 	u32 queue;
23386a3a7193SJoao Pinto 
23396a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
23406a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
2341c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
23426a3a7193SJoao Pinto 	}
23436a3a7193SJoao Pinto }
23446a3a7193SJoao Pinto 
23456a3a7193SJoao Pinto /**
234619d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
234719d91873SJoao Pinto  *  @priv: driver private structure
234819d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
234919d91873SJoao Pinto  */
235019d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
235119d91873SJoao Pinto {
235219d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
235319d91873SJoao Pinto 	u32 mode_to_use;
235419d91873SJoao Pinto 	u32 queue;
235519d91873SJoao Pinto 
235644781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
235744781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
235819d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
235919d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
236019d91873SJoao Pinto 			continue;
236119d91873SJoao Pinto 
2362c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
236319d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
236419d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
236519d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
236619d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
236719d91873SJoao Pinto 				queue);
236819d91873SJoao Pinto 	}
236919d91873SJoao Pinto }
237019d91873SJoao Pinto 
237119d91873SJoao Pinto /**
2372d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2373d43042f4SJoao Pinto  *  @priv: driver private structure
2374d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
2375d43042f4SJoao Pinto  */
2376d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2377d43042f4SJoao Pinto {
2378d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2379d43042f4SJoao Pinto 	u32 queue;
2380d43042f4SJoao Pinto 	u32 chan;
2381d43042f4SJoao Pinto 
2382d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2383d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
2384c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2385d43042f4SJoao Pinto 	}
2386d43042f4SJoao Pinto }
2387d43042f4SJoao Pinto 
2388d43042f4SJoao Pinto /**
2389a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2390a8f5102aSJoao Pinto  *  @priv: driver private structure
2391a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
2392a8f5102aSJoao Pinto  */
2393a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2394a8f5102aSJoao Pinto {
2395a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2396a8f5102aSJoao Pinto 	u32 queue;
2397a8f5102aSJoao Pinto 	u32 prio;
2398a8f5102aSJoao Pinto 
2399a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2400a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2401a8f5102aSJoao Pinto 			continue;
2402a8f5102aSJoao Pinto 
2403a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
2404c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2405a8f5102aSJoao Pinto 	}
2406a8f5102aSJoao Pinto }
2407a8f5102aSJoao Pinto 
2408a8f5102aSJoao Pinto /**
2409a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2410a8f5102aSJoao Pinto  *  @priv: driver private structure
2411a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
2412a8f5102aSJoao Pinto  */
2413a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2414a8f5102aSJoao Pinto {
2415a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2416a8f5102aSJoao Pinto 	u32 queue;
2417a8f5102aSJoao Pinto 	u32 prio;
2418a8f5102aSJoao Pinto 
2419a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
2420a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2421a8f5102aSJoao Pinto 			continue;
2422a8f5102aSJoao Pinto 
2423a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
2424c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2425a8f5102aSJoao Pinto 	}
2426a8f5102aSJoao Pinto }
2427a8f5102aSJoao Pinto 
2428a8f5102aSJoao Pinto /**
2429abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2430abe80fdcSJoao Pinto  *  @priv: driver private structure
2431abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
2432abe80fdcSJoao Pinto  */
2433abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2434abe80fdcSJoao Pinto {
2435abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2436abe80fdcSJoao Pinto 	u32 queue;
2437abe80fdcSJoao Pinto 	u8 packet;
2438abe80fdcSJoao Pinto 
2439abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2440abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
2441abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2442abe80fdcSJoao Pinto 			continue;
2443abe80fdcSJoao Pinto 
2444abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2445c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2446abe80fdcSJoao Pinto 	}
2447abe80fdcSJoao Pinto }
2448abe80fdcSJoao Pinto 
244976067459SJose Abreu static void stmmac_mac_config_rss(struct stmmac_priv *priv)
245076067459SJose Abreu {
245176067459SJose Abreu 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
245276067459SJose Abreu 		priv->rss.enable = false;
245376067459SJose Abreu 		return;
245476067459SJose Abreu 	}
245576067459SJose Abreu 
245676067459SJose Abreu 	if (priv->dev->features & NETIF_F_RXHASH)
245776067459SJose Abreu 		priv->rss.enable = true;
245876067459SJose Abreu 	else
245976067459SJose Abreu 		priv->rss.enable = false;
246076067459SJose Abreu 
246176067459SJose Abreu 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
246276067459SJose Abreu 			     priv->plat->rx_queues_to_use);
246376067459SJose Abreu }
246476067459SJose Abreu 
/**
 *  stmmac_mtl_configuration - Configure MTL
 *  @priv: driver private structure
 *  Description: It is used for configuring the MTL (MAC Transaction Layer):
 *  per-queue TX weights, RX/TX scheduling algorithms, CBS, RX queue to DMA
 *  channel mapping, queue priorities, RX packet routing and RSS. Settings
 *  that only make sense with several queues are skipped on single-queue
 *  configurations.
 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	/* Set per-queue TX weights (multi-queue only) */
	if (tx_queues_count > 1)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1)
		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
				priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1)
		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
				priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	stmmac_mac_enable_rx_queues(priv);

	/* Set RX priorities */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_prio(priv);

	/* Set TX priorities */
	if (tx_queues_count > 1)
		stmmac_mac_config_tx_queues_prio(priv);

	/* Set RX routing */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_routing(priv);

	/* Receive Side Scaling */
	if (rx_queues_count > 1)
		stmmac_mac_config_rss(priv);
}
2514d0a9c9f9SJoao Pinto 
25158bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
25168bf993a5SJose Abreu {
2517c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
25188bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
2519c10d4c82SJose Abreu 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
25208bf993a5SJose Abreu 	} else {
25218bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
25228bf993a5SJose Abreu 	}
25238bf993a5SJose Abreu }
25248bf993a5SJose Abreu 
/**
 * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  @init_ptp: also enable the PTP reference clock and initialize PTP
 *  Description:
 *  this is the main function to setup the HW in a usable state because the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers). The DMA is ready to start receiving and
 *  transmitting.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 chan;
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW  */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			/* Invalid speed is not fatal: fall back to ps = 0 */
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL*/
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	/* If RX IPC checksum offload cannot be enabled, disable COE so the
	 * rest of the driver stops relying on HW checksumming.
	 */
	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	/* PTP clock/init failures are only warned about, not fatal */
	if (init_ptp) {
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);

		ret = stmmac_init_ptp(priv);
		if (ret == -EOPNOTSUPP)
			netdev_warn(priv->dev, "PTP not supported by HW\n");
		else if (ret)
			netdev_warn(priv->dev, "PTP init failed\n");
	}

	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;

	/* Program the RX interrupt watchdog (coalescing) when in use */
	if (priv->use_riwt) {
		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MIN_DMA_RIWT, rx_cnt);
		if (!ret)
			priv->rx_riwt = MIN_DMA_RIWT;
	}

	if (priv->hw->pcs)
		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->tso) {
		for (chan = 0; chan < tx_cnt; chan++)
			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
	}

	/* Enable Split Header (requires RX checksum offload) */
	if (priv->sph && priv->hw->rx_csum) {
		for (chan = 0; chan < rx_cnt; chan++)
			stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
	}

	/* VLAN Tag Insertion */
	if (priv->dma_cap.vlins)
		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);

	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	return 0;
}
2640523f11b5SSrinivas Kandagatla 
2641c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev)
2642c66f6c37SThierry Reding {
2643c66f6c37SThierry Reding 	struct stmmac_priv *priv = netdev_priv(dev);
2644c66f6c37SThierry Reding 
2645c66f6c37SThierry Reding 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2646c66f6c37SThierry Reding }
2647c66f6c37SThierry Reding 
/**
 *  stmmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver: it attaches the
 *  PHY (unless a reversed/TBI PCS is in use), allocates and initializes
 *  the DMA descriptor rings, brings the HW up, requests the IRQ lines and
 *  finally starts all queues. On any failure the already-acquired
 *  resources are released in reverse order via the goto ladder below.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;
	int ret;

	/* Only attach a PHY when the PCS does not drive the link itself */
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			return ret;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_coalesce(priv);

	phylink_start(priv->phylink);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			goto wolirq_error;
		}
	}

	/* Request the optional LPI IRQ when a dedicated line is provided */
	if (priv->lpi_irq > 0) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto lpiirq_error;
		}
	}

	stmmac_enable_all_queues(priv);
	stmmac_start_all_queues(priv);

	return 0;

	/* Error unwind: release resources in reverse acquisition order */
lpiirq_error:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
wolirq_error:
	free_irq(dev->irq, dev);
irq_error:
	phylink_stop(priv->phylink);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		del_timer_sync(&priv->tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	phylink_disconnect_phy(priv->phylink);
	return ret;
}
27637ac6653aSJeff Kirsher 
/**
 *  stmmac_release - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver: it stops the PHY and all
 *  queues, cancels the per-queue TX timers, frees the IRQ lines, stops
 *  the DMA, releases the descriptor rings and disables the MAC.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	/* Cancel the EEE control timer if EEE was active */
	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);

	stmmac_stop_all_queues(priv);

	stmmac_disable_all_queues(priv);

	/* Make sure no TX coalescing timer fires after this point */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		del_timer_sync(&priv->tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq > 0)
		free_irq(priv->lpi_irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	netif_carrier_off(dev);

	stmmac_release_ptp(priv);

	return 0;
}
28117ac6653aSJeff Kirsher 
/**
 * stmmac_vlan_insert - request HW VLAN tag insertion via a descriptor
 * @priv: driver private structure
 * @skb: socket buffer carrying the VLAN tag to offload
 * @tx_q: TX queue whose descriptor ring is used
 * Description: when the HW supports VLAN insertion and @skb carries a tag,
 * write the tag into the descriptor at the current ring position, hand the
 * descriptor to the HW and advance cur_tx.
 * Return: true when a descriptor was consumed for the tag, false otherwise.
 */
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
			       struct stmmac_tx_queue *tx_q)
{
	u16 tag = 0x0, inner_tag = 0x0;
	u32 inner_type = 0x0;
	struct dma_desc *p;

	if (!priv->dma_cap.vlins)
		return false;
	if (!skb_vlan_tag_present(skb))
		return false;
	/* For 802.1AD the skb's tag is programmed as the inner tag */
	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
		inner_tag = skb_vlan_tag_get(skb);
		inner_type = STMMAC_VLAN_INSERT;
	}

	tag = skb_vlan_tag_get(skb);

	p = tx_q->dma_tx + tx_q->cur_tx;
	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
		return false;

	/* Hand the descriptor to the HW and consume one ring entry */
	stmmac_set_tx_owner(priv, p);
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
	return true;
}
283830d93227SJose Abreu 
/**
 *  stmmac_tso_allocator - fill TX descriptors with the TSO payload chunks
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills descriptor and request new descriptors according to
 *  buffer length to fill; each descriptor covers at most TSO_MAX_BUFF_SIZE
 *  bytes of @des.
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	while (tmp_len > 0) {
		dma_addr_t curr_addr;

		/* Advance to the next free descriptor (ring wrap included) */
		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
		desc = tx_q->dma_tx + tx_q->cur_tx;

		curr_addr = des + (total_len - tmp_len);
		/* Narrow-address HW only has des0; wider HW uses the helper */
		if (priv->dma_cap.addr64 <= 32)
			desc->des0 = cpu_to_le32(curr_addr);
		else
			stmmac_set_desc_addr(priv, desc, curr_addr);

		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		/* Mark last only on the final chunk of the last segment */
		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
				0, 1,
				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
				0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}
2884f748be53SAlexandre TORGUE 
2885f748be53SAlexandre TORGUE /**
2886f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2887f748be53SAlexandre TORGUE  *  @skb : the socket buffer
2888f748be53SAlexandre TORGUE  *  @dev : device pointer
2889f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
2890f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
2891f748be53SAlexandre TORGUE  *  Diagram below show the ring programming in case of TSO frames:
2892f748be53SAlexandre TORGUE  *
2893f748be53SAlexandre TORGUE  *  First Descriptor
2894f748be53SAlexandre TORGUE  *   --------
2895f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
2896f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
2897f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
2898f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2899f748be53SAlexandre TORGUE  *   --------
2900f748be53SAlexandre TORGUE  *	|
2901f748be53SAlexandre TORGUE  *     ...
2902f748be53SAlexandre TORGUE  *	|
2903f748be53SAlexandre TORGUE  *   --------
2904f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2905f748be53SAlexandre TORGUE  *   | DES1 | --|
2906f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
2907f748be53SAlexandre TORGUE  *   | DES3 |
2908f748be53SAlexandre TORGUE  *   --------
2909f748be53SAlexandre TORGUE  *
2910f748be53SAlexandre TORGUE  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
2911f748be53SAlexandre TORGUE  */
2912f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2913f748be53SAlexandre TORGUE {
2914ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
2915f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
2916f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
2917ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
2918ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
291930d93227SJose Abreu 	unsigned int first_entry;
2920b7766206SJose Abreu 	u8 proto_hdr_len, hdr;
2921ce736788SJoao Pinto 	int tmp_pay_len = 0;
2922ce736788SJoao Pinto 	u32 pay_len, mss;
2923a993db88SJose Abreu 	dma_addr_t des;
292430d93227SJose Abreu 	bool has_vlan;
2925f748be53SAlexandre TORGUE 	int i;
2926f748be53SAlexandre TORGUE 
2927ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
2928ce736788SJoao Pinto 
2929f748be53SAlexandre TORGUE 	/* Compute header lengths */
2930b7766206SJose Abreu 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2931b7766206SJose Abreu 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
2932b7766206SJose Abreu 		hdr = sizeof(struct udphdr);
2933b7766206SJose Abreu 	} else {
2934f748be53SAlexandre TORGUE 		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2935b7766206SJose Abreu 		hdr = tcp_hdrlen(skb);
2936b7766206SJose Abreu 	}
2937f748be53SAlexandre TORGUE 
2938f748be53SAlexandre TORGUE 	/* Desc availability based on threshold should be enough safe */
2939ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
2940f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2941c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2942c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2943c22a3f48SJoao Pinto 								queue));
2944f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
294538ddc59dSLABBE Corentin 			netdev_err(priv->dev,
294638ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
294738ddc59dSLABBE Corentin 				   __func__);
2948f748be53SAlexandre TORGUE 		}
2949f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
2950f748be53SAlexandre TORGUE 	}
2951f748be53SAlexandre TORGUE 
2952f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2953f748be53SAlexandre TORGUE 
2954f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
2955f748be53SAlexandre TORGUE 
2956f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
29578d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
2958ce736788SJoao Pinto 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
295942de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
29608d212a9eSNiklas Cassel 		tx_q->mss = mss;
2961ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2962b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2963f748be53SAlexandre TORGUE 	}
2964f748be53SAlexandre TORGUE 
2965f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
2966b7766206SJose Abreu 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2967b7766206SJose Abreu 			__func__, hdr, proto_hdr_len, pay_len, mss);
2968f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2969f748be53SAlexandre TORGUE 			skb->data_len);
2970f748be53SAlexandre TORGUE 	}
2971f748be53SAlexandre TORGUE 
297230d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
297330d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
297430d93227SJose Abreu 
2975ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
2976b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2977f748be53SAlexandre TORGUE 
2978ce736788SJoao Pinto 	desc = tx_q->dma_tx + first_entry;
2979f748be53SAlexandre TORGUE 	first = desc;
2980f748be53SAlexandre TORGUE 
298130d93227SJose Abreu 	if (has_vlan)
298230d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
298330d93227SJose Abreu 
2984f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
2985f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2986f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
2987f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
2988f748be53SAlexandre TORGUE 		goto dma_map_err;
2989f748be53SAlexandre TORGUE 
2990ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2991ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2992f748be53SAlexandre TORGUE 
2993a993db88SJose Abreu 	if (priv->dma_cap.addr64 <= 32) {
2994f8be0d78SMichael Weiser 		first->des0 = cpu_to_le32(des);
2995f748be53SAlexandre TORGUE 
2996f748be53SAlexandre TORGUE 		/* Fill start of payload in buff2 of first descriptor */
2997f748be53SAlexandre TORGUE 		if (pay_len)
2998f8be0d78SMichael Weiser 			first->des1 = cpu_to_le32(des + proto_hdr_len);
2999f748be53SAlexandre TORGUE 
3000f748be53SAlexandre TORGUE 		/* If needed take extra descriptors to fill the remaining payload */
3001f748be53SAlexandre TORGUE 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3002a993db88SJose Abreu 	} else {
3003a993db88SJose Abreu 		stmmac_set_desc_addr(priv, first, des);
3004a993db88SJose Abreu 		tmp_pay_len = pay_len;
300534c15202Syuqi jin 		des += proto_hdr_len;
3006b2f07199SJose Abreu 		pay_len = 0;
3007a993db88SJose Abreu 	}
3008f748be53SAlexandre TORGUE 
3009ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
3010f748be53SAlexandre TORGUE 
3011f748be53SAlexandre TORGUE 	/* Prepare fragments */
3012f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
3013f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3014f748be53SAlexandre TORGUE 
3015f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
3016f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
3017f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
3018937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
3019937071c1SThierry Reding 			goto dma_map_err;
3020f748be53SAlexandre TORGUE 
3021f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3022ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
3023f748be53SAlexandre TORGUE 
3024ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3025ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3026ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3027f748be53SAlexandre TORGUE 	}
3028f748be53SAlexandre TORGUE 
3029ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3030f748be53SAlexandre TORGUE 
303105cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
303205cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
303305cf0d1bSNiklas Cassel 
30347df4a3a7SJose Abreu 	/* Manage tx mitigation */
30357df4a3a7SJose Abreu 	tx_q->tx_count_frames += nfrags + 1;
30367df4a3a7SJose Abreu 	if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
30377df4a3a7SJose Abreu 	    !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
30387df4a3a7SJose Abreu 	      priv->hwts_tx_en)) {
30397df4a3a7SJose Abreu 		stmmac_tx_timer_arm(priv, queue);
30407df4a3a7SJose Abreu 	} else {
30417df4a3a7SJose Abreu 		desc = &tx_q->dma_tx[tx_q->cur_tx];
30427df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
30437df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
30447df4a3a7SJose Abreu 		priv->xstats.tx_set_ic_bit++;
30457df4a3a7SJose Abreu 	}
30467df4a3a7SJose Abreu 
304705cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
304805cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
304905cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
305005cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
305105cf0d1bSNiklas Cassel 	 */
3052ce736788SJoao Pinto 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3053f748be53SAlexandre TORGUE 
3054ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3055b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
305638ddc59dSLABBE Corentin 			  __func__);
3057c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3058f748be53SAlexandre TORGUE 	}
3059f748be53SAlexandre TORGUE 
3060f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
3061f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
3062f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
3063f748be53SAlexandre TORGUE 
30648000ddc0SJose Abreu 	if (priv->sarc_type)
30658000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
30668000ddc0SJose Abreu 
3067f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
3068f748be53SAlexandre TORGUE 
3069f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3070f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
3071f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
3072f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
307342de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
3074f748be53SAlexandre TORGUE 	}
3075f748be53SAlexandre TORGUE 
3076f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
307742de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
3078f748be53SAlexandre TORGUE 			proto_hdr_len,
3079f748be53SAlexandre TORGUE 			pay_len,
3080ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3081b7766206SJose Abreu 			hdr / 4, (skb->len - proto_hdr_len));
3082f748be53SAlexandre TORGUE 
3083f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
308415d2ee42SNiklas Cassel 	if (mss_desc) {
308515d2ee42SNiklas Cassel 		/* Make sure that first descriptor has been completely
308615d2ee42SNiklas Cassel 		 * written, including its own bit. This is because MSS is
308715d2ee42SNiklas Cassel 		 * actually before first descriptor, so we need to make
308815d2ee42SNiklas Cassel 		 * sure that MSS's own bit is the last thing written.
308915d2ee42SNiklas Cassel 		 */
309015d2ee42SNiklas Cassel 		dma_wmb();
309142de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
309215d2ee42SNiklas Cassel 	}
3093f748be53SAlexandre TORGUE 
3094f748be53SAlexandre TORGUE 	/* The own bit must be the latest setting done when prepare the
3095f748be53SAlexandre TORGUE 	 * descriptor and then barrier is needed to make sure that
3096f748be53SAlexandre TORGUE 	 * all is coherent before granting the DMA engine.
3097f748be53SAlexandre TORGUE 	 */
309895eb930aSNiklas Cassel 	wmb();
3099f748be53SAlexandre TORGUE 
3100f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
3101f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3102ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3103ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
3104f748be53SAlexandre TORGUE 
310542de047dSJose Abreu 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3106f748be53SAlexandre TORGUE 
3107f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
3108f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
3109f748be53SAlexandre TORGUE 	}
3110f748be53SAlexandre TORGUE 
3111c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3112f748be53SAlexandre TORGUE 
31130431100bSJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3114a4e887faSJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3115f748be53SAlexandre TORGUE 
3116f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3117f748be53SAlexandre TORGUE 
3118f748be53SAlexandre TORGUE dma_map_err:
3119f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
3120f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
3121f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
3122f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3123f748be53SAlexandre TORGUE }
3124f748be53SAlexandre TORGUE 
3125f748be53SAlexandre TORGUE /**
3126732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
31277ac6653aSJeff Kirsher  *  @skb : the socket buffer
31287ac6653aSJeff Kirsher  *  @dev : device pointer
312932ceabcaSGiuseppe CAVALLARO  *  Description : this is the tx entry point of the driver.
313032ceabcaSGiuseppe CAVALLARO  *  It programs the chain or the ring and supports oversized frames
313132ceabcaSGiuseppe CAVALLARO  *  and SG feature.
31327ac6653aSJeff Kirsher  */
31337ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
31347ac6653aSJeff Kirsher {
31357ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
31360e80bdc9SGiuseppe Cavallaro 	unsigned int nopaged_len = skb_headlen(skb);
31374a7d666aSGiuseppe CAVALLARO 	int i, csum_insertion = 0, is_jumbo = 0;
3138ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
31397ac6653aSJeff Kirsher 	int nfrags = skb_shinfo(skb)->nr_frags;
3140b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
31417ac6653aSJeff Kirsher 	struct dma_desc *desc, *first;
3142ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
3143a993db88SJose Abreu 	unsigned int first_entry;
31440e80bdc9SGiuseppe Cavallaro 	unsigned int enh_desc;
3145a993db88SJose Abreu 	dma_addr_t des;
314630d93227SJose Abreu 	bool has_vlan;
3147a993db88SJose Abreu 	int entry;
3148f748be53SAlexandre TORGUE 
3149ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
3150ce736788SJoao Pinto 
3151e2cd682dSJose Abreu 	if (priv->tx_path_in_lpi_mode)
3152e2cd682dSJose Abreu 		stmmac_disable_eee_mode(priv);
3153e2cd682dSJose Abreu 
3154f748be53SAlexandre TORGUE 	/* Manage oversized TCP frames for GMAC4 device */
3155f748be53SAlexandre TORGUE 	if (skb_is_gso(skb) && priv->tso) {
3156b7766206SJose Abreu 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3157b7766206SJose Abreu 			return stmmac_tso_xmit(skb, dev);
3158b7766206SJose Abreu 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
3159f748be53SAlexandre TORGUE 			return stmmac_tso_xmit(skb, dev);
3160f748be53SAlexandre TORGUE 	}
31617ac6653aSJeff Kirsher 
3162ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3163c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3164c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3165c22a3f48SJoao Pinto 								queue));
31667ac6653aSJeff Kirsher 			/* This is a hard error, log it. */
316738ddc59dSLABBE Corentin 			netdev_err(priv->dev,
316838ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
316938ddc59dSLABBE Corentin 				   __func__);
31707ac6653aSJeff Kirsher 		}
31717ac6653aSJeff Kirsher 		return NETDEV_TX_BUSY;
31727ac6653aSJeff Kirsher 	}
31737ac6653aSJeff Kirsher 
317430d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
317530d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
317630d93227SJose Abreu 
3177ce736788SJoao Pinto 	entry = tx_q->cur_tx;
31780e80bdc9SGiuseppe Cavallaro 	first_entry = entry;
3179b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
31807ac6653aSJeff Kirsher 
31817ac6653aSJeff Kirsher 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
31827ac6653aSJeff Kirsher 
31830e80bdc9SGiuseppe Cavallaro 	if (likely(priv->extend_desc))
3184ce736788SJoao Pinto 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3185c24602efSGiuseppe CAVALLARO 	else
3186ce736788SJoao Pinto 		desc = tx_q->dma_tx + entry;
3187c24602efSGiuseppe CAVALLARO 
31887ac6653aSJeff Kirsher 	first = desc;
31897ac6653aSJeff Kirsher 
319030d93227SJose Abreu 	if (has_vlan)
319130d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
319230d93227SJose Abreu 
31930e80bdc9SGiuseppe Cavallaro 	enh_desc = priv->plat->enh_desc;
31944a7d666aSGiuseppe CAVALLARO 	/* To program the descriptors according to the size of the frame */
319529896a67SGiuseppe CAVALLARO 	if (enh_desc)
31962c520b1cSJose Abreu 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
319729896a67SGiuseppe CAVALLARO 
319863a550fcSJose Abreu 	if (unlikely(is_jumbo)) {
31992c520b1cSJose Abreu 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
320063a550fcSJose Abreu 		if (unlikely(entry < 0) && (entry != -EINVAL))
3201362b37beSGiuseppe CAVALLARO 			goto dma_map_err;
320229896a67SGiuseppe CAVALLARO 	}
32037ac6653aSJeff Kirsher 
32047ac6653aSJeff Kirsher 	for (i = 0; i < nfrags; i++) {
32059e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
32069e903e08SEric Dumazet 		int len = skb_frag_size(frag);
3207be434d50SGiuseppe Cavallaro 		bool last_segment = (i == (nfrags - 1));
32087ac6653aSJeff Kirsher 
3209e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3210b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[entry]);
3211e3ad57c9SGiuseppe Cavallaro 
32120e80bdc9SGiuseppe Cavallaro 		if (likely(priv->extend_desc))
3213ce736788SJoao Pinto 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3214c24602efSGiuseppe CAVALLARO 		else
3215ce736788SJoao Pinto 			desc = tx_q->dma_tx + entry;
32167ac6653aSJeff Kirsher 
3217f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3218f722380dSIan Campbell 				       DMA_TO_DEVICE);
3219f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
3220362b37beSGiuseppe CAVALLARO 			goto dma_map_err; /* should reuse desc w/o issues */
3221362b37beSGiuseppe CAVALLARO 
3222ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].buf = des;
32236844171dSJose Abreu 
32246844171dSJose Abreu 		stmmac_set_desc_addr(priv, desc, des);
3225f748be53SAlexandre TORGUE 
3226ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3227ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].len = len;
3228ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
32290e80bdc9SGiuseppe Cavallaro 
32300e80bdc9SGiuseppe Cavallaro 		/* Prepare the descriptor and set the own bit too */
323142de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
323242de047dSJose Abreu 				priv->mode, 1, last_segment, skb->len);
32337ac6653aSJeff Kirsher 	}
32347ac6653aSJeff Kirsher 
323505cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
323605cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[entry] = skb;
3237e3ad57c9SGiuseppe Cavallaro 
32387df4a3a7SJose Abreu 	/* According to the coalesce parameter the IC bit for the latest
32397df4a3a7SJose Abreu 	 * segment is reset and the timer re-started to clean the tx status.
32407df4a3a7SJose Abreu 	 * This approach takes care about the fragments: desc is the first
32417df4a3a7SJose Abreu 	 * element in case of no SG.
32427df4a3a7SJose Abreu 	 */
32437df4a3a7SJose Abreu 	tx_q->tx_count_frames += nfrags + 1;
32447df4a3a7SJose Abreu 	if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
32457df4a3a7SJose Abreu 	    !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
32467df4a3a7SJose Abreu 	      priv->hwts_tx_en)) {
32477df4a3a7SJose Abreu 		stmmac_tx_timer_arm(priv, queue);
32487df4a3a7SJose Abreu 	} else {
32497df4a3a7SJose Abreu 		if (likely(priv->extend_desc))
32507df4a3a7SJose Abreu 			desc = &tx_q->dma_etx[entry].basic;
32517df4a3a7SJose Abreu 		else
32527df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[entry];
32537df4a3a7SJose Abreu 
32547df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
32557df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
32567df4a3a7SJose Abreu 		priv->xstats.tx_set_ic_bit++;
32577df4a3a7SJose Abreu 	}
32587df4a3a7SJose Abreu 
325905cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
326005cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
326105cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
326205cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
326305cf0d1bSNiklas Cassel 	 */
326405cf0d1bSNiklas Cassel 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3265ce736788SJoao Pinto 	tx_q->cur_tx = entry;
32667ac6653aSJeff Kirsher 
32677ac6653aSJeff Kirsher 	if (netif_msg_pktdata(priv)) {
3268d0225e7dSAlexandre TORGUE 		void *tx_head;
3269d0225e7dSAlexandre TORGUE 
327038ddc59dSLABBE Corentin 		netdev_dbg(priv->dev,
327138ddc59dSLABBE Corentin 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3272ce736788SJoao Pinto 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
32730e80bdc9SGiuseppe Cavallaro 			   entry, first, nfrags);
327483d7af64SGiuseppe CAVALLARO 
3275c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
3276ce736788SJoao Pinto 			tx_head = (void *)tx_q->dma_etx;
3277c24602efSGiuseppe CAVALLARO 		else
3278ce736788SJoao Pinto 			tx_head = (void *)tx_q->dma_tx;
3279d0225e7dSAlexandre TORGUE 
328042de047dSJose Abreu 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3281c24602efSGiuseppe CAVALLARO 
328238ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
32837ac6653aSJeff Kirsher 		print_pkt(skb->data, skb->len);
32847ac6653aSJeff Kirsher 	}
32850e80bdc9SGiuseppe Cavallaro 
3286ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3287b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3288b3e51069SLABBE Corentin 			  __func__);
3289c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
32907ac6653aSJeff Kirsher 	}
32917ac6653aSJeff Kirsher 
32927ac6653aSJeff Kirsher 	dev->stats.tx_bytes += skb->len;
32937ac6653aSJeff Kirsher 
32948000ddc0SJose Abreu 	if (priv->sarc_type)
32958000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
32968000ddc0SJose Abreu 
32970e80bdc9SGiuseppe Cavallaro 	skb_tx_timestamp(skb);
32980e80bdc9SGiuseppe Cavallaro 
32990e80bdc9SGiuseppe Cavallaro 	/* Ready to fill the first descriptor and set the OWN bit w/o any
33000e80bdc9SGiuseppe Cavallaro 	 * problems because all the descriptors are actually ready to be
33010e80bdc9SGiuseppe Cavallaro 	 * passed to the DMA engine.
33020e80bdc9SGiuseppe Cavallaro 	 */
33030e80bdc9SGiuseppe Cavallaro 	if (likely(!is_jumbo)) {
33040e80bdc9SGiuseppe Cavallaro 		bool last_segment = (nfrags == 0);
33050e80bdc9SGiuseppe Cavallaro 
3306f748be53SAlexandre TORGUE 		des = dma_map_single(priv->device, skb->data,
33070e80bdc9SGiuseppe Cavallaro 				     nopaged_len, DMA_TO_DEVICE);
3308f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
33090e80bdc9SGiuseppe Cavallaro 			goto dma_map_err;
33100e80bdc9SGiuseppe Cavallaro 
3311ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].buf = des;
33126844171dSJose Abreu 
33136844171dSJose Abreu 		stmmac_set_desc_addr(priv, first, des);
3314f748be53SAlexandre TORGUE 
3315ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3316ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
33170e80bdc9SGiuseppe Cavallaro 
3318891434b1SRayagond Kokatanur 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3319891434b1SRayagond Kokatanur 			     priv->hwts_tx_en)) {
3320891434b1SRayagond Kokatanur 			/* declare that device is doing timestamping */
3321891434b1SRayagond Kokatanur 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
332242de047dSJose Abreu 			stmmac_enable_tx_timestamp(priv, first);
3323891434b1SRayagond Kokatanur 		}
3324891434b1SRayagond Kokatanur 
33250e80bdc9SGiuseppe Cavallaro 		/* Prepare the first descriptor setting the OWN bit too */
332642de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
332742de047dSJose Abreu 				csum_insertion, priv->mode, 1, last_segment,
332842de047dSJose Abreu 				skb->len);
332980acbed9SAaro Koskinen 	} else {
333080acbed9SAaro Koskinen 		stmmac_set_tx_owner(priv, first);
333180acbed9SAaro Koskinen 	}
33320e80bdc9SGiuseppe Cavallaro 
33330e80bdc9SGiuseppe Cavallaro 	/* The own bit must be the latest setting done when prepare the
33340e80bdc9SGiuseppe Cavallaro 	 * descriptor and then barrier is needed to make sure that
33350e80bdc9SGiuseppe Cavallaro 	 * all is coherent before granting the DMA engine.
33360e80bdc9SGiuseppe Cavallaro 	 */
333795eb930aSNiklas Cassel 	wmb();
33387ac6653aSJeff Kirsher 
3339c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3340f748be53SAlexandre TORGUE 
3341a4e887faSJose Abreu 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
33428fce3331SJose Abreu 
33430431100bSJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3344f1565c60SJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
33457ac6653aSJeff Kirsher 
3346362b37beSGiuseppe CAVALLARO 	return NETDEV_TX_OK;
3347a9097a96SGiuseppe CAVALLARO 
3348362b37beSGiuseppe CAVALLARO dma_map_err:
334938ddc59dSLABBE Corentin 	netdev_err(priv->dev, "Tx DMA map failed\n");
3350362b37beSGiuseppe CAVALLARO 	dev_kfree_skb(skb);
3351362b37beSGiuseppe CAVALLARO 	priv->dev->stats.tx_dropped++;
33527ac6653aSJeff Kirsher 	return NETDEV_TX_OK;
33537ac6653aSJeff Kirsher }
33547ac6653aSJeff Kirsher 
3355b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3356b9381985SVince Bridgers {
3357ab188e8fSElad Nachman 	struct vlan_ethhdr *veth;
3358ab188e8fSElad Nachman 	__be16 vlan_proto;
3359b9381985SVince Bridgers 	u16 vlanid;
3360b9381985SVince Bridgers 
3361ab188e8fSElad Nachman 	veth = (struct vlan_ethhdr *)skb->data;
3362ab188e8fSElad Nachman 	vlan_proto = veth->h_vlan_proto;
3363ab188e8fSElad Nachman 
3364ab188e8fSElad Nachman 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3365ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3366ab188e8fSElad Nachman 	    (vlan_proto == htons(ETH_P_8021AD) &&
3367ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3368b9381985SVince Bridgers 		/* pop the vlan tag */
3369ab188e8fSElad Nachman 		vlanid = ntohs(veth->h_vlan_TCI);
3370ab188e8fSElad Nachman 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3371b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
3372ab188e8fSElad Nachman 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3373b9381985SVince Bridgers 	}
3374b9381985SVince Bridgers }
3375b9381985SVince Bridgers 
3376b9381985SVince Bridgers 
337754139cf3SJoao Pinto static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3378120e87f9SGiuseppe Cavallaro {
337954139cf3SJoao Pinto 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3380120e87f9SGiuseppe Cavallaro 		return 0;
3381120e87f9SGiuseppe Cavallaro 
3382120e87f9SGiuseppe Cavallaro 	return 1;
3383120e87f9SGiuseppe Cavallaro }
3384120e87f9SGiuseppe Cavallaro 
338532ceabcaSGiuseppe CAVALLARO /**
3386732fdf0eSGiuseppe CAVALLARO  * stmmac_rx_refill - refill used skb preallocated buffers
338732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
338854139cf3SJoao Pinto  * @queue: RX queue index
338932ceabcaSGiuseppe CAVALLARO  * Description : this is to reallocate the skb for the reception process
339032ceabcaSGiuseppe CAVALLARO  * that is based on zero-copy.
339132ceabcaSGiuseppe CAVALLARO  */
339254139cf3SJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
33937ac6653aSJeff Kirsher {
339454139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
33953caa61c2SJose Abreu 	int len, dirty = stmmac_rx_dirty(priv, queue);
339654139cf3SJoao Pinto 	unsigned int entry = rx_q->dirty_rx;
339754139cf3SJoao Pinto 
33983caa61c2SJose Abreu 	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
33993caa61c2SJose Abreu 
3400e3ad57c9SGiuseppe Cavallaro 	while (dirty-- > 0) {
34012af6106aSJose Abreu 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3402c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
3403d429b66eSJose Abreu 		bool use_rx_wd;
3404c24602efSGiuseppe CAVALLARO 
3405c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
340654139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3407c24602efSGiuseppe CAVALLARO 		else
340854139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3409c24602efSGiuseppe CAVALLARO 
34102af6106aSJose Abreu 		if (!buf->page) {
34112af6106aSJose Abreu 			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
34122af6106aSJose Abreu 			if (!buf->page)
34137ac6653aSJeff Kirsher 				break;
3414120e87f9SGiuseppe Cavallaro 		}
34157ac6653aSJeff Kirsher 
341667afd6d1SJose Abreu 		if (priv->sph && !buf->sec_page) {
341767afd6d1SJose Abreu 			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
341867afd6d1SJose Abreu 			if (!buf->sec_page)
341967afd6d1SJose Abreu 				break;
342067afd6d1SJose Abreu 
342167afd6d1SJose Abreu 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
342267afd6d1SJose Abreu 
342367afd6d1SJose Abreu 			dma_sync_single_for_device(priv->device, buf->sec_addr,
342467afd6d1SJose Abreu 						   len, DMA_FROM_DEVICE);
342567afd6d1SJose Abreu 		}
342667afd6d1SJose Abreu 
34272af6106aSJose Abreu 		buf->addr = page_pool_get_dma_addr(buf->page);
34283caa61c2SJose Abreu 
34293caa61c2SJose Abreu 		/* Sync whole allocation to device. This will invalidate old
34303caa61c2SJose Abreu 		 * data.
34313caa61c2SJose Abreu 		 */
34323caa61c2SJose Abreu 		dma_sync_single_for_device(priv->device, buf->addr, len,
34333caa61c2SJose Abreu 					   DMA_FROM_DEVICE);
34343caa61c2SJose Abreu 
34352af6106aSJose Abreu 		stmmac_set_desc_addr(priv, p, buf->addr);
343667afd6d1SJose Abreu 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
34372c520b1cSJose Abreu 		stmmac_refill_desc3(priv, rx_q, p);
3438286a8372SGiuseppe CAVALLARO 
3439d429b66eSJose Abreu 		rx_q->rx_count_frames++;
34406fa9d691SJose Abreu 		rx_q->rx_count_frames += priv->rx_coal_frames;
34416fa9d691SJose Abreu 		if (rx_q->rx_count_frames > priv->rx_coal_frames)
34426fa9d691SJose Abreu 			rx_q->rx_count_frames = 0;
344309146abeSJose Abreu 
344409146abeSJose Abreu 		use_rx_wd = !priv->rx_coal_frames;
344509146abeSJose Abreu 		use_rx_wd |= rx_q->rx_count_frames > 0;
344609146abeSJose Abreu 		if (!priv->use_riwt)
344709146abeSJose Abreu 			use_rx_wd = false;
3448d429b66eSJose Abreu 
3449ad688cdbSPavel Machek 		dma_wmb();
34502af6106aSJose Abreu 		stmmac_set_rx_owner(priv, p, use_rx_wd);
3451e3ad57c9SGiuseppe Cavallaro 
3452e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
34537ac6653aSJeff Kirsher 	}
345454139cf3SJoao Pinto 	rx_q->dirty_rx = entry;
3455858a31ffSJose Abreu 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3456858a31ffSJose Abreu 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
34574523a561SBiao Huang 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
34587ac6653aSJeff Kirsher }
34597ac6653aSJeff Kirsher 
346088ebe2cfSJose Abreu static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
346188ebe2cfSJose Abreu 				       struct dma_desc *p,
346288ebe2cfSJose Abreu 				       int status, unsigned int len)
346388ebe2cfSJose Abreu {
346488ebe2cfSJose Abreu 	int ret, coe = priv->hw->rx_csum;
346588ebe2cfSJose Abreu 	unsigned int plen = 0, hlen = 0;
346688ebe2cfSJose Abreu 
346788ebe2cfSJose Abreu 	/* Not first descriptor, buffer is always zero */
346888ebe2cfSJose Abreu 	if (priv->sph && len)
346988ebe2cfSJose Abreu 		return 0;
347088ebe2cfSJose Abreu 
347188ebe2cfSJose Abreu 	/* First descriptor, get split header length */
347288ebe2cfSJose Abreu 	ret = stmmac_get_rx_header_len(priv, p, &hlen);
347388ebe2cfSJose Abreu 	if (priv->sph && hlen) {
347488ebe2cfSJose Abreu 		priv->xstats.rx_split_hdr_pkt_n++;
347588ebe2cfSJose Abreu 		return hlen;
347688ebe2cfSJose Abreu 	}
347788ebe2cfSJose Abreu 
347888ebe2cfSJose Abreu 	/* First descriptor, not last descriptor and not split header */
347988ebe2cfSJose Abreu 	if (status & rx_not_ls)
348088ebe2cfSJose Abreu 		return priv->dma_buf_sz;
348188ebe2cfSJose Abreu 
348288ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
348388ebe2cfSJose Abreu 
348488ebe2cfSJose Abreu 	/* First descriptor and last descriptor and not split header */
348588ebe2cfSJose Abreu 	return min_t(unsigned int, priv->dma_buf_sz, plen);
348688ebe2cfSJose Abreu }
348788ebe2cfSJose Abreu 
348888ebe2cfSJose Abreu static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
348988ebe2cfSJose Abreu 				       struct dma_desc *p,
349088ebe2cfSJose Abreu 				       int status, unsigned int len)
349188ebe2cfSJose Abreu {
349288ebe2cfSJose Abreu 	int coe = priv->hw->rx_csum;
349388ebe2cfSJose Abreu 	unsigned int plen = 0;
349488ebe2cfSJose Abreu 
349588ebe2cfSJose Abreu 	/* Not split header, buffer is not available */
349688ebe2cfSJose Abreu 	if (!priv->sph)
349788ebe2cfSJose Abreu 		return 0;
349888ebe2cfSJose Abreu 
349988ebe2cfSJose Abreu 	/* Not last descriptor */
350088ebe2cfSJose Abreu 	if (status & rx_not_ls)
350188ebe2cfSJose Abreu 		return priv->dma_buf_sz;
350288ebe2cfSJose Abreu 
350388ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
350488ebe2cfSJose Abreu 
350588ebe2cfSJose Abreu 	/* Last descriptor */
350688ebe2cfSJose Abreu 	return plen - len;
350788ebe2cfSJose Abreu }
350888ebe2cfSJose Abreu 
350932ceabcaSGiuseppe CAVALLARO /**
3510732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
351132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description :  this is the function called by the napi poll method.
351532ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
351632ceabcaSGiuseppe CAVALLARO  */
351754139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
35187ac6653aSJeff Kirsher {
351954139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
35208fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[queue];
3521ec222003SJose Abreu 	unsigned int count = 0, error = 0, len = 0;
3522ec222003SJose Abreu 	int status = 0, coe = priv->hw->rx_csum;
352307b39753SAaro Koskinen 	unsigned int next_entry = rx_q->cur_rx;
3524ec222003SJose Abreu 	struct sk_buff *skb = NULL;
35257ac6653aSJeff Kirsher 
352683d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
3527d0225e7dSAlexandre TORGUE 		void *rx_head;
3528d0225e7dSAlexandre TORGUE 
352938ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3530c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
353154139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
3532c24602efSGiuseppe CAVALLARO 		else
353354139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
3534d0225e7dSAlexandre TORGUE 
353542de047dSJose Abreu 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
35367ac6653aSJeff Kirsher 	}
3537c24602efSGiuseppe CAVALLARO 	while (count < limit) {
353888ebe2cfSJose Abreu 		unsigned int buf1_len = 0, buf2_len = 0;
3539ec222003SJose Abreu 		enum pkt_hash_types hash_type;
35402af6106aSJose Abreu 		struct stmmac_rx_buffer *buf;
35412af6106aSJose Abreu 		struct dma_desc *np, *p;
3542ec222003SJose Abreu 		int entry;
3543ec222003SJose Abreu 		u32 hash;
35447ac6653aSJeff Kirsher 
3545ec222003SJose Abreu 		if (!count && rx_q->state_saved) {
3546ec222003SJose Abreu 			skb = rx_q->state.skb;
3547ec222003SJose Abreu 			error = rx_q->state.error;
3548ec222003SJose Abreu 			len = rx_q->state.len;
3549ec222003SJose Abreu 		} else {
3550ec222003SJose Abreu 			rx_q->state_saved = false;
3551ec222003SJose Abreu 			skb = NULL;
3552ec222003SJose Abreu 			error = 0;
3553ec222003SJose Abreu 			len = 0;
3554ec222003SJose Abreu 		}
3555ec222003SJose Abreu 
3556ec222003SJose Abreu 		if (count >= limit)
3557ec222003SJose Abreu 			break;
3558ec222003SJose Abreu 
3559ec222003SJose Abreu read_again:
356088ebe2cfSJose Abreu 		buf1_len = 0;
356188ebe2cfSJose Abreu 		buf2_len = 0;
356207b39753SAaro Koskinen 		entry = next_entry;
35632af6106aSJose Abreu 		buf = &rx_q->buf_pool[entry];
356407b39753SAaro Koskinen 
3565c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
356654139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3567c24602efSGiuseppe CAVALLARO 		else
356854139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3569c24602efSGiuseppe CAVALLARO 
3570c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
357142de047dSJose Abreu 		status = stmmac_rx_status(priv, &priv->dev->stats,
3572c1fa3212SFabrice Gasnier 				&priv->xstats, p);
3573c1fa3212SFabrice Gasnier 		/* check if managed by the DMA otherwise go ahead */
3574c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
35757ac6653aSJeff Kirsher 			break;
35767ac6653aSJeff Kirsher 
357754139cf3SJoao Pinto 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
357854139cf3SJoao Pinto 		next_entry = rx_q->cur_rx;
3579e3ad57c9SGiuseppe Cavallaro 
3580c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
358154139cf3SJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3582c24602efSGiuseppe CAVALLARO 		else
358354139cf3SJoao Pinto 			np = rx_q->dma_rx + next_entry;
3584ba1ffd74SGiuseppe CAVALLARO 
3585ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
35867ac6653aSJeff Kirsher 
358742de047dSJose Abreu 		if (priv->extend_desc)
358842de047dSJose Abreu 			stmmac_rx_extended_status(priv, &priv->dev->stats,
358942de047dSJose Abreu 					&priv->xstats, rx_q->dma_erx + entry);
3590891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
35912af6106aSJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
35922af6106aSJose Abreu 			buf->page = NULL;
3593ec222003SJose Abreu 			error = 1;
35940b273ca4SJose Abreu 			if (!priv->hwts_rx_en)
35950b273ca4SJose Abreu 				priv->dev->stats.rx_errors++;
3596ec222003SJose Abreu 		}
3597f748be53SAlexandre TORGUE 
3598ec222003SJose Abreu 		if (unlikely(error && (status & rx_not_ls)))
3599ec222003SJose Abreu 			goto read_again;
3600ec222003SJose Abreu 		if (unlikely(error)) {
3601ec222003SJose Abreu 			dev_kfree_skb(skb);
360288ebe2cfSJose Abreu 			skb = NULL;
3603cda4985aSJose Abreu 			count++;
360407b39753SAaro Koskinen 			continue;
3605e527c4a7SGiuseppe CAVALLARO 		}
3606e527c4a7SGiuseppe CAVALLARO 
3607ec222003SJose Abreu 		/* Buffer is good. Go on. */
3608ec222003SJose Abreu 
360988ebe2cfSJose Abreu 		prefetch(page_address(buf->page));
361088ebe2cfSJose Abreu 		if (buf->sec_page)
361188ebe2cfSJose Abreu 			prefetch(page_address(buf->sec_page));
361288ebe2cfSJose Abreu 
361388ebe2cfSJose Abreu 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
361488ebe2cfSJose Abreu 		len += buf1_len;
361588ebe2cfSJose Abreu 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
361688ebe2cfSJose Abreu 		len += buf2_len;
3617ec222003SJose Abreu 
36187ac6653aSJeff Kirsher 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3619ceb69499SGiuseppe CAVALLARO 		 * Type frames (LLC/LLC-SNAP)
3620565020aaSJose Abreu 		 *
3621565020aaSJose Abreu 		 * llc_snap is never checked in GMAC >= 4, so this ACS
3622565020aaSJose Abreu 		 * feature is always disabled and packets need to be
3623565020aaSJose Abreu 		 * stripped manually.
3624ceb69499SGiuseppe CAVALLARO 		 */
3625565020aaSJose Abreu 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
362688ebe2cfSJose Abreu 		    unlikely(status != llc_snap)) {
362788ebe2cfSJose Abreu 			if (buf2_len)
362888ebe2cfSJose Abreu 				buf2_len -= ETH_FCS_LEN;
362988ebe2cfSJose Abreu 			else
363088ebe2cfSJose Abreu 				buf1_len -= ETH_FCS_LEN;
363188ebe2cfSJose Abreu 
3632ec222003SJose Abreu 			len -= ETH_FCS_LEN;
363383d7af64SGiuseppe CAVALLARO 		}
363422ad3838SGiuseppe Cavallaro 
3635ec222003SJose Abreu 		if (!skb) {
363688ebe2cfSJose Abreu 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
3637ec222003SJose Abreu 			if (!skb) {
363822ad3838SGiuseppe Cavallaro 				priv->dev->stats.rx_dropped++;
3639cda4985aSJose Abreu 				count++;
364088ebe2cfSJose Abreu 				goto drain_data;
364122ad3838SGiuseppe Cavallaro 			}
364222ad3838SGiuseppe Cavallaro 
364388ebe2cfSJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
364488ebe2cfSJose Abreu 						buf1_len, DMA_FROM_DEVICE);
36452af6106aSJose Abreu 			skb_copy_to_linear_data(skb, page_address(buf->page),
364688ebe2cfSJose Abreu 						buf1_len);
364788ebe2cfSJose Abreu 			skb_put(skb, buf1_len);
364822ad3838SGiuseppe Cavallaro 
3649ec222003SJose Abreu 			/* Data payload copied into SKB, page ready for recycle */
3650ec222003SJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
3651ec222003SJose Abreu 			buf->page = NULL;
365288ebe2cfSJose Abreu 		} else if (buf1_len) {
3653ec222003SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
365488ebe2cfSJose Abreu 						buf1_len, DMA_FROM_DEVICE);
3655ec222003SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
365688ebe2cfSJose Abreu 					buf->page, 0, buf1_len,
3657ec222003SJose Abreu 					priv->dma_buf_sz);
3658ec222003SJose Abreu 
3659ec222003SJose Abreu 			/* Data payload appended into SKB */
3660ec222003SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->page);
3661ec222003SJose Abreu 			buf->page = NULL;
36627ac6653aSJeff Kirsher 		}
366383d7af64SGiuseppe CAVALLARO 
366488ebe2cfSJose Abreu 		if (buf2_len) {
366567afd6d1SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
366688ebe2cfSJose Abreu 						buf2_len, DMA_FROM_DEVICE);
366767afd6d1SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
366888ebe2cfSJose Abreu 					buf->sec_page, 0, buf2_len,
366967afd6d1SJose Abreu 					priv->dma_buf_sz);
367067afd6d1SJose Abreu 
367167afd6d1SJose Abreu 			/* Data payload appended into SKB */
367267afd6d1SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->sec_page);
367367afd6d1SJose Abreu 			buf->sec_page = NULL;
367467afd6d1SJose Abreu 		}
367567afd6d1SJose Abreu 
367688ebe2cfSJose Abreu drain_data:
3677ec222003SJose Abreu 		if (likely(status & rx_not_ls))
3678ec222003SJose Abreu 			goto read_again;
367988ebe2cfSJose Abreu 		if (!skb)
368088ebe2cfSJose Abreu 			continue;
3681ec222003SJose Abreu 
3682ec222003SJose Abreu 		/* Got entire packet into SKB. Finish it. */
3683ec222003SJose Abreu 
3684ba1ffd74SGiuseppe CAVALLARO 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
3685b9381985SVince Bridgers 		stmmac_rx_vlan(priv->dev, skb);
36867ac6653aSJeff Kirsher 		skb->protocol = eth_type_trans(skb, priv->dev);
36877ac6653aSJeff Kirsher 
3688ceb69499SGiuseppe CAVALLARO 		if (unlikely(!coe))
36897ac6653aSJeff Kirsher 			skb_checksum_none_assert(skb);
369062a2ab93SGiuseppe CAVALLARO 		else
36917ac6653aSJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
369262a2ab93SGiuseppe CAVALLARO 
369376067459SJose Abreu 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
369476067459SJose Abreu 			skb_set_hash(skb, hash, hash_type);
369576067459SJose Abreu 
369676067459SJose Abreu 		skb_record_rx_queue(skb, queue);
36974ccb4585SJose Abreu 		napi_gro_receive(&ch->rx_napi, skb);
369888ebe2cfSJose Abreu 		skb = NULL;
36997ac6653aSJeff Kirsher 
37007ac6653aSJeff Kirsher 		priv->dev->stats.rx_packets++;
3701ec222003SJose Abreu 		priv->dev->stats.rx_bytes += len;
3702cda4985aSJose Abreu 		count++;
37037ac6653aSJeff Kirsher 	}
3704ec222003SJose Abreu 
370588ebe2cfSJose Abreu 	if (status & rx_not_ls || skb) {
3706ec222003SJose Abreu 		rx_q->state_saved = true;
3707ec222003SJose Abreu 		rx_q->state.skb = skb;
3708ec222003SJose Abreu 		rx_q->state.error = error;
3709ec222003SJose Abreu 		rx_q->state.len = len;
37107ac6653aSJeff Kirsher 	}
37117ac6653aSJeff Kirsher 
371254139cf3SJoao Pinto 	stmmac_rx_refill(priv, queue);
37137ac6653aSJeff Kirsher 
37147ac6653aSJeff Kirsher 	priv->xstats.rx_pkt_n += count;
37157ac6653aSJeff Kirsher 
37167ac6653aSJeff Kirsher 	return count;
37177ac6653aSJeff Kirsher }
37187ac6653aSJeff Kirsher 
37194ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
37207ac6653aSJeff Kirsher {
37218fce3331SJose Abreu 	struct stmmac_channel *ch =
37224ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, rx_napi);
37238fce3331SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
37248fce3331SJose Abreu 	u32 chan = ch->index;
37254ccb4585SJose Abreu 	int work_done;
37267ac6653aSJeff Kirsher 
37279125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
3728ce736788SJoao Pinto 
37294ccb4585SJose Abreu 	work_done = stmmac_rx(priv, budget, chan);
37304ccb4585SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done))
37314ccb4585SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
37324ccb4585SJose Abreu 	return work_done;
37334ccb4585SJose Abreu }
3734ce736788SJoao Pinto 
37354ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
37364ccb4585SJose Abreu {
37374ccb4585SJose Abreu 	struct stmmac_channel *ch =
37384ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, tx_napi);
37394ccb4585SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
37404ccb4585SJose Abreu 	struct stmmac_tx_queue *tx_q;
37414ccb4585SJose Abreu 	u32 chan = ch->index;
37424ccb4585SJose Abreu 	int work_done;
37434ccb4585SJose Abreu 
37444ccb4585SJose Abreu 	priv->xstats.napi_poll++;
37454ccb4585SJose Abreu 
37464ccb4585SJose Abreu 	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3747fa0be0a4SJose Abreu 	work_done = min(work_done, budget);
37488fce3331SJose Abreu 
3749a66b5884SJose Abreu 	if (work_done < budget)
3750a66b5884SJose Abreu 		napi_complete_done(napi, work_done);
37514ccb4585SJose Abreu 
37524ccb4585SJose Abreu 	/* Force transmission restart */
37534ccb4585SJose Abreu 	tx_q = &priv->tx_queue[chan];
37544ccb4585SJose Abreu 	if (tx_q->cur_tx != tx_q->dirty_tx) {
37554ccb4585SJose Abreu 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
37564ccb4585SJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
37574ccb4585SJose Abreu 				       chan);
3758fa0be0a4SJose Abreu 	}
37598fce3331SJose Abreu 
37607ac6653aSJeff Kirsher 	return work_done;
37617ac6653aSJeff Kirsher }
37627ac6653aSJeff Kirsher 
37637ac6653aSJeff Kirsher /**
37647ac6653aSJeff Kirsher  *  stmmac_tx_timeout
37657ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
37667ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
37677284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
37687ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
37697ac6653aSJeff Kirsher  *   in order to transmit a new packet.
37707ac6653aSJeff Kirsher  */
37717ac6653aSJeff Kirsher static void stmmac_tx_timeout(struct net_device *dev)
37727ac6653aSJeff Kirsher {
37737ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
37747ac6653aSJeff Kirsher 
377534877a15SJose Abreu 	stmmac_global_err(priv);
37767ac6653aSJeff Kirsher }
37777ac6653aSJeff Kirsher 
37787ac6653aSJeff Kirsher /**
377901789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
37807ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
37817ac6653aSJeff Kirsher  *  Description:
37827ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
37837ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
37847ac6653aSJeff Kirsher  *  Return value:
37857ac6653aSJeff Kirsher  *  void.
37867ac6653aSJeff Kirsher  */
378701789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
37887ac6653aSJeff Kirsher {
37897ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
37907ac6653aSJeff Kirsher 
3791c10d4c82SJose Abreu 	stmmac_set_filter(priv, priv->hw, dev);
37927ac6653aSJeff Kirsher }
37937ac6653aSJeff Kirsher 
37947ac6653aSJeff Kirsher /**
37957ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
37967ac6653aSJeff Kirsher  *  @dev : device pointer.
37977ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
37987ac6653aSJeff Kirsher  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
37997ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
38007ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
38017ac6653aSJeff Kirsher  *  Return value:
38027ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
38037ac6653aSJeff Kirsher  *  file on failure.
38047ac6653aSJeff Kirsher  */
38057ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
38067ac6653aSJeff Kirsher {
380738ddc59dSLABBE Corentin 	struct stmmac_priv *priv = netdev_priv(dev);
380838ddc59dSLABBE Corentin 
38097ac6653aSJeff Kirsher 	if (netif_running(dev)) {
381038ddc59dSLABBE Corentin 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
38117ac6653aSJeff Kirsher 		return -EBUSY;
38127ac6653aSJeff Kirsher 	}
38137ac6653aSJeff Kirsher 
38147ac6653aSJeff Kirsher 	dev->mtu = new_mtu;
3815f748be53SAlexandre TORGUE 
38167ac6653aSJeff Kirsher 	netdev_update_features(dev);
38177ac6653aSJeff Kirsher 
38187ac6653aSJeff Kirsher 	return 0;
38197ac6653aSJeff Kirsher }
38207ac6653aSJeff Kirsher 
3821c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
3822c8f44affSMichał Mirosław 					     netdev_features_t features)
38237ac6653aSJeff Kirsher {
38247ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
38257ac6653aSJeff Kirsher 
382638912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
38277ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
3828d2afb5bdSGiuseppe CAVALLARO 
38297ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
3830a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
38317ac6653aSJeff Kirsher 
38327ac6653aSJeff Kirsher 	/* Some GMAC devices have a bugged Jumbo frame support that
38337ac6653aSJeff Kirsher 	 * needs to have the Tx COE disabled for oversized frames
38347ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
3835ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and not use SF.
3836ceb69499SGiuseppe CAVALLARO 	 */
38377ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3838a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
38397ac6653aSJeff Kirsher 
3840f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
3841f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3842f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
3843f748be53SAlexandre TORGUE 			priv->tso = true;
3844f748be53SAlexandre TORGUE 		else
3845f748be53SAlexandre TORGUE 			priv->tso = false;
3846f748be53SAlexandre TORGUE 	}
3847f748be53SAlexandre TORGUE 
38487ac6653aSJeff Kirsher 	return features;
38497ac6653aSJeff Kirsher }
38507ac6653aSJeff Kirsher 
3851d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
3852d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
3853d2afb5bdSGiuseppe CAVALLARO {
3854d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
385567afd6d1SJose Abreu 	bool sph_en;
385667afd6d1SJose Abreu 	u32 chan;
3857d2afb5bdSGiuseppe CAVALLARO 
3858d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type in case of csum is supporting */
3859d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
3860d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
3861d2afb5bdSGiuseppe CAVALLARO 	else
3862d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
3863d2afb5bdSGiuseppe CAVALLARO 	/* No check needed because rx_coe has been set before and it will be
3864d2afb5bdSGiuseppe CAVALLARO 	 * fixed in case of issue.
3865d2afb5bdSGiuseppe CAVALLARO 	 */
3866c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
3867d2afb5bdSGiuseppe CAVALLARO 
386867afd6d1SJose Abreu 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
386967afd6d1SJose Abreu 	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
387067afd6d1SJose Abreu 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
387167afd6d1SJose Abreu 
3872d2afb5bdSGiuseppe CAVALLARO 	return 0;
3873d2afb5bdSGiuseppe CAVALLARO }
3874d2afb5bdSGiuseppe CAVALLARO 
387532ceabcaSGiuseppe CAVALLARO /**
387632ceabcaSGiuseppe CAVALLARO  *  stmmac_interrupt - main ISR
387732ceabcaSGiuseppe CAVALLARO  *  @irq: interrupt number.
387832ceabcaSGiuseppe CAVALLARO  *  @dev_id: to pass the net device pointer.
387932ceabcaSGiuseppe CAVALLARO  *  Description: this is the main driver interrupt service routine.
3880732fdf0eSGiuseppe CAVALLARO  *  It can call:
3881732fdf0eSGiuseppe CAVALLARO  *  o DMA service routine (to manage incoming frame reception and transmission
3882732fdf0eSGiuseppe CAVALLARO  *    status)
3883732fdf0eSGiuseppe CAVALLARO  *  o Core interrupts to manage: remote wake-up, management counter, LPI
388432ceabcaSGiuseppe CAVALLARO  *    interrupts.
388532ceabcaSGiuseppe CAVALLARO  */
38867ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
38877ac6653aSJeff Kirsher {
38887ac6653aSJeff Kirsher 	struct net_device *dev = (struct net_device *)dev_id;
38897ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
38907bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
38917bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
38927bac4e1eSJoao Pinto 	u32 queues_count;
38937bac4e1eSJoao Pinto 	u32 queue;
38947d9e6c5aSJose Abreu 	bool xmac;
38957bac4e1eSJoao Pinto 
38967d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
38977bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
38987ac6653aSJeff Kirsher 
389989f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
390089f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
390189f7f2cfSSrinivas Kandagatla 
39027ac6653aSJeff Kirsher 	if (unlikely(!dev)) {
390338ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
39047ac6653aSJeff Kirsher 		return IRQ_NONE;
39057ac6653aSJeff Kirsher 	}
39067ac6653aSJeff Kirsher 
390734877a15SJose Abreu 	/* Check if adapter is up */
390834877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
390934877a15SJose Abreu 		return IRQ_HANDLED;
39108bf993a5SJose Abreu 	/* Check if a fatal error happened */
39118bf993a5SJose Abreu 	if (stmmac_safety_feat_interrupt(priv))
39128bf993a5SJose Abreu 		return IRQ_HANDLED;
391334877a15SJose Abreu 
39147ac6653aSJeff Kirsher 	/* To handle GMAC own interrupts */
39157d9e6c5aSJose Abreu 	if ((priv->plat->has_gmac) || xmac) {
3916c10d4c82SJose Abreu 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
391761fac60aSJose Abreu 		int mtl_status;
39188f71a88dSJoao Pinto 
3919d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
3920d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
39210982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3922d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
39230982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3924d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
39257bac4e1eSJoao Pinto 		}
39267bac4e1eSJoao Pinto 
39277bac4e1eSJoao Pinto 		for (queue = 0; queue < queues_count; queue++) {
392861fac60aSJose Abreu 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
392954139cf3SJoao Pinto 
393061fac60aSJose Abreu 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
393161fac60aSJose Abreu 								queue);
393261fac60aSJose Abreu 			if (mtl_status != -EINVAL)
393361fac60aSJose Abreu 				status |= mtl_status;
39347bac4e1eSJoao Pinto 
3935a4e887faSJose Abreu 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
393661fac60aSJose Abreu 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
393754139cf3SJoao Pinto 						       rx_q->rx_tail_addr,
39387bac4e1eSJoao Pinto 						       queue);
39397bac4e1eSJoao Pinto 		}
394070523e63SGiuseppe CAVALLARO 
394170523e63SGiuseppe CAVALLARO 		/* PCS link status */
39423fe5cadbSGiuseppe CAVALLARO 		if (priv->hw->pcs) {
394370523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
394470523e63SGiuseppe CAVALLARO 				netif_carrier_on(dev);
394570523e63SGiuseppe CAVALLARO 			else
394670523e63SGiuseppe CAVALLARO 				netif_carrier_off(dev);
394770523e63SGiuseppe CAVALLARO 		}
3948d765955dSGiuseppe CAVALLARO 	}
3949d765955dSGiuseppe CAVALLARO 
3950d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
39517ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
39527ac6653aSJeff Kirsher 
39537ac6653aSJeff Kirsher 	return IRQ_HANDLED;
39547ac6653aSJeff Kirsher }
39557ac6653aSJeff Kirsher 
39567ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
39577ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools
3958ceb69499SGiuseppe CAVALLARO  * to allow network I/O with interrupts disabled.
3959ceb69499SGiuseppe CAVALLARO  */
39607ac6653aSJeff Kirsher static void stmmac_poll_controller(struct net_device *dev)
39617ac6653aSJeff Kirsher {
39627ac6653aSJeff Kirsher 	disable_irq(dev->irq);
39637ac6653aSJeff Kirsher 	stmmac_interrupt(dev->irq, dev);
39647ac6653aSJeff Kirsher 	enable_irq(dev->irq);
39657ac6653aSJeff Kirsher }
39667ac6653aSJeff Kirsher #endif
39677ac6653aSJeff Kirsher 
39687ac6653aSJeff Kirsher /**
39697ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
39707ac6653aSJeff Kirsher  *  @dev: Device pointer.
39717ac6653aSJeff Kirsher  *  @rq: An IOCTL specefic structure, that can contain a pointer to
39727ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
39737ac6653aSJeff Kirsher  *  @cmd: IOCTL command
39747ac6653aSJeff Kirsher  *  Description:
397532ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
39767ac6653aSJeff Kirsher  */
39777ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39787ac6653aSJeff Kirsher {
397974371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv (dev);
3980891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
39817ac6653aSJeff Kirsher 
39827ac6653aSJeff Kirsher 	if (!netif_running(dev))
39837ac6653aSJeff Kirsher 		return -EINVAL;
39847ac6653aSJeff Kirsher 
3985891434b1SRayagond Kokatanur 	switch (cmd) {
3986891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
3987891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
3988891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
398974371272SJose Abreu 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
3990891434b1SRayagond Kokatanur 		break;
3991891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
3992d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_set(dev, rq);
3993d6228b7cSArtem Panfilov 		break;
3994d6228b7cSArtem Panfilov 	case SIOCGHWTSTAMP:
3995d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_get(dev, rq);
3996891434b1SRayagond Kokatanur 		break;
3997891434b1SRayagond Kokatanur 	default:
3998891434b1SRayagond Kokatanur 		break;
3999891434b1SRayagond Kokatanur 	}
40007ac6653aSJeff Kirsher 
40017ac6653aSJeff Kirsher 	return ret;
40027ac6653aSJeff Kirsher }
40037ac6653aSJeff Kirsher 
40044dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
40054dbbe8ddSJose Abreu 				    void *cb_priv)
40064dbbe8ddSJose Abreu {
40074dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
40084dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
40094dbbe8ddSJose Abreu 
4010425eabddSJose Abreu 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4011425eabddSJose Abreu 		return ret;
4012425eabddSJose Abreu 
40134dbbe8ddSJose Abreu 	stmmac_disable_all_queues(priv);
40144dbbe8ddSJose Abreu 
40154dbbe8ddSJose Abreu 	switch (type) {
40164dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
40174dbbe8ddSJose Abreu 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
40184dbbe8ddSJose Abreu 		break;
4019425eabddSJose Abreu 	case TC_SETUP_CLSFLOWER:
4020425eabddSJose Abreu 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
4021425eabddSJose Abreu 		break;
40224dbbe8ddSJose Abreu 	default:
40234dbbe8ddSJose Abreu 		break;
40244dbbe8ddSJose Abreu 	}
40254dbbe8ddSJose Abreu 
40264dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
40274dbbe8ddSJose Abreu 	return ret;
40284dbbe8ddSJose Abreu }
40294dbbe8ddSJose Abreu 
4030955bcb6eSPablo Neira Ayuso static LIST_HEAD(stmmac_block_cb_list);
4031955bcb6eSPablo Neira Ayuso 
40324dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
40334dbbe8ddSJose Abreu 			   void *type_data)
40344dbbe8ddSJose Abreu {
40354dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
40364dbbe8ddSJose Abreu 
40374dbbe8ddSJose Abreu 	switch (type) {
40384dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
4039955bcb6eSPablo Neira Ayuso 		return flow_block_cb_setup_simple(type_data,
4040955bcb6eSPablo Neira Ayuso 						  &stmmac_block_cb_list,
40414e95bc26SPablo Neira Ayuso 						  stmmac_setup_tc_block_cb,
40424e95bc26SPablo Neira Ayuso 						  priv, priv, true);
40431f705bc6SJose Abreu 	case TC_SETUP_QDISC_CBS:
40441f705bc6SJose Abreu 		return stmmac_tc_setup_cbs(priv, priv, type_data);
40454dbbe8ddSJose Abreu 	default:
40464dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
40474dbbe8ddSJose Abreu 	}
40484dbbe8ddSJose Abreu }
40494dbbe8ddSJose Abreu 
40504993e5b3SJose Abreu static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
40514993e5b3SJose Abreu 			       struct net_device *sb_dev)
40524993e5b3SJose Abreu {
4053b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
4054b7766206SJose Abreu 
4055b7766206SJose Abreu 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
40564993e5b3SJose Abreu 		/*
4057b7766206SJose Abreu 		 * There is no way to determine the number of TSO/USO
40584993e5b3SJose Abreu 		 * capable Queues. Let's use always the Queue 0
4059b7766206SJose Abreu 		 * because if TSO/USO is supported then at least this
40604993e5b3SJose Abreu 		 * one will be capable.
40614993e5b3SJose Abreu 		 */
40624993e5b3SJose Abreu 		return 0;
40634993e5b3SJose Abreu 	}
40644993e5b3SJose Abreu 
40654993e5b3SJose Abreu 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
40664993e5b3SJose Abreu }
40674993e5b3SJose Abreu 
4068a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4069a830405eSBhadram Varka {
4070a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
4071a830405eSBhadram Varka 	int ret = 0;
4072a830405eSBhadram Varka 
4073a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
4074a830405eSBhadram Varka 	if (ret)
4075a830405eSBhadram Varka 		return ret;
4076a830405eSBhadram Varka 
4077c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4078a830405eSBhadram Varka 
4079a830405eSBhadram Varka 	return ret;
4080a830405eSBhadram Varka }
4081a830405eSBhadram Varka 
408250fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
40837ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
40847ac29055SGiuseppe CAVALLARO 
4085c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
4086c24602efSGiuseppe CAVALLARO 			       struct seq_file *seq)
40877ac29055SGiuseppe CAVALLARO {
40887ac29055SGiuseppe CAVALLARO 	int i;
4089c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4090c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
40917ac29055SGiuseppe CAVALLARO 
4092c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
4093c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
4094c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4095c24602efSGiuseppe CAVALLARO 				   i, (unsigned int)virt_to_phys(ep),
4096f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
4097f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
4098f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
4099f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
4100c24602efSGiuseppe CAVALLARO 			ep++;
4101c24602efSGiuseppe CAVALLARO 		} else {
4102c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
410366c25f6eSNiklas Cassel 				   i, (unsigned int)virt_to_phys(p),
4104f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4105f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4106c24602efSGiuseppe CAVALLARO 			p++;
4107c24602efSGiuseppe CAVALLARO 		}
41087ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
41097ac29055SGiuseppe CAVALLARO 	}
4110c24602efSGiuseppe CAVALLARO }
41117ac29055SGiuseppe CAVALLARO 
4112fb0d9c63SYangtao Li static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4113c24602efSGiuseppe CAVALLARO {
4114c24602efSGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
4115c24602efSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
411654139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
4117ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
411854139cf3SJoao Pinto 	u32 queue;
411954139cf3SJoao Pinto 
41205f2b8b62SThierry Reding 	if ((dev->flags & IFF_UP) == 0)
41215f2b8b62SThierry Reding 		return 0;
41225f2b8b62SThierry Reding 
412354139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
412454139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
412554139cf3SJoao Pinto 
412654139cf3SJoao Pinto 		seq_printf(seq, "RX Queue %d:\n", queue);
41277ac29055SGiuseppe CAVALLARO 
4128c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc) {
412954139cf3SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
413054139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_erx,
413154139cf3SJoao Pinto 					   DMA_RX_SIZE, 1, seq);
413254139cf3SJoao Pinto 		} else {
413354139cf3SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
413454139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_rx,
413554139cf3SJoao Pinto 					   DMA_RX_SIZE, 0, seq);
413654139cf3SJoao Pinto 		}
413754139cf3SJoao Pinto 	}
413854139cf3SJoao Pinto 
4139ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
4140ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4141ce736788SJoao Pinto 
4142ce736788SJoao Pinto 		seq_printf(seq, "TX Queue %d:\n", queue);
4143ce736788SJoao Pinto 
414454139cf3SJoao Pinto 		if (priv->extend_desc) {
4145ce736788SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
4146ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_etx,
4147ce736788SJoao Pinto 					   DMA_TX_SIZE, 1, seq);
4148c24602efSGiuseppe CAVALLARO 		} else {
4149ce736788SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
4150ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_tx,
4151ce736788SJoao Pinto 					   DMA_TX_SIZE, 0, seq);
4152ce736788SJoao Pinto 		}
41537ac29055SGiuseppe CAVALLARO 	}
41547ac29055SGiuseppe CAVALLARO 
41557ac29055SGiuseppe CAVALLARO 	return 0;
41567ac29055SGiuseppe CAVALLARO }
4157fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
41587ac29055SGiuseppe CAVALLARO 
4159fb0d9c63SYangtao Li static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4160e7434821SGiuseppe CAVALLARO {
4161e7434821SGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
4162e7434821SGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
4163e7434821SGiuseppe CAVALLARO 
416419e30c14SGiuseppe CAVALLARO 	if (!priv->hw_cap_support) {
4165e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "DMA HW features not supported\n");
4166e7434821SGiuseppe CAVALLARO 		return 0;
4167e7434821SGiuseppe CAVALLARO 	}
4168e7434821SGiuseppe CAVALLARO 
4169e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
4170e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tDMA HW features\n");
4171e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
4172e7434821SGiuseppe CAVALLARO 
417322d3efe5SPavel Machek 	seq_printf(seq, "\t10/100 Mbps: %s\n",
4174e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
417522d3efe5SPavel Machek 	seq_printf(seq, "\t1000 Mbps: %s\n",
4176e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
417722d3efe5SPavel Machek 	seq_printf(seq, "\tHalf duplex: %s\n",
4178e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
4179e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tHash Filter: %s\n",
4180e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
4181e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4182e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
41838d45e42bSLABBE Corentin 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4184e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pcs) ? "Y" : "N");
4185e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4186e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
4187e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
4188e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4189e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
4190e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4191e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRMON module: %s\n",
4192e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rmon) ? "Y" : "N");
4193e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4194e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
4195e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4196e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
419722d3efe5SPavel Machek 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4198e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.eee) ? "Y" : "N");
4199e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4200e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4201e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
4202f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4203f748be53SAlexandre TORGUE 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4204f748be53SAlexandre TORGUE 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
4205f748be53SAlexandre TORGUE 	} else {
4206e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4207e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4208e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4209e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4210f748be53SAlexandre TORGUE 	}
4211e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4212e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4213e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4214e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_rx_channel);
4215e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4216e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_tx_channel);
4217e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4218e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
4219e7434821SGiuseppe CAVALLARO 
4220e7434821SGiuseppe CAVALLARO 	return 0;
4221e7434821SGiuseppe CAVALLARO }
4222fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4223e7434821SGiuseppe CAVALLARO 
42248d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev)
42257ac29055SGiuseppe CAVALLARO {
4226466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
42277ac29055SGiuseppe CAVALLARO 
4228466c5ac8SMathieu Olivari 	/* Create per netdev entries */
4229466c5ac8SMathieu Olivari 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4230466c5ac8SMathieu Olivari 
42317ac29055SGiuseppe CAVALLARO 	/* Entry to report DMA RX/TX rings */
42328d72ab11SGreg Kroah-Hartman 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
42337ac29055SGiuseppe CAVALLARO 			    &stmmac_rings_status_fops);
42347ac29055SGiuseppe CAVALLARO 
4235e7434821SGiuseppe CAVALLARO 	/* Entry to report the DMA HW features */
42368d72ab11SGreg Kroah-Hartman 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
42378d72ab11SGreg Kroah-Hartman 			    &stmmac_dma_cap_fops);
42387ac29055SGiuseppe CAVALLARO }
42397ac29055SGiuseppe CAVALLARO 
4240466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev)
42417ac29055SGiuseppe CAVALLARO {
4242466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
4243466c5ac8SMathieu Olivari 
4244466c5ac8SMathieu Olivari 	debugfs_remove_recursive(priv->dbgfs_dir);
42457ac29055SGiuseppe CAVALLARO }
424650fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
42477ac29055SGiuseppe CAVALLARO 
42483cd1cfcbSJose Abreu static u32 stmmac_vid_crc32_le(__le16 vid_le)
42493cd1cfcbSJose Abreu {
42503cd1cfcbSJose Abreu 	unsigned char *data = (unsigned char *)&vid_le;
42513cd1cfcbSJose Abreu 	unsigned char data_byte = 0;
42523cd1cfcbSJose Abreu 	u32 crc = ~0x0;
42533cd1cfcbSJose Abreu 	u32 temp = 0;
42543cd1cfcbSJose Abreu 	int i, bits;
42553cd1cfcbSJose Abreu 
42563cd1cfcbSJose Abreu 	bits = get_bitmask_order(VLAN_VID_MASK);
42573cd1cfcbSJose Abreu 	for (i = 0; i < bits; i++) {
42583cd1cfcbSJose Abreu 		if ((i % 8) == 0)
42593cd1cfcbSJose Abreu 			data_byte = data[i / 8];
42603cd1cfcbSJose Abreu 
42613cd1cfcbSJose Abreu 		temp = ((crc & 1) ^ data_byte) & 1;
42623cd1cfcbSJose Abreu 		crc >>= 1;
42633cd1cfcbSJose Abreu 		data_byte >>= 1;
42643cd1cfcbSJose Abreu 
42653cd1cfcbSJose Abreu 		if (temp)
42663cd1cfcbSJose Abreu 			crc ^= 0xedb88320;
42673cd1cfcbSJose Abreu 	}
42683cd1cfcbSJose Abreu 
42693cd1cfcbSJose Abreu 	return crc;
42703cd1cfcbSJose Abreu }
42713cd1cfcbSJose Abreu 
/* Program the MAC VLAN filter from the bitmap of currently active VIDs.
 *
 * When the HW supports VLAN hash filtering (dma_cap.vlhash) a hash is
 * built from the top 4 bits of each active VID's reflected CRC-32.
 * Without vlhash only single-VID perfect matching is possible: more
 * than two active VIDs (VID 0 always passes the filter) cannot be
 * represented and -EOPNOTSUPP is returned.
 *
 * NOTE(review): in the non-vlhash case @pmatch is taken from @vid as
 * left behind by the iteration macro, i.e. the highest-numbered active
 * VID — confirm that is the intended perfect-match entry.
 */
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	__le16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		/* 4-bit hash bucket index from the reflected CRC */
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		pmatch = cpu_to_le16(vid);
		hash = 0;
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
42963cd1cfcbSJose Abreu 
42973cd1cfcbSJose Abreu static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
42983cd1cfcbSJose Abreu {
42993cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
43003cd1cfcbSJose Abreu 	bool is_double = false;
43013cd1cfcbSJose Abreu 	int ret;
43023cd1cfcbSJose Abreu 
43033cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
43043cd1cfcbSJose Abreu 		is_double = true;
43053cd1cfcbSJose Abreu 
43063cd1cfcbSJose Abreu 	set_bit(vid, priv->active_vlans);
43073cd1cfcbSJose Abreu 	ret = stmmac_vlan_update(priv, is_double);
43083cd1cfcbSJose Abreu 	if (ret) {
43093cd1cfcbSJose Abreu 		clear_bit(vid, priv->active_vlans);
43103cd1cfcbSJose Abreu 		return ret;
43113cd1cfcbSJose Abreu 	}
43123cd1cfcbSJose Abreu 
43133cd1cfcbSJose Abreu 	return ret;
43143cd1cfcbSJose Abreu }
43153cd1cfcbSJose Abreu 
43163cd1cfcbSJose Abreu static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
43173cd1cfcbSJose Abreu {
43183cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
43193cd1cfcbSJose Abreu 	bool is_double = false;
43203cd1cfcbSJose Abreu 
43213cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
43223cd1cfcbSJose Abreu 		is_double = true;
43233cd1cfcbSJose Abreu 
43243cd1cfcbSJose Abreu 	clear_bit(vid, priv->active_vlans);
43253cd1cfcbSJose Abreu 	return stmmac_vlan_update(priv, is_double);
43263cd1cfcbSJose Abreu }
43273cd1cfcbSJose Abreu 
/* Hooks wiring the stmmac driver into the core networking stack.
 * Entries not set here fall back to the kernel defaults.
 */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_setup_tc = stmmac_setup_tc,
	.ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = stmmac_set_mac_address,
	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
};
43477ac6653aSJeff Kirsher 
/* Recover the interface after a fatal error: if a reset was requested
 * (STMMAC_RESET_REQUESTED set in priv->state) and the device is not
 * already going down, bounce the netdev with dev_close()/dev_open()
 * under the RTNL lock.
 */
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	/* Consume the request; bail out if none was pending */
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	/* Don't reset a device that is already being taken down */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	/* Wait for any concurrent reset to finish before starting ours */
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	/* STMMAC_DOWN guards against a nested reset during close/open */
	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}
436934877a15SJose Abreu 
437034877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
437134877a15SJose Abreu {
437234877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
437334877a15SJose Abreu 			service_task);
437434877a15SJose Abreu 
437534877a15SJose Abreu 	stmmac_reset_subtask(priv);
437634877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
437734877a15SJose Abreu }
437834877a15SJose Abreu 
43797ac6653aSJeff Kirsher /**
4380cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
438132ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
4382732fdf0eSGiuseppe CAVALLARO  *  Description: this function is to configure the MAC device according to
4383732fdf0eSGiuseppe CAVALLARO  *  some platform parameters or the HW capability register. It prepares the
4384732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain modes and to setup either enhanced or
4385732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
4386cf3f047bSGiuseppe CAVALLARO  */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only work in chain mode */
	if (priv->plat->has_sun8i)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
		priv->hw->pmt = priv->plat->pmt;
		/* Hash table size field encodes log2(bins) - 5 */
		if (priv->dma_cap.hash_tb_sz) {
			priv->hw->multicast_filter_bins =
					(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
					ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		/* Older cores report the RX COE type through two bits */
		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some cases, for example on bugged HW, this feature
	 * has to be disabled and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	return 0;
}
4477cf3f047bSGiuseppe CAVALLARO 
4478cf3f047bSGiuseppe CAVALLARO /**
4479bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
4480bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
4481ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
4482e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
4483bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function used to
4484bfab27a1SGiuseppe CAVALLARO  * call the alloc_etherdev, allocate the priv structure.
 * Return: 0 on success, otherwise a negative errno.
44877ac6653aSJeff Kirsher  */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 queue, rxq, maxq;
	int i, ret = 0;

	/* devm-managed: the netdev is freed automatically on probe failure
	 * or device removal, so no explicit free_netdev() in error paths.
	 */
	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;

	/* Use the platform-provided MAC address when one was found */
	if (!IS_ERR_OR_NULL(res->mac))
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		return -ENOMEM;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only reset callback instead of
		 * assert + deassert callbacks pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	stmmac_check_ether_addr(priv);

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	/* Advertise TC offload only if the HW/driver combination supports it;
	 * a non-zero return here is not fatal.
	 */
	ret = stmmac_tc_init(priv, priv);
	if (!ret) {
		ndev->hw_features |= NETIF_F_HW_TC;
	}

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		if (priv->plat->has_gmac4)
			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}

	/* Split Header support enables GRO */
	if (priv->dma_cap.sphen) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph = true;
		dev_info(priv->device, "SPH feature enabled\n");
	}

	/* Set the DMA mask to the width the HW reports; fall back to
	 * 32 bits if the wider mask cannot be satisfied.
	 */
	if (priv->dma_cap.addr64) {
		ret = dma_set_mask_and_coherent(device,
				DMA_BIT_MASK(priv->dma_cap.addr64));
		if (!ret) {
			dev_info(priv->device, "Using %d bits DMA width\n",
				 priv->dma_cap.addr64);

			/*
			 * If more than 32 bits can be addressed, make sure to
			 * enable enhanced addressing mode.
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.addr64 = 32;
		}
	}

	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
	if (priv->dma_cap.vlins) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
		if (priv->dma_cap.dvlan)
			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
	}
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if (priv->plat->has_xgmac)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Setup channels NAPI */
	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	/* Each channel may carry an RX NAPI, a TX NAPI, or both, depending
	 * on the configured queue counts.
	 */
	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		ch->priv_data = priv;
		ch->index = queue;

		if (queue < priv->plat->rx_queues_to_use) {
			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
				       NAPI_POLL_WEIGHT);
		}
		if (queue < priv->plat->tx_queues_to_use) {
			netif_tx_napi_add(ndev, &ch->tx_napi,
					  stmmac_napi_poll_tx,
					  NAPI_POLL_WEIGHT);
		}
	}

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Viceversa the driver'll try to
	 * set the MDC clock dynamically according to the csr actual
	 * clock input.
	 */
	if (priv->plat->clk_csr >= 0)
		priv->clk_csr = priv->plat->clk_csr;
	else
		stmmac_clk_csr_set(priv);

	stmmac_check_pcs_mode(priv);

	/* No MDIO bus is needed when the PCS handles the link directly */
	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed",
				__func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	ret = stmmac_phy_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

#ifdef CONFIG_DEBUG_FS
	stmmac_init_fs(ndev);
#endif

	return ret;

	/* Error labels unwind in reverse order of the setup above */
error_netdev_register:
	phylink_destroy(priv->phylink);
error_phy_setup:
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < priv->plat->rx_queues_to_use)
			netif_napi_del(&ch->rx_napi);
		if (queue < priv->plat->tx_queues_to_use)
			netif_napi_del(&ch->tx_napi);
	}
error_hw_init:
	destroy_workqueue(priv->wq);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
47477ac6653aSJeff Kirsher 
47487ac6653aSJeff Kirsher /**
47497ac6653aSJeff Kirsher  * stmmac_dvr_remove
4750f4e7bd81SJoachim Eastwood  * @dev: device pointer
47517ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
4752bfab27a1SGiuseppe CAVALLARO  * changes the link status, releases the DMA descriptor rings.
47537ac6653aSJeff Kirsher  */
4754f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev)
47557ac6653aSJeff Kirsher {
4756f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
47577ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
47587ac6653aSJeff Kirsher 
475938ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver", __func__);
47607ac6653aSJeff Kirsher 
47615f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
47625f2b8b62SThierry Reding 	stmmac_exit_fs(ndev);
47635f2b8b62SThierry Reding #endif
4764ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
47657ac6653aSJeff Kirsher 
4766c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
47677ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
47687ac6653aSJeff Kirsher 	unregister_netdev(ndev);
476974371272SJose Abreu 	phylink_destroy(priv->phylink);
4770f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
4771f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
4772f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->pclk);
4773f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->stmmac_clk);
47743fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
47753fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
47763fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4777e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
477834877a15SJose Abreu 	destroy_workqueue(priv->wq);
477929555fa3SThierry Reding 	mutex_destroy(&priv->lock);
47807ac6653aSJeff Kirsher 
47817ac6653aSJeff Kirsher 	return 0;
47827ac6653aSJeff Kirsher }
4783b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
47847ac6653aSJeff Kirsher 
4785732fdf0eSGiuseppe CAVALLARO /**
4786732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
4787f4e7bd81SJoachim Eastwood  * @dev: device pointer
4788732fdf0eSGiuseppe CAVALLARO  * Description: this is the function to suspend the device and it is called
4789732fdf0eSGiuseppe CAVALLARO  * by the platform driver to stop the network queue, release the resources,
4790732fdf0eSGiuseppe CAVALLARO  * program the PMT register (for WoL), clean and release driver resources.
4791732fdf0eSGiuseppe CAVALLARO  */
4792f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
47937ac6653aSJeff Kirsher {
4794f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
47957ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
47967ac6653aSJeff Kirsher 
47977ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
47987ac6653aSJeff Kirsher 		return 0;
47997ac6653aSJeff Kirsher 
48003e2bf04fSJose Abreu 	phylink_mac_change(priv->phylink, false);
48017ac6653aSJeff Kirsher 
4802134cc4ceSThierry Reding 	mutex_lock(&priv->lock);
480319e13cb2SJose Abreu 
48047ac6653aSJeff Kirsher 	netif_device_detach(ndev);
4805c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
48067ac6653aSJeff Kirsher 
4807c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
48087ac6653aSJeff Kirsher 
48097ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
4810ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
4811c24602efSGiuseppe CAVALLARO 
48127ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
481389f7f2cfSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
4814c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, priv->wolopts);
481589f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
481689f7f2cfSSrinivas Kandagatla 	} else {
4817134cc4ceSThierry Reding 		mutex_unlock(&priv->lock);
48183e2bf04fSJose Abreu 		rtnl_lock();
48193e2bf04fSJose Abreu 		phylink_stop(priv->phylink);
48203e2bf04fSJose Abreu 		rtnl_unlock();
4821134cc4ceSThierry Reding 		mutex_lock(&priv->lock);
48223e2bf04fSJose Abreu 
4823c10d4c82SJose Abreu 		stmmac_mac_set(priv, priv->ioaddr, false);
4824db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
4825ba1377ffSGiuseppe CAVALLARO 		/* Disable clock in case of PWM is off */
4826e497c20eSBiao Huang 		if (priv->plat->clk_ptp_ref)
4827e497c20eSBiao Huang 			clk_disable_unprepare(priv->plat->clk_ptp_ref);
4828e497c20eSBiao Huang 		clk_disable_unprepare(priv->plat->pclk);
4829e497c20eSBiao Huang 		clk_disable_unprepare(priv->plat->stmmac_clk);
4830ba1377ffSGiuseppe CAVALLARO 	}
483129555fa3SThierry Reding 	mutex_unlock(&priv->lock);
48322d871aa0SVince Bridgers 
4833bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
48347ac6653aSJeff Kirsher 	return 0;
48357ac6653aSJeff Kirsher }
4836b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
48377ac6653aSJeff Kirsher 
4838732fdf0eSGiuseppe CAVALLARO /**
483954139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
484054139cf3SJoao Pinto  * @dev: device pointer
484154139cf3SJoao Pinto  */
484254139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
484354139cf3SJoao Pinto {
484454139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4845ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
484654139cf3SJoao Pinto 	u32 queue;
484754139cf3SJoao Pinto 
484854139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
484954139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
485054139cf3SJoao Pinto 
485154139cf3SJoao Pinto 		rx_q->cur_rx = 0;
485254139cf3SJoao Pinto 		rx_q->dirty_rx = 0;
485354139cf3SJoao Pinto 	}
485454139cf3SJoao Pinto 
4855ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
4856ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4857ce736788SJoao Pinto 
4858ce736788SJoao Pinto 		tx_q->cur_tx = 0;
4859ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
48608d212a9eSNiklas Cassel 		tx_q->mss = 0;
4861ce736788SJoao Pinto 	}
486254139cf3SJoao Pinto }
486354139cf3SJoao Pinto 
486454139cf3SJoao Pinto /**
4865732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
4866f4e7bd81SJoachim Eastwood  * @dev: device pointer
4867732fdf0eSGiuseppe CAVALLARO  * Description: when resume this function is invoked to setup the DMA and CORE
4868732fdf0eSGiuseppe CAVALLARO  * in a usable state.
4869732fdf0eSGiuseppe CAVALLARO  */
4870f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
48717ac6653aSJeff Kirsher {
4872f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
48737ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
48747ac6653aSJeff Kirsher 
48757ac6653aSJeff Kirsher 	if (!netif_running(ndev))
48767ac6653aSJeff Kirsher 		return 0;
48777ac6653aSJeff Kirsher 
48787ac6653aSJeff Kirsher 	/* Power Down bit, into the PM register, is cleared
48797ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
48807ac6653aSJeff Kirsher 	 * is received. Anyway, it's better to manually clear
48817ac6653aSJeff Kirsher 	 * this bit because it can generate problems while resuming
4882ceb69499SGiuseppe CAVALLARO 	 * from another devices (e.g. serial console).
4883ceb69499SGiuseppe CAVALLARO 	 */
4884623997fbSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
488529555fa3SThierry Reding 		mutex_lock(&priv->lock);
4886c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, 0);
488729555fa3SThierry Reding 		mutex_unlock(&priv->lock);
488889f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
4889623997fbSSrinivas Kandagatla 	} else {
4890db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
48918d45e42bSLABBE Corentin 		/* enable the clk previously disabled */
4892e497c20eSBiao Huang 		clk_prepare_enable(priv->plat->stmmac_clk);
4893e497c20eSBiao Huang 		clk_prepare_enable(priv->plat->pclk);
4894e497c20eSBiao Huang 		if (priv->plat->clk_ptp_ref)
4895e497c20eSBiao Huang 			clk_prepare_enable(priv->plat->clk_ptp_ref);
4896623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
4897623997fbSSrinivas Kandagatla 		if (priv->mii)
4898623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
4899623997fbSSrinivas Kandagatla 	}
49007ac6653aSJeff Kirsher 
49017ac6653aSJeff Kirsher 	netif_device_attach(ndev);
49027ac6653aSJeff Kirsher 
490329555fa3SThierry Reding 	mutex_lock(&priv->lock);
4904f55d84b0SVincent Palatin 
490554139cf3SJoao Pinto 	stmmac_reset_queues_param(priv);
490654139cf3SJoao Pinto 
4907ae79a639SGiuseppe CAVALLARO 	stmmac_clear_descriptors(priv);
4908ae79a639SGiuseppe CAVALLARO 
4909fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
4910d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
4911ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
49127ac6653aSJeff Kirsher 
4913c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
49147ac6653aSJeff Kirsher 
4915c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
49167ac6653aSJeff Kirsher 
4917134cc4ceSThierry Reding 	mutex_unlock(&priv->lock);
4918134cc4ceSThierry Reding 
49193e2bf04fSJose Abreu 	if (!device_may_wakeup(priv->device)) {
492019e13cb2SJose Abreu 		rtnl_lock();
492174371272SJose Abreu 		phylink_start(priv->phylink);
492219e13cb2SJose Abreu 		rtnl_unlock();
49233e2bf04fSJose Abreu 	}
492419e13cb2SJose Abreu 
49253e2bf04fSJose Abreu 	phylink_mac_change(priv->phylink, true);
4926102463b1SFrancesco Virlinzi 
49277ac6653aSJeff Kirsher 	return 0;
49287ac6653aSJeff Kirsher }
4929b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
4930ba27ec66SGiuseppe CAVALLARO 
49317ac6653aSJeff Kirsher #ifndef MODULE
49327ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
49337ac6653aSJeff Kirsher {
49347ac6653aSJeff Kirsher 	char *opt;
49357ac6653aSJeff Kirsher 
49367ac6653aSJeff Kirsher 	if (!str || !*str)
49377ac6653aSJeff Kirsher 		return -EINVAL;
49387ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
49397ac6653aSJeff Kirsher 		if (!strncmp(opt, "debug:", 6)) {
4940ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
49417ac6653aSJeff Kirsher 				goto err;
49427ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4943ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
49447ac6653aSJeff Kirsher 				goto err;
49457ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4946ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
49477ac6653aSJeff Kirsher 				goto err;
49487ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "tc:", 3)) {
4949ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
49507ac6653aSJeff Kirsher 				goto err;
49517ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "watchdog:", 9)) {
4952ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
49537ac6653aSJeff Kirsher 				goto err;
49547ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4955ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
49567ac6653aSJeff Kirsher 				goto err;
49577ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "pause:", 6)) {
4958ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
49597ac6653aSJeff Kirsher 				goto err;
4960506f669cSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4961d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
4962d765955dSGiuseppe CAVALLARO 				goto err;
49634a7d666aSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "chain_mode:", 11)) {
49644a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
49654a7d666aSGiuseppe CAVALLARO 				goto err;
49667ac6653aSJeff Kirsher 		}
49677ac6653aSJeff Kirsher 	}
49687ac6653aSJeff Kirsher 	return 0;
49697ac6653aSJeff Kirsher 
49707ac6653aSJeff Kirsher err:
49717ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion", __func__);
49727ac6653aSJeff Kirsher 	return -EINVAL;
49737ac6653aSJeff Kirsher }
49747ac6653aSJeff Kirsher 
49757ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
4976ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
49776fc0d0f2SGiuseppe Cavallaro 
4978466c5ac8SMathieu Olivari static int __init stmmac_init(void)
4979466c5ac8SMathieu Olivari {
4980466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4981466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
49828d72ab11SGreg Kroah-Hartman 	if (!stmmac_fs_dir)
4983466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4984466c5ac8SMathieu Olivari #endif
4985466c5ac8SMathieu Olivari 
4986466c5ac8SMathieu Olivari 	return 0;
4987466c5ac8SMathieu Olivari }
4988466c5ac8SMathieu Olivari 
4989466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
4990466c5ac8SMathieu Olivari {
4991466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4992466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
4993466c5ac8SMathieu Olivari #endif
4994466c5ac8SMathieu Olivari }
4995466c5ac8SMathieu Olivari 
4996466c5ac8SMathieu Olivari module_init(stmmac_init)
4997466c5ac8SMathieu Olivari module_exit(stmmac_exit)
4998466c5ac8SMathieu Olivari 
49996fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
50006fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
50016fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
5002