// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

#define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver uses the ring mode to manage tx and rx descriptors,
 * but the user can force use of the chain mode instead of the ring.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

/**
 * stmmac_stop_all_queues - Stop all queues
 * @priv: driver private structure
 */
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_start_all_queues - Start all queues
 * @priv: driver private structure
 */
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}

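/**
 * stmmac_service_event_schedule - schedule the service task
 * @priv: driver private structure
 * Description: queue the service work item unless the interface is going
 * down or a service run is already scheduled.
 */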
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

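/**
 * stmmac_global_err - handle a fatal MAC/DMA error
 * @priv: driver private structure
 * Description: take the carrier down, flag that a reset is required and
 * let the service task perform the actual recovery.
 */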
static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the CSR
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and is fixed (as reported in the driver
 *	documentation). Otherwise the driver tries to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* The platform-provided default clk_csr is assumed valid for
	 * all cases except the ones mentioned below.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider because the frequency
	 * of clk_csr_i is not known, so the default divider is left
	 * unchanged.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

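/* Dump a packet buffer (length and hex contents) for debugging purposes */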
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

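/**
 * stmmac_tx_avail - Get number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 */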
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function verifies that all TX queues are idle and, if
 * so, enters LPI mode when EEE is enabled.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables EEE when the LPI state
 * is active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list pointer used to retrieve the private structure
 * Description:
 *  if there is no data transfer and we are not already in LPI state,
 *  then the MAC transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int tx_lpi_timer = priv->tx_lpi_timer;

	/* When using PCS we cannot deal with the PHY registers at this
	 * stage, so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, tx_lpi_timer);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     tx_lpi_timer);
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor and passes it to
 * the stack, also performing some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		/* get the valid tstamp */
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}

	return;
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet timestamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 sec_inc = 0;
	u32 value = 0;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping.
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				xmac, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				(u32)now.tv_sec, now.tv_nsec);
	}

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this verifies if the HW supports PTPv1 or PTPv2 by looking
 * at the HW capability register. It also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

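/**
 * stmmac_release_ptp - release PTP resources
 * @priv: driver private structure
 * Description: disable the PTP reference clock (if any) and unregister
 * the PTP clock driver.
 */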
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode of the link
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			priv->pause, tx_cnt);
}

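/* phylink validate callback: mask out the link modes that the MAC cannot
 * support for the current configuration (speed cap, number of TX queues,
 * XGMAC availability).
 */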
static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_xgmac) {
		phylink_set(mac_supported, 2500baseT_Full);
		phylink_set(mac_supported, 5000baseT_Full);
		phylink_set(mac_supported, 10000baseSR_Full);
		phylink_set(mac_supported, 10000baseLR_Full);
		phylink_set(mac_supported, 10000baseER_Full);
		phylink_set(mac_supported, 10000baseLRM_Full);
		phylink_set(mac_supported, 10000baseT_Full);
		phylink_set(mac_supported, 10000baseKX4_Full);
		phylink_set(mac_supported, 10000baseKR_Full);
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	bitmap_and(supported, supported, mac_supported,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_andnot(supported, supported, mask,
		      __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mac_supported,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_andnot(state->advertising, state->advertising, mask,
		      __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static int stmmac_mac_link_state(struct phylink_config *config,
				 struct phylink_link_state *state)
{
	return -EOPNOTSUPP;
}

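/* phylink mac_config callback: program the MAC speed, duplex and
 * flow-control settings according to the resolved link state.
 */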
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (state->speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else {
		switch (state->speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = state->speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, state->speed);

	if (!state->duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (state->pause)
		stmmac_mac_flow_ctrl(priv, state->duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
}

static void stmmac_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}

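/* phylink mac_link_down callback: disable the MAC and turn off EEE */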
static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);
}

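/* phylink mac_link_up callback: enable the MAC and, when both the PHY and
 * the MAC support it, bring up EEE.
 */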
static void stmmac_mac_link_up(struct phylink_config *config,
			       unsigned int mode, phy_interface_t interface,
			       struct phy_device *phy)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		stmmac_set_eee_pls(priv, priv->hw, true);
	}
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_link_state = stmmac_mac_link_state,
	.mac_config = stmmac_mac_config,
	.mac_an_restart = stmmac_mac_an_restart,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

96329feff39SJoao Pinto /**
964732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
96532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
96632ceabcaSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PCS.
96732ceabcaSGiuseppe CAVALLARO  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
96832ceabcaSGiuseppe CAVALLARO  * configured for the TBI, RTBI, or SGMII PHY interface.
96932ceabcaSGiuseppe CAVALLARO  */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

9887ac6653aSJeff Kirsher /**
9897ac6653aSJeff Kirsher  * stmmac_init_phy - PHY initialization
9907ac6653aSJeff Kirsher  * @dev: net device structure
9917ac6653aSJeff Kirsher  * Description: it initializes the driver's PHY state, and attaches the PHY
9927ac6653aSJeff Kirsher  * to the mac driver.
9937ac6653aSJeff Kirsher  *  Return value:
9947ac6653aSJeff Kirsher  *  0 on success
9957ac6653aSJeff Kirsher  */
9967ac6653aSJeff Kirsher static int stmmac_init_phy(struct net_device *dev)
9977ac6653aSJeff Kirsher {
9987ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
99974371272SJose Abreu 	struct device_node *node;
100074371272SJose Abreu 	int ret;
10017ac6653aSJeff Kirsher 
10024838a540SJose Abreu 	node = priv->plat->phylink_node;
100374371272SJose Abreu 
100442e87024SJose Abreu 	if (node)
100574371272SJose Abreu 		ret = phylink_of_phy_connect(priv->phylink, node, 0);
100642e87024SJose Abreu 
100742e87024SJose Abreu 	/* Some DT bindings do not set-up the PHY handle. Let's try to
100842e87024SJose Abreu 	 * manually parse it
100942e87024SJose Abreu 	 */
101042e87024SJose Abreu 	if (!node || ret) {
101174371272SJose Abreu 		int addr = priv->plat->phy_addr;
101274371272SJose Abreu 		struct phy_device *phydev;
1013f142af2eSSrinivas Kandagatla 
101474371272SJose Abreu 		phydev = mdiobus_get_phy(priv->mii, addr);
101574371272SJose Abreu 		if (!phydev) {
101674371272SJose Abreu 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
10177ac6653aSJeff Kirsher 			return -ENODEV;
10187ac6653aSJeff Kirsher 		}
10198e99fc5fSGiuseppe Cavallaro 
102074371272SJose Abreu 		ret = phylink_connect_phy(priv->phylink, phydev);
102174371272SJose Abreu 	}
1022c51e424dSFlorian Fainelli 
102374371272SJose Abreu 	return ret;
102474371272SJose Abreu }
102574371272SJose Abreu 
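/**
 * stmmac_phy_setup - create and register the phylink instance
 * @priv: driver private structure
 * Description: creates the phylink instance for the interface mode given by
 * the platform data and stores it in the driver private structure.
 */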
102674371272SJose Abreu static int stmmac_phy_setup(struct stmmac_priv *priv)
102774371272SJose Abreu {
1028c63d1e5cSArnd Bergmann 	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
102974371272SJose Abreu 	int mode = priv->plat->interface;
103074371272SJose Abreu 	struct phylink *phylink;
103174371272SJose Abreu 
103274371272SJose Abreu 	priv->phylink_config.dev = &priv->dev->dev;
103374371272SJose Abreu 	priv->phylink_config.type = PHYLINK_NETDEV;
103474371272SJose Abreu 
1035c63d1e5cSArnd Bergmann 	phylink = phylink_create(&priv->phylink_config, fwnode,
103674371272SJose Abreu 				 mode, &stmmac_phylink_mac_ops);
103774371272SJose Abreu 	if (IS_ERR(phylink))
103874371272SJose Abreu 		return PTR_ERR(phylink);
103974371272SJose Abreu 
104074371272SJose Abreu 	priv->phylink = phylink;
10417ac6653aSJeff Kirsher 	return 0;
10427ac6653aSJeff Kirsher }
10437ac6653aSJeff Kirsher 
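/**
 * stmmac_display_rx_rings - dump the RX descriptor rings
 * @priv: driver private structure
 * Description: debug helper that dumps the RX descriptor ring of each RX
 * queue, for either basic or extended descriptors.
 */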
104471fedb01SJoao Pinto static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1045c24602efSGiuseppe CAVALLARO {
104654139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
104771fedb01SJoao Pinto 	void *head_rx;
104854139cf3SJoao Pinto 	u32 queue;
104954139cf3SJoao Pinto 
105054139cf3SJoao Pinto 	/* Display RX rings */
105154139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
105254139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
105354139cf3SJoao Pinto 
105454139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1055d0225e7dSAlexandre TORGUE 
105671fedb01SJoao Pinto 		if (priv->extend_desc)
105754139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
105871fedb01SJoao Pinto 		else
105954139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
106071fedb01SJoao Pinto 
106171fedb01SJoao Pinto 		/* Display RX ring */
106242de047dSJose Abreu 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
10635bacd778SLABBE Corentin 	}
106454139cf3SJoao Pinto }
1065d0225e7dSAlexandre TORGUE 
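/**
 * stmmac_display_tx_rings - dump the TX descriptor rings
 * @priv: driver private structure
 * Description: debug helper that dumps the TX descriptor ring of each TX
 * queue, for either basic or extended descriptors.
 */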
106671fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv)
106771fedb01SJoao Pinto {
1068ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
106971fedb01SJoao Pinto 	void *head_tx;
1070ce736788SJoao Pinto 	u32 queue;
1071ce736788SJoao Pinto 
1072ce736788SJoao Pinto 	/* Display TX rings */
1073ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1074ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1075ce736788SJoao Pinto 
1076ce736788SJoao Pinto 		pr_info("\tTX Queue %u rings\n", queue);
107771fedb01SJoao Pinto 
107871fedb01SJoao Pinto 		if (priv->extend_desc)
1079ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
108071fedb01SJoao Pinto 		else
1081ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
108271fedb01SJoao Pinto 
108342de047dSJose Abreu 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1084c24602efSGiuseppe CAVALLARO 	}
1085ce736788SJoao Pinto }
1086c24602efSGiuseppe CAVALLARO 
108771fedb01SJoao Pinto static void stmmac_display_rings(struct stmmac_priv *priv)
108871fedb01SJoao Pinto {
108971fedb01SJoao Pinto 	/* Display RX ring */
109071fedb01SJoao Pinto 	stmmac_display_rx_rings(priv);
109171fedb01SJoao Pinto 
109271fedb01SJoao Pinto 	/* Display TX ring */
109371fedb01SJoao Pinto 	stmmac_display_tx_rings(priv);
109471fedb01SJoao Pinto }
109571fedb01SJoao Pinto 
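/**
 * stmmac_set_bfsize - select the DMA buffer size for a given MTU
 * @mtu: the device MTU
 * @bufsize: current buffer size
 * Description: returns the buffer size (DEFAULT_BUFSIZE, 2KiB, 4KiB or 8KiB)
 * large enough to hold a frame of the given MTU.
 */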
1096286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1097286a8372SGiuseppe CAVALLARO {
1098286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1099286a8372SGiuseppe CAVALLARO 
1100286a8372SGiuseppe CAVALLARO 	if (mtu >= BUF_SIZE_4KiB)
1101286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1102286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1103286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1104d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1105286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1106286a8372SGiuseppe CAVALLARO 	else
1107d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1108286a8372SGiuseppe CAVALLARO 
1109286a8372SGiuseppe CAVALLARO 	return ret;
1110286a8372SGiuseppe CAVALLARO }
1111286a8372SGiuseppe CAVALLARO 
111232ceabcaSGiuseppe CAVALLARO /**
111371fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
111432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
111554139cf3SJoao Pinto  * @queue: RX queue index
111671fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors,
111732ceabcaSGiuseppe CAVALLARO  * whether basic or extended descriptors are in use.
111832ceabcaSGiuseppe CAVALLARO  */
111954139cf3SJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1120c24602efSGiuseppe CAVALLARO {
112154139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
11225bacd778SLABBE Corentin 	int i;
1123c24602efSGiuseppe CAVALLARO 
112471fedb01SJoao Pinto 	/* Clear the RX descriptors */
11255bacd778SLABBE Corentin 	for (i = 0; i < DMA_RX_SIZE; i++)
11265bacd778SLABBE Corentin 		if (priv->extend_desc)
112742de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
11285bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1129583e6361SAaro Koskinen 					(i == DMA_RX_SIZE - 1),
1130583e6361SAaro Koskinen 					priv->dma_buf_sz);
11315bacd778SLABBE Corentin 		else
113242de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
11335bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1134583e6361SAaro Koskinen 					(i == DMA_RX_SIZE - 1),
1135583e6361SAaro Koskinen 					priv->dma_buf_sz);
113671fedb01SJoao Pinto }
113771fedb01SJoao Pinto 
113871fedb01SJoao Pinto /**
113971fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
114071fedb01SJoao Pinto  * @priv: driver private structure
1141ce736788SJoao Pinto  * @queue: TX queue index.
114271fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors,
114371fedb01SJoao Pinto  * whether basic or extended descriptors are in use.
114471fedb01SJoao Pinto  */
1145ce736788SJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
114671fedb01SJoao Pinto {
1147ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
114871fedb01SJoao Pinto 	int i;
114971fedb01SJoao Pinto 
115071fedb01SJoao Pinto 	/* Clear the TX descriptors */
11515bacd778SLABBE Corentin 	for (i = 0; i < DMA_TX_SIZE; i++)
11525bacd778SLABBE Corentin 		if (priv->extend_desc)
115342de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
115442de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
11555bacd778SLABBE Corentin 		else
115642de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
115742de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1158c24602efSGiuseppe CAVALLARO }
1159c24602efSGiuseppe CAVALLARO 
1160732fdf0eSGiuseppe CAVALLARO /**
116171fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
116271fedb01SJoao Pinto  * @priv: driver private structure
116371fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors,
116471fedb01SJoao Pinto  * whether basic or extended descriptors are in use.
116571fedb01SJoao Pinto  */
116671fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv)
116771fedb01SJoao Pinto {
116854139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1169ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
117054139cf3SJoao Pinto 	u32 queue;
117154139cf3SJoao Pinto 
117271fedb01SJoao Pinto 	/* Clear the RX descriptors */
117354139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
117454139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
117571fedb01SJoao Pinto 
117671fedb01SJoao Pinto 	/* Clear the TX descriptors */
1177ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1178ce736788SJoao Pinto 		stmmac_clear_tx_descriptors(priv, queue);
117971fedb01SJoao Pinto }
118071fedb01SJoao Pinto 
118171fedb01SJoao Pinto /**
1182732fdf0eSGiuseppe CAVALLARO  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1183732fdf0eSGiuseppe CAVALLARO  * @priv: driver private structure
1184732fdf0eSGiuseppe CAVALLARO  * @p: descriptor pointer
1185732fdf0eSGiuseppe CAVALLARO  * @i: descriptor index
118654139cf3SJoao Pinto  * @flags: gfp flag
118754139cf3SJoao Pinto  * @queue: RX queue index
1188732fdf0eSGiuseppe CAVALLARO  * Description: this function is called to allocate a receive buffer, perform
1189732fdf0eSGiuseppe CAVALLARO  * the DMA mapping and init the descriptor.
1190732fdf0eSGiuseppe CAVALLARO  */
1191c24602efSGiuseppe CAVALLARO static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
119254139cf3SJoao Pinto 				  int i, gfp_t flags, u32 queue)
1193c24602efSGiuseppe CAVALLARO {
119454139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
11952af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1196c24602efSGiuseppe CAVALLARO 
11972af6106aSJose Abreu 	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
11982af6106aSJose Abreu 	if (!buf->page)
119956329137SBartlomiej Zolnierkiewicz 		return -ENOMEM;
1200c24602efSGiuseppe CAVALLARO 
12012af6106aSJose Abreu 	buf->addr = page_pool_get_dma_addr(buf->page);
12022af6106aSJose Abreu 	stmmac_set_desc_addr(priv, p, buf->addr);
12032c520b1cSJose Abreu 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
12042c520b1cSJose Abreu 		stmmac_init_desc3(priv, p);
1205c24602efSGiuseppe CAVALLARO 
1206c24602efSGiuseppe CAVALLARO 	return 0;
1207c24602efSGiuseppe CAVALLARO }
1208c24602efSGiuseppe CAVALLARO 
120971fedb01SJoao Pinto /**
121071fedb01SJoao Pinto  * stmmac_free_rx_buffer - free RX dma buffers
121171fedb01SJoao Pinto  * @priv: private structure
121254139cf3SJoao Pinto  * @queue: RX queue index
121371fedb01SJoao Pinto  * @i: buffer index.
121471fedb01SJoao Pinto  */
121554139cf3SJoao Pinto static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
121656329137SBartlomiej Zolnierkiewicz {
121754139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
12182af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
121954139cf3SJoao Pinto 
12202af6106aSJose Abreu 	if (buf->page)
12212af6106aSJose Abreu 		page_pool_put_page(rx_q->page_pool, buf->page, false);
12222af6106aSJose Abreu 	buf->page = NULL;
122356329137SBartlomiej Zolnierkiewicz }
122456329137SBartlomiej Zolnierkiewicz 
12257ac6653aSJeff Kirsher /**
122671fedb01SJoao Pinto  * stmmac_free_tx_buffer - free TX dma buffers
122771fedb01SJoao Pinto  * @priv: private structure
1228ce736788SJoao Pinto  * @queue: TX queue index
122971fedb01SJoao Pinto  * @i: buffer index.
123071fedb01SJoao Pinto  */
1231ce736788SJoao Pinto static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
123271fedb01SJoao Pinto {
1233ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1234ce736788SJoao Pinto 
1235ce736788SJoao Pinto 	if (tx_q->tx_skbuff_dma[i].buf) {
1236ce736788SJoao Pinto 		if (tx_q->tx_skbuff_dma[i].map_as_page)
123771fedb01SJoao Pinto 			dma_unmap_page(priv->device,
1238ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].buf,
1239ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].len,
124071fedb01SJoao Pinto 				       DMA_TO_DEVICE);
124171fedb01SJoao Pinto 		else
124271fedb01SJoao Pinto 			dma_unmap_single(priv->device,
1243ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].buf,
1244ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].len,
124571fedb01SJoao Pinto 					 DMA_TO_DEVICE);
124671fedb01SJoao Pinto 	}
124771fedb01SJoao Pinto 
1248ce736788SJoao Pinto 	if (tx_q->tx_skbuff[i]) {
1249ce736788SJoao Pinto 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1250ce736788SJoao Pinto 		tx_q->tx_skbuff[i] = NULL;
1251ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].buf = 0;
1252ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].map_as_page = false;
125371fedb01SJoao Pinto 	}
125471fedb01SJoao Pinto }
125571fedb01SJoao Pinto 
125671fedb01SJoao Pinto /**
125771fedb01SJoao Pinto  * init_dma_rx_desc_rings - init the RX descriptor rings
12587ac6653aSJeff Kirsher  * @dev: net device structure
12595bacd778SLABBE Corentin  * @flags: gfp flag.
126071fedb01SJoao Pinto  * Description: this function initializes the DMA RX descriptors
12615bacd778SLABBE Corentin  * and allocates the socket buffers. It supports the chained and ring
1262286a8372SGiuseppe CAVALLARO  * modes.
12637ac6653aSJeff Kirsher  */
126471fedb01SJoao Pinto static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
12657ac6653aSJeff Kirsher {
12667ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
126754139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
12685bacd778SLABBE Corentin 	int ret = -ENOMEM;
12692c520b1cSJose Abreu 	int bfsize = 0;
12701d3028f4SColin Ian King 	int queue;
127154139cf3SJoao Pinto 	int i;
12727ac6653aSJeff Kirsher 
12732c520b1cSJose Abreu 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
12742c520b1cSJose Abreu 	if (bfsize < 0)
12752c520b1cSJose Abreu 		bfsize = 0;
12765bacd778SLABBE Corentin 
12775bacd778SLABBE Corentin 	if (bfsize < BUF_SIZE_16KiB)
12785bacd778SLABBE Corentin 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
12795bacd778SLABBE Corentin 
12805bacd778SLABBE Corentin 	priv->dma_buf_sz = bfsize;
12812618abb7SVince Bridgers 
128254139cf3SJoao Pinto 	/* RX INITIALIZATION */
12835bacd778SLABBE Corentin 	netif_dbg(priv, probe, priv->dev,
12845bacd778SLABBE Corentin 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
12855bacd778SLABBE Corentin 
128654139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
128754139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
128854139cf3SJoao Pinto 
128954139cf3SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
129054139cf3SJoao Pinto 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
129154139cf3SJoao Pinto 			  (u32)rx_q->dma_rx_phy);
129254139cf3SJoao Pinto 
1293cbcf0999SJose Abreu 		stmmac_clear_rx_descriptors(priv, queue);
1294cbcf0999SJose Abreu 
12955bacd778SLABBE Corentin 		for (i = 0; i < DMA_RX_SIZE; i++) {
12965bacd778SLABBE Corentin 			struct dma_desc *p;
12975bacd778SLABBE Corentin 
129854139cf3SJoao Pinto 			if (priv->extend_desc)
129954139cf3SJoao Pinto 				p = &((rx_q->dma_erx + i)->basic);
130054139cf3SJoao Pinto 			else
130154139cf3SJoao Pinto 				p = rx_q->dma_rx + i;
130254139cf3SJoao Pinto 
130354139cf3SJoao Pinto 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
130454139cf3SJoao Pinto 						     queue);
13055bacd778SLABBE Corentin 			if (ret)
13065bacd778SLABBE Corentin 				goto err_init_rx_buffers;
13075bacd778SLABBE Corentin 		}
130854139cf3SJoao Pinto 
130954139cf3SJoao Pinto 		rx_q->cur_rx = 0;
131054139cf3SJoao Pinto 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
131154139cf3SJoao Pinto 
1312c24602efSGiuseppe CAVALLARO 		/* Setup the chained descriptor addresses */
1313c24602efSGiuseppe CAVALLARO 		if (priv->mode == STMMAC_CHAIN_MODE) {
131471fedb01SJoao Pinto 			if (priv->extend_desc)
13152c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_erx,
13162c520b1cSJose Abreu 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
131771fedb01SJoao Pinto 			else
13182c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_rx,
13192c520b1cSJose Abreu 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
132071fedb01SJoao Pinto 		}
132154139cf3SJoao Pinto 	}
132254139cf3SJoao Pinto 
132354139cf3SJoao Pinto 	buf_sz = bfsize;
132471fedb01SJoao Pinto 
132571fedb01SJoao Pinto 	return 0;
132654139cf3SJoao Pinto 
132771fedb01SJoao Pinto err_init_rx_buffers:
132854139cf3SJoao Pinto 	while (queue >= 0) {
132971fedb01SJoao Pinto 		while (--i >= 0)
133054139cf3SJoao Pinto 			stmmac_free_rx_buffer(priv, queue, i);
133154139cf3SJoao Pinto 
133254139cf3SJoao Pinto 		if (queue == 0)
133354139cf3SJoao Pinto 			break;
133454139cf3SJoao Pinto 
133554139cf3SJoao Pinto 		i = DMA_RX_SIZE;
133654139cf3SJoao Pinto 		queue--;
133754139cf3SJoao Pinto 	}
133854139cf3SJoao Pinto 
133971fedb01SJoao Pinto 	return ret;
134071fedb01SJoao Pinto }
134171fedb01SJoao Pinto 
134271fedb01SJoao Pinto /**
134371fedb01SJoao Pinto  * init_dma_tx_desc_rings - init the TX descriptor rings
134471fedb01SJoao Pinto  * @dev: net device structure.
134571fedb01SJoao Pinto  * Description: this function initializes the DMA TX descriptors
134671fedb01SJoao Pinto  * and clears the per-descriptor TX state. It supports the chained and ring
134771fedb01SJoao Pinto  * modes.
134871fedb01SJoao Pinto  */
134971fedb01SJoao Pinto static int init_dma_tx_desc_rings(struct net_device *dev)
135071fedb01SJoao Pinto {
135171fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
1352ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1353ce736788SJoao Pinto 	u32 queue;
135471fedb01SJoao Pinto 	int i;
135571fedb01SJoao Pinto 
1356ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1357ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1358ce736788SJoao Pinto 
135971fedb01SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
1360ce736788SJoao Pinto 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1361ce736788SJoao Pinto 			 (u32)tx_q->dma_tx_phy);
136271fedb01SJoao Pinto 
136371fedb01SJoao Pinto 		/* Setup the chained descriptor addresses */
136471fedb01SJoao Pinto 		if (priv->mode == STMMAC_CHAIN_MODE) {
136571fedb01SJoao Pinto 			if (priv->extend_desc)
13662c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_etx,
13672c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
136871fedb01SJoao Pinto 			else
13692c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_tx,
13702c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1371c24602efSGiuseppe CAVALLARO 		}
1372286a8372SGiuseppe CAVALLARO 
1373e3ad57c9SGiuseppe Cavallaro 		for (i = 0; i < DMA_TX_SIZE; i++) {
1374c24602efSGiuseppe CAVALLARO 			struct dma_desc *p;
1375c24602efSGiuseppe CAVALLARO 			if (priv->extend_desc)
1376ce736788SJoao Pinto 				p = &((tx_q->dma_etx + i)->basic);
1377c24602efSGiuseppe CAVALLARO 			else
1378ce736788SJoao Pinto 				p = tx_q->dma_tx + i;
1379f748be53SAlexandre TORGUE 
138044c67f85SJose Abreu 			stmmac_clear_desc(priv, p);
1381f748be53SAlexandre TORGUE 
1382ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].buf = 0;
1383ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1384ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].len = 0;
1385ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].last_segment = false;
1386ce736788SJoao Pinto 			tx_q->tx_skbuff[i] = NULL;
13874a7d666aSGiuseppe CAVALLARO 		}
1388c24602efSGiuseppe CAVALLARO 
1389ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
1390ce736788SJoao Pinto 		tx_q->cur_tx = 0;
13918d212a9eSNiklas Cassel 		tx_q->mss = 0;
1392ce736788SJoao Pinto 
1393c22a3f48SJoao Pinto 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1394c22a3f48SJoao Pinto 	}
13957ac6653aSJeff Kirsher 
139671fedb01SJoao Pinto 	return 0;
139771fedb01SJoao Pinto }
139871fedb01SJoao Pinto 
139971fedb01SJoao Pinto /**
140071fedb01SJoao Pinto  * init_dma_desc_rings - init the RX/TX descriptor rings
140171fedb01SJoao Pinto  * @dev: net device structure
140271fedb01SJoao Pinto  * @flags: gfp flag.
140371fedb01SJoao Pinto  * Description: this function initializes the DMA RX/TX descriptors
140471fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
140571fedb01SJoao Pinto  * modes.
140671fedb01SJoao Pinto  */
140771fedb01SJoao Pinto static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
140871fedb01SJoao Pinto {
140971fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
141071fedb01SJoao Pinto 	int ret;
141171fedb01SJoao Pinto 
141271fedb01SJoao Pinto 	ret = init_dma_rx_desc_rings(dev, flags);
141371fedb01SJoao Pinto 	if (ret)
141471fedb01SJoao Pinto 		return ret;
141571fedb01SJoao Pinto 
141671fedb01SJoao Pinto 	ret = init_dma_tx_desc_rings(dev);
141771fedb01SJoao Pinto 
14185bacd778SLABBE Corentin 	stmmac_clear_descriptors(priv);
14197ac6653aSJeff Kirsher 
1420c24602efSGiuseppe CAVALLARO 	if (netif_msg_hw(priv))
1421c24602efSGiuseppe CAVALLARO 		stmmac_display_rings(priv);
142256329137SBartlomiej Zolnierkiewicz 
142356329137SBartlomiej Zolnierkiewicz 	return ret;
14247ac6653aSJeff Kirsher }
14257ac6653aSJeff Kirsher 
142671fedb01SJoao Pinto /**
142771fedb01SJoao Pinto  * dma_free_rx_skbufs - free RX dma buffers
142871fedb01SJoao Pinto  * @priv: private structure
142954139cf3SJoao Pinto  * @queue: RX queue index
143071fedb01SJoao Pinto  */
143154139cf3SJoao Pinto static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
14327ac6653aSJeff Kirsher {
14337ac6653aSJeff Kirsher 	int i;
14347ac6653aSJeff Kirsher 
1435e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_RX_SIZE; i++)
143654139cf3SJoao Pinto 		stmmac_free_rx_buffer(priv, queue, i);
14377ac6653aSJeff Kirsher }
14387ac6653aSJeff Kirsher 
143971fedb01SJoao Pinto /**
144071fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
144171fedb01SJoao Pinto  * @priv: private structure
1442ce736788SJoao Pinto  * @queue: TX queue index
144371fedb01SJoao Pinto  */
1444ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
14457ac6653aSJeff Kirsher {
14467ac6653aSJeff Kirsher 	int i;
14477ac6653aSJeff Kirsher 
144871fedb01SJoao Pinto 	for (i = 0; i < DMA_TX_SIZE; i++)
1449ce736788SJoao Pinto 		stmmac_free_tx_buffer(priv, queue, i);
14507ac6653aSJeff Kirsher }
14517ac6653aSJeff Kirsher 
1452732fdf0eSGiuseppe CAVALLARO /**
145354139cf3SJoao Pinto  * free_dma_rx_desc_resources - free RX dma desc resources
145454139cf3SJoao Pinto  * @priv: private structure
145554139cf3SJoao Pinto  */
145654139cf3SJoao Pinto static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
145754139cf3SJoao Pinto {
145854139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
145954139cf3SJoao Pinto 	u32 queue;
146054139cf3SJoao Pinto 
146154139cf3SJoao Pinto 	/* Free RX queue resources */
146254139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
146354139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
146454139cf3SJoao Pinto 
146554139cf3SJoao Pinto 		/* Release the DMA RX socket buffers */
146654139cf3SJoao Pinto 		dma_free_rx_skbufs(priv, queue);
146754139cf3SJoao Pinto 
146854139cf3SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
146954139cf3SJoao Pinto 		if (!priv->extend_desc)
147054139cf3SJoao Pinto 			dma_free_coherent(priv->device,
147154139cf3SJoao Pinto 					  DMA_RX_SIZE * sizeof(struct dma_desc),
147254139cf3SJoao Pinto 					  rx_q->dma_rx, rx_q->dma_rx_phy);
147354139cf3SJoao Pinto 		else
147454139cf3SJoao Pinto 			dma_free_coherent(priv->device, DMA_RX_SIZE *
147554139cf3SJoao Pinto 					  sizeof(struct dma_extended_desc),
147654139cf3SJoao Pinto 					  rx_q->dma_erx, rx_q->dma_rx_phy);
147754139cf3SJoao Pinto 
14782af6106aSJose Abreu 		kfree(rx_q->buf_pool);
14792af6106aSJose Abreu 		if (rx_q->page_pool) {
14802af6106aSJose Abreu 			page_pool_request_shutdown(rx_q->page_pool);
14812af6106aSJose Abreu 			page_pool_destroy(rx_q->page_pool);
14822af6106aSJose Abreu 		}
148354139cf3SJoao Pinto 	}
148454139cf3SJoao Pinto }
148554139cf3SJoao Pinto 
148654139cf3SJoao Pinto /**
1487ce736788SJoao Pinto  * free_dma_tx_desc_resources - free TX dma desc resources
1488ce736788SJoao Pinto  * @priv: private structure
1489ce736788SJoao Pinto  */
1490ce736788SJoao Pinto static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1491ce736788SJoao Pinto {
1492ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
149362242260SChristophe Jaillet 	u32 queue;
1494ce736788SJoao Pinto 
1495ce736788SJoao Pinto 	/* Free TX queue resources */
1496ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1497ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1498ce736788SJoao Pinto 
1499ce736788SJoao Pinto 		/* Release the DMA TX socket buffers */
1500ce736788SJoao Pinto 		dma_free_tx_skbufs(priv, queue);
1501ce736788SJoao Pinto 
1502ce736788SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
1503ce736788SJoao Pinto 		if (!priv->extend_desc)
1504ce736788SJoao Pinto 			dma_free_coherent(priv->device,
1505ce736788SJoao Pinto 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1506ce736788SJoao Pinto 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1507ce736788SJoao Pinto 		else
1508ce736788SJoao Pinto 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1509ce736788SJoao Pinto 					  sizeof(struct dma_extended_desc),
1510ce736788SJoao Pinto 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1511ce736788SJoao Pinto 
1512ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff_dma);
1513ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff);
1514ce736788SJoao Pinto 	}
1515ce736788SJoao Pinto }
1516ce736788SJoao Pinto 
1517ce736788SJoao Pinto /**
151871fedb01SJoao Pinto  * alloc_dma_rx_desc_resources - alloc RX resources.
1519732fdf0eSGiuseppe CAVALLARO  * @priv: private structure
1520732fdf0eSGiuseppe CAVALLARO  * Description: according to which descriptor can be used (extend or basic)
1521732fdf0eSGiuseppe CAVALLARO  * this function allocates the resources for the RX path: the page pool,
1522732fdf0eSGiuseppe CAVALLARO  * the RX buffer pool and the DMA descriptor rings, so that the RX buffers
1523732fdf0eSGiuseppe CAVALLARO  * can be used with a zero-copy mechanism.
1524732fdf0eSGiuseppe CAVALLARO  */
152571fedb01SJoao Pinto static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
152609f8d696SSrinivas Kandagatla {
152754139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
15285bacd778SLABBE Corentin 	int ret = -ENOMEM;
152954139cf3SJoao Pinto 	u32 queue;
153009f8d696SSrinivas Kandagatla 
153154139cf3SJoao Pinto 	/* RX queues buffers and DMA */
153254139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
153354139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
15342af6106aSJose Abreu 		struct page_pool_params pp_params = { 0 };
153554139cf3SJoao Pinto 
153654139cf3SJoao Pinto 		rx_q->queue_index = queue;
153754139cf3SJoao Pinto 		rx_q->priv_data = priv;
153854139cf3SJoao Pinto 
15392af6106aSJose Abreu 		pp_params.flags = PP_FLAG_DMA_MAP;
15402af6106aSJose Abreu 		pp_params.pool_size = DMA_RX_SIZE;
15412af6106aSJose Abreu 		pp_params.order = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
15422af6106aSJose Abreu 		pp_params.nid = dev_to_node(priv->device);
15432af6106aSJose Abreu 		pp_params.dev = priv->device;
15442af6106aSJose Abreu 		pp_params.dma_dir = DMA_FROM_DEVICE;
15455bacd778SLABBE Corentin 
15462af6106aSJose Abreu 		rx_q->page_pool = page_pool_create(&pp_params);
15472af6106aSJose Abreu 		if (IS_ERR(rx_q->page_pool)) {
15482af6106aSJose Abreu 			ret = PTR_ERR(rx_q->page_pool);
15492af6106aSJose Abreu 			rx_q->page_pool = NULL;
15502af6106aSJose Abreu 			goto err_dma;
15512af6106aSJose Abreu 		}
15522af6106aSJose Abreu 
1553ec5e5ce1SJose Abreu 		rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
15545bacd778SLABBE Corentin 					 GFP_KERNEL);
15552af6106aSJose Abreu 		if (!rx_q->buf_pool)
155654139cf3SJoao Pinto 			goto err_dma;
15575bacd778SLABBE Corentin 
15585bacd778SLABBE Corentin 		if (priv->extend_desc) {
1559750afb08SLuis Chamberlain 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1560750afb08SLuis Chamberlain 							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
156154139cf3SJoao Pinto 							   &rx_q->dma_rx_phy,
15625bacd778SLABBE Corentin 							   GFP_KERNEL);
156354139cf3SJoao Pinto 			if (!rx_q->dma_erx)
15645bacd778SLABBE Corentin 				goto err_dma;
15655bacd778SLABBE Corentin 
156671fedb01SJoao Pinto 		} else {
1567750afb08SLuis Chamberlain 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1568750afb08SLuis Chamberlain 							  DMA_RX_SIZE * sizeof(struct dma_desc),
156954139cf3SJoao Pinto 							  &rx_q->dma_rx_phy,
157071fedb01SJoao Pinto 							  GFP_KERNEL);
157154139cf3SJoao Pinto 			if (!rx_q->dma_rx)
157271fedb01SJoao Pinto 				goto err_dma;
157371fedb01SJoao Pinto 		}
157454139cf3SJoao Pinto 	}
157571fedb01SJoao Pinto 
157671fedb01SJoao Pinto 	return 0;
157771fedb01SJoao Pinto 
157871fedb01SJoao Pinto err_dma:
157954139cf3SJoao Pinto 	free_dma_rx_desc_resources(priv);
158054139cf3SJoao Pinto 
158171fedb01SJoao Pinto 	return ret;
158271fedb01SJoao Pinto }
158371fedb01SJoao Pinto 
158471fedb01SJoao Pinto /**
158571fedb01SJoao Pinto  * alloc_dma_tx_desc_resources - alloc TX resources.
158671fedb01SJoao Pinto  * @priv: private structure
158771fedb01SJoao Pinto  * Description: according to which descriptor can be used (extend or basic)
158871fedb01SJoao Pinto  * this function allocates the resources for the TX path: the tx_skbuff and
158971fedb01SJoao Pinto  * tx_skbuff_dma arrays and the DMA descriptor rings for each TX queue.
159171fedb01SJoao Pinto  */
159271fedb01SJoao Pinto static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
159371fedb01SJoao Pinto {
1594ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
159571fedb01SJoao Pinto 	int ret = -ENOMEM;
1596ce736788SJoao Pinto 	u32 queue;
159771fedb01SJoao Pinto 
1598ce736788SJoao Pinto 	/* TX queues buffers and DMA */
1599ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1600ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1601ce736788SJoao Pinto 
1602ce736788SJoao Pinto 		tx_q->queue_index = queue;
1603ce736788SJoao Pinto 		tx_q->priv_data = priv;
1604ce736788SJoao Pinto 
1605ec5e5ce1SJose Abreu 		tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
1606ce736788SJoao Pinto 					      sizeof(*tx_q->tx_skbuff_dma),
160771fedb01SJoao Pinto 					      GFP_KERNEL);
1608ce736788SJoao Pinto 		if (!tx_q->tx_skbuff_dma)
160962242260SChristophe Jaillet 			goto err_dma;
161071fedb01SJoao Pinto 
1611ec5e5ce1SJose Abreu 		tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
1612ce736788SJoao Pinto 					  sizeof(struct sk_buff *),
161371fedb01SJoao Pinto 					  GFP_KERNEL);
1614ce736788SJoao Pinto 		if (!tx_q->tx_skbuff)
161562242260SChristophe Jaillet 			goto err_dma;
161671fedb01SJoao Pinto 
161771fedb01SJoao Pinto 		if (priv->extend_desc) {
1618750afb08SLuis Chamberlain 			tx_q->dma_etx = dma_alloc_coherent(priv->device,
1619750afb08SLuis Chamberlain 							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1620ce736788SJoao Pinto 							   &tx_q->dma_tx_phy,
16215bacd778SLABBE Corentin 							   GFP_KERNEL);
1622ce736788SJoao Pinto 			if (!tx_q->dma_etx)
162362242260SChristophe Jaillet 				goto err_dma;
16245bacd778SLABBE Corentin 		} else {
1625750afb08SLuis Chamberlain 			tx_q->dma_tx = dma_alloc_coherent(priv->device,
1626750afb08SLuis Chamberlain 							  DMA_TX_SIZE * sizeof(struct dma_desc),
1627ce736788SJoao Pinto 							  &tx_q->dma_tx_phy,
16285bacd778SLABBE Corentin 							  GFP_KERNEL);
1629ce736788SJoao Pinto 			if (!tx_q->dma_tx)
163062242260SChristophe Jaillet 				goto err_dma;
1631ce736788SJoao Pinto 		}
16325bacd778SLABBE Corentin 	}
16335bacd778SLABBE Corentin 
16345bacd778SLABBE Corentin 	return 0;
16355bacd778SLABBE Corentin 
163662242260SChristophe Jaillet err_dma:
1637ce736788SJoao Pinto 	free_dma_tx_desc_resources(priv);
1638ce736788SJoao Pinto 
163909f8d696SSrinivas Kandagatla 	return ret;
16405bacd778SLABBE Corentin }
164109f8d696SSrinivas Kandagatla 
164271fedb01SJoao Pinto /**
164371fedb01SJoao Pinto  * alloc_dma_desc_resources - alloc TX/RX resources.
164471fedb01SJoao Pinto  * @priv: private structure
164571fedb01SJoao Pinto  * Description: according to which descriptor can be used (extend or basic)
164671fedb01SJoao Pinto  * this function allocates the resources for TX and RX paths. In case of
164771fedb01SJoao Pinto  * reception, for example, it pre-allocates the RX buffers in order to
164871fedb01SJoao Pinto  * allow a zero-copy mechanism.
164971fedb01SJoao Pinto  */
165071fedb01SJoao Pinto static int alloc_dma_desc_resources(struct stmmac_priv *priv)
16515bacd778SLABBE Corentin {
165254139cf3SJoao Pinto 	/* RX Allocation */
165371fedb01SJoao Pinto 	int ret = alloc_dma_rx_desc_resources(priv);
165471fedb01SJoao Pinto 
165571fedb01SJoao Pinto 	if (ret)
165671fedb01SJoao Pinto 		return ret;
165771fedb01SJoao Pinto 
165871fedb01SJoao Pinto 	ret = alloc_dma_tx_desc_resources(priv);
165971fedb01SJoao Pinto 
166071fedb01SJoao Pinto 	return ret;
166171fedb01SJoao Pinto }
166271fedb01SJoao Pinto 
166371fedb01SJoao Pinto /**
166471fedb01SJoao Pinto  * free_dma_desc_resources - free dma desc resources
166571fedb01SJoao Pinto  * @priv: private structure
166671fedb01SJoao Pinto  */
166771fedb01SJoao Pinto static void free_dma_desc_resources(struct stmmac_priv *priv)
166871fedb01SJoao Pinto {
166971fedb01SJoao Pinto 	/* Release the DMA RX socket buffers */
167071fedb01SJoao Pinto 	free_dma_rx_desc_resources(priv);
167171fedb01SJoao Pinto 
167271fedb01SJoao Pinto 	/* Release the DMA TX socket buffers */
167371fedb01SJoao Pinto 	free_dma_tx_desc_resources(priv);
167471fedb01SJoao Pinto }
167571fedb01SJoao Pinto 
167671fedb01SJoao Pinto /**
16779eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
16789eb12474Sjpinto  *  @priv: driver private structure
16799eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
16809eb12474Sjpinto  */
16819eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
16829eb12474Sjpinto {
16834f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
16844f6046f5SJoao Pinto 	int queue;
16854f6046f5SJoao Pinto 	u8 mode;
16869eb12474Sjpinto 
16874f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
16884f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1689c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
16904f6046f5SJoao Pinto 	}
16919eb12474Sjpinto }
16929eb12474Sjpinto 
16939eb12474Sjpinto /**
1694ae4f0d46SJoao Pinto  * stmmac_start_rx_dma - start RX DMA channel
1695ae4f0d46SJoao Pinto  * @priv: driver private structure
1696ae4f0d46SJoao Pinto  * @chan: RX channel index
1697ae4f0d46SJoao Pinto  * Description:
1698ae4f0d46SJoao Pinto  * This starts a RX DMA channel
1699ae4f0d46SJoao Pinto  */
1700ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1701ae4f0d46SJoao Pinto {
1702ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1703a4e887faSJose Abreu 	stmmac_start_rx(priv, priv->ioaddr, chan);
1704ae4f0d46SJoao Pinto }
1705ae4f0d46SJoao Pinto 
1706ae4f0d46SJoao Pinto /**
1707ae4f0d46SJoao Pinto  * stmmac_start_tx_dma - start TX DMA channel
1708ae4f0d46SJoao Pinto  * @priv: driver private structure
1709ae4f0d46SJoao Pinto  * @chan: TX channel index
1710ae4f0d46SJoao Pinto  * Description:
1711ae4f0d46SJoao Pinto  * This starts a TX DMA channel
1712ae4f0d46SJoao Pinto  */
1713ae4f0d46SJoao Pinto static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1714ae4f0d46SJoao Pinto {
1715ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1716a4e887faSJose Abreu 	stmmac_start_tx(priv, priv->ioaddr, chan);
1717ae4f0d46SJoao Pinto }
1718ae4f0d46SJoao Pinto 
1719ae4f0d46SJoao Pinto /**
1720ae4f0d46SJoao Pinto  * stmmac_stop_rx_dma - stop RX DMA channel
1721ae4f0d46SJoao Pinto  * @priv: driver private structure
1722ae4f0d46SJoao Pinto  * @chan: RX channel index
1723ae4f0d46SJoao Pinto  * Description:
1724ae4f0d46SJoao Pinto  * This stops a RX DMA channel
1725ae4f0d46SJoao Pinto  */
1726ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1727ae4f0d46SJoao Pinto {
1728ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1729a4e887faSJose Abreu 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1730ae4f0d46SJoao Pinto }
1731ae4f0d46SJoao Pinto 
1732ae4f0d46SJoao Pinto /**
1733ae4f0d46SJoao Pinto  * stmmac_stop_tx_dma - stop TX DMA channel
1734ae4f0d46SJoao Pinto  * @priv: driver private structure
1735ae4f0d46SJoao Pinto  * @chan: TX channel index
1736ae4f0d46SJoao Pinto  * Description:
1737ae4f0d46SJoao Pinto  * This stops a TX DMA channel
1738ae4f0d46SJoao Pinto  */
1739ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1740ae4f0d46SJoao Pinto {
1741ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1742a4e887faSJose Abreu 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1743ae4f0d46SJoao Pinto }
1744ae4f0d46SJoao Pinto 
1745ae4f0d46SJoao Pinto /**
1746ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
1747ae4f0d46SJoao Pinto  * @priv: driver private structure
1748ae4f0d46SJoao Pinto  * Description:
1749ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
1750ae4f0d46SJoao Pinto  */
1751ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
1752ae4f0d46SJoao Pinto {
1753ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1754ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1755ae4f0d46SJoao Pinto 	u32 chan = 0;
1756ae4f0d46SJoao Pinto 
1757ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1758ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
1759ae4f0d46SJoao Pinto 
1760ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1761ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
1762ae4f0d46SJoao Pinto }
1763ae4f0d46SJoao Pinto 
1764ae4f0d46SJoao Pinto /**
1765ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1766ae4f0d46SJoao Pinto  * @priv: driver private structure
1767ae4f0d46SJoao Pinto  * Description:
1768ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
1769ae4f0d46SJoao Pinto  */
1770ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1771ae4f0d46SJoao Pinto {
1772ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1773ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1774ae4f0d46SJoao Pinto 	u32 chan = 0;
1775ae4f0d46SJoao Pinto 
1776ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1777ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
1778ae4f0d46SJoao Pinto 
1779ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1780ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
1781ae4f0d46SJoao Pinto }
1782ae4f0d46SJoao Pinto 
1783ae4f0d46SJoao Pinto /**
17847ac6653aSJeff Kirsher  *  stmmac_dma_operation_mode - HW DMA operation mode
178532ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
1786732fdf0eSGiuseppe CAVALLARO  *  Description: it is used for configuring the DMA operation mode register in
1787732fdf0eSGiuseppe CAVALLARO  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
17887ac6653aSJeff Kirsher  */
17897ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
17907ac6653aSJeff Kirsher {
17916deee222SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
17926deee222SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1793f88203a2SVince Bridgers 	int rxfifosz = priv->plat->rx_fifo_size;
179452a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
17956deee222SJoao Pinto 	u32 txmode = 0;
17966deee222SJoao Pinto 	u32 rxmode = 0;
17976deee222SJoao Pinto 	u32 chan = 0;
1798a0daae13SJose Abreu 	u8 qmode = 0;
1799f88203a2SVince Bridgers 
180011fbf811SThierry Reding 	if (rxfifosz == 0)
180111fbf811SThierry Reding 		rxfifosz = priv->dma_cap.rx_fifo_size;
180252a76235SJose Abreu 	if (txfifosz == 0)
180352a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
180452a76235SJose Abreu 
180552a76235SJose Abreu 	/* Adjust for real per queue fifo size */
180652a76235SJose Abreu 	rxfifosz /= rx_channels_count;
180752a76235SJose Abreu 	txfifosz /= tx_channels_count;
180811fbf811SThierry Reding 
18096deee222SJoao Pinto 	if (priv->plat->force_thresh_dma_mode) {
18106deee222SJoao Pinto 		txmode = tc;
18116deee222SJoao Pinto 		rxmode = tc;
18126deee222SJoao Pinto 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
18137ac6653aSJeff Kirsher 		/*
18147ac6653aSJeff Kirsher 		 * In case of GMAC, SF mode can be enabled
18157ac6653aSJeff Kirsher 		 * to perform the TX COE in HW. This depends on:
18167ac6653aSJeff Kirsher 		 * 1) TX COE if actually supported
18177ac6653aSJeff Kirsher 		 * 2) There is no bugged Jumbo frame support
18187ac6653aSJeff Kirsher 		 *    that needs to not insert csum in the TDES.
18197ac6653aSJeff Kirsher 		 */
18206deee222SJoao Pinto 		txmode = SF_DMA_MODE;
18216deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
1822b2dec116SSonic Zhang 		priv->xstats.threshold = SF_DMA_MODE;
18236deee222SJoao Pinto 	} else {
18246deee222SJoao Pinto 		txmode = tc;
18256deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
18266deee222SJoao Pinto 	}
18276deee222SJoao Pinto 
18286deee222SJoao Pinto 	/* configure all channels */
1829a0daae13SJose Abreu 	for (chan = 0; chan < rx_channels_count; chan++) {
1830a0daae13SJose Abreu 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
18316deee222SJoao Pinto 
1832a4e887faSJose Abreu 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1833a0daae13SJose Abreu 				rxfifosz, qmode);
18344205c88eSJose Abreu 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
18354205c88eSJose Abreu 				chan);
1836a0daae13SJose Abreu 	}
1837a0daae13SJose Abreu 
1838a0daae13SJose Abreu 	for (chan = 0; chan < tx_channels_count; chan++) {
1839a0daae13SJose Abreu 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1840a0daae13SJose Abreu 
1841a4e887faSJose Abreu 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1842a0daae13SJose Abreu 				txfifosz, qmode);
1843a0daae13SJose Abreu 	}
18447ac6653aSJeff Kirsher }
18457ac6653aSJeff Kirsher 
18467ac6653aSJeff Kirsher /**
1847732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_clean - to manage the transmission completion
184832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
1849ce736788SJoao Pinto  * @queue: TX queue index
1850732fdf0eSGiuseppe CAVALLARO  * Description: it reclaims the transmit resources after transmission completes.
18517ac6653aSJeff Kirsher  */
18528fce3331SJose Abreu static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
18537ac6653aSJeff Kirsher {
1854ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
185538979574SBeniamino Galvani 	unsigned int bytes_compl = 0, pkts_compl = 0;
18568fce3331SJose Abreu 	unsigned int entry, count = 0;
18577ac6653aSJeff Kirsher 
18588fce3331SJose Abreu 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1859a9097a96SGiuseppe CAVALLARO 
18609125cdd1SGiuseppe CAVALLARO 	priv->xstats.tx_clean++;
18619125cdd1SGiuseppe CAVALLARO 
18628d5f4b07SBernd Edlinger 	entry = tx_q->dirty_tx;
18638fce3331SJose Abreu 	while ((entry != tx_q->cur_tx) && (count < budget)) {
1864ce736788SJoao Pinto 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1865c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
1866c363b658SFabrice Gasnier 		int status;
1867c24602efSGiuseppe CAVALLARO 
1868c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
1869ce736788SJoao Pinto 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1870c24602efSGiuseppe CAVALLARO 		else
1871ce736788SJoao Pinto 			p = tx_q->dma_tx + entry;
18727ac6653aSJeff Kirsher 
187342de047dSJose Abreu 		status = stmmac_tx_status(priv, &priv->dev->stats,
187442de047dSJose Abreu 				&priv->xstats, p, priv->ioaddr);
1875c363b658SFabrice Gasnier 		/* Check if the descriptor is owned by the DMA */
1876c363b658SFabrice Gasnier 		if (unlikely(status & tx_dma_own))
1877c363b658SFabrice Gasnier 			break;
1878c363b658SFabrice Gasnier 
18798fce3331SJose Abreu 		count++;
18808fce3331SJose Abreu 
1881a6b25da5SNiklas Cassel 		/* Make sure descriptor fields are read after reading
1882a6b25da5SNiklas Cassel 		 * the own bit.
1883a6b25da5SNiklas Cassel 		 */
1884a6b25da5SNiklas Cassel 		dma_rmb();
1885a6b25da5SNiklas Cassel 
1886c363b658SFabrice Gasnier 		/* Just consider the last segment and ...*/
1887c363b658SFabrice Gasnier 		if (likely(!(status & tx_not_ls))) {
1888c363b658SFabrice Gasnier 			/* ... verify the status error condition */
1889c363b658SFabrice Gasnier 			if (unlikely(status & tx_err)) {
1890c363b658SFabrice Gasnier 				priv->dev->stats.tx_errors++;
1891c363b658SFabrice Gasnier 			} else {
18927ac6653aSJeff Kirsher 				priv->dev->stats.tx_packets++;
18937ac6653aSJeff Kirsher 				priv->xstats.tx_pkt_n++;
1894c363b658SFabrice Gasnier 			}
1895ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_tx_hwtstamp(priv, p, skb);
18967ac6653aSJeff Kirsher 		}
18977ac6653aSJeff Kirsher 
1898ce736788SJoao Pinto 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1899ce736788SJoao Pinto 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1900362b37beSGiuseppe CAVALLARO 				dma_unmap_page(priv->device,
1901ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].buf,
1902ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].len,
19037ac6653aSJeff Kirsher 					       DMA_TO_DEVICE);
1904362b37beSGiuseppe CAVALLARO 			else
1905362b37beSGiuseppe CAVALLARO 				dma_unmap_single(priv->device,
1906ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].buf,
1907ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].len,
1908362b37beSGiuseppe CAVALLARO 						 DMA_TO_DEVICE);
1909ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].buf = 0;
1910ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].len = 0;
1911ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1912cf32deecSRayagond Kokatanur 		}
1913f748be53SAlexandre TORGUE 
19142c520b1cSJose Abreu 		stmmac_clean_desc3(priv, tx_q, p);
1915f748be53SAlexandre TORGUE 
1916ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1917ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
19187ac6653aSJeff Kirsher 
19197ac6653aSJeff Kirsher 		if (likely(skb != NULL)) {
192038979574SBeniamino Galvani 			pkts_compl++;
192138979574SBeniamino Galvani 			bytes_compl += skb->len;
19227c565c33SEric W. Biederman 			dev_consume_skb_any(skb);
1923ce736788SJoao Pinto 			tx_q->tx_skbuff[entry] = NULL;
19247ac6653aSJeff Kirsher 		}
19257ac6653aSJeff Kirsher 
192642de047dSJose Abreu 		stmmac_release_tx_desc(priv, p, priv->mode);
19277ac6653aSJeff Kirsher 
1928e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
19297ac6653aSJeff Kirsher 	}
1930ce736788SJoao Pinto 	tx_q->dirty_tx = entry;
193138979574SBeniamino Galvani 
1932c22a3f48SJoao Pinto 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1933c22a3f48SJoao Pinto 				  pkts_compl, bytes_compl);
193438979574SBeniamino Galvani 
1935c22a3f48SJoao Pinto 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1936c22a3f48SJoao Pinto 								queue))) &&
1937c22a3f48SJoao Pinto 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1938c22a3f48SJoao Pinto 
1939b3e51069SLABBE Corentin 		netif_dbg(priv, tx_done, priv->dev,
1940b3e51069SLABBE Corentin 			  "%s: restart transmit\n", __func__);
1941c22a3f48SJoao Pinto 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
19427ac6653aSJeff Kirsher 	}
1943d765955dSGiuseppe CAVALLARO 
1944d765955dSGiuseppe CAVALLARO 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1945d765955dSGiuseppe CAVALLARO 		stmmac_enable_eee_mode(priv);
1946f5351ef7SGiuseppe CAVALLARO 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1947d765955dSGiuseppe CAVALLARO 	}
19488fce3331SJose Abreu 
19494ccb4585SJose Abreu 	/* We still have pending packets, let's call for a new scheduling */
19504ccb4585SJose Abreu 	if (tx_q->dirty_tx != tx_q->cur_tx)
19514ccb4585SJose Abreu 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
19524ccb4585SJose Abreu 
19538fce3331SJose Abreu 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
19548fce3331SJose Abreu 
19558fce3331SJose Abreu 	return count;
19567ac6653aSJeff Kirsher }
19577ac6653aSJeff Kirsher 
19587ac6653aSJeff Kirsher /**
1959732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
196032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
19615bacd778SLABBE Corentin  * @chan: channel index
19627ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
1963732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
19647ac6653aSJeff Kirsher  */
19655bacd778SLABBE Corentin static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
19667ac6653aSJeff Kirsher {
1967ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1968c24602efSGiuseppe CAVALLARO 	int i;
1969ce736788SJoao Pinto 
1970c22a3f48SJoao Pinto 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
19717ac6653aSJeff Kirsher 
1972ae4f0d46SJoao Pinto 	stmmac_stop_tx_dma(priv, chan);
1973ce736788SJoao Pinto 	dma_free_tx_skbufs(priv, chan);
1974e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_TX_SIZE; i++)
1975c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
197642de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
197742de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1978c24602efSGiuseppe CAVALLARO 		else
197942de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
198042de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1981ce736788SJoao Pinto 	tx_q->dirty_tx = 0;
1982ce736788SJoao Pinto 	tx_q->cur_tx = 0;
19838d212a9eSNiklas Cassel 	tx_q->mss = 0;
1984c22a3f48SJoao Pinto 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1985ae4f0d46SJoao Pinto 	stmmac_start_tx_dma(priv, chan);
19867ac6653aSJeff Kirsher 
19877ac6653aSJeff Kirsher 	priv->dev->stats.tx_errors++;
1988c22a3f48SJoao Pinto 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
19897ac6653aSJeff Kirsher }
19907ac6653aSJeff Kirsher 
199132ceabcaSGiuseppe CAVALLARO /**
19926deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
19936deee222SJoao Pinto  *  @priv: driver private structure
19946deee222SJoao Pinto  *  @txmode: TX operating mode
19956deee222SJoao Pinto  *  @rxmode: RX operating mode
19966deee222SJoao Pinto  *  @chan: channel index
19976deee222SJoao Pinto  *  Description: it is used for configuring the DMA operation mode at
19986deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
19996deee222SJoao Pinto  *  mode.
20006deee222SJoao Pinto  */
20016deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
20026deee222SJoao Pinto 					  u32 rxmode, u32 chan)
20036deee222SJoao Pinto {
2004a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2005a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
200652a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
200752a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
20086deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
200952a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
20106deee222SJoao Pinto 
20116deee222SJoao Pinto 	if (rxfifosz == 0)
20126deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
201352a76235SJose Abreu 	if (txfifosz == 0)
201452a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
201552a76235SJose Abreu 
201652a76235SJose Abreu 	/* Adjust for real per queue fifo size */
201752a76235SJose Abreu 	rxfifosz /= rx_channels_count;
201852a76235SJose Abreu 	txfifosz /= tx_channels_count;
20196deee222SJoao Pinto 
2020ab0204e3SJose Abreu 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2021ab0204e3SJose Abreu 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
20226deee222SJoao Pinto }
20236deee222SJoao Pinto 
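/**
 * stmmac_safety_feat_interrupt - handle safety feature interrupts
 * @priv: driver private structure
 * Description: checks the safety feature interrupt status and triggers the
 * global error recovery on a reported error. Returns true if an error was
 * handled, false otherwise.
 */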
20248bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
20258bf993a5SJose Abreu {
202663a550fcSJose Abreu 	int ret;
20278bf993a5SJose Abreu 
2028c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
20298bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2030c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
20318bf993a5SJose Abreu 		stmmac_global_err(priv);
2032c10d4c82SJose Abreu 		return true;
2033c10d4c82SJose Abreu 	}
2034c10d4c82SJose Abreu 
2035c10d4c82SJose Abreu 	return false;
20368bf993a5SJose Abreu }
20378bf993a5SJose Abreu 
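/**
 * stmmac_napi_check - check DMA channel status and schedule NAPI
 * @priv: driver private structure
 * @chan: DMA channel index
 * Description: reads the per-channel DMA interrupt status and schedules the
 * RX and/or TX NAPI instance of the channel when there is work to do. The
 * channel DMA interrupt is disabled while the RX NAPI run is pending.
 * Returns the interrupt status.
 */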
20388fce3331SJose Abreu static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
20398fce3331SJose Abreu {
20408fce3331SJose Abreu 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
20418fce3331SJose Abreu 						 &priv->xstats, chan);
20428fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[chan];
20438fce3331SJose Abreu 
20444ccb4585SJose Abreu 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
20453ba07debSJose Abreu 		if (napi_schedule_prep(&ch->rx_napi)) {
20468fce3331SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
20473ba07debSJose Abreu 			__napi_schedule_irqoff(&ch->rx_napi);
20483ba07debSJose Abreu 			status |= handle_tx;
20493ba07debSJose Abreu 		}
20504ccb4585SJose Abreu 	}
20514ccb4585SJose Abreu 
2052a66b5884SJose Abreu 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use))
20534ccb4585SJose Abreu 		napi_schedule_irqoff(&ch->tx_napi);
20548fce3331SJose Abreu 
20558fce3331SJose Abreu 	return status;
20568fce3331SJose Abreu }
20578fce3331SJose Abreu 
20586deee222SJoao Pinto /**
2059732fdf0eSGiuseppe CAVALLARO  * stmmac_dma_interrupt - DMA ISR
206032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
206132ceabcaSGiuseppe CAVALLARO  * Description: this is the DMA ISR. It is called by the main ISR.
2062732fdf0eSGiuseppe CAVALLARO  * It calls the dwmac dma routine and schedules the NAPI poll method when
2063732fdf0eSGiuseppe CAVALLARO  * there is work to be done.
206432ceabcaSGiuseppe CAVALLARO  */
20657ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv)
20667ac6653aSJeff Kirsher {
2067d62a107aSJoao Pinto 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
20685a6a0445SNiklas Cassel 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
20695a6a0445SNiklas Cassel 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
20705a6a0445SNiklas Cassel 				tx_channel_count : rx_channel_count;
2071d62a107aSJoao Pinto 	u32 chan;
20728ac60ffbSKees Cook 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
20738ac60ffbSKees Cook 
20748ac60ffbSKees Cook 	/* Make sure we never check beyond our status buffer. */
20758ac60ffbSKees Cook 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
20768ac60ffbSKees Cook 		channels_to_check = ARRAY_SIZE(status);
207768e5cfafSJoao Pinto 
20785a6a0445SNiklas Cassel 	for (chan = 0; chan < channels_to_check; chan++)
20798fce3331SJose Abreu 		status[chan] = stmmac_napi_check(priv, chan);
2080d62a107aSJoao Pinto 
20815a6a0445SNiklas Cassel 	for (chan = 0; chan < tx_channel_count; chan++) {
20825a6a0445SNiklas Cassel 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
20837ac6653aSJeff Kirsher 			/* Try to bump up the dma threshold on this failure */
2084b2dec116SSonic Zhang 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2085b2dec116SSonic Zhang 			    (tc <= 256)) {
20867ac6653aSJeff Kirsher 				tc += 64;
2087c405abe2SSonic Zhang 				if (priv->plat->force_thresh_dma_mode)
2088d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2089d62a107aSJoao Pinto 								      tc,
2090d62a107aSJoao Pinto 								      tc,
2091d62a107aSJoao Pinto 								      chan);
2092c405abe2SSonic Zhang 				else
2093d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2094d62a107aSJoao Pinto 								    tc,
2095d62a107aSJoao Pinto 								    SF_DMA_MODE,
2096d62a107aSJoao Pinto 								    chan);
20977ac6653aSJeff Kirsher 				priv->xstats.threshold = tc;
20987ac6653aSJeff Kirsher 			}
20995a6a0445SNiklas Cassel 		} else if (unlikely(status[chan] == tx_hard_error)) {
21004e593262SJoao Pinto 			stmmac_tx_err(priv, chan);
21017ac6653aSJeff Kirsher 		}
2102d62a107aSJoao Pinto 	}
2103d62a107aSJoao Pinto }
21047ac6653aSJeff Kirsher 
210532ceabcaSGiuseppe CAVALLARO /**
210632ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
210732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
210832ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq; the counters are managed in SW.
210932ceabcaSGiuseppe CAVALLARO  */
21101c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
21111c901a46SGiuseppe CAVALLARO {
21121c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
21131c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
21141c901a46SGiuseppe CAVALLARO 
21153b1dd2c5SJose Abreu 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
21164f795b25SGiuseppe CAVALLARO 
21174f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
21183b1dd2c5SJose Abreu 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
21191c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
21204f795b25SGiuseppe CAVALLARO 	} else
212138ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
21221c901a46SGiuseppe CAVALLARO }
21231c901a46SGiuseppe CAVALLARO 
2124732fdf0eSGiuseppe CAVALLARO /**
2125732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
212632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
212719e30c14SGiuseppe CAVALLARO  * Description:
212819e30c14SGiuseppe CAVALLARO  *  newer GMAC chip generations have a dedicated register to indicate the
2129e7434821SGiuseppe CAVALLARO  *  presence of the optional features/functions.
213019e30c14SGiuseppe CAVALLARO  *  This can also be used to override the value passed through the
213119e30c14SGiuseppe CAVALLARO  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2132e7434821SGiuseppe CAVALLARO  */
2133e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2134e7434821SGiuseppe CAVALLARO {
2135a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2136e7434821SGiuseppe CAVALLARO }
2137e7434821SGiuseppe CAVALLARO 
213832ceabcaSGiuseppe CAVALLARO /**
2139732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
214032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
214132ceabcaSGiuseppe CAVALLARO  * Description:
214232ceabcaSGiuseppe CAVALLARO  * it verifies that the MAC address is valid; in case of failure it
214332ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
214432ceabcaSGiuseppe CAVALLARO  */
2145bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2146bfab27a1SGiuseppe CAVALLARO {
2147bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2148c10d4c82SJose Abreu 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2149bfab27a1SGiuseppe CAVALLARO 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2150f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
2151af649352SJisheng Zhang 		dev_info(priv->device, "device MAC address %pM\n",
2152bfab27a1SGiuseppe CAVALLARO 			 priv->dev->dev_addr);
2153bfab27a1SGiuseppe CAVALLARO 	}
2154c88460b7SHans de Goede }
2155bfab27a1SGiuseppe CAVALLARO 
215632ceabcaSGiuseppe CAVALLARO /**
2157732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
215832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
215932ceabcaSGiuseppe CAVALLARO  * Description:
216032ceabcaSGiuseppe CAVALLARO  * It inits the DMA invoking the specific MAC/GMAC callback.
216132ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
216232ceabcaSGiuseppe CAVALLARO  * if they are not passed, a default is kept for the MAC or GMAC.
216332ceabcaSGiuseppe CAVALLARO  */
21640f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv)
21650f1f88a8SGiuseppe CAVALLARO {
216647f2a9ceSJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
216747f2a9ceSJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
216824aaed0cSJose Abreu 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
216954139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q;
2170ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
217147f2a9ceSJoao Pinto 	u32 chan = 0;
2172c24602efSGiuseppe CAVALLARO 	int atds = 0;
2173495db273SGiuseppe Cavallaro 	int ret = 0;
21740f1f88a8SGiuseppe CAVALLARO 
2175a332e2faSNiklas Cassel 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2176a332e2faSNiklas Cassel 		dev_err(priv->device, "Invalid DMA configuration\n");
217789ab75bfSNiklas Cassel 		return -EINVAL;
21780f1f88a8SGiuseppe CAVALLARO 	}
21790f1f88a8SGiuseppe CAVALLARO 
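	/* Extended descriptors in ring mode need the alternate descriptor
	 * size (ATDS) setting, passed to the DMA init callback below.
	 */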
2180c24602efSGiuseppe CAVALLARO 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2181c24602efSGiuseppe CAVALLARO 		atds = 1;
2182c24602efSGiuseppe CAVALLARO 
2183a4e887faSJose Abreu 	ret = stmmac_reset(priv, priv->ioaddr);
2184495db273SGiuseppe Cavallaro 	if (ret) {
2185495db273SGiuseppe Cavallaro 		dev_err(priv->device, "Failed to reset the dma\n");
2186495db273SGiuseppe Cavallaro 		return ret;
2187495db273SGiuseppe Cavallaro 	}
2188495db273SGiuseppe Cavallaro 
21897d9e6c5aSJose Abreu 	/* DMA Configuration */
21907d9e6c5aSJose Abreu 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
21917d9e6c5aSJose Abreu 
21927d9e6c5aSJose Abreu 	if (priv->plat->axi)
21937d9e6c5aSJose Abreu 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
21947d9e6c5aSJose Abreu 
2195af8f3fb7SWeifeng Voon 	/* DMA CSR Channel configuration */
2196af8f3fb7SWeifeng Voon 	for (chan = 0; chan < dma_csr_ch; chan++)
2197af8f3fb7SWeifeng Voon 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2198af8f3fb7SWeifeng Voon 
219947f2a9ceSJoao Pinto 	/* DMA RX Channel Configuration */
220047f2a9ceSJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++) {
220154139cf3SJoao Pinto 		rx_q = &priv->rx_queue[chan];
220254139cf3SJoao Pinto 
220324aaed0cSJose Abreu 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
220424aaed0cSJose Abreu 				    rx_q->dma_rx_phy, chan);
220547f2a9ceSJoao Pinto 
220654139cf3SJoao Pinto 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2207f748be53SAlexandre TORGUE 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2208a4e887faSJose Abreu 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2209a4e887faSJose Abreu 				       rx_q->rx_tail_addr, chan);
221047f2a9ceSJoao Pinto 	}
221147f2a9ceSJoao Pinto 
221247f2a9ceSJoao Pinto 	/* DMA TX Channel Configuration */
221347f2a9ceSJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++) {
2214ce736788SJoao Pinto 		tx_q = &priv->tx_queue[chan];
2215ce736788SJoao Pinto 
221624aaed0cSJose Abreu 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
221724aaed0cSJose Abreu 				    tx_q->dma_tx_phy, chan);
2218f748be53SAlexandre TORGUE 
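		/* The TX tail pointer starts at the ring base; it is moved
		 * forward from the xmit path as descriptors are filled.
		 */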
22190431100bSJose Abreu 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2220a4e887faSJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2221a4e887faSJose Abreu 				       tx_q->tx_tail_addr, chan);
222247f2a9ceSJoao Pinto 	}
222324aaed0cSJose Abreu 
2224495db273SGiuseppe Cavallaro 	return ret;
22250f1f88a8SGiuseppe CAVALLARO }
22260f1f88a8SGiuseppe CAVALLARO 
22278fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
22288fce3331SJose Abreu {
22298fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
22308fce3331SJose Abreu 
22318fce3331SJose Abreu 	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
22328fce3331SJose Abreu }
22338fce3331SJose Abreu 
2234bfab27a1SGiuseppe CAVALLARO /**
2235732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
22369125cdd1SGiuseppe CAVALLARO  * @t: pointer to the per-queue TX timer (timer_list)
22379125cdd1SGiuseppe CAVALLARO  * Description:
22389125cdd1SGiuseppe CAVALLARO  * This is the timer handler that schedules the TX NAPI, which invokes stmmac_tx_clean.
22399125cdd1SGiuseppe CAVALLARO  */
2240e99e88a9SKees Cook static void stmmac_tx_timer(struct timer_list *t)
22419125cdd1SGiuseppe CAVALLARO {
22428fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
22438fce3331SJose Abreu 	struct stmmac_priv *priv = tx_q->priv_data;
22448fce3331SJose Abreu 	struct stmmac_channel *ch;
22459125cdd1SGiuseppe CAVALLARO 
22468fce3331SJose Abreu 	ch = &priv->channel[tx_q->queue_index];
22478fce3331SJose Abreu 
22484ccb4585SJose Abreu 	/*
22494ccb4585SJose Abreu 	 * If NAPI is already running we can miss some events. Let's rearm
22504ccb4585SJose Abreu 	 * the timer and try again.
22514ccb4585SJose Abreu 	 */
22524ccb4585SJose Abreu 	if (likely(napi_schedule_prep(&ch->tx_napi)))
22534ccb4585SJose Abreu 		__napi_schedule(&ch->tx_napi);
22544ccb4585SJose Abreu 	else
22554ccb4585SJose Abreu 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
22569125cdd1SGiuseppe CAVALLARO }
22579125cdd1SGiuseppe CAVALLARO 
22589125cdd1SGiuseppe CAVALLARO /**
2259d429b66eSJose Abreu  * stmmac_init_coalesce - init mitigation options.
226032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
22619125cdd1SGiuseppe CAVALLARO  * Description:
2262d429b66eSJose Abreu  * This inits the coalesce parameters: i.e. timer rate,
22639125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
22649125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
22659125cdd1SGiuseppe CAVALLARO  */
2266d429b66eSJose Abreu static void stmmac_init_coalesce(struct stmmac_priv *priv)
22679125cdd1SGiuseppe CAVALLARO {
22688fce3331SJose Abreu 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
22698fce3331SJose Abreu 	u32 chan;
22708fce3331SJose Abreu 
22719125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
22729125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2273d429b66eSJose Abreu 	priv->rx_coal_frames = STMMAC_RX_FRAMES;
22748fce3331SJose Abreu 
22758fce3331SJose Abreu 	for (chan = 0; chan < tx_channel_count; chan++) {
22768fce3331SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
22778fce3331SJose Abreu 
22788fce3331SJose Abreu 		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
22798fce3331SJose Abreu 	}
22809125cdd1SGiuseppe CAVALLARO }
22819125cdd1SGiuseppe CAVALLARO 
22824854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
22834854ab99SJoao Pinto {
22844854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
22854854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
22864854ab99SJoao Pinto 	u32 chan;
22874854ab99SJoao Pinto 
22884854ab99SJoao Pinto 	/* set TX ring length */
22894854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2290a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
22914854ab99SJoao Pinto 				(DMA_TX_SIZE - 1), chan);
22924854ab99SJoao Pinto 
22934854ab99SJoao Pinto 	/* set RX ring length */
22944854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2295a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
22964854ab99SJoao Pinto 				(DMA_RX_SIZE - 1), chan);
22974854ab99SJoao Pinto }
22984854ab99SJoao Pinto 
22999125cdd1SGiuseppe CAVALLARO /**
23006a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
23016a3a7193SJoao Pinto  *  @priv: driver private structure
23026a3a7193SJoao Pinto  *  Description: It is used for setting the TX queue weights
23036a3a7193SJoao Pinto  */
23046a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
23056a3a7193SJoao Pinto {
23066a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
23076a3a7193SJoao Pinto 	u32 weight;
23086a3a7193SJoao Pinto 	u32 queue;
23096a3a7193SJoao Pinto 
23106a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
23116a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
2312c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
23136a3a7193SJoao Pinto 	}
23146a3a7193SJoao Pinto }
23156a3a7193SJoao Pinto 
23166a3a7193SJoao Pinto /**
231719d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
231819d91873SJoao Pinto  *  @priv: driver private structure
231919d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
232019d91873SJoao Pinto  */
232119d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
232219d91873SJoao Pinto {
232319d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
232419d91873SJoao Pinto 	u32 mode_to_use;
232519d91873SJoao Pinto 	u32 queue;
232619d91873SJoao Pinto 
232744781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
232844781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
232919d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
233019d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
233119d91873SJoao Pinto 			continue;
233219d91873SJoao Pinto 
2333c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
233419d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
233519d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
233619d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
233719d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
233819d91873SJoao Pinto 				queue);
233919d91873SJoao Pinto 	}
234019d91873SJoao Pinto }
234119d91873SJoao Pinto 
234219d91873SJoao Pinto /**
2343d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2344d43042f4SJoao Pinto  *  @priv: driver private structure
2345d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
2346d43042f4SJoao Pinto  */
2347d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2348d43042f4SJoao Pinto {
2349d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2350d43042f4SJoao Pinto 	u32 queue;
2351d43042f4SJoao Pinto 	u32 chan;
2352d43042f4SJoao Pinto 
2353d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2354d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
2355c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2356d43042f4SJoao Pinto 	}
2357d43042f4SJoao Pinto }
2358d43042f4SJoao Pinto 
2359d43042f4SJoao Pinto /**
2360a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2361a8f5102aSJoao Pinto  *  @priv: driver private structure
2362a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
2363a8f5102aSJoao Pinto  */
2364a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2365a8f5102aSJoao Pinto {
2366a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2367a8f5102aSJoao Pinto 	u32 queue;
2368a8f5102aSJoao Pinto 	u32 prio;
2369a8f5102aSJoao Pinto 
2370a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2371a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2372a8f5102aSJoao Pinto 			continue;
2373a8f5102aSJoao Pinto 
2374a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
2375c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2376a8f5102aSJoao Pinto 	}
2377a8f5102aSJoao Pinto }
2378a8f5102aSJoao Pinto 
2379a8f5102aSJoao Pinto /**
2380a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2381a8f5102aSJoao Pinto  *  @priv: driver private structure
2382a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
2383a8f5102aSJoao Pinto  */
2384a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2385a8f5102aSJoao Pinto {
2386a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2387a8f5102aSJoao Pinto 	u32 queue;
2388a8f5102aSJoao Pinto 	u32 prio;
2389a8f5102aSJoao Pinto 
2390a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
2391a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2392a8f5102aSJoao Pinto 			continue;
2393a8f5102aSJoao Pinto 
2394a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
2395c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2396a8f5102aSJoao Pinto 	}
2397a8f5102aSJoao Pinto }
2398a8f5102aSJoao Pinto 
2399a8f5102aSJoao Pinto /**
2400abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2401abe80fdcSJoao Pinto  *  @priv: driver private structure
2402abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
2403abe80fdcSJoao Pinto  */
2404abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2405abe80fdcSJoao Pinto {
2406abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2407abe80fdcSJoao Pinto 	u32 queue;
2408abe80fdcSJoao Pinto 	u8 packet;
2409abe80fdcSJoao Pinto 
2410abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2411abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
2412abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2413abe80fdcSJoao Pinto 			continue;
2414abe80fdcSJoao Pinto 
2415abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2416c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2417abe80fdcSJoao Pinto 	}
2418abe80fdcSJoao Pinto }
2419abe80fdcSJoao Pinto 
2420abe80fdcSJoao Pinto /**
2421d0a9c9f9SJoao Pinto  *  stmmac_mtl_configuration - Configure MTL
2422d0a9c9f9SJoao Pinto  *  @priv: driver private structure
2423d0a9c9f9SJoao Pinto  *  Description: It is used for configuring MTL
2424d0a9c9f9SJoao Pinto  */
2425d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2426d0a9c9f9SJoao Pinto {
2427d0a9c9f9SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2428d0a9c9f9SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2429d0a9c9f9SJoao Pinto 
2430c10d4c82SJose Abreu 	if (tx_queues_count > 1)
24316a3a7193SJoao Pinto 		stmmac_set_tx_queue_weight(priv);
24326a3a7193SJoao Pinto 
2433d0a9c9f9SJoao Pinto 	/* Configure MTL RX algorithms */
2434c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2435c10d4c82SJose Abreu 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2436d0a9c9f9SJoao Pinto 				priv->plat->rx_sched_algorithm);
2437d0a9c9f9SJoao Pinto 
2438d0a9c9f9SJoao Pinto 	/* Configure MTL TX algorithms */
2439c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2440c10d4c82SJose Abreu 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2441d0a9c9f9SJoao Pinto 				priv->plat->tx_sched_algorithm);
2442d0a9c9f9SJoao Pinto 
244319d91873SJoao Pinto 	/* Configure CBS in AVB TX queues */
2444c10d4c82SJose Abreu 	if (tx_queues_count > 1)
244519d91873SJoao Pinto 		stmmac_configure_cbs(priv);
244619d91873SJoao Pinto 
2447d43042f4SJoao Pinto 	/* Map RX MTL to DMA channels */
2448d43042f4SJoao Pinto 	stmmac_rx_queue_dma_chan_map(priv);
2449d43042f4SJoao Pinto 
2450d0a9c9f9SJoao Pinto 	/* Enable MAC RX Queues */
2451d0a9c9f9SJoao Pinto 	stmmac_mac_enable_rx_queues(priv);
24526deee222SJoao Pinto 
2453a8f5102aSJoao Pinto 	/* Set RX priorities */
2454c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2455a8f5102aSJoao Pinto 		stmmac_mac_config_rx_queues_prio(priv);
2456a8f5102aSJoao Pinto 
2457a8f5102aSJoao Pinto 	/* Set TX priorities */
2458c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2459a8f5102aSJoao Pinto 		stmmac_mac_config_tx_queues_prio(priv);
2460abe80fdcSJoao Pinto 
2461abe80fdcSJoao Pinto 	/* Set RX routing */
2462c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2463abe80fdcSJoao Pinto 		stmmac_mac_config_rx_queues_routing(priv);
2464d0a9c9f9SJoao Pinto }
2465d0a9c9f9SJoao Pinto 
24668bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
24678bf993a5SJose Abreu {
2468c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
24698bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
2470c10d4c82SJose Abreu 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
24718bf993a5SJose Abreu 	} else {
24728bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
24738bf993a5SJose Abreu 	}
24748bf993a5SJose Abreu }
24758bf993a5SJose Abreu 
2476d0a9c9f9SJoao Pinto /**
2477732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_setup - setup mac in a usable state.
2478523f11b5SSrinivas Kandagatla  *  @dev : pointer to the device structure.
 *  @init_ptp: set to true to also initialize PTP during this setup
2479523f11b5SSrinivas Kandagatla  *  Description:
2480732fdf0eSGiuseppe CAVALLARO  *  this is the main function to setup the HW in a usable state: the
2481732fdf0eSGiuseppe CAVALLARO  *  dma engine is reset, the core registers are configured (e.g. AXI,
2482732fdf0eSGiuseppe CAVALLARO  *  Checksum features, timers) and the DMA is made ready to start
2483732fdf0eSGiuseppe CAVALLARO  *  receiving and transmitting.
2484523f11b5SSrinivas Kandagatla  *  Return value:
2485523f11b5SSrinivas Kandagatla  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2486523f11b5SSrinivas Kandagatla  *  file on failure.
2487523f11b5SSrinivas Kandagatla  */
2488fe131929SHuacai Chen static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2489523f11b5SSrinivas Kandagatla {
2490523f11b5SSrinivas Kandagatla 	struct stmmac_priv *priv = netdev_priv(dev);
24913c55d4d0SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2492146617b8SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2493146617b8SJoao Pinto 	u32 chan;
2494523f11b5SSrinivas Kandagatla 	int ret;
2495523f11b5SSrinivas Kandagatla 
2496523f11b5SSrinivas Kandagatla 	/* DMA initialization and SW reset */
2497523f11b5SSrinivas Kandagatla 	ret = stmmac_init_dma_engine(priv);
2498523f11b5SSrinivas Kandagatla 	if (ret < 0) {
249938ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
250038ddc59dSLABBE Corentin 			   __func__);
2501523f11b5SSrinivas Kandagatla 		return ret;
2502523f11b5SSrinivas Kandagatla 	}
2503523f11b5SSrinivas Kandagatla 
2504523f11b5SSrinivas Kandagatla 	/* Copy the MAC addr into the HW  */
2505c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2506523f11b5SSrinivas Kandagatla 
250702e57b9dSGiuseppe CAVALLARO 	/* PS and related bits will be programmed according to the speed */
250802e57b9dSGiuseppe CAVALLARO 	if (priv->hw->pcs) {
250902e57b9dSGiuseppe CAVALLARO 		int speed = priv->plat->mac_port_sel_speed;
251002e57b9dSGiuseppe CAVALLARO 
251102e57b9dSGiuseppe CAVALLARO 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
251202e57b9dSGiuseppe CAVALLARO 		    (speed == SPEED_1000)) {
251302e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = speed;
251402e57b9dSGiuseppe CAVALLARO 		} else {
251502e57b9dSGiuseppe CAVALLARO 			dev_warn(priv->device, "invalid port speed\n");
251602e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = 0;
251702e57b9dSGiuseppe CAVALLARO 		}
251802e57b9dSGiuseppe CAVALLARO 	}
251902e57b9dSGiuseppe CAVALLARO 
2520523f11b5SSrinivas Kandagatla 	/* Initialize the MAC Core */
2521c10d4c82SJose Abreu 	stmmac_core_init(priv, priv->hw, dev);
2522523f11b5SSrinivas Kandagatla 
2523d0a9c9f9SJoao Pinto 	/* Initialize MTL */
2524d0a9c9f9SJoao Pinto 	stmmac_mtl_configuration(priv);
25259eb12474Sjpinto 
25268bf993a5SJose Abreu 	/* Initialize Safety Features */
25278bf993a5SJose Abreu 	stmmac_safety_feat_configuration(priv);
25288bf993a5SJose Abreu 
2529c10d4c82SJose Abreu 	ret = stmmac_rx_ipc(priv, priv->hw);
2530978aded4SGiuseppe CAVALLARO 	if (!ret) {
253138ddc59dSLABBE Corentin 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2532978aded4SGiuseppe CAVALLARO 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2533d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
2534978aded4SGiuseppe CAVALLARO 	}
2535978aded4SGiuseppe CAVALLARO 
2536523f11b5SSrinivas Kandagatla 	/* Enable the MAC Rx/Tx */
2537c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
2538523f11b5SSrinivas Kandagatla 
2539b4f0a661SJoao Pinto 	/* Set the HW DMA mode and the COE */
2540b4f0a661SJoao Pinto 	stmmac_dma_operation_mode(priv);
2541b4f0a661SJoao Pinto 
2542523f11b5SSrinivas Kandagatla 	stmmac_mmc_setup(priv);
2543523f11b5SSrinivas Kandagatla 
2544fe131929SHuacai Chen 	if (init_ptp) {
25450ad2be79SThierry Reding 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
25460ad2be79SThierry Reding 		if (ret < 0)
25470ad2be79SThierry Reding 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
25480ad2be79SThierry Reding 
2549523f11b5SSrinivas Kandagatla 		ret = stmmac_init_ptp(priv);
2550722eef28SHeiner Kallweit 		if (ret == -EOPNOTSUPP)
2551722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2552722eef28SHeiner Kallweit 		else if (ret)
2553722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP init failed\n");
2554fe131929SHuacai Chen 	}
2555523f11b5SSrinivas Kandagatla 
2556523f11b5SSrinivas Kandagatla 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2557523f11b5SSrinivas Kandagatla 
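	/* When the RX watchdog (interrupt coalescing) is used, start from
	 * the minimum RIWT value.
	 */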
2558a4e887faSJose Abreu 	if (priv->use_riwt) {
255901d1689dSJose Abreu 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MIN_DMA_RIWT, rx_cnt);
2560a4e887faSJose Abreu 		if (!ret)
256101d1689dSJose Abreu 			priv->rx_riwt = MIN_DMA_RIWT;
2562523f11b5SSrinivas Kandagatla 	}
2563523f11b5SSrinivas Kandagatla 
2564c10d4c82SJose Abreu 	if (priv->hw->pcs)
2565c10d4c82SJose Abreu 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2566523f11b5SSrinivas Kandagatla 
25674854ab99SJoao Pinto 	/* set TX and RX rings length */
25684854ab99SJoao Pinto 	stmmac_set_rings_length(priv);
25694854ab99SJoao Pinto 
2570f748be53SAlexandre TORGUE 	/* Enable TSO */
2571146617b8SJoao Pinto 	if (priv->tso) {
2572146617b8SJoao Pinto 		for (chan = 0; chan < tx_cnt; chan++)
2573a4e887faSJose Abreu 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2574146617b8SJoao Pinto 	}
2575f748be53SAlexandre TORGUE 
25767d9e6c5aSJose Abreu 	/* Start the ball rolling... */
25777d9e6c5aSJose Abreu 	stmmac_start_all_dma(priv);
25787d9e6c5aSJose Abreu 
2579523f11b5SSrinivas Kandagatla 	return 0;
2580523f11b5SSrinivas Kandagatla }
2581523f11b5SSrinivas Kandagatla 
2582c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev)
2583c66f6c37SThierry Reding {
2584c66f6c37SThierry Reding 	struct stmmac_priv *priv = netdev_priv(dev);
2585c66f6c37SThierry Reding 
2586c66f6c37SThierry Reding 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2587c66f6c37SThierry Reding }
2588c66f6c37SThierry Reding 
2589523f11b5SSrinivas Kandagatla /**
25907ac6653aSJeff Kirsher  *  stmmac_open - open entry point of the driver
25917ac6653aSJeff Kirsher  *  @dev : pointer to the device structure.
25927ac6653aSJeff Kirsher  *  Description:
25937ac6653aSJeff Kirsher  *  This function is the open entry point of the driver.
25947ac6653aSJeff Kirsher  *  Return value:
25957ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
25967ac6653aSJeff Kirsher  *  file on failure.
25977ac6653aSJeff Kirsher  */
25987ac6653aSJeff Kirsher static int stmmac_open(struct net_device *dev)
25997ac6653aSJeff Kirsher {
26007ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
26018fce3331SJose Abreu 	u32 chan;
26027ac6653aSJeff Kirsher 	int ret;
26037ac6653aSJeff Kirsher 
26043fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
26053fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
26063fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
26077ac6653aSJeff Kirsher 		ret = stmmac_init_phy(dev);
2608e58bb43fSGiuseppe CAVALLARO 		if (ret) {
260938ddc59dSLABBE Corentin 			netdev_err(priv->dev,
261038ddc59dSLABBE Corentin 				   "%s: Cannot attach to PHY (error: %d)\n",
2611e58bb43fSGiuseppe CAVALLARO 				   __func__, ret);
261289df20d9SHans de Goede 			return ret;
26137ac6653aSJeff Kirsher 		}
2614e58bb43fSGiuseppe CAVALLARO 	}
26157ac6653aSJeff Kirsher 
2616523f11b5SSrinivas Kandagatla 	/* Extra statistics */
2617523f11b5SSrinivas Kandagatla 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2618523f11b5SSrinivas Kandagatla 	priv->xstats.threshold = tc;
2619523f11b5SSrinivas Kandagatla 
26205bacd778SLABBE Corentin 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
262122ad3838SGiuseppe Cavallaro 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
262256329137SBartlomiej Zolnierkiewicz 
26235bacd778SLABBE Corentin 	ret = alloc_dma_desc_resources(priv);
26245bacd778SLABBE Corentin 	if (ret < 0) {
26255bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
26265bacd778SLABBE Corentin 			   __func__);
26275bacd778SLABBE Corentin 		goto dma_desc_error;
26285bacd778SLABBE Corentin 	}
26295bacd778SLABBE Corentin 
26305bacd778SLABBE Corentin 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
26315bacd778SLABBE Corentin 	if (ret < 0) {
26325bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
26335bacd778SLABBE Corentin 			   __func__);
26345bacd778SLABBE Corentin 		goto init_error;
26355bacd778SLABBE Corentin 	}
26365bacd778SLABBE Corentin 
2637fe131929SHuacai Chen 	ret = stmmac_hw_setup(dev, true);
263856329137SBartlomiej Zolnierkiewicz 	if (ret < 0) {
263938ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2640c9324d18SGiuseppe CAVALLARO 		goto init_error;
26417ac6653aSJeff Kirsher 	}
26427ac6653aSJeff Kirsher 
2643d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
2644777da230SGiuseppe CAVALLARO 
264574371272SJose Abreu 	phylink_start(priv->phylink);
26467ac6653aSJeff Kirsher 
26477ac6653aSJeff Kirsher 	/* Request the IRQ lines */
26487ac6653aSJeff Kirsher 	ret = request_irq(dev->irq, stmmac_interrupt,
26497ac6653aSJeff Kirsher 			  IRQF_SHARED, dev->name, dev);
26507ac6653aSJeff Kirsher 	if (unlikely(ret < 0)) {
265138ddc59dSLABBE Corentin 		netdev_err(priv->dev,
265238ddc59dSLABBE Corentin 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
26537ac6653aSJeff Kirsher 			   __func__, dev->irq, ret);
26546c1e5abeSThierry Reding 		goto irq_error;
26557ac6653aSJeff Kirsher 	}
26567ac6653aSJeff Kirsher 
26577a13f8f5SFrancesco Virlinzi 	/* Request the Wake IRQ in case another line is used for WoL */
26587a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq) {
26597a13f8f5SFrancesco Virlinzi 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
26607a13f8f5SFrancesco Virlinzi 				  IRQF_SHARED, dev->name, dev);
26617a13f8f5SFrancesco Virlinzi 		if (unlikely(ret < 0)) {
266238ddc59dSLABBE Corentin 			netdev_err(priv->dev,
266338ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2664ceb69499SGiuseppe CAVALLARO 				   __func__, priv->wol_irq, ret);
2665c9324d18SGiuseppe CAVALLARO 			goto wolirq_error;
26667a13f8f5SFrancesco Virlinzi 		}
26677a13f8f5SFrancesco Virlinzi 	}
26687a13f8f5SFrancesco Virlinzi 
2669d765955dSGiuseppe CAVALLARO 	/* Request the LPI IRQ in case a dedicated line is used for it */
2670d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0) {
2671d765955dSGiuseppe CAVALLARO 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2672d765955dSGiuseppe CAVALLARO 				  dev->name, dev);
2673d765955dSGiuseppe CAVALLARO 		if (unlikely(ret < 0)) {
267438ddc59dSLABBE Corentin 			netdev_err(priv->dev,
267538ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2676d765955dSGiuseppe CAVALLARO 				   __func__, priv->lpi_irq, ret);
2677c9324d18SGiuseppe CAVALLARO 			goto lpiirq_error;
2678d765955dSGiuseppe CAVALLARO 		}
2679d765955dSGiuseppe CAVALLARO 	}
2680d765955dSGiuseppe CAVALLARO 
2681c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
2682c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
26837ac6653aSJeff Kirsher 
26847ac6653aSJeff Kirsher 	return 0;
26857ac6653aSJeff Kirsher 
2686c9324d18SGiuseppe CAVALLARO lpiirq_error:
2687d765955dSGiuseppe CAVALLARO 	if (priv->wol_irq != dev->irq)
2688d765955dSGiuseppe CAVALLARO 		free_irq(priv->wol_irq, dev);
2689c9324d18SGiuseppe CAVALLARO wolirq_error:
26907a13f8f5SFrancesco Virlinzi 	free_irq(dev->irq, dev);
26916c1e5abeSThierry Reding irq_error:
269274371272SJose Abreu 	phylink_stop(priv->phylink);
26937a13f8f5SFrancesco Virlinzi 
26948fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
26958fce3331SJose Abreu 		del_timer_sync(&priv->tx_queue[chan].txtimer);
26968fce3331SJose Abreu 
2697c66f6c37SThierry Reding 	stmmac_hw_teardown(dev);
2698c9324d18SGiuseppe CAVALLARO init_error:
2699c9324d18SGiuseppe CAVALLARO 	free_dma_desc_resources(priv);
27005bacd778SLABBE Corentin dma_desc_error:
270174371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
27027ac6653aSJeff Kirsher 	return ret;
27037ac6653aSJeff Kirsher }
27047ac6653aSJeff Kirsher 
27057ac6653aSJeff Kirsher /**
27067ac6653aSJeff Kirsher  *  stmmac_release - close entry point of the driver
27077ac6653aSJeff Kirsher  *  @dev : device pointer.
27087ac6653aSJeff Kirsher  *  Description:
27097ac6653aSJeff Kirsher  *  This is the stop entry point of the driver.
27107ac6653aSJeff Kirsher  */
27117ac6653aSJeff Kirsher static int stmmac_release(struct net_device *dev)
27127ac6653aSJeff Kirsher {
27137ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
27148fce3331SJose Abreu 	u32 chan;
27157ac6653aSJeff Kirsher 
2716d765955dSGiuseppe CAVALLARO 	if (priv->eee_enabled)
2717d765955dSGiuseppe CAVALLARO 		del_timer_sync(&priv->eee_ctrl_timer);
2718d765955dSGiuseppe CAVALLARO 
27197ac6653aSJeff Kirsher 	/* Stop and disconnect the PHY */
272074371272SJose Abreu 	phylink_stop(priv->phylink);
272174371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
27227ac6653aSJeff Kirsher 
2723c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
27247ac6653aSJeff Kirsher 
2725c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
27267ac6653aSJeff Kirsher 
27278fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
27288fce3331SJose Abreu 		del_timer_sync(&priv->tx_queue[chan].txtimer);
27299125cdd1SGiuseppe CAVALLARO 
27307ac6653aSJeff Kirsher 	/* Free the IRQ lines */
27317ac6653aSJeff Kirsher 	free_irq(dev->irq, dev);
27327a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq)
27337a13f8f5SFrancesco Virlinzi 		free_irq(priv->wol_irq, dev);
2734d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0)
2735d765955dSGiuseppe CAVALLARO 		free_irq(priv->lpi_irq, dev);
27367ac6653aSJeff Kirsher 
27377ac6653aSJeff Kirsher 	/* Stop TX/RX DMA and clear the descriptors */
2738ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
27397ac6653aSJeff Kirsher 
27407ac6653aSJeff Kirsher 	/* Release and free the Rx/Tx resources */
27417ac6653aSJeff Kirsher 	free_dma_desc_resources(priv);
27427ac6653aSJeff Kirsher 
27437ac6653aSJeff Kirsher 	/* Disable the MAC Rx/Tx */
2744c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
27457ac6653aSJeff Kirsher 
27467ac6653aSJeff Kirsher 	netif_carrier_off(dev);
27477ac6653aSJeff Kirsher 
274892ba6888SRayagond Kokatanur 	stmmac_release_ptp(priv);
274992ba6888SRayagond Kokatanur 
27507ac6653aSJeff Kirsher 	return 0;
27517ac6653aSJeff Kirsher }
27527ac6653aSJeff Kirsher 
27537ac6653aSJeff Kirsher /**
2754f748be53SAlexandre TORGUE  *  stmmac_tso_allocator - fill descriptors with the TSO payload
2755f748be53SAlexandre TORGUE  *  @priv: driver private structure
2756f748be53SAlexandre TORGUE  *  @des: buffer start address
2757f748be53SAlexandre TORGUE  *  @total_len: total length to fill in descriptors
2758f748be53SAlexandre TORGUE  *  @last_segment: condition for the last descriptor
2759ce736788SJoao Pinto  *  @queue: TX queue index
2760f748be53SAlexandre TORGUE  *  Description:
2761f748be53SAlexandre TORGUE  *  This function fills the descriptors and requests new descriptors
2762f748be53SAlexandre TORGUE  *  according to the buffer length to fill.
2763f748be53SAlexandre TORGUE  */
2764a993db88SJose Abreu static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
2765ce736788SJoao Pinto 				 int total_len, bool last_segment, u32 queue)
2766f748be53SAlexandre TORGUE {
2767ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2768f748be53SAlexandre TORGUE 	struct dma_desc *desc;
27695bacd778SLABBE Corentin 	u32 buff_size;
2770ce736788SJoao Pinto 	int tmp_len;
2771f748be53SAlexandre TORGUE 
2772f748be53SAlexandre TORGUE 	tmp_len = total_len;
2773f748be53SAlexandre TORGUE 
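	/* Spread the payload over as many descriptors as needed, each
	 * holding at most TSO_MAX_BUFF_SIZE bytes.
	 */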
2774f748be53SAlexandre TORGUE 	while (tmp_len > 0) {
2775a993db88SJose Abreu 		dma_addr_t curr_addr;
2776a993db88SJose Abreu 
2777ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2778b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2779ce736788SJoao Pinto 		desc = tx_q->dma_tx + tx_q->cur_tx;
2780f748be53SAlexandre TORGUE 
2781a993db88SJose Abreu 		curr_addr = des + (total_len - tmp_len);
2782a993db88SJose Abreu 		if (priv->dma_cap.addr64 <= 32)
2783a993db88SJose Abreu 			desc->des0 = cpu_to_le32(curr_addr);
2784a993db88SJose Abreu 		else
2785a993db88SJose Abreu 			stmmac_set_desc_addr(priv, desc, curr_addr);
2786a993db88SJose Abreu 
2787f748be53SAlexandre TORGUE 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2788f748be53SAlexandre TORGUE 			    TSO_MAX_BUFF_SIZE : tmp_len;
2789f748be53SAlexandre TORGUE 
279042de047dSJose Abreu 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2791f748be53SAlexandre TORGUE 				0, 1,
2792426849e6SNiklas Cassel 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2793f748be53SAlexandre TORGUE 				0, 0);
2794f748be53SAlexandre TORGUE 
2795f748be53SAlexandre TORGUE 		tmp_len -= TSO_MAX_BUFF_SIZE;
2796f748be53SAlexandre TORGUE 	}
2797f748be53SAlexandre TORGUE }
2798f748be53SAlexandre TORGUE 
2799f748be53SAlexandre TORGUE /**
2800f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2801f748be53SAlexandre TORGUE  *  @skb : the socket buffer
2802f748be53SAlexandre TORGUE  *  @dev : device pointer
2803f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
2804f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
2805f748be53SAlexandre TORGUE  *  The diagram below shows the ring programming in case of TSO frames:
2806f748be53SAlexandre TORGUE  *
2807f748be53SAlexandre TORGUE  *  First Descriptor
2808f748be53SAlexandre TORGUE  *   --------
2809f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
2810f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
2811f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
2812f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2813f748be53SAlexandre TORGUE  *   --------
2814f748be53SAlexandre TORGUE  *	|
2815f748be53SAlexandre TORGUE  *     ...
2816f748be53SAlexandre TORGUE  *	|
2817f748be53SAlexandre TORGUE  *   --------
2818f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2819f748be53SAlexandre TORGUE  *   | DES1 | --|
2820f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
2821f748be53SAlexandre TORGUE  *   | DES3 |
2822f748be53SAlexandre TORGUE  *   --------
2823f748be53SAlexandre TORGUE  *
2824f748be53SAlexandre TORGUE  * mss is fixed while tso is enabled, so the TDES3 ctx field is only set on mss change.
2825f748be53SAlexandre TORGUE  */
2826f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2827f748be53SAlexandre TORGUE {
2828ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
2829f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
2830f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
2831ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
2832a993db88SJose Abreu 	unsigned int first_entry;
2833ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
2834ce736788SJoao Pinto 	int tmp_pay_len = 0;
2835ce736788SJoao Pinto 	u32 pay_len, mss;
2836f748be53SAlexandre TORGUE 	u8 proto_hdr_len;
2837a993db88SJose Abreu 	dma_addr_t des;
2838f748be53SAlexandre TORGUE 	int i;
2839f748be53SAlexandre TORGUE 
2840ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
2841ce736788SJoao Pinto 
2842f748be53SAlexandre TORGUE 	/* Compute header lengths */
2843f748be53SAlexandre TORGUE 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2844f748be53SAlexandre TORGUE 
2845f748be53SAlexandre TORGUE 	/* Desc availability based on threshold should be safe enough */
2846ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
2847f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2848c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2849c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2850c22a3f48SJoao Pinto 								queue));
2851f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
285238ddc59dSLABBE Corentin 			netdev_err(priv->dev,
285338ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
285438ddc59dSLABBE Corentin 				   __func__);
2855f748be53SAlexandre TORGUE 		}
2856f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
2857f748be53SAlexandre TORGUE 	}
2858f748be53SAlexandre TORGUE 
2859f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2860f748be53SAlexandre TORGUE 
2861f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
2862f748be53SAlexandre TORGUE 
2863f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
28648d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
2865ce736788SJoao Pinto 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
286642de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
28678d212a9eSNiklas Cassel 		tx_q->mss = mss;
2868ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2869b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2870f748be53SAlexandre TORGUE 	}
2871f748be53SAlexandre TORGUE 
2872f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
2873f748be53SAlexandre TORGUE 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2874f748be53SAlexandre TORGUE 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2875f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2876f748be53SAlexandre TORGUE 			skb->data_len);
2877f748be53SAlexandre TORGUE 	}
2878f748be53SAlexandre TORGUE 
2879ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
2880b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2881f748be53SAlexandre TORGUE 
2882ce736788SJoao Pinto 	desc = tx_q->dma_tx + first_entry;
2883f748be53SAlexandre TORGUE 	first = desc;
2884f748be53SAlexandre TORGUE 
2885f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
2886f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2887f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
2888f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
2889f748be53SAlexandre TORGUE 		goto dma_map_err;
2890f748be53SAlexandre TORGUE 
2891ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2892ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2893f748be53SAlexandre TORGUE 
2894a993db88SJose Abreu 	if (priv->dma_cap.addr64 <= 32) {
2895f8be0d78SMichael Weiser 		first->des0 = cpu_to_le32(des);
2896f748be53SAlexandre TORGUE 
2897f748be53SAlexandre TORGUE 		/* Fill start of payload in buff2 of first descriptor */
2898f748be53SAlexandre TORGUE 		if (pay_len)
2899f8be0d78SMichael Weiser 			first->des1 = cpu_to_le32(des + proto_hdr_len);
2900f748be53SAlexandre TORGUE 
2901f748be53SAlexandre TORGUE 		/* If needed take extra descriptors to fill the remaining payload */
2902f748be53SAlexandre TORGUE 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2903a993db88SJose Abreu 	} else {
2904a993db88SJose Abreu 		stmmac_set_desc_addr(priv, first, des);
2905a993db88SJose Abreu 		tmp_pay_len = pay_len;
2906a993db88SJose Abreu 	}
2907f748be53SAlexandre TORGUE 
2908ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2909f748be53SAlexandre TORGUE 
2910f748be53SAlexandre TORGUE 	/* Prepare fragments */
2911f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
2912f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2913f748be53SAlexandre TORGUE 
2914f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
2915f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
2916f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
2917937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
2918937071c1SThierry Reding 			goto dma_map_err;
2919f748be53SAlexandre TORGUE 
2920f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2921ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
2922f748be53SAlexandre TORGUE 
2923ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2924ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2925ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2926f748be53SAlexandre TORGUE 	}
2927f748be53SAlexandre TORGUE 
2928ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2929f748be53SAlexandre TORGUE 
293005cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
293105cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
293205cf0d1bSNiklas Cassel 
293305cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
293405cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
293505cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
293605cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
293705cf0d1bSNiklas Cassel 	 */
2938ce736788SJoao Pinto 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2939f748be53SAlexandre TORGUE 
2940ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2941b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
294238ddc59dSLABBE Corentin 			  __func__);
2943c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2944f748be53SAlexandre TORGUE 	}
2945f748be53SAlexandre TORGUE 
2946f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
2947f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
2948f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
2949f748be53SAlexandre TORGUE 
2950f748be53SAlexandre TORGUE 	/* Manage tx mitigation */
29518fce3331SJose Abreu 	tx_q->tx_count_frames += nfrags + 1;
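	/* Set the interrupt-on-completion bit only when the coalesce frame
	 * threshold is reached or when a HW timestamp is requested on newer
	 * cores; otherwise just re-arm the SW mitigation timer.
	 */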
2952d0bb82fdSRoland Hii 	if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
2953d0bb82fdSRoland Hii 	    !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
2954d0bb82fdSRoland Hii 	    (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2955d0bb82fdSRoland Hii 	    priv->hwts_tx_en)) {
2956d0bb82fdSRoland Hii 		stmmac_tx_timer_arm(priv, queue);
2957d0bb82fdSRoland Hii 	} else {
2958d0bb82fdSRoland Hii 		tx_q->tx_count_frames = 0;
295942de047dSJose Abreu 		stmmac_set_tx_ic(priv, desc);
2960f748be53SAlexandre TORGUE 		priv->xstats.tx_set_ic_bit++;
2961f748be53SAlexandre TORGUE 	}
2962f748be53SAlexandre TORGUE 
2963f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
2964f748be53SAlexandre TORGUE 
2965f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2966f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
2967f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
2968f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
296942de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
2970f748be53SAlexandre TORGUE 	}
2971f748be53SAlexandre TORGUE 
2972f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
297342de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2974f748be53SAlexandre TORGUE 			proto_hdr_len,
2975f748be53SAlexandre TORGUE 			pay_len,
2976ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2977f748be53SAlexandre TORGUE 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2978f748be53SAlexandre TORGUE 
2979f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
298015d2ee42SNiklas Cassel 	if (mss_desc) {
298115d2ee42SNiklas Cassel 		/* Make sure that first descriptor has been completely
298215d2ee42SNiklas Cassel 		 * written, including its own bit. This is because MSS is
298315d2ee42SNiklas Cassel 		 * actually before first descriptor, so we need to make
298415d2ee42SNiklas Cassel 		 * sure that MSS's own bit is the last thing written.
298515d2ee42SNiklas Cassel 		 */
298615d2ee42SNiklas Cassel 		dma_wmb();
298742de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
298815d2ee42SNiklas Cassel 	}
2989f748be53SAlexandre TORGUE 
2990f748be53SAlexandre TORGUE 	/* The own bit must be the latest setting done when preparing the
2991f748be53SAlexandre TORGUE 	 * descriptor and then a barrier is needed to make sure that
2992f748be53SAlexandre TORGUE 	 * all is coherent before granting the DMA engine.
2993f748be53SAlexandre TORGUE 	 */
299495eb930aSNiklas Cassel 	wmb();
2995f748be53SAlexandre TORGUE 
2996f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
2997f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2998ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2999ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
3000f748be53SAlexandre TORGUE 
300142de047dSJose Abreu 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3002f748be53SAlexandre TORGUE 
3003f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
3004f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
3005f748be53SAlexandre TORGUE 	}
3006f748be53SAlexandre TORGUE 
3007c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3008f748be53SAlexandre TORGUE 
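	/* Advance the tail pointer so the DMA engine sees the newly filled
	 * descriptors and starts fetching this frame.
	 */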
30090431100bSJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3010a4e887faSJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3011f748be53SAlexandre TORGUE 
3012f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3013f748be53SAlexandre TORGUE 
3014f748be53SAlexandre TORGUE dma_map_err:
3015f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
3016f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
3017f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
3018f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3019f748be53SAlexandre TORGUE }
3020f748be53SAlexandre TORGUE 
3021f748be53SAlexandre TORGUE /**
3022732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
30237ac6653aSJeff Kirsher  *  @skb : the socket buffer
30247ac6653aSJeff Kirsher  *  @dev : device pointer
302532ceabcaSGiuseppe CAVALLARO  *  Description: this is the tx entry point of the driver.
302632ceabcaSGiuseppe CAVALLARO  *  It programs the chain or the ring and supports oversized frames
302732ceabcaSGiuseppe CAVALLARO  *  and the SG feature.
30287ac6653aSJeff Kirsher  */
30297ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
30307ac6653aSJeff Kirsher {
30317ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
30320e80bdc9SGiuseppe Cavallaro 	unsigned int nopaged_len = skb_headlen(skb);
30334a7d666aSGiuseppe CAVALLARO 	int i, csum_insertion = 0, is_jumbo = 0;
3034ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
30357ac6653aSJeff Kirsher 	int nfrags = skb_shinfo(skb)->nr_frags;
30367ac6653aSJeff Kirsher 	struct dma_desc *desc, *first;
3037ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
3038a993db88SJose Abreu 	unsigned int first_entry;
30390e80bdc9SGiuseppe Cavallaro 	unsigned int enh_desc;
3040a993db88SJose Abreu 	dma_addr_t des;
3041a993db88SJose Abreu 	int entry;
3042f748be53SAlexandre TORGUE 
3043ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
3044ce736788SJoao Pinto 
3045e2cd682dSJose Abreu 	if (priv->tx_path_in_lpi_mode)
3046e2cd682dSJose Abreu 		stmmac_disable_eee_mode(priv);
3047e2cd682dSJose Abreu 
3048f748be53SAlexandre TORGUE 	/* Manage oversized TCP frames for GMAC4 device */
3049f748be53SAlexandre TORGUE 	if (skb_is_gso(skb) && priv->tso) {
30504993e5b3SJose Abreu 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3051f748be53SAlexandre TORGUE 			return stmmac_tso_xmit(skb, dev);
3052f748be53SAlexandre TORGUE 	}
30537ac6653aSJeff Kirsher 
3054ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3055c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3056c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3057c22a3f48SJoao Pinto 								queue));
30587ac6653aSJeff Kirsher 			/* This is a hard error, log it. */
305938ddc59dSLABBE Corentin 			netdev_err(priv->dev,
306038ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
306138ddc59dSLABBE Corentin 				   __func__);
30627ac6653aSJeff Kirsher 		}
30637ac6653aSJeff Kirsher 		return NETDEV_TX_BUSY;
30647ac6653aSJeff Kirsher 	}
30657ac6653aSJeff Kirsher 
3066ce736788SJoao Pinto 	entry = tx_q->cur_tx;
30670e80bdc9SGiuseppe Cavallaro 	first_entry = entry;
3068b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
30697ac6653aSJeff Kirsher 
30707ac6653aSJeff Kirsher 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
30717ac6653aSJeff Kirsher 
30720e80bdc9SGiuseppe Cavallaro 	if (likely(priv->extend_desc))
3073ce736788SJoao Pinto 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3074c24602efSGiuseppe CAVALLARO 	else
3075ce736788SJoao Pinto 		desc = tx_q->dma_tx + entry;
3076c24602efSGiuseppe CAVALLARO 
30777ac6653aSJeff Kirsher 	first = desc;
30787ac6653aSJeff Kirsher 
30790e80bdc9SGiuseppe Cavallaro 	enh_desc = priv->plat->enh_desc;
30804a7d666aSGiuseppe CAVALLARO 	/* To program the descriptors according to the size of the frame */
308129896a67SGiuseppe CAVALLARO 	if (enh_desc)
30822c520b1cSJose Abreu 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
308329896a67SGiuseppe CAVALLARO 
308463a550fcSJose Abreu 	if (unlikely(is_jumbo)) {
30852c520b1cSJose Abreu 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
308663a550fcSJose Abreu 		if (unlikely(entry < 0) && (entry != -EINVAL))
3087362b37beSGiuseppe CAVALLARO 			goto dma_map_err;
308829896a67SGiuseppe CAVALLARO 	}
30897ac6653aSJeff Kirsher 
30907ac6653aSJeff Kirsher 	for (i = 0; i < nfrags; i++) {
30919e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
30929e903e08SEric Dumazet 		int len = skb_frag_size(frag);
3093be434d50SGiuseppe Cavallaro 		bool last_segment = (i == (nfrags - 1));
30947ac6653aSJeff Kirsher 
3095e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3096b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[entry]);
3097e3ad57c9SGiuseppe Cavallaro 
30980e80bdc9SGiuseppe Cavallaro 		if (likely(priv->extend_desc))
3099ce736788SJoao Pinto 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3100c24602efSGiuseppe CAVALLARO 		else
3101ce736788SJoao Pinto 			desc = tx_q->dma_tx + entry;
31027ac6653aSJeff Kirsher 
3103f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3104f722380dSIan Campbell 				       DMA_TO_DEVICE);
3105f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
3106362b37beSGiuseppe CAVALLARO 			goto dma_map_err; /* should reuse desc w/o issues */
3107362b37beSGiuseppe CAVALLARO 
3108ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].buf = des;
31096844171dSJose Abreu 
31106844171dSJose Abreu 		stmmac_set_desc_addr(priv, desc, des);
3111f748be53SAlexandre TORGUE 
3112ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3113ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].len = len;
3114ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
31150e80bdc9SGiuseppe Cavallaro 
31160e80bdc9SGiuseppe Cavallaro 		/* Prepare the descriptor and set the own bit too */
311742de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
311842de047dSJose Abreu 				priv->mode, 1, last_segment, skb->len);
31197ac6653aSJeff Kirsher 	}
31207ac6653aSJeff Kirsher 
312105cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
312205cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[entry] = skb;
3123e3ad57c9SGiuseppe Cavallaro 
312405cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
312505cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
312605cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
312705cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
312805cf0d1bSNiklas Cassel 	 */
312905cf0d1bSNiklas Cassel 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3130ce736788SJoao Pinto 	tx_q->cur_tx = entry;
31317ac6653aSJeff Kirsher 
31327ac6653aSJeff Kirsher 	if (netif_msg_pktdata(priv)) {
3133d0225e7dSAlexandre TORGUE 		void *tx_head;
3134d0225e7dSAlexandre TORGUE 
313538ddc59dSLABBE Corentin 		netdev_dbg(priv->dev,
313638ddc59dSLABBE Corentin 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3137ce736788SJoao Pinto 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
31380e80bdc9SGiuseppe Cavallaro 			   entry, first, nfrags);
313983d7af64SGiuseppe CAVALLARO 
3140c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
3141ce736788SJoao Pinto 			tx_head = (void *)tx_q->dma_etx;
3142c24602efSGiuseppe CAVALLARO 		else
3143ce736788SJoao Pinto 			tx_head = (void *)tx_q->dma_tx;
3144d0225e7dSAlexandre TORGUE 
314542de047dSJose Abreu 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3146c24602efSGiuseppe CAVALLARO 
314738ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
31487ac6653aSJeff Kirsher 		print_pkt(skb->data, skb->len);
31497ac6653aSJeff Kirsher 	}
31500e80bdc9SGiuseppe Cavallaro 
3151ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3152b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3153b3e51069SLABBE Corentin 			  __func__);
3154c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
31557ac6653aSJeff Kirsher 	}
31567ac6653aSJeff Kirsher 
31577ac6653aSJeff Kirsher 	dev->stats.tx_bytes += skb->len;
31587ac6653aSJeff Kirsher 
31590e80bdc9SGiuseppe Cavallaro 	/* According to the coalesce parameter, the IC bit for the latest
31600e80bdc9SGiuseppe Cavallaro 	 * segment is not set and the timer is re-armed to clean the tx status.
31610e80bdc9SGiuseppe Cavallaro 	 * This approach takes care of the fragments: desc is the first
31620e80bdc9SGiuseppe Cavallaro 	 * element in case of no SG.
31630e80bdc9SGiuseppe Cavallaro 	 */
31648fce3331SJose Abreu 	tx_q->tx_count_frames += nfrags + 1;
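	/* Frames being hardware-timestamped on GMAC4 and newer always request
	 * an immediate interrupt, so the TX timestamp can be collected from
	 * the descriptor write-back without waiting for the coalesce timer.
	 */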
3165d0bb82fdSRoland Hii 	if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3166d0bb82fdSRoland Hii 	    !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
3167d0bb82fdSRoland Hii 	    (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3168d0bb82fdSRoland Hii 	    priv->hwts_tx_en)) {
3169d0bb82fdSRoland Hii 		stmmac_tx_timer_arm(priv, queue);
3170d0bb82fdSRoland Hii 	} else {
3171d0bb82fdSRoland Hii 		tx_q->tx_count_frames = 0;
317242de047dSJose Abreu 		stmmac_set_tx_ic(priv, desc);
31730e80bdc9SGiuseppe Cavallaro 		priv->xstats.tx_set_ic_bit++;
31740e80bdc9SGiuseppe Cavallaro 	}
31750e80bdc9SGiuseppe Cavallaro 
31760e80bdc9SGiuseppe Cavallaro 	skb_tx_timestamp(skb);
31770e80bdc9SGiuseppe Cavallaro 
31780e80bdc9SGiuseppe Cavallaro 	/* Ready to fill the first descriptor and set the OWN bit w/o any
31790e80bdc9SGiuseppe Cavallaro 	 * problems because all the descriptors are actually ready to be
31800e80bdc9SGiuseppe Cavallaro 	 * passed to the DMA engine.
31810e80bdc9SGiuseppe Cavallaro 	 */
31820e80bdc9SGiuseppe Cavallaro 	if (likely(!is_jumbo)) {
31830e80bdc9SGiuseppe Cavallaro 		bool last_segment = (nfrags == 0);
31840e80bdc9SGiuseppe Cavallaro 
3185f748be53SAlexandre TORGUE 		des = dma_map_single(priv->device, skb->data,
31860e80bdc9SGiuseppe Cavallaro 				     nopaged_len, DMA_TO_DEVICE);
3187f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
31880e80bdc9SGiuseppe Cavallaro 			goto dma_map_err;
31890e80bdc9SGiuseppe Cavallaro 
3190ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].buf = des;
31916844171dSJose Abreu 
31926844171dSJose Abreu 		stmmac_set_desc_addr(priv, first, des);
3193f748be53SAlexandre TORGUE 
3194ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3195ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
31960e80bdc9SGiuseppe Cavallaro 
3197891434b1SRayagond Kokatanur 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3198891434b1SRayagond Kokatanur 			     priv->hwts_tx_en)) {
3199891434b1SRayagond Kokatanur 			/* declare that device is doing timestamping */
3200891434b1SRayagond Kokatanur 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
320142de047dSJose Abreu 			stmmac_enable_tx_timestamp(priv, first);
3202891434b1SRayagond Kokatanur 		}
3203891434b1SRayagond Kokatanur 
32040e80bdc9SGiuseppe Cavallaro 		/* Prepare the first descriptor setting the OWN bit too */
320542de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
320642de047dSJose Abreu 				csum_insertion, priv->mode, 1, last_segment,
320742de047dSJose Abreu 				skb->len);
320880acbed9SAaro Koskinen 	} else {
320980acbed9SAaro Koskinen 		stmmac_set_tx_owner(priv, first);
321080acbed9SAaro Koskinen 	}
32110e80bdc9SGiuseppe Cavallaro 
32120e80bdc9SGiuseppe Cavallaro 	/* The own bit must be the latest setting done when preparing the
32130e80bdc9SGiuseppe Cavallaro 	 * descriptor, and then a barrier is needed to make sure that
32140e80bdc9SGiuseppe Cavallaro 	 * everything is coherent before granting ownership to the DMA engine.
32150e80bdc9SGiuseppe Cavallaro 	 */
321695eb930aSNiklas Cassel 	wmb();
32177ac6653aSJeff Kirsher 
3218c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3219f748be53SAlexandre TORGUE 
3220a4e887faSJose Abreu 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
32218fce3331SJose Abreu 
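	/* Advance the tail pointer past the last prepared descriptor so the
	 * DMA engine fetches the newly queued frame (on cores that use a
	 * descriptor tail pointer, e.g. GMAC4/XGMAC).
	 */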
32220431100bSJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3223f1565c60SJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
32247ac6653aSJeff Kirsher 
3225362b37beSGiuseppe CAVALLARO 	return NETDEV_TX_OK;
3226a9097a96SGiuseppe CAVALLARO 
3227362b37beSGiuseppe CAVALLARO dma_map_err:
322838ddc59dSLABBE Corentin 	netdev_err(priv->dev, "Tx DMA map failed\n");
3229362b37beSGiuseppe CAVALLARO 	dev_kfree_skb(skb);
3230362b37beSGiuseppe CAVALLARO 	priv->dev->stats.tx_dropped++;
32317ac6653aSJeff Kirsher 	return NETDEV_TX_OK;
32327ac6653aSJeff Kirsher }
32337ac6653aSJeff Kirsher 
3234b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3235b9381985SVince Bridgers {
3236ab188e8fSElad Nachman 	struct vlan_ethhdr *veth;
3237ab188e8fSElad Nachman 	__be16 vlan_proto;
3238b9381985SVince Bridgers 	u16 vlanid;
3239b9381985SVince Bridgers 
3240ab188e8fSElad Nachman 	veth = (struct vlan_ethhdr *)skb->data;
3241ab188e8fSElad Nachman 	vlan_proto = veth->h_vlan_proto;
3242ab188e8fSElad Nachman 
3243ab188e8fSElad Nachman 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3244ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3245ab188e8fSElad Nachman 	    (vlan_proto == htons(ETH_P_8021AD) &&
3246ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3247b9381985SVince Bridgers 		/* pop the vlan tag */
3248ab188e8fSElad Nachman 		vlanid = ntohs(veth->h_vlan_TCI);
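		/* Shift the destination and source MAC addresses (12 bytes)
		 * forward over the 4-byte tag that skb_pull() removes below.
		 */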
3249ab188e8fSElad Nachman 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3250b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
3251ab188e8fSElad Nachman 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3252b9381985SVince Bridgers 	}
3253b9381985SVince Bridgers }
3254b9381985SVince Bridgers 
3255b9381985SVince Bridgers 
325654139cf3SJoao Pinto static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3257120e87f9SGiuseppe Cavallaro {
325854139cf3SJoao Pinto 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3259120e87f9SGiuseppe Cavallaro 		return 0;
3260120e87f9SGiuseppe Cavallaro 
3261120e87f9SGiuseppe Cavallaro 	return 1;
3262120e87f9SGiuseppe Cavallaro }
3263120e87f9SGiuseppe Cavallaro 
326432ceabcaSGiuseppe CAVALLARO /**
3265732fdf0eSGiuseppe CAVALLARO  * stmmac_rx_refill - refill the used preallocated RX buffers
326632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
326754139cf3SJoao Pinto  * @queue: RX queue index
326832ceabcaSGiuseppe CAVALLARO  * Description : this is to reallocate the RX buffers used by the reception
326932ceabcaSGiuseppe CAVALLARO  * process, which is based on zero-copy.
327032ceabcaSGiuseppe CAVALLARO  */
327154139cf3SJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
32727ac6653aSJeff Kirsher {
327354139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
32743caa61c2SJose Abreu 	int len, dirty = stmmac_rx_dirty(priv, queue);
327554139cf3SJoao Pinto 	unsigned int entry = rx_q->dirty_rx;
327654139cf3SJoao Pinto 
32773caa61c2SJose Abreu 	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
32783caa61c2SJose Abreu 
3279e3ad57c9SGiuseppe Cavallaro 	while (dirty-- > 0) {
32802af6106aSJose Abreu 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3281c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
3282d429b66eSJose Abreu 		bool use_rx_wd;
3283c24602efSGiuseppe CAVALLARO 
3284c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
328554139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3286c24602efSGiuseppe CAVALLARO 		else
328754139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3288c24602efSGiuseppe CAVALLARO 
32892af6106aSJose Abreu 		if (!buf->page) {
32902af6106aSJose Abreu 			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
32912af6106aSJose Abreu 			if (!buf->page)
32927ac6653aSJeff Kirsher 				break;
3293120e87f9SGiuseppe Cavallaro 		}
32947ac6653aSJeff Kirsher 
32952af6106aSJose Abreu 		buf->addr = page_pool_get_dma_addr(buf->page);
32963caa61c2SJose Abreu 
32973caa61c2SJose Abreu 		/* Sync whole allocation to device. This will invalidate old
32983caa61c2SJose Abreu 		 * data.
32993caa61c2SJose Abreu 		 */
33003caa61c2SJose Abreu 		dma_sync_single_for_device(priv->device, buf->addr, len,
33013caa61c2SJose Abreu 					   DMA_FROM_DEVICE);
33023caa61c2SJose Abreu 
33032af6106aSJose Abreu 		stmmac_set_desc_addr(priv, p, buf->addr);
33042c520b1cSJose Abreu 		stmmac_refill_desc3(priv, rx_q, p);
3305286a8372SGiuseppe CAVALLARO 
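		/* With the RX interrupt watchdog (RIWT) in use, only every
		 * rx_coal_frames-th descriptor asks for an immediate
		 * interrupt; the others rely on the watchdog timer.
		 */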
3306d429b66eSJose Abreu 		rx_q->rx_count_frames++;
3307d429b66eSJose Abreu 		rx_q->rx_count_frames %= priv->rx_coal_frames;
3308d429b66eSJose Abreu 		use_rx_wd = priv->use_riwt && rx_q->rx_count_frames;
3309d429b66eSJose Abreu 
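		/* Make sure the buffer address is written before handing the
		 * descriptor back to the hardware via the OWN bit.
		 */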
3310ad688cdbSPavel Machek 		dma_wmb();
33112af6106aSJose Abreu 		stmmac_set_rx_owner(priv, p, use_rx_wd);
3312e3ad57c9SGiuseppe Cavallaro 
3313e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
33147ac6653aSJeff Kirsher 	}
331554139cf3SJoao Pinto 	rx_q->dirty_rx = entry;
3316858a31ffSJose Abreu 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3317858a31ffSJose Abreu 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
33184523a561SBiao Huang 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
33197ac6653aSJeff Kirsher }
33207ac6653aSJeff Kirsher 
332132ceabcaSGiuseppe CAVALLARO /**
3322732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
332332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
332454139cf3SJoao Pinto  * @limit: napi budget
332554139cf3SJoao Pinto  * @queue: RX queue index.
332632ceabcaSGiuseppe CAVALLARO  * Description : this is the function called by the napi poll method.
332732ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
332832ceabcaSGiuseppe CAVALLARO  */
332954139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
33307ac6653aSJeff Kirsher {
333154139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
33328fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[queue];
333307b39753SAaro Koskinen 	unsigned int next_entry = rx_q->cur_rx;
333454139cf3SJoao Pinto 	int coe = priv->hw->rx_csum;
33357ac6653aSJeff Kirsher 	unsigned int count = 0;
33367ac6653aSJeff Kirsher 
333783d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
3338d0225e7dSAlexandre TORGUE 		void *rx_head;
3339d0225e7dSAlexandre TORGUE 
334038ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3341c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
334254139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
3343c24602efSGiuseppe CAVALLARO 		else
334454139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
3345d0225e7dSAlexandre TORGUE 
334642de047dSJose Abreu 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
33477ac6653aSJeff Kirsher 	}
3348c24602efSGiuseppe CAVALLARO 	while (count < limit) {
33492af6106aSJose Abreu 		struct stmmac_rx_buffer *buf;
33502af6106aSJose Abreu 		struct dma_desc *np, *p;
335107b39753SAaro Koskinen 		int entry, status;
33527ac6653aSJeff Kirsher 
335307b39753SAaro Koskinen 		entry = next_entry;
33542af6106aSJose Abreu 		buf = &rx_q->buf_pool[entry];
335507b39753SAaro Koskinen 
3356c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
335754139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3358c24602efSGiuseppe CAVALLARO 		else
335954139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3360c24602efSGiuseppe CAVALLARO 
3361c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
336242de047dSJose Abreu 		status = stmmac_rx_status(priv, &priv->dev->stats,
3363c1fa3212SFabrice Gasnier 				&priv->xstats, p);
3364c1fa3212SFabrice Gasnier 		/* check if still owned by the DMA, otherwise go ahead */
3365c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
33667ac6653aSJeff Kirsher 			break;
33677ac6653aSJeff Kirsher 
33687ac6653aSJeff Kirsher 		count++;
33697ac6653aSJeff Kirsher 
337054139cf3SJoao Pinto 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
337154139cf3SJoao Pinto 		next_entry = rx_q->cur_rx;
3372e3ad57c9SGiuseppe Cavallaro 
3373c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
337454139cf3SJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3375c24602efSGiuseppe CAVALLARO 		else
337654139cf3SJoao Pinto 			np = rx_q->dma_rx + next_entry;
3377ba1ffd74SGiuseppe CAVALLARO 
3378ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
33797ac6653aSJeff Kirsher 
338042de047dSJose Abreu 		if (priv->extend_desc)
338142de047dSJose Abreu 			stmmac_rx_extended_status(priv, &priv->dev->stats,
338242de047dSJose Abreu 					&priv->xstats, rx_q->dma_erx + entry);
3383891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
33842af6106aSJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
33857ac6653aSJeff Kirsher 			priv->dev->stats.rx_errors++;
33862af6106aSJose Abreu 			buf->page = NULL;
3387891434b1SRayagond Kokatanur 		} else {
33887ac6653aSJeff Kirsher 			struct sk_buff *skb;
33897ac6653aSJeff Kirsher 			int frame_len;
3390f748be53SAlexandre TORGUE 			unsigned int des;
3391f748be53SAlexandre TORGUE 
3392d2df9ea0SJose Abreu 			stmmac_get_desc_addr(priv, p, &des);
339342de047dSJose Abreu 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3394ceb69499SGiuseppe CAVALLARO 
33958d45e42bSLABBE Corentin 			/*  If frame length is greater than skb buffer size
3396f748be53SAlexandre TORGUE 			 *  (preallocated during init) then the packet is
3397f748be53SAlexandre TORGUE 			 *  ignored
3398f748be53SAlexandre TORGUE 			 */
3399e527c4a7SGiuseppe CAVALLARO 			if (frame_len > priv->dma_buf_sz) {
3400972c9be7SAaro Koskinen 				if (net_ratelimit())
340138ddc59dSLABBE Corentin 					netdev_err(priv->dev,
340238ddc59dSLABBE Corentin 						   "len %d larger than size (%d)\n",
340338ddc59dSLABBE Corentin 						   frame_len, priv->dma_buf_sz);
3404e527c4a7SGiuseppe CAVALLARO 				priv->dev->stats.rx_length_errors++;
340507b39753SAaro Koskinen 				continue;
3406e527c4a7SGiuseppe CAVALLARO 			}
3407e527c4a7SGiuseppe CAVALLARO 
34087ac6653aSJeff Kirsher 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3409ceb69499SGiuseppe CAVALLARO 			 * Type frames (LLC/LLC-SNAP)
3410565020aaSJose Abreu 			 *
3411565020aaSJose Abreu 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3412565020aaSJose Abreu 			 * feature is always disabled and packets need to be
3413565020aaSJose Abreu 			 * stripped manually.
3414ceb69499SGiuseppe CAVALLARO 			 */
3415565020aaSJose Abreu 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3416565020aaSJose Abreu 			    unlikely(status != llc_snap))
34177ac6653aSJeff Kirsher 				frame_len -= ETH_FCS_LEN;
34187ac6653aSJeff Kirsher 
341983d7af64SGiuseppe CAVALLARO 			if (netif_msg_rx_status(priv)) {
342038ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3421f748be53SAlexandre TORGUE 					   p, entry, des);
342238ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
342383d7af64SGiuseppe CAVALLARO 					   frame_len, status);
342483d7af64SGiuseppe CAVALLARO 			}
342522ad3838SGiuseppe Cavallaro 
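			/* Allocate a fresh skb and copy the payload out of
			 * the page pool buffer, so the page can be recycled
			 * immediately instead of travelling up the stack.
			 */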
34262af6106aSJose Abreu 			skb = netdev_alloc_skb_ip_align(priv->dev, frame_len);
342722ad3838SGiuseppe Cavallaro 			if (unlikely(!skb)) {
342822ad3838SGiuseppe Cavallaro 				priv->dev->stats.rx_dropped++;
342907b39753SAaro Koskinen 				continue;
343022ad3838SGiuseppe Cavallaro 			}
343122ad3838SGiuseppe Cavallaro 
34322af6106aSJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
34332af6106aSJose Abreu 						frame_len, DMA_FROM_DEVICE);
34342af6106aSJose Abreu 			skb_copy_to_linear_data(skb, page_address(buf->page),
343522ad3838SGiuseppe Cavallaro 						frame_len);
343622ad3838SGiuseppe Cavallaro 			skb_put(skb, frame_len);
343722ad3838SGiuseppe Cavallaro 
34387ac6653aSJeff Kirsher 			if (netif_msg_pktdata(priv)) {
343938ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "frame received (%dbytes)",
344038ddc59dSLABBE Corentin 					   frame_len);
34417ac6653aSJeff Kirsher 				print_pkt(skb->data, frame_len);
34427ac6653aSJeff Kirsher 			}
344383d7af64SGiuseppe CAVALLARO 
3444ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3445ba1ffd74SGiuseppe CAVALLARO 
3446b9381985SVince Bridgers 			stmmac_rx_vlan(priv->dev, skb);
3447b9381985SVince Bridgers 
34487ac6653aSJeff Kirsher 			skb->protocol = eth_type_trans(skb, priv->dev);
34497ac6653aSJeff Kirsher 
3450ceb69499SGiuseppe CAVALLARO 			if (unlikely(!coe))
34517ac6653aSJeff Kirsher 				skb_checksum_none_assert(skb);
345262a2ab93SGiuseppe CAVALLARO 			else
34537ac6653aSJeff Kirsher 				skb->ip_summed = CHECKSUM_UNNECESSARY;
345462a2ab93SGiuseppe CAVALLARO 
34554ccb4585SJose Abreu 			napi_gro_receive(&ch->rx_napi, skb);
34567ac6653aSJeff Kirsher 
34572af6106aSJose Abreu 			/* Data payload copied into SKB, page ready for recycle */
34582af6106aSJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
34592af6106aSJose Abreu 			buf->page = NULL;
34602af6106aSJose Abreu 
34617ac6653aSJeff Kirsher 			priv->dev->stats.rx_packets++;
34627ac6653aSJeff Kirsher 			priv->dev->stats.rx_bytes += frame_len;
34637ac6653aSJeff Kirsher 		}
34647ac6653aSJeff Kirsher 	}
34657ac6653aSJeff Kirsher 
346654139cf3SJoao Pinto 	stmmac_rx_refill(priv, queue);
34677ac6653aSJeff Kirsher 
34687ac6653aSJeff Kirsher 	priv->xstats.rx_pkt_n += count;
34697ac6653aSJeff Kirsher 
34707ac6653aSJeff Kirsher 	return count;
34717ac6653aSJeff Kirsher }
34727ac6653aSJeff Kirsher 
34734ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
34747ac6653aSJeff Kirsher {
34758fce3331SJose Abreu 	struct stmmac_channel *ch =
34764ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, rx_napi);
34778fce3331SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
34788fce3331SJose Abreu 	u32 chan = ch->index;
34794ccb4585SJose Abreu 	int work_done;
34807ac6653aSJeff Kirsher 
34819125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
3482ce736788SJoao Pinto 
34834ccb4585SJose Abreu 	work_done = stmmac_rx(priv, budget, chan);
34844ccb4585SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done))
34854ccb4585SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
34864ccb4585SJose Abreu 	return work_done;
34874ccb4585SJose Abreu }
3488ce736788SJoao Pinto 
34894ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
34904ccb4585SJose Abreu {
34914ccb4585SJose Abreu 	struct stmmac_channel *ch =
34924ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, tx_napi);
34934ccb4585SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
34944ccb4585SJose Abreu 	struct stmmac_tx_queue *tx_q;
34954ccb4585SJose Abreu 	u32 chan = ch->index;
34964ccb4585SJose Abreu 	int work_done;
34974ccb4585SJose Abreu 
34984ccb4585SJose Abreu 	priv->xstats.napi_poll++;
34994ccb4585SJose Abreu 
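	/* TX completion is bounded by the ring size rather than by the NAPI
	 * budget, so clamp what is reported back to NAPI.
	 */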
35004ccb4585SJose Abreu 	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3501fa0be0a4SJose Abreu 	work_done = min(work_done, budget);
35028fce3331SJose Abreu 
3503a66b5884SJose Abreu 	if (work_done < budget)
3504a66b5884SJose Abreu 		napi_complete_done(napi, work_done);
35054ccb4585SJose Abreu 
35064ccb4585SJose Abreu 	/* Force transmission restart */
35074ccb4585SJose Abreu 	tx_q = &priv->tx_queue[chan];
35084ccb4585SJose Abreu 	if (tx_q->cur_tx != tx_q->dirty_tx) {
35094ccb4585SJose Abreu 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
35104ccb4585SJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
35114ccb4585SJose Abreu 				       chan);
3512fa0be0a4SJose Abreu 	}
35138fce3331SJose Abreu 
35147ac6653aSJeff Kirsher 	return work_done;
35157ac6653aSJeff Kirsher }
35167ac6653aSJeff Kirsher 
35177ac6653aSJeff Kirsher /**
35187ac6653aSJeff Kirsher  *  stmmac_tx_timeout
35197ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
35207ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
35217284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
35227ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
35237ac6653aSJeff Kirsher  *   in order to transmit a new packet.
35247ac6653aSJeff Kirsher  */
35257ac6653aSJeff Kirsher static void stmmac_tx_timeout(struct net_device *dev)
35267ac6653aSJeff Kirsher {
35277ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
35287ac6653aSJeff Kirsher 
352934877a15SJose Abreu 	stmmac_global_err(priv);
35307ac6653aSJeff Kirsher }
35317ac6653aSJeff Kirsher 
35327ac6653aSJeff Kirsher /**
353301789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
35347ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
35357ac6653aSJeff Kirsher  *  Description:
35367ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
35377ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
35387ac6653aSJeff Kirsher  *  Return value:
35397ac6653aSJeff Kirsher  *  void.
35407ac6653aSJeff Kirsher  */
354101789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
35427ac6653aSJeff Kirsher {
35437ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
35447ac6653aSJeff Kirsher 
3545c10d4c82SJose Abreu 	stmmac_set_filter(priv, priv->hw, dev);
35467ac6653aSJeff Kirsher }
35477ac6653aSJeff Kirsher 
35487ac6653aSJeff Kirsher /**
35497ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
35507ac6653aSJeff Kirsher  *  @dev : device pointer.
35517ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
35527ac6653aSJeff Kirsher  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
35537ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
35547ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
35557ac6653aSJeff Kirsher  *  Return value:
35567ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
35577ac6653aSJeff Kirsher  *  file on failure.
35587ac6653aSJeff Kirsher  */
35597ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
35607ac6653aSJeff Kirsher {
356138ddc59dSLABBE Corentin 	struct stmmac_priv *priv = netdev_priv(dev);
356238ddc59dSLABBE Corentin 
35637ac6653aSJeff Kirsher 	if (netif_running(dev)) {
356438ddc59dSLABBE Corentin 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
35657ac6653aSJeff Kirsher 		return -EBUSY;
35667ac6653aSJeff Kirsher 	}
35677ac6653aSJeff Kirsher 
35687ac6653aSJeff Kirsher 	dev->mtu = new_mtu;
3569f748be53SAlexandre TORGUE 
35707ac6653aSJeff Kirsher 	netdev_update_features(dev);
35717ac6653aSJeff Kirsher 
35727ac6653aSJeff Kirsher 	return 0;
35737ac6653aSJeff Kirsher }
35747ac6653aSJeff Kirsher 
3575c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
3576c8f44affSMichał Mirosław 					     netdev_features_t features)
35777ac6653aSJeff Kirsher {
35787ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
35797ac6653aSJeff Kirsher 
358038912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
35817ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
3582d2afb5bdSGiuseppe CAVALLARO 
35837ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
3584a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
35857ac6653aSJeff Kirsher 
35867ac6653aSJeff Kirsher 	/* Some GMAC devices have buggy Jumbo frame support that
35877ac6653aSJeff Kirsher 	 * needs to have the Tx COE disabled for oversized frames
35887ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
3589ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and do not use SF.
3590ceb69499SGiuseppe CAVALLARO 	 */
35917ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3592a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
35937ac6653aSJeff Kirsher 
3594f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
3595f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3596f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
3597f748be53SAlexandre TORGUE 			priv->tso = true;
3598f748be53SAlexandre TORGUE 		else
3599f748be53SAlexandre TORGUE 			priv->tso = false;
3600f748be53SAlexandre TORGUE 	}
3601f748be53SAlexandre TORGUE 
36027ac6653aSJeff Kirsher 	return features;
36037ac6653aSJeff Kirsher }
36047ac6653aSJeff Kirsher 
3605d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
3606d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
3607d2afb5bdSGiuseppe CAVALLARO {
3608d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
3609d2afb5bdSGiuseppe CAVALLARO 
3610d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type in case checksum offload is supported */
3611d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
3612d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
3613d2afb5bdSGiuseppe CAVALLARO 	else
3614d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
3615d2afb5bdSGiuseppe CAVALLARO 	/* No check is needed because rx_coe has been set before and it will be
3616d2afb5bdSGiuseppe CAVALLARO 	 * fixed in case of an issue.
3617d2afb5bdSGiuseppe CAVALLARO 	 */
3618c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
3619d2afb5bdSGiuseppe CAVALLARO 
3620d2afb5bdSGiuseppe CAVALLARO 	return 0;
3621d2afb5bdSGiuseppe CAVALLARO }
3622d2afb5bdSGiuseppe CAVALLARO 
362332ceabcaSGiuseppe CAVALLARO /**
362432ceabcaSGiuseppe CAVALLARO  *  stmmac_interrupt - main ISR
362532ceabcaSGiuseppe CAVALLARO  *  @irq: interrupt number.
362632ceabcaSGiuseppe CAVALLARO  *  @dev_id: to pass the net device pointer.
362732ceabcaSGiuseppe CAVALLARO  *  Description: this is the main driver interrupt service routine.
3628732fdf0eSGiuseppe CAVALLARO  *  It can call:
3629732fdf0eSGiuseppe CAVALLARO  *  o DMA service routine (to manage incoming frame reception and transmission
3630732fdf0eSGiuseppe CAVALLARO  *    status)
3631732fdf0eSGiuseppe CAVALLARO  *  o Core interrupts to manage: remote wake-up, management counter, LPI
363232ceabcaSGiuseppe CAVALLARO  *    interrupts.
363332ceabcaSGiuseppe CAVALLARO  */
36347ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
36357ac6653aSJeff Kirsher {
36367ac6653aSJeff Kirsher 	struct net_device *dev = (struct net_device *)dev_id;
36377ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
36387bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
36397bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
36407bac4e1eSJoao Pinto 	u32 queues_count;
36417bac4e1eSJoao Pinto 	u32 queue;
36427d9e6c5aSJose Abreu 	bool xmac;
36437bac4e1eSJoao Pinto 
36447d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
36457bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
36467ac6653aSJeff Kirsher 
364789f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
364889f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
364989f7f2cfSSrinivas Kandagatla 
36507ac6653aSJeff Kirsher 	if (unlikely(!dev)) {
365138ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
36527ac6653aSJeff Kirsher 		return IRQ_NONE;
36537ac6653aSJeff Kirsher 	}
36547ac6653aSJeff Kirsher 
365534877a15SJose Abreu 	/* Check if adapter is up */
365634877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
365734877a15SJose Abreu 		return IRQ_HANDLED;
36588bf993a5SJose Abreu 	/* Check if a fatal error happened */
36598bf993a5SJose Abreu 	if (stmmac_safety_feat_interrupt(priv))
36608bf993a5SJose Abreu 		return IRQ_HANDLED;
366134877a15SJose Abreu 
36627ac6653aSJeff Kirsher 	/* To handle GMAC own interrupts */
36637d9e6c5aSJose Abreu 	if ((priv->plat->has_gmac) || xmac) {
3664c10d4c82SJose Abreu 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
366561fac60aSJose Abreu 		int mtl_status;
36668f71a88dSJoao Pinto 
3667d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
3668d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
36690982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3670d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
36710982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3672d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
36737bac4e1eSJoao Pinto 		}
36747bac4e1eSJoao Pinto 
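		/* Check the per-queue MTL interrupt status; on an RX FIFO
		 * overflow the RX tail pointer is rewritten to restart DMA.
		 */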
36757bac4e1eSJoao Pinto 		for (queue = 0; queue < queues_count; queue++) {
367661fac60aSJose Abreu 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
367754139cf3SJoao Pinto 
367861fac60aSJose Abreu 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
367961fac60aSJose Abreu 								queue);
368061fac60aSJose Abreu 			if (mtl_status != -EINVAL)
368161fac60aSJose Abreu 				status |= mtl_status;
36827bac4e1eSJoao Pinto 
3683a4e887faSJose Abreu 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
368461fac60aSJose Abreu 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
368554139cf3SJoao Pinto 						       rx_q->rx_tail_addr,
36867bac4e1eSJoao Pinto 						       queue);
36877bac4e1eSJoao Pinto 		}
368870523e63SGiuseppe CAVALLARO 
368970523e63SGiuseppe CAVALLARO 		/* PCS link status */
36903fe5cadbSGiuseppe CAVALLARO 		if (priv->hw->pcs) {
369170523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
369270523e63SGiuseppe CAVALLARO 				netif_carrier_on(dev);
369370523e63SGiuseppe CAVALLARO 			else
369470523e63SGiuseppe CAVALLARO 				netif_carrier_off(dev);
369570523e63SGiuseppe CAVALLARO 		}
3696d765955dSGiuseppe CAVALLARO 	}
3697d765955dSGiuseppe CAVALLARO 
3698d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
36997ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
37007ac6653aSJeff Kirsher 
37017ac6653aSJeff Kirsher 	return IRQ_HANDLED;
37027ac6653aSJeff Kirsher }
37037ac6653aSJeff Kirsher 
37047ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
37057ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools
3706ceb69499SGiuseppe CAVALLARO  * to allow network I/O with interrupts disabled.
3707ceb69499SGiuseppe CAVALLARO  */
37087ac6653aSJeff Kirsher static void stmmac_poll_controller(struct net_device *dev)
37097ac6653aSJeff Kirsher {
37107ac6653aSJeff Kirsher 	disable_irq(dev->irq);
37117ac6653aSJeff Kirsher 	stmmac_interrupt(dev->irq, dev);
37127ac6653aSJeff Kirsher 	enable_irq(dev->irq);
37137ac6653aSJeff Kirsher }
37147ac6653aSJeff Kirsher #endif
37157ac6653aSJeff Kirsher 
37167ac6653aSJeff Kirsher /**
37177ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
37187ac6653aSJeff Kirsher  *  @dev: Device pointer.
37197ac6653aSJeff Kirsher  *  @rq: An IOCTL-specific structure that can contain a pointer to
37207ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
37217ac6653aSJeff Kirsher  *  @cmd: IOCTL command
37227ac6653aSJeff Kirsher  *  Description:
372332ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
37247ac6653aSJeff Kirsher  */
37257ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
37267ac6653aSJeff Kirsher {
372774371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(dev);
3728891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
37297ac6653aSJeff Kirsher 
37307ac6653aSJeff Kirsher 	if (!netif_running(dev))
37317ac6653aSJeff Kirsher 		return -EINVAL;
37327ac6653aSJeff Kirsher 
3733891434b1SRayagond Kokatanur 	switch (cmd) {
3734891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
3735891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
3736891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
373774371272SJose Abreu 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
3738891434b1SRayagond Kokatanur 		break;
3739891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
3740d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_set(dev, rq);
3741d6228b7cSArtem Panfilov 		break;
3742d6228b7cSArtem Panfilov 	case SIOCGHWTSTAMP:
3743d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_get(dev, rq);
3744891434b1SRayagond Kokatanur 		break;
3745891434b1SRayagond Kokatanur 	default:
3746891434b1SRayagond Kokatanur 		break;
3747891434b1SRayagond Kokatanur 	}
37487ac6653aSJeff Kirsher 
37497ac6653aSJeff Kirsher 	return ret;
37507ac6653aSJeff Kirsher }
37517ac6653aSJeff Kirsher 
37524dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
37534dbbe8ddSJose Abreu 				    void *cb_priv)
37544dbbe8ddSJose Abreu {
37554dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
37564dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
37574dbbe8ddSJose Abreu 
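	/* Quiesce all queues while the classifier offload is reconfigured */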
37584dbbe8ddSJose Abreu 	stmmac_disable_all_queues(priv);
37594dbbe8ddSJose Abreu 
37604dbbe8ddSJose Abreu 	switch (type) {
37614dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
37624dbbe8ddSJose Abreu 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
37634dbbe8ddSJose Abreu 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
37644dbbe8ddSJose Abreu 		break;
37654dbbe8ddSJose Abreu 	default:
37664dbbe8ddSJose Abreu 		break;
37674dbbe8ddSJose Abreu 	}
37684dbbe8ddSJose Abreu 
37694dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
37704dbbe8ddSJose Abreu 	return ret;
37714dbbe8ddSJose Abreu }
37724dbbe8ddSJose Abreu 
3773955bcb6eSPablo Neira Ayuso static LIST_HEAD(stmmac_block_cb_list);
3774955bcb6eSPablo Neira Ayuso 
37754dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
37764dbbe8ddSJose Abreu 			   void *type_data)
37774dbbe8ddSJose Abreu {
37784dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
37794dbbe8ddSJose Abreu 
37804dbbe8ddSJose Abreu 	switch (type) {
37814dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
3782955bcb6eSPablo Neira Ayuso 		return flow_block_cb_setup_simple(type_data,
3783955bcb6eSPablo Neira Ayuso 						  &stmmac_block_cb_list,
37844e95bc26SPablo Neira Ayuso 						  stmmac_setup_tc_block_cb,
37854e95bc26SPablo Neira Ayuso 						  priv, priv, true);
37861f705bc6SJose Abreu 	case TC_SETUP_QDISC_CBS:
37871f705bc6SJose Abreu 		return stmmac_tc_setup_cbs(priv, priv, type_data);
37884dbbe8ddSJose Abreu 	default:
37894dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
37904dbbe8ddSJose Abreu 	}
37914dbbe8ddSJose Abreu }
37924dbbe8ddSJose Abreu 
37934993e5b3SJose Abreu static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
37944993e5b3SJose Abreu 			       struct net_device *sb_dev)
37954993e5b3SJose Abreu {
37964993e5b3SJose Abreu 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
37974993e5b3SJose Abreu 		/*
37984993e5b3SJose Abreu 		 * There is no way to determine the number of TSO
37994993e5b3SJose Abreu 		 * capable Queues. Let's always use Queue 0 because
38004993e5b3SJose Abreu 		 * if TSO is supported then at least this one will
38014993e5b3SJose Abreu 		 * be capable.
38024993e5b3SJose Abreu 		 */
38034993e5b3SJose Abreu 		return 0;
38044993e5b3SJose Abreu 	}
38054993e5b3SJose Abreu 
38064993e5b3SJose Abreu 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
38074993e5b3SJose Abreu }
38084993e5b3SJose Abreu 
3809a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3810a830405eSBhadram Varka {
3811a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
3812a830405eSBhadram Varka 	int ret = 0;
3813a830405eSBhadram Varka 
3814a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
3815a830405eSBhadram Varka 	if (ret)
3816a830405eSBhadram Varka 		return ret;
3817a830405eSBhadram Varka 
3818c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3819a830405eSBhadram Varka 
3820a830405eSBhadram Varka 	return ret;
3821a830405eSBhadram Varka }
3822a830405eSBhadram Varka 
382350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
38247ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
38257ac29055SGiuseppe CAVALLARO 
3826c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
3827c24602efSGiuseppe CAVALLARO 			       struct seq_file *seq)
38287ac29055SGiuseppe CAVALLARO {
38297ac29055SGiuseppe CAVALLARO 	int i;
3830c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3831c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
38327ac29055SGiuseppe CAVALLARO 
3833c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
3834c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
3835c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3836c24602efSGiuseppe CAVALLARO 				   i, (unsigned int)virt_to_phys(ep),
3837f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
3838f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
3839f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
3840f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
3841c24602efSGiuseppe CAVALLARO 			ep++;
3842c24602efSGiuseppe CAVALLARO 		} else {
3843c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
384466c25f6eSNiklas Cassel 				   i, (unsigned int)virt_to_phys(p),
3845f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3846f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3847c24602efSGiuseppe CAVALLARO 			p++;
3848c24602efSGiuseppe CAVALLARO 		}
38497ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
38507ac29055SGiuseppe CAVALLARO 	}
3851c24602efSGiuseppe CAVALLARO }
38527ac29055SGiuseppe CAVALLARO 
3853fb0d9c63SYangtao Li static int stmmac_rings_status_show(struct seq_file *seq, void *v)
3854c24602efSGiuseppe CAVALLARO {
3855c24602efSGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
3856c24602efSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
385754139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
3858ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
385954139cf3SJoao Pinto 	u32 queue;
386054139cf3SJoao Pinto 
38615f2b8b62SThierry Reding 	if ((dev->flags & IFF_UP) == 0)
38625f2b8b62SThierry Reding 		return 0;
38635f2b8b62SThierry Reding 
386454139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
386554139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
386654139cf3SJoao Pinto 
386754139cf3SJoao Pinto 		seq_printf(seq, "RX Queue %d:\n", queue);
38687ac29055SGiuseppe CAVALLARO 
3869c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc) {
387054139cf3SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
387154139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_erx,
387254139cf3SJoao Pinto 					   DMA_RX_SIZE, 1, seq);
387354139cf3SJoao Pinto 		} else {
387454139cf3SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
387554139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_rx,
387654139cf3SJoao Pinto 					   DMA_RX_SIZE, 0, seq);
387754139cf3SJoao Pinto 		}
387854139cf3SJoao Pinto 	}
387954139cf3SJoao Pinto 
3880ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
3881ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3882ce736788SJoao Pinto 
3883ce736788SJoao Pinto 		seq_printf(seq, "TX Queue %d:\n", queue);
3884ce736788SJoao Pinto 
388554139cf3SJoao Pinto 		if (priv->extend_desc) {
3886ce736788SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
3887ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_etx,
3888ce736788SJoao Pinto 					   DMA_TX_SIZE, 1, seq);
3889c24602efSGiuseppe CAVALLARO 		} else {
3890ce736788SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
3891ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_tx,
3892ce736788SJoao Pinto 					   DMA_TX_SIZE, 0, seq);
3893ce736788SJoao Pinto 		}
38947ac29055SGiuseppe CAVALLARO 	}
38957ac29055SGiuseppe CAVALLARO 
38967ac29055SGiuseppe CAVALLARO 	return 0;
38977ac29055SGiuseppe CAVALLARO }
3898fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
38997ac29055SGiuseppe CAVALLARO 
3900fb0d9c63SYangtao Li static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
3901e7434821SGiuseppe CAVALLARO {
3902e7434821SGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
3903e7434821SGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
3904e7434821SGiuseppe CAVALLARO 
390519e30c14SGiuseppe CAVALLARO 	if (!priv->hw_cap_support) {
3906e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "DMA HW features not supported\n");
3907e7434821SGiuseppe CAVALLARO 		return 0;
3908e7434821SGiuseppe CAVALLARO 	}
3909e7434821SGiuseppe CAVALLARO 
3910e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
3911e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tDMA HW features\n");
3912e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
3913e7434821SGiuseppe CAVALLARO 
391422d3efe5SPavel Machek 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3915e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
391622d3efe5SPavel Machek 	seq_printf(seq, "\t1000 Mbps: %s\n",
3917e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
391822d3efe5SPavel Machek 	seq_printf(seq, "\tHalf duplex: %s\n",
3919e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3920e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tHash Filter: %s\n",
3921e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3922e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3923e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
39248d45e42bSLABBE Corentin 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3925e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pcs) ? "Y" : "N");
3926e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3927e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3928e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3929e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3930e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3931e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3932e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRMON module: %s\n",
3933e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rmon) ? "Y" : "N");
3934e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3935e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3936e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3937e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
393822d3efe5SPavel Machek 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3939e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.eee) ? "Y" : "N");
3940e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3941e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3942e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3943f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3944f748be53SAlexandre TORGUE 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3945f748be53SAlexandre TORGUE 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3946f748be53SAlexandre TORGUE 	} else {
3947e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3948e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3949e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3950e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3951f748be53SAlexandre TORGUE 	}
3952e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3953e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3954e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3955e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_rx_channel);
3956e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3957e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_tx_channel);
3958e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3959e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3960e7434821SGiuseppe CAVALLARO 
3961e7434821SGiuseppe CAVALLARO 	return 0;
3962e7434821SGiuseppe CAVALLARO }
3963fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
3964e7434821SGiuseppe CAVALLARO 
39657ac29055SGiuseppe CAVALLARO static int stmmac_init_fs(struct net_device *dev)
39667ac29055SGiuseppe CAVALLARO {
3967466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
39687ac29055SGiuseppe CAVALLARO 
3969466c5ac8SMathieu Olivari 	/* Create per netdev entries */
3970466c5ac8SMathieu Olivari 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3971466c5ac8SMathieu Olivari 
3972466c5ac8SMathieu Olivari 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
397338ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
39747ac29055SGiuseppe CAVALLARO 
39757ac29055SGiuseppe CAVALLARO 		return -ENOMEM;
39767ac29055SGiuseppe CAVALLARO 	}
39777ac29055SGiuseppe CAVALLARO 
39787ac29055SGiuseppe CAVALLARO 	/* Entry to report DMA RX/TX rings */
3979466c5ac8SMathieu Olivari 	priv->dbgfs_rings_status =
3980d3757ba4SJoe Perches 		debugfs_create_file("descriptors_status", 0444,
3981466c5ac8SMathieu Olivari 				    priv->dbgfs_dir, dev,
39827ac29055SGiuseppe CAVALLARO 				    &stmmac_rings_status_fops);
39837ac29055SGiuseppe CAVALLARO 
3984466c5ac8SMathieu Olivari 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
398538ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3986466c5ac8SMathieu Olivari 		debugfs_remove_recursive(priv->dbgfs_dir);
39877ac29055SGiuseppe CAVALLARO 
39887ac29055SGiuseppe CAVALLARO 		return -ENOMEM;
39897ac29055SGiuseppe CAVALLARO 	}
39907ac29055SGiuseppe CAVALLARO 
3991e7434821SGiuseppe CAVALLARO 	/* Entry to report the DMA HW features */
3992d3757ba4SJoe Perches 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
3993466c5ac8SMathieu Olivari 						  priv->dbgfs_dir,
3994e7434821SGiuseppe CAVALLARO 						  dev, &stmmac_dma_cap_fops);
3995e7434821SGiuseppe CAVALLARO 
3996466c5ac8SMathieu Olivari 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
399738ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3998466c5ac8SMathieu Olivari 		debugfs_remove_recursive(priv->dbgfs_dir);
3999e7434821SGiuseppe CAVALLARO 
4000e7434821SGiuseppe CAVALLARO 		return -ENOMEM;
4001e7434821SGiuseppe CAVALLARO 	}
4002e7434821SGiuseppe CAVALLARO 
40037ac29055SGiuseppe CAVALLARO 	return 0;
40047ac29055SGiuseppe CAVALLARO }
40057ac29055SGiuseppe CAVALLARO 
4006466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev)
40077ac29055SGiuseppe CAVALLARO {
4008466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
4009466c5ac8SMathieu Olivari 
4010466c5ac8SMathieu Olivari 	debugfs_remove_recursive(priv->dbgfs_dir);
40117ac29055SGiuseppe CAVALLARO }
401250fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
40137ac29055SGiuseppe CAVALLARO 
40147ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = {
40157ac6653aSJeff Kirsher 	.ndo_open = stmmac_open,
40167ac6653aSJeff Kirsher 	.ndo_start_xmit = stmmac_xmit,
40177ac6653aSJeff Kirsher 	.ndo_stop = stmmac_release,
40187ac6653aSJeff Kirsher 	.ndo_change_mtu = stmmac_change_mtu,
40197ac6653aSJeff Kirsher 	.ndo_fix_features = stmmac_fix_features,
4020d2afb5bdSGiuseppe CAVALLARO 	.ndo_set_features = stmmac_set_features,
402101789349SJiri Pirko 	.ndo_set_rx_mode = stmmac_set_rx_mode,
40227ac6653aSJeff Kirsher 	.ndo_tx_timeout = stmmac_tx_timeout,
40237ac6653aSJeff Kirsher 	.ndo_do_ioctl = stmmac_ioctl,
40244dbbe8ddSJose Abreu 	.ndo_setup_tc = stmmac_setup_tc,
40254993e5b3SJose Abreu 	.ndo_select_queue = stmmac_select_queue,
40267ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
40277ac6653aSJeff Kirsher 	.ndo_poll_controller = stmmac_poll_controller,
40287ac6653aSJeff Kirsher #endif
4029a830405eSBhadram Varka 	.ndo_set_mac_address = stmmac_set_mac_address,
40307ac6653aSJeff Kirsher };
40317ac6653aSJeff Kirsher 
403234877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv)
403334877a15SJose Abreu {
403434877a15SJose Abreu 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
403534877a15SJose Abreu 		return;
403634877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
403734877a15SJose Abreu 		return;
403834877a15SJose Abreu 
403934877a15SJose Abreu 	netdev_err(priv->dev, "Reset adapter.\n");
404034877a15SJose Abreu 
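	/* Hold the RTNL lock and bounce the interface (close/open) to bring
	 * the hardware back to a sane state after a fatal error.
	 */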
404134877a15SJose Abreu 	rtnl_lock();
404234877a15SJose Abreu 	netif_trans_update(priv->dev);
404334877a15SJose Abreu 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
404434877a15SJose Abreu 		usleep_range(1000, 2000);
404534877a15SJose Abreu 
404634877a15SJose Abreu 	set_bit(STMMAC_DOWN, &priv->state);
404734877a15SJose Abreu 	dev_close(priv->dev);
404800f54e68SPetr Machata 	dev_open(priv->dev, NULL);
404934877a15SJose Abreu 	clear_bit(STMMAC_DOWN, &priv->state);
405034877a15SJose Abreu 	clear_bit(STMMAC_RESETING, &priv->state);
405134877a15SJose Abreu 	rtnl_unlock();
405234877a15SJose Abreu }
405334877a15SJose Abreu 
405434877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
405534877a15SJose Abreu {
405634877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
405734877a15SJose Abreu 			service_task);
405834877a15SJose Abreu 
405934877a15SJose Abreu 	stmmac_reset_subtask(priv);
406034877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
406134877a15SJose Abreu }
406234877a15SJose Abreu 
40637ac6653aSJeff Kirsher /**
4064cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
406532ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
4066732fdf0eSGiuseppe CAVALLARO  *  Description: this function is to configure the MAC device according to
4067732fdf0eSGiuseppe CAVALLARO  *  some platform parameters or the HW capability register. It prepares the
4068732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain modes and to set up either enhanced or
4069732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
4070cf3f047bSGiuseppe CAVALLARO  */
4071cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
4072cf3f047bSGiuseppe CAVALLARO {
40735f0456b4SJose Abreu 	int ret;
4074cf3f047bSGiuseppe CAVALLARO 
40759f93ac8dSLABBE Corentin 	/* dwmac-sun8i only works in chain mode */
40769f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i)
40779f93ac8dSLABBE Corentin 		chain_mode = 1;
40785f0456b4SJose Abreu 	priv->chain_mode = chain_mode;
40799f93ac8dSLABBE Corentin 
40805f0456b4SJose Abreu 	/* Initialize HW Interface */
40815f0456b4SJose Abreu 	ret = stmmac_hwif_init(priv);
40825f0456b4SJose Abreu 	if (ret)
40835f0456b4SJose Abreu 		return ret;
40844a7d666aSGiuseppe CAVALLARO 
4085cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capability (new GMAC newer than 3.50a) */
4086cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4087cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
408838ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
4089cf3f047bSGiuseppe CAVALLARO 
4090cf3f047bSGiuseppe CAVALLARO 		/* We can override some gmac/dma configuration fields
4091cf3f047bSGiuseppe CAVALLARO 		 * (e.g. enh_desc, tx_coe) that are passed through the
4092cf3f047bSGiuseppe CAVALLARO 		 * platform with the values from the HW capability
4093cf3f047bSGiuseppe CAVALLARO 		 * register (if supported).
4094cf3f047bSGiuseppe CAVALLARO 		 */
4095cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4096cf3f047bSGiuseppe CAVALLARO 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
40973fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
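		/* The HW hash table size is encoded: 32 << hash_tb_sz gives
		 * the number of multicast filter bins used by the filter code.
		 */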
4098b8ef7020SBiao Huang 		if (priv->dma_cap.hash_tb_sz) {
4099b8ef7020SBiao Huang 			priv->hw->multicast_filter_bins =
4100b8ef7020SBiao Huang 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
4101b8ef7020SBiao Huang 			priv->hw->mcast_bits_log2 =
4102b8ef7020SBiao Huang 					ilog2(priv->hw->multicast_filter_bins);
4103b8ef7020SBiao Huang 		}
410438912bdbSDeepak SIKRI 
4105a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
4106a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
4107a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
4108a8df35d4SEzequiel Garcia 		else
410938912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4110a8df35d4SEzequiel Garcia 
4111f748be53SAlexandre TORGUE 		/* In case of GMAC4 rx_coe is from HW cap register. */
4112f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
411338912bdbSDeepak SIKRI 
411438912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
411538912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
411638912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
411738912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
411838912bdbSDeepak SIKRI 
411938ddc59dSLABBE Corentin 	} else {
412038ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
412138ddc59dSLABBE Corentin 	}
4122cf3f047bSGiuseppe CAVALLARO 
4123d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
4124d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
412538ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4126f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
412738ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4128d2afb5bdSGiuseppe CAVALLARO 	}
4129cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
413038ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
4131cf3f047bSGiuseppe CAVALLARO 
4132cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
413338ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4134cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
4135cf3f047bSGiuseppe CAVALLARO 	}
4136cf3f047bSGiuseppe CAVALLARO 
4137f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
413838ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
4139f748be53SAlexandre TORGUE 
41407cfde0afSJose Abreu 	/* Run HW quirks, if any */
41417cfde0afSJose Abreu 	if (priv->hwif_quirks) {
41427cfde0afSJose Abreu 		ret = priv->hwif_quirks(priv);
41437cfde0afSJose Abreu 		if (ret)
41447cfde0afSJose Abreu 			return ret;
41457cfde0afSJose Abreu 	}
41467cfde0afSJose Abreu 
41473b509466SJose Abreu 	/* Rx Watchdog is available in the COREs newer than 3.40.
41483b509466SJose Abreu 	 * In some cases, for example on buggy HW, this feature
41493b509466SJose Abreu 	 * has to be disabled and this can be done by passing the
41503b509466SJose Abreu 	 * riwt_off field from the platform.
41513b509466SJose Abreu 	 */
41523b509466SJose Abreu 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
41533b509466SJose Abreu 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
41543b509466SJose Abreu 		priv->use_riwt = 1;
41553b509466SJose Abreu 		dev_info(priv->device,
41563b509466SJose Abreu 			 "Enable RX Mitigation via HW Watchdog Timer\n");
41573b509466SJose Abreu 	}
41583b509466SJose Abreu 
4159c24602efSGiuseppe CAVALLARO 	return 0;
4160cf3f047bSGiuseppe CAVALLARO }
4161cf3f047bSGiuseppe CAVALLARO 
4162cf3f047bSGiuseppe CAVALLARO /**
4163bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
4164bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
4165ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
4166e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
4167bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function used to
4168bfab27a1SGiuseppe CAVALLARO  * call alloc_etherdev and allocate the priv structure.
41699afec6efSAndy Shevchenko  * Return:
417015ffac73SJoachim Eastwood  * 0 on success, otherwise errno.
41717ac6653aSJeff Kirsher  */
417215ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
4173cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
4174e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
41757ac6653aSJeff Kirsher {
4176bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
4177bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
41788fce3331SJose Abreu 	u32 queue, maxq;
4179c22a3f48SJoao Pinto 	int ret = 0;
41807ac6653aSJeff Kirsher 
41819737070cSJisheng Zhang 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
41829737070cSJisheng Zhang 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
418341de8d4cSJoe Perches 	if (!ndev)
418415ffac73SJoachim Eastwood 		return -ENOMEM;
41857ac6653aSJeff Kirsher 
4186bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
41877ac6653aSJeff Kirsher 
4188bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
4189bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
4190bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
4191bfab27a1SGiuseppe CAVALLARO 
4192bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
4193cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
4194cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
4195e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
4196e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
4197e56788cfSJoachim Eastwood 
4198e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
4199e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
4200e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
4201e56788cfSJoachim Eastwood 
4202a51645f7SPetr Štetiar 	if (!IS_ERR_OR_NULL(res->mac))
4203e56788cfSJoachim Eastwood 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4204bfab27a1SGiuseppe CAVALLARO 
4205a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
4206803f8fc4SJoachim Eastwood 
4207cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
4208cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
4209cf3f047bSGiuseppe CAVALLARO 
421034877a15SJose Abreu 	/* Allocate workqueue */
421134877a15SJose Abreu 	priv->wq = create_singlethread_workqueue("stmmac_wq");
421234877a15SJose Abreu 	if (!priv->wq) {
421334877a15SJose Abreu 		dev_err(priv->device, "failed to create workqueue\n");
42149737070cSJisheng Zhang 		return -ENOMEM;
421534877a15SJose Abreu 	}
421634877a15SJose Abreu 
421734877a15SJose Abreu 	INIT_WORK(&priv->service_task, stmmac_service_task);
421834877a15SJose Abreu 
4219cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied (XXX CRS XXX:
4220ceb69499SGiuseppe CAVALLARO 	 * this needs to have multiple instances)
4221ceb69499SGiuseppe CAVALLARO 	 */
4222cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
4223cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
4224cf3f047bSGiuseppe CAVALLARO 
422590f522a2SEugeniy Paltsev 	if (priv->plat->stmmac_rst) {
422690f522a2SEugeniy Paltsev 		ret = reset_control_assert(priv->plat->stmmac_rst);
4227f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
422890f522a2SEugeniy Paltsev 		/* Some reset controllers provide only a reset callback
422990f522a2SEugeniy Paltsev 		 * instead of the assert + deassert callback pair.
423090f522a2SEugeniy Paltsev 		 */
423190f522a2SEugeniy Paltsev 		if (ret == -ENOTSUPP)
423290f522a2SEugeniy Paltsev 			reset_control_reset(priv->plat->stmmac_rst);
423390f522a2SEugeniy Paltsev 	}
4234c5e4ddbdSChen-Yu Tsai 
4235cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
4236c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
4237c24602efSGiuseppe CAVALLARO 	if (ret)
423862866e98SChen-Yu Tsai 		goto error_hw_init;
4239cf3f047bSGiuseppe CAVALLARO 
4240b561af36SVinod Koul 	stmmac_check_ether_addr(priv);
4241b561af36SVinod Koul 
4242c22a3f48SJoao Pinto 	/* Configure real RX and TX queues */
4243c02b7a91SJoao Pinto 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4244c02b7a91SJoao Pinto 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4245c22a3f48SJoao Pinto 
4246cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
4247cf3f047bSGiuseppe CAVALLARO 
4248cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4249cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
4250f748be53SAlexandre TORGUE 
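	/* Advertise HW TC offload only when the core's TC support
	 * initialises successfully (see stmmac_tc_init() below).
	 */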
42514dbbe8ddSJose Abreu 	ret = stmmac_tc_init(priv, priv);
42524dbbe8ddSJose Abreu 	if (!ret)
42534dbbe8ddSJose Abreu 		ndev->hw_features |= NETIF_F_HW_TC;
42554dbbe8ddSJose Abreu 
4256f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
42579edfa7daSNiklas Cassel 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4258f748be53SAlexandre TORGUE 		priv->tso = true;
425938ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
4260f748be53SAlexandre TORGUE 	}
4261a993db88SJose Abreu 
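	/* If the HW reports a DMA addressing width, try to use it for the
	 * DMA mask; fall back to 32-bit addressing when that fails.
	 */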
4262a993db88SJose Abreu 	if (priv->dma_cap.addr64) {
4263a993db88SJose Abreu 		ret = dma_set_mask_and_coherent(device,
4264a993db88SJose Abreu 				DMA_BIT_MASK(priv->dma_cap.addr64));
4265a993db88SJose Abreu 		if (!ret) {
4266a993db88SJose Abreu 			dev_info(priv->device, "Using %d bits DMA width\n",
4267a993db88SJose Abreu 				 priv->dma_cap.addr64);
4268a993db88SJose Abreu 		} else {
4269a993db88SJose Abreu 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
4270a993db88SJose Abreu 			if (ret) {
4271a993db88SJose Abreu 				dev_err(priv->device, "Failed to set DMA Mask\n");
4272a993db88SJose Abreu 				goto error_hw_init;
4273a993db88SJose Abreu 			}
4274a993db88SJose Abreu 
4275a993db88SJose Abreu 			priv->dma_cap.addr64 = 32;
4276a993db88SJose Abreu 		}
4277a993db88SJose Abreu 	}
4278a993db88SJose Abreu 
4279bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4280bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
42817ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
42827ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
4283ab188e8fSElad Nachman 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
42847ac6653aSJeff Kirsher #endif
42857ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
42867ac6653aSJeff Kirsher 
428744770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
428844770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
428944770e11SJarod Wilson 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
429044770e11SJarod Wilson 		ndev->max_mtu = JUMBO_LEN;
42917d9e6c5aSJose Abreu 	else if (priv->plat->has_xgmac)
42927d9e6c5aSJose Abreu 		ndev->max_mtu = XGMAC_JUMBO_LEN;
429344770e11SJarod Wilson 	else
429444770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4295a2cd64f3SKweh, Hock Leong 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4296a2cd64f3SKweh, Hock Leong 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4297a2cd64f3SKweh, Hock Leong 	 */
4298a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4299a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
430044770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
4301a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
4302b618ab45SHeiner Kallweit 		dev_warn(priv->device,
4303a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu having invalid value (%d)\n",
4304a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
430544770e11SJarod Wilson 
43067ac6653aSJeff Kirsher 	if (flow_ctrl)
43077ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
43087ac6653aSJeff Kirsher 
43098fce3331SJose Abreu 	/* Setup channels NAPI */
43108fce3331SJose Abreu 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4311c22a3f48SJoao Pinto 
43128fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
43138fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
43148fce3331SJose Abreu 
43158fce3331SJose Abreu 		ch->priv_data = priv;
43168fce3331SJose Abreu 		ch->index = queue;
43178fce3331SJose Abreu 
43184ccb4585SJose Abreu 		if (queue < priv->plat->rx_queues_to_use) {
43194ccb4585SJose Abreu 			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
43208fce3331SJose Abreu 				       NAPI_POLL_WEIGHT);
4321c22a3f48SJoao Pinto 		}
43224ccb4585SJose Abreu 		if (queue < priv->plat->tx_queues_to_use) {
43234ccb4585SJose Abreu 			netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
43244ccb4585SJose Abreu 				       NAPI_POLL_WEIGHT);
43254ccb4585SJose Abreu 		}
43264ccb4585SJose Abreu 	}
43277ac6653aSJeff Kirsher 
432829555fa3SThierry Reding 	mutex_init(&priv->lock);
43297ac6653aSJeff Kirsher 
4330cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform
4331cd7201f4SGiuseppe CAVALLARO 	 * this means that the CSR Clock Range selection cannot be
4332cd7201f4SGiuseppe CAVALLARO 	 * changed at run-time and it is fixed. Otherwise, the driver
4333cd7201f4SGiuseppe CAVALLARO 	 * will try to set the MDC clock dynamically according to the
4334cd7201f4SGiuseppe CAVALLARO 	 * actual CSR input clock.
4335cd7201f4SGiuseppe CAVALLARO 	 */
43365e7f7fc5SBiao Huang 	if (priv->plat->clk_csr >= 0)
4337cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
43385e7f7fc5SBiao Huang 	else
43395e7f7fc5SBiao Huang 		stmmac_clk_csr_set(priv);
4340cd7201f4SGiuseppe CAVALLARO 
4341e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
4342e58bb43fSGiuseppe CAVALLARO 
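	/* The MDIO bus is only registered when the link is not managed
	 * through a PCS (RGMII in-band, TBI or RTBI).
	 */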
43433fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
43443fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
43453fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
43464bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
43474bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
43484bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
4349b618ab45SHeiner Kallweit 			dev_err(priv->device,
435038ddc59dSLABBE Corentin 				"%s: MDIO bus (id: %d) registration failed",
43514bfcbd7aSFrancesco Virlinzi 				__func__, priv->plat->bus_id);
43526a81c26fSViresh Kumar 			goto error_mdio_register;
43534bfcbd7aSFrancesco Virlinzi 		}
4354e58bb43fSGiuseppe CAVALLARO 	}
43554bfcbd7aSFrancesco Virlinzi 
435674371272SJose Abreu 	ret = stmmac_phy_setup(priv);
435774371272SJose Abreu 	if (ret) {
435874371272SJose Abreu 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
435974371272SJose Abreu 		goto error_phy_setup;
436074371272SJose Abreu 	}
436174371272SJose Abreu 
436257016590SFlorian Fainelli 	ret = register_netdev(ndev);
4363b2eb09afSFlorian Fainelli 	if (ret) {
4364b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
436557016590SFlorian Fainelli 			__func__, ret);
4366b2eb09afSFlorian Fainelli 		goto error_netdev_register;
4367b2eb09afSFlorian Fainelli 	}
43687ac6653aSJeff Kirsher 
43695f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
43705f2b8b62SThierry Reding 	ret = stmmac_init_fs(ndev);
43715f2b8b62SThierry Reding 	if (ret < 0)
43725f2b8b62SThierry Reding 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
43735f2b8b62SThierry Reding 			    __func__);
43745f2b8b62SThierry Reding #endif
43755f2b8b62SThierry Reding 
437657016590SFlorian Fainelli 	return ret;
43777ac6653aSJeff Kirsher 
43786a81c26fSViresh Kumar error_netdev_register:
437974371272SJose Abreu 	phylink_destroy(priv->phylink);
438074371272SJose Abreu error_phy_setup:
4381b2eb09afSFlorian Fainelli 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4382b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4383b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4384b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
43857ac6653aSJeff Kirsher error_mdio_register:
43868fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
43878fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
4388c22a3f48SJoao Pinto 
43894ccb4585SJose Abreu 		if (queue < priv->plat->rx_queues_to_use)
43904ccb4585SJose Abreu 			netif_napi_del(&ch->rx_napi);
43914ccb4585SJose Abreu 		if (queue < priv->plat->tx_queues_to_use)
43924ccb4585SJose Abreu 			netif_napi_del(&ch->tx_napi);
4393c22a3f48SJoao Pinto 	}
439462866e98SChen-Yu Tsai error_hw_init:
439534877a15SJose Abreu 	destroy_workqueue(priv->wq);
43967ac6653aSJeff Kirsher 
439715ffac73SJoachim Eastwood 	return ret;
43987ac6653aSJeff Kirsher }
4399b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
44007ac6653aSJeff Kirsher 
44017ac6653aSJeff Kirsher /**
44027ac6653aSJeff Kirsher  * stmmac_dvr_remove
4403f4e7bd81SJoachim Eastwood  * @dev: device pointer
44047ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4405bfab27a1SGiuseppe CAVALLARO  * changes the link status, and releases the DMA descriptor rings.
44067ac6653aSJeff Kirsher  */
4407f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev)
44087ac6653aSJeff Kirsher {
4409f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
44107ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
44117ac6653aSJeff Kirsher 
441238ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver", __func__);
44137ac6653aSJeff Kirsher 
44145f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
44155f2b8b62SThierry Reding 	stmmac_exit_fs(ndev);
44165f2b8b62SThierry Reding #endif
4417ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
44187ac6653aSJeff Kirsher 
4419c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
44207ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
44217ac6653aSJeff Kirsher 	unregister_netdev(ndev);
442274371272SJose Abreu 	phylink_destroy(priv->phylink);
4423f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
4424f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
4425f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->pclk);
4426f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->stmmac_clk);
44273fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
44283fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
44293fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4430e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
443134877a15SJose Abreu 	destroy_workqueue(priv->wq);
443229555fa3SThierry Reding 	mutex_destroy(&priv->lock);
44337ac6653aSJeff Kirsher 
44347ac6653aSJeff Kirsher 	return 0;
44357ac6653aSJeff Kirsher }
4436b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
44377ac6653aSJeff Kirsher 
4438732fdf0eSGiuseppe CAVALLARO /**
4439732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
4440f4e7bd81SJoachim Eastwood  * @dev: device pointer
4441732fdf0eSGiuseppe CAVALLARO  * Description: this is the function to suspend the device and it is called
4442732fdf0eSGiuseppe CAVALLARO  * by the platform driver to stop the network queues, program the PMT
4443732fdf0eSGiuseppe CAVALLARO  * register (for WoL), and clean and release the driver resources.
4444732fdf0eSGiuseppe CAVALLARO  */
4445f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
44467ac6653aSJeff Kirsher {
4447f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
44487ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
44497ac6653aSJeff Kirsher 
44507ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
44517ac6653aSJeff Kirsher 		return 0;
44527ac6653aSJeff Kirsher 
445374371272SJose Abreu 	phylink_stop(priv->phylink);
4454102463b1SFrancesco Virlinzi 
445529555fa3SThierry Reding 	mutex_lock(&priv->lock);
44567ac6653aSJeff Kirsher 
44577ac6653aSJeff Kirsher 	netif_device_detach(ndev);
4458c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
44597ac6653aSJeff Kirsher 
4460c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
44617ac6653aSJeff Kirsher 
44627ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
4463ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
4464c24602efSGiuseppe CAVALLARO 
44657ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
446689f7f2cfSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
4467c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, priv->wolopts);
446889f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
446989f7f2cfSSrinivas Kandagatla 	} else {
4470c10d4c82SJose Abreu 		stmmac_mac_set(priv, priv->ioaddr, false);
4471db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
4472ba1377ffSGiuseppe CAVALLARO 		/* Disable clocks since Wake-on-LAN is not used */
4473f573c0b9Sjpinto 		clk_disable(priv->plat->pclk);
4474f573c0b9Sjpinto 		clk_disable(priv->plat->stmmac_clk);
4475ba1377ffSGiuseppe CAVALLARO 	}
447629555fa3SThierry Reding 	mutex_unlock(&priv->lock);
44772d871aa0SVince Bridgers 
4478bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
44797ac6653aSJeff Kirsher 	return 0;
44807ac6653aSJeff Kirsher }
4481b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
44827ac6653aSJeff Kirsher 
4483732fdf0eSGiuseppe CAVALLARO /**
448454139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
448554139cf3SJoao Pinto  * @priv: driver private structure
448654139cf3SJoao Pinto  */
448754139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
448854139cf3SJoao Pinto {
448954139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4490ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
449154139cf3SJoao Pinto 	u32 queue;
449254139cf3SJoao Pinto 
449354139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
449454139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
449554139cf3SJoao Pinto 
449654139cf3SJoao Pinto 		rx_q->cur_rx = 0;
449754139cf3SJoao Pinto 		rx_q->dirty_rx = 0;
449854139cf3SJoao Pinto 	}
449954139cf3SJoao Pinto 
4500ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
4501ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4502ce736788SJoao Pinto 
4503ce736788SJoao Pinto 		tx_q->cur_tx = 0;
4504ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
45058d212a9eSNiklas Cassel 		tx_q->mss = 0;
4506ce736788SJoao Pinto 	}
450754139cf3SJoao Pinto }
450854139cf3SJoao Pinto 
450954139cf3SJoao Pinto /**
4510732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
4511f4e7bd81SJoachim Eastwood  * @dev: device pointer
4512732fdf0eSGiuseppe CAVALLARO  * Description: on resume, this function is invoked to set up the DMA and CORE
4513732fdf0eSGiuseppe CAVALLARO  * in a usable state.
4514732fdf0eSGiuseppe CAVALLARO  */
4515f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
45167ac6653aSJeff Kirsher {
4517f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
45187ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
45197ac6653aSJeff Kirsher 
45207ac6653aSJeff Kirsher 	if (!netif_running(ndev))
45217ac6653aSJeff Kirsher 		return 0;
45227ac6653aSJeff Kirsher 
45237ac6653aSJeff Kirsher 	/* The Power Down bit in the PM register is cleared
45247ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
45257ac6653aSJeff Kirsher 	 * is received. Anyway, it's better to manually clear
45267ac6653aSJeff Kirsher 	 * this bit because it can generate problems while resuming
4527ceb69499SGiuseppe CAVALLARO 	 * from other devices (e.g. serial console).
4528ceb69499SGiuseppe CAVALLARO 	 */
4529623997fbSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
453029555fa3SThierry Reding 		mutex_lock(&priv->lock);
4531c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, 0);
453229555fa3SThierry Reding 		mutex_unlock(&priv->lock);
453389f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
4534623997fbSSrinivas Kandagatla 	} else {
4535db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
45368d45e42bSLABBE Corentin 		/* enable the clk previously disabled */
4537f573c0b9Sjpinto 		clk_enable(priv->plat->stmmac_clk);
4538f573c0b9Sjpinto 		clk_enable(priv->plat->pclk);
4539623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
4540623997fbSSrinivas Kandagatla 		if (priv->mii)
4541623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
4542623997fbSSrinivas Kandagatla 	}
45437ac6653aSJeff Kirsher 
45447ac6653aSJeff Kirsher 	netif_device_attach(ndev);
45457ac6653aSJeff Kirsher 
454629555fa3SThierry Reding 	mutex_lock(&priv->lock);
4547f55d84b0SVincent Palatin 
454854139cf3SJoao Pinto 	stmmac_reset_queues_param(priv);
454954139cf3SJoao Pinto 
4550ae79a639SGiuseppe CAVALLARO 	stmmac_clear_descriptors(priv);
4551ae79a639SGiuseppe CAVALLARO 
4552fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
4553d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
4554ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
45557ac6653aSJeff Kirsher 
4556c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
45577ac6653aSJeff Kirsher 
4558c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
45597ac6653aSJeff Kirsher 
456029555fa3SThierry Reding 	mutex_unlock(&priv->lock);
4561102463b1SFrancesco Virlinzi 
456274371272SJose Abreu 	phylink_start(priv->phylink);
4563102463b1SFrancesco Virlinzi 
45647ac6653aSJeff Kirsher 	return 0;
45657ac6653aSJeff Kirsher }
4566b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
4567ba27ec66SGiuseppe CAVALLARO 
45687ac6653aSJeff Kirsher #ifndef MODULE
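/* Parse the "stmmaceth=" kernel command line options, e.g. (illustrative
 * values only) stmmaceth=debug:16,phyaddr:1,watchdog:4000
 */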
45697ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
45707ac6653aSJeff Kirsher {
45717ac6653aSJeff Kirsher 	char *opt;
45727ac6653aSJeff Kirsher 
45737ac6653aSJeff Kirsher 	if (!str || !*str)
45747ac6653aSJeff Kirsher 		return -EINVAL;
45757ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
45767ac6653aSJeff Kirsher 		if (!strncmp(opt, "debug:", 6)) {
4577ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
45787ac6653aSJeff Kirsher 				goto err;
45797ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4580ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
45817ac6653aSJeff Kirsher 				goto err;
45827ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4583ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
45847ac6653aSJeff Kirsher 				goto err;
45857ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "tc:", 3)) {
4586ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
45877ac6653aSJeff Kirsher 				goto err;
45887ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "watchdog:", 9)) {
4589ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
45907ac6653aSJeff Kirsher 				goto err;
45917ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4592ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
45937ac6653aSJeff Kirsher 				goto err;
45947ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "pause:", 6)) {
4595ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
45967ac6653aSJeff Kirsher 				goto err;
4597506f669cSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4598d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
4599d765955dSGiuseppe CAVALLARO 				goto err;
46004a7d666aSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "chain_mode:", 11)) {
46014a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
46024a7d666aSGiuseppe CAVALLARO 				goto err;
46037ac6653aSJeff Kirsher 		}
46047ac6653aSJeff Kirsher 	}
46057ac6653aSJeff Kirsher 	return 0;
46067ac6653aSJeff Kirsher 
46077ac6653aSJeff Kirsher err:
46087ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion", __func__);
46097ac6653aSJeff Kirsher 	return -EINVAL;
46107ac6653aSJeff Kirsher }
46117ac6653aSJeff Kirsher 
46127ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
4613ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
46146fc0d0f2SGiuseppe Cavallaro 
4615466c5ac8SMathieu Olivari static int __init stmmac_init(void)
4616466c5ac8SMathieu Olivari {
4617466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4618466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
4619466c5ac8SMathieu Olivari 	if (!stmmac_fs_dir) {
4620466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4621466c5ac8SMathieu Olivari 
4622466c5ac8SMathieu Olivari 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4623466c5ac8SMathieu Olivari 			pr_err("ERROR %s, debugfs create directory failed\n",
4624466c5ac8SMathieu Olivari 			       STMMAC_RESOURCE_NAME);
4625466c5ac8SMathieu Olivari 
4626466c5ac8SMathieu Olivari 			return -ENOMEM;
4627466c5ac8SMathieu Olivari 		}
4628466c5ac8SMathieu Olivari 	}
4629466c5ac8SMathieu Olivari #endif
4630466c5ac8SMathieu Olivari 
4631466c5ac8SMathieu Olivari 	return 0;
4632466c5ac8SMathieu Olivari }
4633466c5ac8SMathieu Olivari 
4634466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
4635466c5ac8SMathieu Olivari {
4636466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4637466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
4638466c5ac8SMathieu Olivari #endif
4639466c5ac8SMathieu Olivari }
4640466c5ac8SMathieu Olivari 
4641466c5ac8SMathieu Olivari module_init(stmmac_init)
4642466c5ac8SMathieu Olivari module_exit(stmmac_exit)
4643466c5ac8SMathieu Olivari 
46446fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
46456fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
46466fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
4647