// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver uses ring mode to manage tx and rx descriptors,
 * but the user may force it to use chain mode instead of ring mode.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

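/* Usage note (illustrative, not from the original sources): the knobs above
 * can be given at module load time, e.g. "modprobe stmmac chain_mode=1
 * buf_sz=4096 debug=16"; the parameters declared with mode 0644 can also be
 * changed later through /sys/module/stmmac/parameters/.
 */
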
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default value in
 * case of errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

/**
 * stmmac_stop_all_queues - Stop all queues
 * @priv: driver private structure
 */
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_start_all_queues - Start all queues
 * @priv: driver private structure
 */
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

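/* Worked example (illustrative): with no platform-provided clk_csr and a
 * 75 MHz csr clock, the range checks above select STMMAC_CSR_60_100M, which
 * the MAC core then maps onto a fixed MDC divider for MDIO transactions.
 */
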
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

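/* Ring-occupancy arithmetic, worked example (illustrative numbers): assuming
 * DMA_TX_SIZE = 512, cur_tx = 500 and dirty_tx = 10, stmmac_tx_avail()
 * returns 512 - 500 + 10 - 1 = 21 free descriptors; the "- 1" keeps one slot
 * unused so a completely full ring can be told apart from an empty one.
 * Likewise stmmac_rx_dirty() counts the descriptors still to be refilled
 * between dirty_rx and cur_rx, handling the wrap-around case.
 */
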
/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function verifies and enters LPI mode in case EEE is
 * enabled.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables EEE in case the LPI state is
 * true. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list pointer (used to retrieve the driver private structure)
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int tx_lpi_timer = priv->tx_lpi_timer;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, tx_lpi_timer);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     tx_lpi_timer);
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, passes it to the
 * stack and also performs some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet timestamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 sec_inc = 0;
	u32 value = 0;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1, any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				xmac, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

		/* calculate default added value:
		 * formula is:
		 * addend = (2^32)/freq_div_ratio;
		 * where freq_div_ratio = 1e9ns/sec_inc
		 */
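		/* Worked example (illustrative numbers): with sec_inc = 20 ns,
		 * i.e. a 50 MHz sub-second counter, and clk_ptp_rate = 100 MHz,
		 * addend = 2^32 * 50e6 / 100e6 = 2^31.
		 */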
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				(u32)now.tv_sec, now.tv_nsec);
	}

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

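/* Usage sketch (illustrative, not part of the driver): userspace selects the
 * settings handled above with the standard SIOCSHWTSTAMP ioctl and reads them
 * back with SIOCGHWTSTAMP; the interface name and socket fd are assumptions:
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name));
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */
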
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode negotiated for the link
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			priv->pause, tx_cnt);
}

static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_xgmac) {
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);

	/* If PCS is supported, check which modes it supports. */
	stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
}

903d46b7e4fSRussell King static void stmmac_mac_pcs_get_state(struct phylink_config *config,
904eeef2f6bSJose Abreu 				     struct phylink_link_state *state)
905eeef2f6bSJose Abreu {
906f213bbe8SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
907f213bbe8SJose Abreu 
908d46b7e4fSRussell King 	state->link = 0;
909f213bbe8SJose Abreu 	stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
910eeef2f6bSJose Abreu }
911eeef2f6bSJose Abreu 
91274371272SJose Abreu static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
91374371272SJose Abreu 			      const struct phylink_link_state *state)
9149ad372fcSJose Abreu {
915f213bbe8SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
916f213bbe8SJose Abreu 
917f213bbe8SJose Abreu 	stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
9189ad372fcSJose Abreu }
9199ad372fcSJose Abreu 
920eeef2f6bSJose Abreu static void stmmac_mac_an_restart(struct phylink_config *config)
921eeef2f6bSJose Abreu {
922eeef2f6bSJose Abreu 	/* Not Supported */
923eeef2f6bSJose Abreu }
924eeef2f6bSJose Abreu 
92574371272SJose Abreu static void stmmac_mac_link_down(struct phylink_config *config,
92674371272SJose Abreu 				 unsigned int mode, phy_interface_t interface)
9279ad372fcSJose Abreu {
92874371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
9299ad372fcSJose Abreu 
9309ad372fcSJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
93174371272SJose Abreu 	priv->eee_active = false;
93274371272SJose Abreu 	stmmac_eee_init(priv);
93374371272SJose Abreu 	stmmac_set_eee_pls(priv, priv->hw, false);
9349ad372fcSJose Abreu }
9359ad372fcSJose Abreu 
93674371272SJose Abreu static void stmmac_mac_link_up(struct phylink_config *config,
93791a208f2SRussell King 			       struct phy_device *phy,
93874371272SJose Abreu 			       unsigned int mode, phy_interface_t interface,
93991a208f2SRussell King 			       int speed, int duplex,
94091a208f2SRussell King 			       bool tx_pause, bool rx_pause)
9419ad372fcSJose Abreu {
94274371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
94346f69dedSJose Abreu 	u32 ctrl;
94446f69dedSJose Abreu 
945f213bbe8SJose Abreu 	stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
946f213bbe8SJose Abreu 
94746f69dedSJose Abreu 	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
94846f69dedSJose Abreu 	ctrl &= ~priv->hw->link.speed_mask;
94946f69dedSJose Abreu 
95046f69dedSJose Abreu 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
95146f69dedSJose Abreu 		switch (speed) {
95246f69dedSJose Abreu 		case SPEED_10000:
95346f69dedSJose Abreu 			ctrl |= priv->hw->link.xgmii.speed10000;
95446f69dedSJose Abreu 			break;
95546f69dedSJose Abreu 		case SPEED_5000:
95646f69dedSJose Abreu 			ctrl |= priv->hw->link.xgmii.speed5000;
95746f69dedSJose Abreu 			break;
95846f69dedSJose Abreu 		case SPEED_2500:
95946f69dedSJose Abreu 			ctrl |= priv->hw->link.xgmii.speed2500;
96046f69dedSJose Abreu 			break;
96146f69dedSJose Abreu 		default:
96246f69dedSJose Abreu 			return;
96346f69dedSJose Abreu 		}
9648a880936SJose Abreu 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
9658a880936SJose Abreu 		switch (speed) {
9668a880936SJose Abreu 		case SPEED_100000:
9678a880936SJose Abreu 			ctrl |= priv->hw->link.xlgmii.speed100000;
9688a880936SJose Abreu 			break;
9698a880936SJose Abreu 		case SPEED_50000:
9708a880936SJose Abreu 			ctrl |= priv->hw->link.xlgmii.speed50000;
9718a880936SJose Abreu 			break;
9728a880936SJose Abreu 		case SPEED_40000:
9738a880936SJose Abreu 			ctrl |= priv->hw->link.xlgmii.speed40000;
9748a880936SJose Abreu 			break;
9758a880936SJose Abreu 		case SPEED_25000:
9768a880936SJose Abreu 			ctrl |= priv->hw->link.xlgmii.speed25000;
9778a880936SJose Abreu 			break;
9788a880936SJose Abreu 		case SPEED_10000:
9798a880936SJose Abreu 			ctrl |= priv->hw->link.xgmii.speed10000;
9808a880936SJose Abreu 			break;
9818a880936SJose Abreu 		case SPEED_2500:
9828a880936SJose Abreu 			ctrl |= priv->hw->link.speed2500;
9838a880936SJose Abreu 			break;
9848a880936SJose Abreu 		case SPEED_1000:
9858a880936SJose Abreu 			ctrl |= priv->hw->link.speed1000;
9868a880936SJose Abreu 			break;
9878a880936SJose Abreu 		default:
9888a880936SJose Abreu 			return;
9898a880936SJose Abreu 		}
99046f69dedSJose Abreu 	} else {
99146f69dedSJose Abreu 		switch (speed) {
99246f69dedSJose Abreu 		case SPEED_2500:
99346f69dedSJose Abreu 			ctrl |= priv->hw->link.speed2500;
99446f69dedSJose Abreu 			break;
99546f69dedSJose Abreu 		case SPEED_1000:
99646f69dedSJose Abreu 			ctrl |= priv->hw->link.speed1000;
99746f69dedSJose Abreu 			break;
99846f69dedSJose Abreu 		case SPEED_100:
99946f69dedSJose Abreu 			ctrl |= priv->hw->link.speed100;
100046f69dedSJose Abreu 			break;
100146f69dedSJose Abreu 		case SPEED_10:
100246f69dedSJose Abreu 			ctrl |= priv->hw->link.speed10;
100346f69dedSJose Abreu 			break;
100446f69dedSJose Abreu 		default:
100546f69dedSJose Abreu 			return;
100646f69dedSJose Abreu 		}
100746f69dedSJose Abreu 	}
100846f69dedSJose Abreu 
100946f69dedSJose Abreu 	priv->speed = speed;
101046f69dedSJose Abreu 
101146f69dedSJose Abreu 	if (priv->plat->fix_mac_speed)
101246f69dedSJose Abreu 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
101346f69dedSJose Abreu 
101446f69dedSJose Abreu 	if (!duplex)
101546f69dedSJose Abreu 		ctrl &= ~priv->hw->link.duplex;
101646f69dedSJose Abreu 	else
101746f69dedSJose Abreu 		ctrl |= priv->hw->link.duplex;
101846f69dedSJose Abreu 
101946f69dedSJose Abreu 	/* Flow Control operation */
102046f69dedSJose Abreu 	if (tx_pause && rx_pause)
102146f69dedSJose Abreu 		stmmac_mac_flow_ctrl(priv, duplex);
102246f69dedSJose Abreu 
102346f69dedSJose Abreu 	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
10249ad372fcSJose Abreu 
10259ad372fcSJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
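	/* Re-enable EEE only if both the PHY and the MAC support it */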
10265b111770SJose Abreu 	if (phy && priv->dma_cap.eee) {
102774371272SJose Abreu 		priv->eee_active = phy_init_eee(phy, 1) >= 0;
102874371272SJose Abreu 		priv->eee_enabled = stmmac_eee_init(priv);
102974371272SJose Abreu 		stmmac_set_eee_pls(priv, priv->hw, true);
103074371272SJose Abreu 	}
10319ad372fcSJose Abreu }
10329ad372fcSJose Abreu 
103374371272SJose Abreu static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1034eeef2f6bSJose Abreu 	.validate = stmmac_validate,
1035d46b7e4fSRussell King 	.mac_pcs_get_state = stmmac_mac_pcs_get_state,
103674371272SJose Abreu 	.mac_config = stmmac_mac_config,
1037eeef2f6bSJose Abreu 	.mac_an_restart = stmmac_mac_an_restart,
103874371272SJose Abreu 	.mac_link_down = stmmac_mac_link_down,
103974371272SJose Abreu 	.mac_link_up = stmmac_mac_link_up,
1040eeef2f6bSJose Abreu };
1041eeef2f6bSJose Abreu 
104229feff39SJoao Pinto /**
1043732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
104432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
104532ceabcaSGiuseppe CAVALLARO  * Description: verify if the HW supports the Physical Coding Sublayer (PCS)
104632ceabcaSGiuseppe CAVALLARO  * interface, which can be used when the MAC is configured for the TBI, RTBI,
104732ceabcaSGiuseppe CAVALLARO  * or SGMII PHY interface.
104832ceabcaSGiuseppe CAVALLARO  */
1049e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1050e58bb43fSGiuseppe CAVALLARO {
1051e58bb43fSGiuseppe CAVALLARO 	int interface = priv->plat->interface;
1052e58bb43fSGiuseppe CAVALLARO 
1053e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
10540d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
10550d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
10560d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
10570d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
105838ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
10593fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
10600d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
106138ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
10623fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
1063e58bb43fSGiuseppe CAVALLARO 		}
1064e58bb43fSGiuseppe CAVALLARO 	}
1065e58bb43fSGiuseppe CAVALLARO }
1066e58bb43fSGiuseppe CAVALLARO 
10677ac6653aSJeff Kirsher /**
10687ac6653aSJeff Kirsher  * stmmac_init_phy - PHY initialization
10697ac6653aSJeff Kirsher  * @dev: net device structure
10707ac6653aSJeff Kirsher  * Description: it initializes the driver's PHY state and attaches the PHY
10717ac6653aSJeff Kirsher  * to the MAC driver.
10727ac6653aSJeff Kirsher  *  Return value:
10737ac6653aSJeff Kirsher  *  0 on success
10747ac6653aSJeff Kirsher  */
10757ac6653aSJeff Kirsher static int stmmac_init_phy(struct net_device *dev)
10767ac6653aSJeff Kirsher {
10777ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
107874371272SJose Abreu 	struct device_node *node;
107974371272SJose Abreu 	int ret;
10807ac6653aSJeff Kirsher 
10814838a540SJose Abreu 	node = priv->plat->phylink_node;
108274371272SJose Abreu 
108342e87024SJose Abreu 	if (node)
108474371272SJose Abreu 		ret = phylink_of_phy_connect(priv->phylink, node, 0);
108542e87024SJose Abreu 
108642e87024SJose Abreu 	/* Some DT bindings do not set up the PHY handle. Let's try to
108742e87024SJose Abreu 	 * parse it manually.
108842e87024SJose Abreu 	 */
108942e87024SJose Abreu 	if (!node || ret) {
109074371272SJose Abreu 		int addr = priv->plat->phy_addr;
109174371272SJose Abreu 		struct phy_device *phydev;
1092f142af2eSSrinivas Kandagatla 
109374371272SJose Abreu 		phydev = mdiobus_get_phy(priv->mii, addr);
109474371272SJose Abreu 		if (!phydev) {
109574371272SJose Abreu 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
10967ac6653aSJeff Kirsher 			return -ENODEV;
10977ac6653aSJeff Kirsher 		}
10988e99fc5fSGiuseppe Cavallaro 
109974371272SJose Abreu 		ret = phylink_connect_phy(priv->phylink, phydev);
110074371272SJose Abreu 	}
1101c51e424dSFlorian Fainelli 
110274371272SJose Abreu 	return ret;
110374371272SJose Abreu }
110474371272SJose Abreu 
110574371272SJose Abreu static int stmmac_phy_setup(struct stmmac_priv *priv)
110674371272SJose Abreu {
1107c63d1e5cSArnd Bergmann 	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
11080060c878SAlexandru Ardelean 	int mode = priv->plat->phy_interface;
110974371272SJose Abreu 	struct phylink *phylink;
111074371272SJose Abreu 
111174371272SJose Abreu 	priv->phylink_config.dev = &priv->dev->dev;
111274371272SJose Abreu 	priv->phylink_config.type = PHYLINK_NETDEV;
1113f213bbe8SJose Abreu 	priv->phylink_config.pcs_poll = true;
111474371272SJose Abreu 
11158dc6051cSJose Abreu 	if (!fwnode)
11168dc6051cSJose Abreu 		fwnode = dev_fwnode(priv->device);
11178dc6051cSJose Abreu 
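	/* Create the phylink instance that drives the MAC from the PHY or
	 * fixed-link state.
	 */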
1118c63d1e5cSArnd Bergmann 	phylink = phylink_create(&priv->phylink_config, fwnode,
111974371272SJose Abreu 				 mode, &stmmac_phylink_mac_ops);
112074371272SJose Abreu 	if (IS_ERR(phylink))
112174371272SJose Abreu 		return PTR_ERR(phylink);
112274371272SJose Abreu 
112374371272SJose Abreu 	priv->phylink = phylink;
11247ac6653aSJeff Kirsher 	return 0;
11257ac6653aSJeff Kirsher }
11267ac6653aSJeff Kirsher 
112771fedb01SJoao Pinto static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1128c24602efSGiuseppe CAVALLARO {
112954139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
113071fedb01SJoao Pinto 	void *head_rx;
113154139cf3SJoao Pinto 	u32 queue;
113254139cf3SJoao Pinto 
113354139cf3SJoao Pinto 	/* Display RX rings */
113454139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
113554139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
113654139cf3SJoao Pinto 
113754139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1138d0225e7dSAlexandre TORGUE 
113971fedb01SJoao Pinto 		if (priv->extend_desc)
114054139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
114171fedb01SJoao Pinto 		else
114254139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
114371fedb01SJoao Pinto 
114471fedb01SJoao Pinto 		/* Display RX ring */
114542de047dSJose Abreu 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
11465bacd778SLABBE Corentin 	}
114754139cf3SJoao Pinto }
1148d0225e7dSAlexandre TORGUE 
114971fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv)
115071fedb01SJoao Pinto {
1151ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
115271fedb01SJoao Pinto 	void *head_tx;
1153ce736788SJoao Pinto 	u32 queue;
1154ce736788SJoao Pinto 
1155ce736788SJoao Pinto 	/* Display TX rings */
1156ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1157ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1158ce736788SJoao Pinto 
1159ce736788SJoao Pinto 		pr_info("\tTX Queue %d rings\n", queue);
116071fedb01SJoao Pinto 
116171fedb01SJoao Pinto 		if (priv->extend_desc)
1162ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
1163579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1164579a25a8SJose Abreu 			head_tx = (void *)tx_q->dma_entx;
116571fedb01SJoao Pinto 		else
1166ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
116771fedb01SJoao Pinto 
116842de047dSJose Abreu 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1169c24602efSGiuseppe CAVALLARO 	}
1170ce736788SJoao Pinto }
1171c24602efSGiuseppe CAVALLARO 
117271fedb01SJoao Pinto static void stmmac_display_rings(struct stmmac_priv *priv)
117371fedb01SJoao Pinto {
117471fedb01SJoao Pinto 	/* Display RX ring */
117571fedb01SJoao Pinto 	stmmac_display_rx_rings(priv);
117671fedb01SJoao Pinto 
117771fedb01SJoao Pinto 	/* Display TX ring */
117871fedb01SJoao Pinto 	stmmac_display_tx_rings(priv);
117971fedb01SJoao Pinto }
118071fedb01SJoao Pinto 
1181286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1182286a8372SGiuseppe CAVALLARO {
1183286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1184286a8372SGiuseppe CAVALLARO 
1185b2f3a481SJose Abreu 	if (mtu >= BUF_SIZE_8KiB)
1186b2f3a481SJose Abreu 		ret = BUF_SIZE_16KiB;
1187b2f3a481SJose Abreu 	else if (mtu >= BUF_SIZE_4KiB)
1188286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1189286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1190286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1191d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1192286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1193286a8372SGiuseppe CAVALLARO 	else
1194d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1195286a8372SGiuseppe CAVALLARO 
1196286a8372SGiuseppe CAVALLARO 	return ret;
1197286a8372SGiuseppe CAVALLARO }
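
/* Worked example (a sketch, assuming the usual 1536/2K/4K/8K/16K buffer size
 * constants): a standard 1500-byte MTU keeps the 1536-byte default buffer,
 * while a 9000-byte jumbo MTU selects the 16 KiB buffer.
 */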
1198286a8372SGiuseppe CAVALLARO 
119932ceabcaSGiuseppe CAVALLARO /**
120071fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
120132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
120254139cf3SJoao Pinto  * @queue: RX queue index
120371fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors,
120432ceabcaSGiuseppe CAVALLARO  * whether basic or extended descriptors are used.
120532ceabcaSGiuseppe CAVALLARO  */
120654139cf3SJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1207c24602efSGiuseppe CAVALLARO {
120854139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
12095bacd778SLABBE Corentin 	int i;
1210c24602efSGiuseppe CAVALLARO 
121171fedb01SJoao Pinto 	/* Clear the RX descriptors */
12125bacd778SLABBE Corentin 	for (i = 0; i < DMA_RX_SIZE; i++)
12135bacd778SLABBE Corentin 		if (priv->extend_desc)
121442de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
12155bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1216583e6361SAaro Koskinen 					(i == DMA_RX_SIZE - 1),
1217583e6361SAaro Koskinen 					priv->dma_buf_sz);
12185bacd778SLABBE Corentin 		else
121942de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
12205bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1221583e6361SAaro Koskinen 					(i == DMA_RX_SIZE - 1),
1222583e6361SAaro Koskinen 					priv->dma_buf_sz);
122371fedb01SJoao Pinto }
122471fedb01SJoao Pinto 
122571fedb01SJoao Pinto /**
122671fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
122771fedb01SJoao Pinto  * @priv: driver private structure
1228ce736788SJoao Pinto  * @queue: TX queue index.
122971fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors,
123071fedb01SJoao Pinto  * whether basic or extended descriptors are used.
123171fedb01SJoao Pinto  */
1232ce736788SJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
123371fedb01SJoao Pinto {
1234ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
123571fedb01SJoao Pinto 	int i;
123671fedb01SJoao Pinto 
123771fedb01SJoao Pinto 	/* Clear the TX descriptors */
1238579a25a8SJose Abreu 	for (i = 0; i < DMA_TX_SIZE; i++) {
1239579a25a8SJose Abreu 		int last = (i == (DMA_TX_SIZE - 1));
1240579a25a8SJose Abreu 		struct dma_desc *p;
1241579a25a8SJose Abreu 
12425bacd778SLABBE Corentin 		if (priv->extend_desc)
1243579a25a8SJose Abreu 			p = &tx_q->dma_etx[i].basic;
1244579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1245579a25a8SJose Abreu 			p = &tx_q->dma_entx[i].basic;
12465bacd778SLABBE Corentin 		else
1247579a25a8SJose Abreu 			p = &tx_q->dma_tx[i];
1248579a25a8SJose Abreu 
1249579a25a8SJose Abreu 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1250579a25a8SJose Abreu 	}
1251c24602efSGiuseppe CAVALLARO }
1252c24602efSGiuseppe CAVALLARO 
1253732fdf0eSGiuseppe CAVALLARO /**
125471fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
125571fedb01SJoao Pinto  * @priv: driver private structure
125671fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors,
125771fedb01SJoao Pinto  * whether basic or extended descriptors are used.
125871fedb01SJoao Pinto  */
125971fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv)
126071fedb01SJoao Pinto {
126154139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1262ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
126354139cf3SJoao Pinto 	u32 queue;
126454139cf3SJoao Pinto 
126571fedb01SJoao Pinto 	/* Clear the RX descriptors */
126654139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
126754139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
126871fedb01SJoao Pinto 
126971fedb01SJoao Pinto 	/* Clear the TX descriptors */
1270ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1271ce736788SJoao Pinto 		stmmac_clear_tx_descriptors(priv, queue);
127271fedb01SJoao Pinto }
127371fedb01SJoao Pinto 
127471fedb01SJoao Pinto /**
1275732fdf0eSGiuseppe CAVALLARO  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1276732fdf0eSGiuseppe CAVALLARO  * @priv: driver private structure
1277732fdf0eSGiuseppe CAVALLARO  * @p: descriptor pointer
1278732fdf0eSGiuseppe CAVALLARO  * @i: descriptor index
127954139cf3SJoao Pinto  * @flags: gfp flag
128054139cf3SJoao Pinto  * @queue: RX queue index
1281732fdf0eSGiuseppe CAVALLARO  * Description: this function is called to allocate a receive buffer, perform
1282732fdf0eSGiuseppe CAVALLARO  * the DMA mapping and init the descriptor.
1283732fdf0eSGiuseppe CAVALLARO  */
1284c24602efSGiuseppe CAVALLARO static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
128554139cf3SJoao Pinto 				  int i, gfp_t flags, u32 queue)
1286c24602efSGiuseppe CAVALLARO {
128754139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
12882af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1289c24602efSGiuseppe CAVALLARO 
12902af6106aSJose Abreu 	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
12912af6106aSJose Abreu 	if (!buf->page)
129256329137SBartlomiej Zolnierkiewicz 		return -ENOMEM;
1293c24602efSGiuseppe CAVALLARO 
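	/* With Split Header (SPH) enabled, allocate a second page so the HW
	 * can place the packet payload separately from the header.
	 */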
129467afd6d1SJose Abreu 	if (priv->sph) {
129567afd6d1SJose Abreu 		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
129667afd6d1SJose Abreu 		if (!buf->sec_page)
129767afd6d1SJose Abreu 			return -ENOMEM;
129867afd6d1SJose Abreu 
129967afd6d1SJose Abreu 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
130067afd6d1SJose Abreu 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
130167afd6d1SJose Abreu 	} else {
130267afd6d1SJose Abreu 		buf->sec_page = NULL;
130367afd6d1SJose Abreu 	}
130467afd6d1SJose Abreu 
13052af6106aSJose Abreu 	buf->addr = page_pool_get_dma_addr(buf->page);
13062af6106aSJose Abreu 	stmmac_set_desc_addr(priv, p, buf->addr);
13072c520b1cSJose Abreu 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
13082c520b1cSJose Abreu 		stmmac_init_desc3(priv, p);
1309c24602efSGiuseppe CAVALLARO 
1310c24602efSGiuseppe CAVALLARO 	return 0;
1311c24602efSGiuseppe CAVALLARO }
1312c24602efSGiuseppe CAVALLARO 
131371fedb01SJoao Pinto /**
131471fedb01SJoao Pinto  * stmmac_free_rx_buffer - free RX dma buffers
131571fedb01SJoao Pinto  * @priv: private structure
131654139cf3SJoao Pinto  * @queue: RX queue index
131771fedb01SJoao Pinto  * @i: buffer index.
131871fedb01SJoao Pinto  */
131954139cf3SJoao Pinto static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
132056329137SBartlomiej Zolnierkiewicz {
132154139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
13222af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
132354139cf3SJoao Pinto 
13242af6106aSJose Abreu 	if (buf->page)
1325458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
13262af6106aSJose Abreu 	buf->page = NULL;
132767afd6d1SJose Abreu 
132867afd6d1SJose Abreu 	if (buf->sec_page)
1329458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
133067afd6d1SJose Abreu 	buf->sec_page = NULL;
133156329137SBartlomiej Zolnierkiewicz }
133256329137SBartlomiej Zolnierkiewicz 
13337ac6653aSJeff Kirsher /**
133471fedb01SJoao Pinto  * stmmac_free_tx_buffer - free TX dma buffers
133571fedb01SJoao Pinto  * @priv: private structure
1336ce736788SJoao Pinto  * @queue: TX queue index
133771fedb01SJoao Pinto  * @i: buffer index.
133871fedb01SJoao Pinto  */
1339ce736788SJoao Pinto static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
134071fedb01SJoao Pinto {
1341ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1342ce736788SJoao Pinto 
1343ce736788SJoao Pinto 	if (tx_q->tx_skbuff_dma[i].buf) {
1344ce736788SJoao Pinto 		if (tx_q->tx_skbuff_dma[i].map_as_page)
134571fedb01SJoao Pinto 			dma_unmap_page(priv->device,
1346ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].buf,
1347ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].len,
134871fedb01SJoao Pinto 				       DMA_TO_DEVICE);
134971fedb01SJoao Pinto 		else
135071fedb01SJoao Pinto 			dma_unmap_single(priv->device,
1351ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].buf,
1352ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].len,
135371fedb01SJoao Pinto 					 DMA_TO_DEVICE);
135471fedb01SJoao Pinto 	}
135571fedb01SJoao Pinto 
1356ce736788SJoao Pinto 	if (tx_q->tx_skbuff[i]) {
1357ce736788SJoao Pinto 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1358ce736788SJoao Pinto 		tx_q->tx_skbuff[i] = NULL;
1359ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].buf = 0;
1360ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].map_as_page = false;
136171fedb01SJoao Pinto 	}
136271fedb01SJoao Pinto }
136371fedb01SJoao Pinto 
136471fedb01SJoao Pinto /**
136571fedb01SJoao Pinto  * init_dma_rx_desc_rings - init the RX descriptor rings
13667ac6653aSJeff Kirsher  * @dev: net device structure
13675bacd778SLABBE Corentin  * @flags: gfp flag.
136871fedb01SJoao Pinto  * Description: this function initializes the DMA RX descriptors
13695bacd778SLABBE Corentin  * and allocates the socket buffers. It supports the chained and ring
1370286a8372SGiuseppe CAVALLARO  * modes.
13717ac6653aSJeff Kirsher  */
137271fedb01SJoao Pinto static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
13737ac6653aSJeff Kirsher {
13747ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
137554139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
13765bacd778SLABBE Corentin 	int ret = -ENOMEM;
13771d3028f4SColin Ian King 	int queue;
137854139cf3SJoao Pinto 	int i;
13797ac6653aSJeff Kirsher 
138054139cf3SJoao Pinto 	/* RX INITIALIZATION */
13815bacd778SLABBE Corentin 	netif_dbg(priv, probe, priv->dev,
13825bacd778SLABBE Corentin 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
13835bacd778SLABBE Corentin 
138454139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
138554139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
138654139cf3SJoao Pinto 
138754139cf3SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
138854139cf3SJoao Pinto 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
138954139cf3SJoao Pinto 			  (u32)rx_q->dma_rx_phy);
139054139cf3SJoao Pinto 
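		/* Reset the descriptors, then attach a fresh page-pool buffer
		 * to each of them.
		 */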
1391cbcf0999SJose Abreu 		stmmac_clear_rx_descriptors(priv, queue);
1392cbcf0999SJose Abreu 
13935bacd778SLABBE Corentin 		for (i = 0; i < DMA_RX_SIZE; i++) {
13945bacd778SLABBE Corentin 			struct dma_desc *p;
13955bacd778SLABBE Corentin 
139654139cf3SJoao Pinto 			if (priv->extend_desc)
139754139cf3SJoao Pinto 				p = &((rx_q->dma_erx + i)->basic);
139854139cf3SJoao Pinto 			else
139954139cf3SJoao Pinto 				p = rx_q->dma_rx + i;
140054139cf3SJoao Pinto 
140154139cf3SJoao Pinto 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
140254139cf3SJoao Pinto 						     queue);
14035bacd778SLABBE Corentin 			if (ret)
14045bacd778SLABBE Corentin 				goto err_init_rx_buffers;
14055bacd778SLABBE Corentin 		}
140654139cf3SJoao Pinto 
140754139cf3SJoao Pinto 		rx_q->cur_rx = 0;
140854139cf3SJoao Pinto 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
140954139cf3SJoao Pinto 
1410c24602efSGiuseppe CAVALLARO 		/* Setup the chained descriptor addresses */
1411c24602efSGiuseppe CAVALLARO 		if (priv->mode == STMMAC_CHAIN_MODE) {
141271fedb01SJoao Pinto 			if (priv->extend_desc)
14132c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_erx,
14142c520b1cSJose Abreu 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
141571fedb01SJoao Pinto 			else
14162c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_rx,
14172c520b1cSJose Abreu 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
141871fedb01SJoao Pinto 		}
141954139cf3SJoao Pinto 	}
142054139cf3SJoao Pinto 
142171fedb01SJoao Pinto 	return 0;
142254139cf3SJoao Pinto 
142371fedb01SJoao Pinto err_init_rx_buffers:
142454139cf3SJoao Pinto 	while (queue >= 0) {
142571fedb01SJoao Pinto 		while (--i >= 0)
142654139cf3SJoao Pinto 			stmmac_free_rx_buffer(priv, queue, i);
142754139cf3SJoao Pinto 
142854139cf3SJoao Pinto 		if (queue == 0)
142954139cf3SJoao Pinto 			break;
143054139cf3SJoao Pinto 
143154139cf3SJoao Pinto 		i = DMA_RX_SIZE;
143254139cf3SJoao Pinto 		queue--;
143354139cf3SJoao Pinto 	}
143454139cf3SJoao Pinto 
143571fedb01SJoao Pinto 	return ret;
143671fedb01SJoao Pinto }
143771fedb01SJoao Pinto 
143871fedb01SJoao Pinto /**
143971fedb01SJoao Pinto  * init_dma_tx_desc_rings - init the TX descriptor rings
144071fedb01SJoao Pinto  * @dev: net device structure.
144171fedb01SJoao Pinto  * Description: this function initializes the DMA TX descriptors
144271fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
144371fedb01SJoao Pinto  * modes.
144471fedb01SJoao Pinto  */
144571fedb01SJoao Pinto static int init_dma_tx_desc_rings(struct net_device *dev)
144671fedb01SJoao Pinto {
144771fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
1448ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1449ce736788SJoao Pinto 	u32 queue;
145071fedb01SJoao Pinto 	int i;
145171fedb01SJoao Pinto 
1452ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1453ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1454ce736788SJoao Pinto 
145571fedb01SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
1456ce736788SJoao Pinto 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1457ce736788SJoao Pinto 			 (u32)tx_q->dma_tx_phy);
145871fedb01SJoao Pinto 
145971fedb01SJoao Pinto 		/* Setup the chained descriptor addresses */
146071fedb01SJoao Pinto 		if (priv->mode == STMMAC_CHAIN_MODE) {
146171fedb01SJoao Pinto 			if (priv->extend_desc)
14622c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_etx,
14632c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1464579a25a8SJose Abreu 			else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
14652c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_tx,
14662c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1467c24602efSGiuseppe CAVALLARO 		}
1468286a8372SGiuseppe CAVALLARO 
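		/* Clear every descriptor and its bookkeeping entry */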
1469e3ad57c9SGiuseppe Cavallaro 		for (i = 0; i < DMA_TX_SIZE; i++) {
1470c24602efSGiuseppe CAVALLARO 			struct dma_desc *p;
1471c24602efSGiuseppe CAVALLARO 			if (priv->extend_desc)
1472ce736788SJoao Pinto 				p = &((tx_q->dma_etx + i)->basic);
1473579a25a8SJose Abreu 			else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1474579a25a8SJose Abreu 				p = &((tx_q->dma_entx + i)->basic);
1475c24602efSGiuseppe CAVALLARO 			else
1476ce736788SJoao Pinto 				p = tx_q->dma_tx + i;
1477f748be53SAlexandre TORGUE 
147844c67f85SJose Abreu 			stmmac_clear_desc(priv, p);
1479f748be53SAlexandre TORGUE 
1480ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].buf = 0;
1481ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1482ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].len = 0;
1483ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].last_segment = false;
1484ce736788SJoao Pinto 			tx_q->tx_skbuff[i] = NULL;
14854a7d666aSGiuseppe CAVALLARO 		}
1486c24602efSGiuseppe CAVALLARO 
1487ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
1488ce736788SJoao Pinto 		tx_q->cur_tx = 0;
14898d212a9eSNiklas Cassel 		tx_q->mss = 0;
1490ce736788SJoao Pinto 
1491c22a3f48SJoao Pinto 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1492c22a3f48SJoao Pinto 	}
14937ac6653aSJeff Kirsher 
149471fedb01SJoao Pinto 	return 0;
149571fedb01SJoao Pinto }
149671fedb01SJoao Pinto 
149771fedb01SJoao Pinto /**
149871fedb01SJoao Pinto  * init_dma_desc_rings - init the RX/TX descriptor rings
149971fedb01SJoao Pinto  * @dev: net device structure
150071fedb01SJoao Pinto  * @flags: gfp flag.
150171fedb01SJoao Pinto  * Description: this function initializes the DMA RX/TX descriptors
150271fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
150371fedb01SJoao Pinto  * modes.
150471fedb01SJoao Pinto  */
150571fedb01SJoao Pinto static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
150671fedb01SJoao Pinto {
150771fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
150871fedb01SJoao Pinto 	int ret;
150971fedb01SJoao Pinto 
151071fedb01SJoao Pinto 	ret = init_dma_rx_desc_rings(dev, flags);
151171fedb01SJoao Pinto 	if (ret)
151271fedb01SJoao Pinto 		return ret;
151371fedb01SJoao Pinto 
151471fedb01SJoao Pinto 	ret = init_dma_tx_desc_rings(dev);
151571fedb01SJoao Pinto 
15165bacd778SLABBE Corentin 	stmmac_clear_descriptors(priv);
15177ac6653aSJeff Kirsher 
1518c24602efSGiuseppe CAVALLARO 	if (netif_msg_hw(priv))
1519c24602efSGiuseppe CAVALLARO 		stmmac_display_rings(priv);
152056329137SBartlomiej Zolnierkiewicz 
152156329137SBartlomiej Zolnierkiewicz 	return ret;
15227ac6653aSJeff Kirsher }
15237ac6653aSJeff Kirsher 
152471fedb01SJoao Pinto /**
152571fedb01SJoao Pinto  * dma_free_rx_skbufs - free RX dma buffers
152671fedb01SJoao Pinto  * @priv: private structure
152754139cf3SJoao Pinto  * @queue: RX queue index
152871fedb01SJoao Pinto  */
152954139cf3SJoao Pinto static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
15307ac6653aSJeff Kirsher {
15317ac6653aSJeff Kirsher 	int i;
15327ac6653aSJeff Kirsher 
1533e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_RX_SIZE; i++)
153454139cf3SJoao Pinto 		stmmac_free_rx_buffer(priv, queue, i);
15357ac6653aSJeff Kirsher }
15367ac6653aSJeff Kirsher 
153771fedb01SJoao Pinto /**
153871fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
153971fedb01SJoao Pinto  * @priv: private structure
1540ce736788SJoao Pinto  * @queue: TX queue index
154171fedb01SJoao Pinto  */
1542ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
15437ac6653aSJeff Kirsher {
15447ac6653aSJeff Kirsher 	int i;
15457ac6653aSJeff Kirsher 
154671fedb01SJoao Pinto 	for (i = 0; i < DMA_TX_SIZE; i++)
1547ce736788SJoao Pinto 		stmmac_free_tx_buffer(priv, queue, i);
15487ac6653aSJeff Kirsher }
15497ac6653aSJeff Kirsher 
1550732fdf0eSGiuseppe CAVALLARO /**
155154139cf3SJoao Pinto  * free_dma_rx_desc_resources - free RX dma desc resources
155254139cf3SJoao Pinto  * @priv: private structure
155354139cf3SJoao Pinto  */
155454139cf3SJoao Pinto static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
155554139cf3SJoao Pinto {
155654139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
155754139cf3SJoao Pinto 	u32 queue;
155854139cf3SJoao Pinto 
155954139cf3SJoao Pinto 	/* Free RX queue resources */
156054139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
156154139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
156254139cf3SJoao Pinto 
156354139cf3SJoao Pinto 		/* Release the DMA RX socket buffers */
156454139cf3SJoao Pinto 		dma_free_rx_skbufs(priv, queue);
156554139cf3SJoao Pinto 
156654139cf3SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
156754139cf3SJoao Pinto 		if (!priv->extend_desc)
156854139cf3SJoao Pinto 			dma_free_coherent(priv->device,
156954139cf3SJoao Pinto 					  DMA_RX_SIZE * sizeof(struct dma_desc),
157054139cf3SJoao Pinto 					  rx_q->dma_rx, rx_q->dma_rx_phy);
157154139cf3SJoao Pinto 		else
157254139cf3SJoao Pinto 			dma_free_coherent(priv->device, DMA_RX_SIZE *
157354139cf3SJoao Pinto 					  sizeof(struct dma_extended_desc),
157454139cf3SJoao Pinto 					  rx_q->dma_erx, rx_q->dma_rx_phy);
157554139cf3SJoao Pinto 
15762af6106aSJose Abreu 		kfree(rx_q->buf_pool);
1577c3f812ceSJonathan Lemon 		if (rx_q->page_pool)
15782af6106aSJose Abreu 			page_pool_destroy(rx_q->page_pool);
15792af6106aSJose Abreu 	}
158054139cf3SJoao Pinto }
158154139cf3SJoao Pinto 
158254139cf3SJoao Pinto /**
1583ce736788SJoao Pinto  * free_dma_tx_desc_resources - free TX dma desc resources
1584ce736788SJoao Pinto  * @priv: private structure
1585ce736788SJoao Pinto  */
1586ce736788SJoao Pinto static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1587ce736788SJoao Pinto {
1588ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
158962242260SChristophe Jaillet 	u32 queue;
1590ce736788SJoao Pinto 
1591ce736788SJoao Pinto 	/* Free TX queue resources */
1592ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1593ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1594579a25a8SJose Abreu 		size_t size;
1595579a25a8SJose Abreu 		void *addr;
1596ce736788SJoao Pinto 
1597ce736788SJoao Pinto 		/* Release the DMA TX socket buffers */
1598ce736788SJoao Pinto 		dma_free_tx_skbufs(priv, queue);
1599ce736788SJoao Pinto 
1600579a25a8SJose Abreu 		if (priv->extend_desc) {
1601579a25a8SJose Abreu 			size = sizeof(struct dma_extended_desc);
1602579a25a8SJose Abreu 			addr = tx_q->dma_etx;
1603579a25a8SJose Abreu 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1604579a25a8SJose Abreu 			size = sizeof(struct dma_edesc);
1605579a25a8SJose Abreu 			addr = tx_q->dma_entx;
1606579a25a8SJose Abreu 		} else {
1607579a25a8SJose Abreu 			size = sizeof(struct dma_desc);
1608579a25a8SJose Abreu 			addr = tx_q->dma_tx;
1609579a25a8SJose Abreu 		}
1610579a25a8SJose Abreu 
1611579a25a8SJose Abreu 		size *= DMA_TX_SIZE;
1612579a25a8SJose Abreu 
1613579a25a8SJose Abreu 		dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1614ce736788SJoao Pinto 
1615ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff_dma);
1616ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff);
1617ce736788SJoao Pinto 	}
1618ce736788SJoao Pinto }
1619ce736788SJoao Pinto 
1620ce736788SJoao Pinto /**
162171fedb01SJoao Pinto  * alloc_dma_rx_desc_resources - alloc RX resources.
1622732fdf0eSGiuseppe CAVALLARO  * @priv: private structure
1623732fdf0eSGiuseppe CAVALLARO  * Description: according to which descriptor can be used (extended or basic)
1624732fdf0eSGiuseppe CAVALLARO  * this function allocates the resources for the RX path. It pre-allocates the
1625732fdf0eSGiuseppe CAVALLARO  * RX buffers (via per-queue page pools) in order to allow the zero-copy
1626732fdf0eSGiuseppe CAVALLARO  * mechanism.
1627732fdf0eSGiuseppe CAVALLARO  */
162871fedb01SJoao Pinto static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
162909f8d696SSrinivas Kandagatla {
163054139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
16315bacd778SLABBE Corentin 	int ret = -ENOMEM;
163254139cf3SJoao Pinto 	u32 queue;
163309f8d696SSrinivas Kandagatla 
163454139cf3SJoao Pinto 	/* RX queues buffers and DMA */
163554139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
163654139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
16372af6106aSJose Abreu 		struct page_pool_params pp_params = { 0 };
16384f28bd95SThierry Reding 		unsigned int num_pages;
163954139cf3SJoao Pinto 
164054139cf3SJoao Pinto 		rx_q->queue_index = queue;
164154139cf3SJoao Pinto 		rx_q->priv_data = priv;
164254139cf3SJoao Pinto 
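		/* One page_pool per RX queue; the page order is chosen so a
		 * single pool page can hold dma_buf_sz bytes.
		 */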
16432af6106aSJose Abreu 		pp_params.flags = PP_FLAG_DMA_MAP;
16442af6106aSJose Abreu 		pp_params.pool_size = DMA_RX_SIZE;
16454f28bd95SThierry Reding 		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
16464f28bd95SThierry Reding 		pp_params.order = ilog2(num_pages);
16472af6106aSJose Abreu 		pp_params.nid = dev_to_node(priv->device);
16482af6106aSJose Abreu 		pp_params.dev = priv->device;
16492af6106aSJose Abreu 		pp_params.dma_dir = DMA_FROM_DEVICE;
16505bacd778SLABBE Corentin 
16512af6106aSJose Abreu 		rx_q->page_pool = page_pool_create(&pp_params);
16522af6106aSJose Abreu 		if (IS_ERR(rx_q->page_pool)) {
16532af6106aSJose Abreu 			ret = PTR_ERR(rx_q->page_pool);
16542af6106aSJose Abreu 			rx_q->page_pool = NULL;
16552af6106aSJose Abreu 			goto err_dma;
16562af6106aSJose Abreu 		}
16572af6106aSJose Abreu 
1658ec5e5ce1SJose Abreu 		rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
16595bacd778SLABBE Corentin 					 GFP_KERNEL);
16602af6106aSJose Abreu 		if (!rx_q->buf_pool)
166154139cf3SJoao Pinto 			goto err_dma;
16625bacd778SLABBE Corentin 
16635bacd778SLABBE Corentin 		if (priv->extend_desc) {
1664750afb08SLuis Chamberlain 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1665750afb08SLuis Chamberlain 							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
166654139cf3SJoao Pinto 							   &rx_q->dma_rx_phy,
16675bacd778SLABBE Corentin 							   GFP_KERNEL);
166854139cf3SJoao Pinto 			if (!rx_q->dma_erx)
16695bacd778SLABBE Corentin 				goto err_dma;
16705bacd778SLABBE Corentin 
167171fedb01SJoao Pinto 		} else {
1672750afb08SLuis Chamberlain 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1673750afb08SLuis Chamberlain 							  DMA_RX_SIZE * sizeof(struct dma_desc),
167454139cf3SJoao Pinto 							  &rx_q->dma_rx_phy,
167571fedb01SJoao Pinto 							  GFP_KERNEL);
167654139cf3SJoao Pinto 			if (!rx_q->dma_rx)
167771fedb01SJoao Pinto 				goto err_dma;
167871fedb01SJoao Pinto 		}
167954139cf3SJoao Pinto 	}
168071fedb01SJoao Pinto 
168171fedb01SJoao Pinto 	return 0;
168271fedb01SJoao Pinto 
168371fedb01SJoao Pinto err_dma:
168454139cf3SJoao Pinto 	free_dma_rx_desc_resources(priv);
168554139cf3SJoao Pinto 
168671fedb01SJoao Pinto 	return ret;
168771fedb01SJoao Pinto }
168871fedb01SJoao Pinto 
168971fedb01SJoao Pinto /**
169071fedb01SJoao Pinto  * alloc_dma_tx_desc_resources - alloc TX resources.
169171fedb01SJoao Pinto  * @priv: private structure
169271fedb01SJoao Pinto  * Description: according to which descriptor can be used (extended or basic)
169371fedb01SJoao Pinto  * this function allocates the resources for the TX path: the descriptor
169471fedb01SJoao Pinto  * rings and the per-entry skbuff/DMA bookkeeping arrays.
169671fedb01SJoao Pinto  */
169771fedb01SJoao Pinto static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
169871fedb01SJoao Pinto {
1699ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
170071fedb01SJoao Pinto 	int ret = -ENOMEM;
1701ce736788SJoao Pinto 	u32 queue;
170271fedb01SJoao Pinto 
1703ce736788SJoao Pinto 	/* TX queues buffers and DMA */
1704ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1705ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1706579a25a8SJose Abreu 		size_t size;
1707579a25a8SJose Abreu 		void *addr;
1708ce736788SJoao Pinto 
1709ce736788SJoao Pinto 		tx_q->queue_index = queue;
1710ce736788SJoao Pinto 		tx_q->priv_data = priv;
1711ce736788SJoao Pinto 
1712ec5e5ce1SJose Abreu 		tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
1713ce736788SJoao Pinto 					      sizeof(*tx_q->tx_skbuff_dma),
171471fedb01SJoao Pinto 					      GFP_KERNEL);
1715ce736788SJoao Pinto 		if (!tx_q->tx_skbuff_dma)
171662242260SChristophe Jaillet 			goto err_dma;
171771fedb01SJoao Pinto 
1718ec5e5ce1SJose Abreu 		tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
1719ce736788SJoao Pinto 					  sizeof(struct sk_buff *),
172071fedb01SJoao Pinto 					  GFP_KERNEL);
1721ce736788SJoao Pinto 		if (!tx_q->tx_skbuff)
172262242260SChristophe Jaillet 			goto err_dma;
172371fedb01SJoao Pinto 
1724579a25a8SJose Abreu 		if (priv->extend_desc)
1725579a25a8SJose Abreu 			size = sizeof(struct dma_extended_desc);
1726579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1727579a25a8SJose Abreu 			size = sizeof(struct dma_edesc);
1728579a25a8SJose Abreu 		else
1729579a25a8SJose Abreu 			size = sizeof(struct dma_desc);
1730579a25a8SJose Abreu 
1731579a25a8SJose Abreu 		size *= DMA_TX_SIZE;
1732579a25a8SJose Abreu 
1733579a25a8SJose Abreu 		addr = dma_alloc_coherent(priv->device, size,
1734579a25a8SJose Abreu 					  &tx_q->dma_tx_phy, GFP_KERNEL);
1735579a25a8SJose Abreu 		if (!addr)
173662242260SChristophe Jaillet 			goto err_dma;
1737579a25a8SJose Abreu 
1738579a25a8SJose Abreu 		if (priv->extend_desc)
1739579a25a8SJose Abreu 			tx_q->dma_etx = addr;
1740579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1741579a25a8SJose Abreu 			tx_q->dma_entx = addr;
1742579a25a8SJose Abreu 		else
1743579a25a8SJose Abreu 			tx_q->dma_tx = addr;
17445bacd778SLABBE Corentin 	}
17455bacd778SLABBE Corentin 
17465bacd778SLABBE Corentin 	return 0;
17475bacd778SLABBE Corentin 
174862242260SChristophe Jaillet err_dma:
1749ce736788SJoao Pinto 	free_dma_tx_desc_resources(priv);
175009f8d696SSrinivas Kandagatla 	return ret;
17515bacd778SLABBE Corentin }
175209f8d696SSrinivas Kandagatla 
175371fedb01SJoao Pinto /**
175471fedb01SJoao Pinto  * alloc_dma_desc_resources - alloc TX/RX resources.
175571fedb01SJoao Pinto  * @priv: private structure
175671fedb01SJoao Pinto  * Description: according to which descriptor can be used (extended or basic)
175771fedb01SJoao Pinto  * this function allocates the resources for the TX and RX paths. In case of
175871fedb01SJoao Pinto  * reception, for example, it pre-allocates the RX buffers in order to allow
175971fedb01SJoao Pinto  * the zero-copy mechanism.
176071fedb01SJoao Pinto  */
176171fedb01SJoao Pinto static int alloc_dma_desc_resources(struct stmmac_priv *priv)
17625bacd778SLABBE Corentin {
176354139cf3SJoao Pinto 	/* RX Allocation */
176471fedb01SJoao Pinto 	int ret = alloc_dma_rx_desc_resources(priv);
176571fedb01SJoao Pinto 
176671fedb01SJoao Pinto 	if (ret)
176771fedb01SJoao Pinto 		return ret;
176871fedb01SJoao Pinto 
176971fedb01SJoao Pinto 	ret = alloc_dma_tx_desc_resources(priv);
177071fedb01SJoao Pinto 
177171fedb01SJoao Pinto 	return ret;
177271fedb01SJoao Pinto }
177371fedb01SJoao Pinto 
177471fedb01SJoao Pinto /**
177571fedb01SJoao Pinto  * free_dma_desc_resources - free dma desc resources
177671fedb01SJoao Pinto  * @priv: private structure
177771fedb01SJoao Pinto  */
177871fedb01SJoao Pinto static void free_dma_desc_resources(struct stmmac_priv *priv)
177971fedb01SJoao Pinto {
178071fedb01SJoao Pinto 	/* Release the DMA RX socket buffers */
178171fedb01SJoao Pinto 	free_dma_rx_desc_resources(priv);
178271fedb01SJoao Pinto 
178371fedb01SJoao Pinto 	/* Release the DMA TX socket buffers */
178471fedb01SJoao Pinto 	free_dma_tx_desc_resources(priv);
178571fedb01SJoao Pinto }
178671fedb01SJoao Pinto 
178771fedb01SJoao Pinto /**
17889eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
17899eb12474Sjpinto  *  @priv: driver private structure
17909eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
17919eb12474Sjpinto  */
17929eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
17939eb12474Sjpinto {
17944f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
17954f6046f5SJoao Pinto 	int queue;
17964f6046f5SJoao Pinto 	u8 mode;
17979eb12474Sjpinto 
17984f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
17994f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1800c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
18014f6046f5SJoao Pinto 	}
18029eb12474Sjpinto }
18039eb12474Sjpinto 
18049eb12474Sjpinto /**
1805ae4f0d46SJoao Pinto  * stmmac_start_rx_dma - start RX DMA channel
1806ae4f0d46SJoao Pinto  * @priv: driver private structure
1807ae4f0d46SJoao Pinto  * @chan: RX channel index
1808ae4f0d46SJoao Pinto  * Description:
1809ae4f0d46SJoao Pinto  * This starts a RX DMA channel
1810ae4f0d46SJoao Pinto  */
1811ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1812ae4f0d46SJoao Pinto {
1813ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1814a4e887faSJose Abreu 	stmmac_start_rx(priv, priv->ioaddr, chan);
1815ae4f0d46SJoao Pinto }
1816ae4f0d46SJoao Pinto 
1817ae4f0d46SJoao Pinto /**
1818ae4f0d46SJoao Pinto  * stmmac_start_tx_dma - start TX DMA channel
1819ae4f0d46SJoao Pinto  * @priv: driver private structure
1820ae4f0d46SJoao Pinto  * @chan: TX channel index
1821ae4f0d46SJoao Pinto  * Description:
1822ae4f0d46SJoao Pinto  * This starts a TX DMA channel
1823ae4f0d46SJoao Pinto  */
1824ae4f0d46SJoao Pinto static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1825ae4f0d46SJoao Pinto {
1826ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1827a4e887faSJose Abreu 	stmmac_start_tx(priv, priv->ioaddr, chan);
1828ae4f0d46SJoao Pinto }
1829ae4f0d46SJoao Pinto 
1830ae4f0d46SJoao Pinto /**
1831ae4f0d46SJoao Pinto  * stmmac_stop_rx_dma - stop RX DMA channel
1832ae4f0d46SJoao Pinto  * @priv: driver private structure
1833ae4f0d46SJoao Pinto  * @chan: RX channel index
1834ae4f0d46SJoao Pinto  * Description:
1835ae4f0d46SJoao Pinto  * This stops a RX DMA channel
1836ae4f0d46SJoao Pinto  */
1837ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1838ae4f0d46SJoao Pinto {
1839ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1840a4e887faSJose Abreu 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1841ae4f0d46SJoao Pinto }
1842ae4f0d46SJoao Pinto 
1843ae4f0d46SJoao Pinto /**
1844ae4f0d46SJoao Pinto  * stmmac_stop_tx_dma - stop TX DMA channel
1845ae4f0d46SJoao Pinto  * @priv: driver private structure
1846ae4f0d46SJoao Pinto  * @chan: TX channel index
1847ae4f0d46SJoao Pinto  * Description:
1848ae4f0d46SJoao Pinto  * This stops a TX DMA channel
1849ae4f0d46SJoao Pinto  */
1850ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1851ae4f0d46SJoao Pinto {
1852ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1853a4e887faSJose Abreu 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1854ae4f0d46SJoao Pinto }
1855ae4f0d46SJoao Pinto 
1856ae4f0d46SJoao Pinto /**
1857ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
1858ae4f0d46SJoao Pinto  * @priv: driver private structure
1859ae4f0d46SJoao Pinto  * Description:
1860ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
1861ae4f0d46SJoao Pinto  */
1862ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
1863ae4f0d46SJoao Pinto {
1864ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1865ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1866ae4f0d46SJoao Pinto 	u32 chan = 0;
1867ae4f0d46SJoao Pinto 
1868ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1869ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
1870ae4f0d46SJoao Pinto 
1871ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1872ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
1873ae4f0d46SJoao Pinto }
1874ae4f0d46SJoao Pinto 
1875ae4f0d46SJoao Pinto /**
1876ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1877ae4f0d46SJoao Pinto  * @priv: driver private structure
1878ae4f0d46SJoao Pinto  * Description:
1879ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
1880ae4f0d46SJoao Pinto  */
1881ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1882ae4f0d46SJoao Pinto {
1883ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1884ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1885ae4f0d46SJoao Pinto 	u32 chan = 0;
1886ae4f0d46SJoao Pinto 
1887ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1888ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
1889ae4f0d46SJoao Pinto 
1890ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1891ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
1892ae4f0d46SJoao Pinto }
1893ae4f0d46SJoao Pinto 
1894ae4f0d46SJoao Pinto /**
18957ac6653aSJeff Kirsher  *  stmmac_dma_operation_mode - HW DMA operation mode
189632ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
1897732fdf0eSGiuseppe CAVALLARO  *  Description: it is used for configuring the DMA operation mode register in
1898732fdf0eSGiuseppe CAVALLARO  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
18997ac6653aSJeff Kirsher  */
19007ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
19017ac6653aSJeff Kirsher {
19026deee222SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
19036deee222SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1904f88203a2SVince Bridgers 	int rxfifosz = priv->plat->rx_fifo_size;
190552a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
19066deee222SJoao Pinto 	u32 txmode = 0;
19076deee222SJoao Pinto 	u32 rxmode = 0;
19086deee222SJoao Pinto 	u32 chan = 0;
1909a0daae13SJose Abreu 	u8 qmode = 0;
1910f88203a2SVince Bridgers 
191111fbf811SThierry Reding 	if (rxfifosz == 0)
191211fbf811SThierry Reding 		rxfifosz = priv->dma_cap.rx_fifo_size;
191352a76235SJose Abreu 	if (txfifosz == 0)
191452a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
191552a76235SJose Abreu 
191652a76235SJose Abreu 	/* Adjust for real per queue fifo size */
191752a76235SJose Abreu 	rxfifosz /= rx_channels_count;
191852a76235SJose Abreu 	txfifosz /= tx_channels_count;
191911fbf811SThierry Reding 
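	/* Pick threshold mode (tc) or Store-And-Forward per direction */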
19206deee222SJoao Pinto 	if (priv->plat->force_thresh_dma_mode) {
19216deee222SJoao Pinto 		txmode = tc;
19226deee222SJoao Pinto 		rxmode = tc;
19236deee222SJoao Pinto 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
19247ac6653aSJeff Kirsher 		/*
19257ac6653aSJeff Kirsher 		 * In case of GMAC, SF mode can be enabled
19267ac6653aSJeff Kirsher 		 * to perform the TX COE in HW. This depends on:
19277ac6653aSJeff Kirsher 		 * 1) TX COE if actually supported
19287ac6653aSJeff Kirsher 		 * 2) There is no bugged Jumbo frame support
19297ac6653aSJeff Kirsher 		 *    that needs to not insert csum in the TDES.
19307ac6653aSJeff Kirsher 		 */
19316deee222SJoao Pinto 		txmode = SF_DMA_MODE;
19326deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
1933b2dec116SSonic Zhang 		priv->xstats.threshold = SF_DMA_MODE;
19346deee222SJoao Pinto 	} else {
19356deee222SJoao Pinto 		txmode = tc;
19366deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
19376deee222SJoao Pinto 	}
19386deee222SJoao Pinto 
19396deee222SJoao Pinto 	/* configure all channels */
1940a0daae13SJose Abreu 	for (chan = 0; chan < rx_channels_count; chan++) {
1941a0daae13SJose Abreu 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
19426deee222SJoao Pinto 
1943a4e887faSJose Abreu 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1944a0daae13SJose Abreu 				rxfifosz, qmode);
19454205c88eSJose Abreu 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
19464205c88eSJose Abreu 				chan);
1947a0daae13SJose Abreu 	}
1948a0daae13SJose Abreu 
1949a0daae13SJose Abreu 	for (chan = 0; chan < tx_channels_count; chan++) {
1950a0daae13SJose Abreu 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1951a0daae13SJose Abreu 
1952a4e887faSJose Abreu 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1953a0daae13SJose Abreu 				txfifosz, qmode);
1954a0daae13SJose Abreu 	}
19557ac6653aSJeff Kirsher }
19567ac6653aSJeff Kirsher 
19577ac6653aSJeff Kirsher /**
1958732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_clean - to manage the transmission completion
195932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
 * @budget: napi budget limiting this function's packet handling
1960ce736788SJoao Pinto  * @queue: TX queue index
1961732fdf0eSGiuseppe CAVALLARO  * Description: it reclaims the transmit resources after transmission completes.
19627ac6653aSJeff Kirsher  */
19638fce3331SJose Abreu static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
19647ac6653aSJeff Kirsher {
1965ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
196638979574SBeniamino Galvani 	unsigned int bytes_compl = 0, pkts_compl = 0;
19678fce3331SJose Abreu 	unsigned int entry, count = 0;
19687ac6653aSJeff Kirsher 
19698fce3331SJose Abreu 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1970a9097a96SGiuseppe CAVALLARO 
19719125cdd1SGiuseppe CAVALLARO 	priv->xstats.tx_clean++;
19729125cdd1SGiuseppe CAVALLARO 
19738d5f4b07SBernd Edlinger 	entry = tx_q->dirty_tx;
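	/* Reclaim entries from dirty_tx up to cur_tx, bounded by the NAPI budget */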
19748fce3331SJose Abreu 	while ((entry != tx_q->cur_tx) && (count < budget)) {
1975ce736788SJoao Pinto 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1976c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
1977c363b658SFabrice Gasnier 		int status;
1978c24602efSGiuseppe CAVALLARO 
1979c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
1980ce736788SJoao Pinto 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1981579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1982579a25a8SJose Abreu 			p = &tx_q->dma_entx[entry].basic;
1983c24602efSGiuseppe CAVALLARO 		else
1984ce736788SJoao Pinto 			p = tx_q->dma_tx + entry;
19857ac6653aSJeff Kirsher 
198642de047dSJose Abreu 		status = stmmac_tx_status(priv, &priv->dev->stats,
198742de047dSJose Abreu 				&priv->xstats, p, priv->ioaddr);
1988c363b658SFabrice Gasnier 		/* Check if the descriptor is owned by the DMA */
1989c363b658SFabrice Gasnier 		if (unlikely(status & tx_dma_own))
1990c363b658SFabrice Gasnier 			break;
1991c363b658SFabrice Gasnier 
19928fce3331SJose Abreu 		count++;
19938fce3331SJose Abreu 
1994a6b25da5SNiklas Cassel 		/* Make sure descriptor fields are read after reading
1995a6b25da5SNiklas Cassel 		 * the own bit.
1996a6b25da5SNiklas Cassel 		 */
1997a6b25da5SNiklas Cassel 		dma_rmb();
1998a6b25da5SNiklas Cassel 
1999c363b658SFabrice Gasnier 		/* Just consider the last segment and ...*/
2000c363b658SFabrice Gasnier 		if (likely(!(status & tx_not_ls))) {
2001c363b658SFabrice Gasnier 			/* ... verify the status error condition */
2002c363b658SFabrice Gasnier 			if (unlikely(status & tx_err)) {
2003c363b658SFabrice Gasnier 				priv->dev->stats.tx_errors++;
2004c363b658SFabrice Gasnier 			} else {
20057ac6653aSJeff Kirsher 				priv->dev->stats.tx_packets++;
20067ac6653aSJeff Kirsher 				priv->xstats.tx_pkt_n++;
2007c363b658SFabrice Gasnier 			}
2008ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_tx_hwtstamp(priv, p, skb);
20097ac6653aSJeff Kirsher 		}
20107ac6653aSJeff Kirsher 
2011ce736788SJoao Pinto 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
2012ce736788SJoao Pinto 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2013362b37beSGiuseppe CAVALLARO 				dma_unmap_page(priv->device,
2014ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].buf,
2015ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].len,
20167ac6653aSJeff Kirsher 					       DMA_TO_DEVICE);
2017362b37beSGiuseppe CAVALLARO 			else
2018362b37beSGiuseppe CAVALLARO 				dma_unmap_single(priv->device,
2019ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].buf,
2020ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].len,
2021362b37beSGiuseppe CAVALLARO 						 DMA_TO_DEVICE);
2022ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].buf = 0;
2023ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].len = 0;
2024ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2025cf32deecSRayagond Kokatanur 		}
2026f748be53SAlexandre TORGUE 
20272c520b1cSJose Abreu 		stmmac_clean_desc3(priv, tx_q, p);
2028f748be53SAlexandre TORGUE 
2029ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2030ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
20317ac6653aSJeff Kirsher 
20327ac6653aSJeff Kirsher 		if (likely(skb != NULL)) {
203338979574SBeniamino Galvani 			pkts_compl++;
203438979574SBeniamino Galvani 			bytes_compl += skb->len;
20357c565c33SEric W. Biederman 			dev_consume_skb_any(skb);
2036ce736788SJoao Pinto 			tx_q->tx_skbuff[entry] = NULL;
20377ac6653aSJeff Kirsher 		}
20387ac6653aSJeff Kirsher 
203942de047dSJose Abreu 		stmmac_release_tx_desc(priv, p, priv->mode);
20407ac6653aSJeff Kirsher 
2041e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
20427ac6653aSJeff Kirsher 	}
2043ce736788SJoao Pinto 	tx_q->dirty_tx = entry;
204438979574SBeniamino Galvani 
2045c22a3f48SJoao Pinto 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2046c22a3f48SJoao Pinto 				  pkts_compl, bytes_compl);
204738979574SBeniamino Galvani 
2048c22a3f48SJoao Pinto 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2049c22a3f48SJoao Pinto 								queue))) &&
2050c22a3f48SJoao Pinto 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
2051c22a3f48SJoao Pinto 
2052b3e51069SLABBE Corentin 		netif_dbg(priv, tx_done, priv->dev,
2053b3e51069SLABBE Corentin 			  "%s: restart transmit\n", __func__);
2054c22a3f48SJoao Pinto 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
20557ac6653aSJeff Kirsher 	}
2056d765955dSGiuseppe CAVALLARO 
2057d765955dSGiuseppe CAVALLARO 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
2058d765955dSGiuseppe CAVALLARO 		stmmac_enable_eee_mode(priv);
2059f5351ef7SGiuseppe CAVALLARO 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
2060d765955dSGiuseppe CAVALLARO 	}
20618fce3331SJose Abreu 
20624ccb4585SJose Abreu 	/* We still have pending packets, let's call for a new scheduling */
20634ccb4585SJose Abreu 	if (tx_q->dirty_tx != tx_q->cur_tx)
20643755b21bSJose Abreu 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
20654ccb4585SJose Abreu 
20668fce3331SJose Abreu 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
20678fce3331SJose Abreu 
20688fce3331SJose Abreu 	return count;
20697ac6653aSJeff Kirsher }
20707ac6653aSJeff Kirsher 
20717ac6653aSJeff Kirsher /**
2072732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
207332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
20745bacd778SLABBE Corentin  * @chan: channel index
20757ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
2076732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
20777ac6653aSJeff Kirsher  */
20785bacd778SLABBE Corentin static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
20797ac6653aSJeff Kirsher {
2080ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2081ce736788SJoao Pinto 
2082c22a3f48SJoao Pinto 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
20837ac6653aSJeff Kirsher 
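	/* Stop the channel, drop pending buffers, reset the ring indexes and
	 * restart the DMA from a clean state.
	 */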
2084ae4f0d46SJoao Pinto 	stmmac_stop_tx_dma(priv, chan);
2085ce736788SJoao Pinto 	dma_free_tx_skbufs(priv, chan);
2086579a25a8SJose Abreu 	stmmac_clear_tx_descriptors(priv, chan);
2087ce736788SJoao Pinto 	tx_q->dirty_tx = 0;
2088ce736788SJoao Pinto 	tx_q->cur_tx = 0;
20898d212a9eSNiklas Cassel 	tx_q->mss = 0;
2090c22a3f48SJoao Pinto 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2091f421031eSJongsung Kim 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2092f421031eSJongsung Kim 			    tx_q->dma_tx_phy, chan);
2093ae4f0d46SJoao Pinto 	stmmac_start_tx_dma(priv, chan);
20947ac6653aSJeff Kirsher 
20957ac6653aSJeff Kirsher 	priv->dev->stats.tx_errors++;
2096c22a3f48SJoao Pinto 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
20977ac6653aSJeff Kirsher }
20987ac6653aSJeff Kirsher 
209932ceabcaSGiuseppe CAVALLARO /**
21006deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
21016deee222SJoao Pinto  *  @priv: driver private structure
21026deee222SJoao Pinto  *  @txmode: TX operating mode
21036deee222SJoao Pinto  *  @rxmode: RX operating mode
21046deee222SJoao Pinto  *  @chan: channel index
21056deee222SJoao Pinto  *  Description: it is used for configuring of the DMA operation mode in
21066deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
21076deee222SJoao Pinto  *  mode.
21086deee222SJoao Pinto  */
21096deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
21106deee222SJoao Pinto 					  u32 rxmode, u32 chan)
21116deee222SJoao Pinto {
2112a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2113a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
211452a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
211552a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
21166deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
211752a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
21186deee222SJoao Pinto 
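	/* Fall back to the FIFO sizes reported in the HW capabilities when
	 * the platform does not provide them.
	 */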
21196deee222SJoao Pinto 	if (rxfifosz == 0)
21206deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
212152a76235SJose Abreu 	if (txfifosz == 0)
212252a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
212352a76235SJose Abreu 
212452a76235SJose Abreu 	/* Adjust for real per queue fifo size */
212552a76235SJose Abreu 	rxfifosz /= rx_channels_count;
212652a76235SJose Abreu 	txfifosz /= tx_channels_count;
21276deee222SJoao Pinto 
2128ab0204e3SJose Abreu 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2129ab0204e3SJose Abreu 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
21306deee222SJoao Pinto }
21316deee222SJoao Pinto 
21328bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
21338bf993a5SJose Abreu {
213463a550fcSJose Abreu 	int ret;
21358bf993a5SJose Abreu 
2136c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
21378bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2138c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
21398bf993a5SJose Abreu 		stmmac_global_err(priv);
2140c10d4c82SJose Abreu 		return true;
2141c10d4c82SJose Abreu 	}
2142c10d4c82SJose Abreu 
2143c10d4c82SJose Abreu 	return false;
21448bf993a5SJose Abreu }
21458bf993a5SJose Abreu 
21468fce3331SJose Abreu static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
21478fce3331SJose Abreu {
21488fce3331SJose Abreu 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
21498fce3331SJose Abreu 						 &priv->xstats, chan);
21508fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[chan];
2151021bd5e3SJose Abreu 	unsigned long flags;
21528fce3331SJose Abreu 
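	/* RX/TX work is handed off to NAPI: the per-channel DMA interrupt is
	 * masked before scheduling and is expected to be re-enabled from the
	 * poll routines once the pending work has been processed.
	 */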
21534ccb4585SJose Abreu 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
21543ba07debSJose Abreu 		if (napi_schedule_prep(&ch->rx_napi)) {
2155021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2156021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2157021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
21583ba07debSJose Abreu 			__napi_schedule_irqoff(&ch->rx_napi);
21593ba07debSJose Abreu 		}
21604ccb4585SJose Abreu 	}
21614ccb4585SJose Abreu 
2162021bd5e3SJose Abreu 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2163021bd5e3SJose Abreu 		if (napi_schedule_prep(&ch->tx_napi)) {
2164021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2165021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2166021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
2167021bd5e3SJose Abreu 			__napi_schedule_irqoff(&ch->tx_napi);
2168021bd5e3SJose Abreu 		}
2169021bd5e3SJose Abreu 	}
21708fce3331SJose Abreu 
21718fce3331SJose Abreu 	return status;
21728fce3331SJose Abreu }
21738fce3331SJose Abreu 
21746deee222SJoao Pinto /**
2175732fdf0eSGiuseppe CAVALLARO  * stmmac_dma_interrupt - DMA ISR
217632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
217732ceabcaSGiuseppe CAVALLARO  * Description: this is the DMA ISR. It is called by the main ISR.
2178732fdf0eSGiuseppe CAVALLARO  * It calls the dwmac dma routine and schedules the poll method in case some
2179732fdf0eSGiuseppe CAVALLARO  * work can be done.
218032ceabcaSGiuseppe CAVALLARO  */
21817ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv)
21827ac6653aSJeff Kirsher {
2183d62a107aSJoao Pinto 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
21845a6a0445SNiklas Cassel 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
21855a6a0445SNiklas Cassel 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
21865a6a0445SNiklas Cassel 				tx_channel_count : rx_channel_count;
2187d62a107aSJoao Pinto 	u32 chan;
21888ac60ffbSKees Cook 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
21898ac60ffbSKees Cook 
21908ac60ffbSKees Cook 	/* Make sure we never check beyond our status buffer. */
21918ac60ffbSKees Cook 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
21928ac60ffbSKees Cook 		channels_to_check = ARRAY_SIZE(status);
219368e5cfafSJoao Pinto 
21945a6a0445SNiklas Cassel 	for (chan = 0; chan < channels_to_check; chan++)
21958fce3331SJose Abreu 		status[chan] = stmmac_napi_check(priv, chan);
2196d62a107aSJoao Pinto 
21975a6a0445SNiklas Cassel 	for (chan = 0; chan < tx_channel_count; chan++) {
21985a6a0445SNiklas Cassel 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
21997ac6653aSJeff Kirsher 			/* Try to bump up the dma threshold on this failure */
2200b2dec116SSonic Zhang 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2201b2dec116SSonic Zhang 			    (tc <= 256)) {
22027ac6653aSJeff Kirsher 				tc += 64;
2203c405abe2SSonic Zhang 				if (priv->plat->force_thresh_dma_mode)
2204d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2205d62a107aSJoao Pinto 								      tc,
2206d62a107aSJoao Pinto 								      tc,
2207d62a107aSJoao Pinto 								      chan);
2208c405abe2SSonic Zhang 				else
2209d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2210d62a107aSJoao Pinto 								    tc,
2211d62a107aSJoao Pinto 								    SF_DMA_MODE,
2212d62a107aSJoao Pinto 								    chan);
22137ac6653aSJeff Kirsher 				priv->xstats.threshold = tc;
22147ac6653aSJeff Kirsher 			}
22155a6a0445SNiklas Cassel 		} else if (unlikely(status[chan] == tx_hard_error)) {
22164e593262SJoao Pinto 			stmmac_tx_err(priv, chan);
22177ac6653aSJeff Kirsher 		}
2218d62a107aSJoao Pinto 	}
2219d62a107aSJoao Pinto }
22207ac6653aSJeff Kirsher 
222132ceabcaSGiuseppe CAVALLARO /**
222232ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
222332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
222432ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
222532ceabcaSGiuseppe CAVALLARO  */
22261c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
22271c901a46SGiuseppe CAVALLARO {
22281c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
22291c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
22301c901a46SGiuseppe CAVALLARO 
22313b1dd2c5SJose Abreu 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
22324f795b25SGiuseppe CAVALLARO 
22334f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
22343b1dd2c5SJose Abreu 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
22351c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
22364f795b25SGiuseppe CAVALLARO 	} else
223738ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
22381c901a46SGiuseppe CAVALLARO }
22391c901a46SGiuseppe CAVALLARO 
2240732fdf0eSGiuseppe CAVALLARO /**
2241732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
224232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
224319e30c14SGiuseppe CAVALLARO  * Description:
224419e30c14SGiuseppe CAVALLARO  *  new GMAC chip generations have a new register to indicate the
2245e7434821SGiuseppe CAVALLARO  *  presence of the optional feature/functions.
224619e30c14SGiuseppe CAVALLARO  *  This can also be used to override the value passed through the
224719e30c14SGiuseppe CAVALLARO  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2248e7434821SGiuseppe CAVALLARO  */
2249e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2250e7434821SGiuseppe CAVALLARO {
2251a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2252e7434821SGiuseppe CAVALLARO }
2253e7434821SGiuseppe CAVALLARO 
225432ceabcaSGiuseppe CAVALLARO /**
2255732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
225632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
225732ceabcaSGiuseppe CAVALLARO  * Description:
225832ceabcaSGiuseppe CAVALLARO  * it verifies that the MAC address is valid; in case of failure it
225932ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
226032ceabcaSGiuseppe CAVALLARO  */
2261bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2262bfab27a1SGiuseppe CAVALLARO {
2263bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2264c10d4c82SJose Abreu 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2265bfab27a1SGiuseppe CAVALLARO 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2266f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
2267af649352SJisheng Zhang 		dev_info(priv->device, "device MAC address %pM\n",
2268bfab27a1SGiuseppe CAVALLARO 			 priv->dev->dev_addr);
2269bfab27a1SGiuseppe CAVALLARO 	}
2270c88460b7SHans de Goede }
2271bfab27a1SGiuseppe CAVALLARO 
227232ceabcaSGiuseppe CAVALLARO /**
2273732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
227432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
227532ceabcaSGiuseppe CAVALLARO  * Description:
227632ceabcaSGiuseppe CAVALLARO  * It inits the DMA invoking the specific MAC/GMAC callback.
227732ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
227832ceabcaSGiuseppe CAVALLARO  * if these are not passed, a default is kept for the MAC or GMAC.
227932ceabcaSGiuseppe CAVALLARO  */
22800f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv)
22810f1f88a8SGiuseppe CAVALLARO {
228247f2a9ceSJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
228347f2a9ceSJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
228424aaed0cSJose Abreu 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
228554139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q;
2286ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
228747f2a9ceSJoao Pinto 	u32 chan = 0;
2288c24602efSGiuseppe CAVALLARO 	int atds = 0;
2289495db273SGiuseppe Cavallaro 	int ret = 0;
22900f1f88a8SGiuseppe CAVALLARO 
2291a332e2faSNiklas Cassel 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2292a332e2faSNiklas Cassel 		dev_err(priv->device, "Invalid DMA configuration\n");
229389ab75bfSNiklas Cassel 		return -EINVAL;
22940f1f88a8SGiuseppe CAVALLARO 	}
22950f1f88a8SGiuseppe CAVALLARO 
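	/* The alternate descriptor size is only needed when extended
	 * descriptors are used in ring mode.
	 */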
2296c24602efSGiuseppe CAVALLARO 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2297c24602efSGiuseppe CAVALLARO 		atds = 1;
2298c24602efSGiuseppe CAVALLARO 
2299a4e887faSJose Abreu 	ret = stmmac_reset(priv, priv->ioaddr);
2300495db273SGiuseppe Cavallaro 	if (ret) {
2301495db273SGiuseppe Cavallaro 		dev_err(priv->device, "Failed to reset the dma\n");
2302495db273SGiuseppe Cavallaro 		return ret;
2303495db273SGiuseppe Cavallaro 	}
2304495db273SGiuseppe Cavallaro 
23057d9e6c5aSJose Abreu 	/* DMA Configuration */
23067d9e6c5aSJose Abreu 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
23077d9e6c5aSJose Abreu 
23087d9e6c5aSJose Abreu 	if (priv->plat->axi)
23097d9e6c5aSJose Abreu 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
23107d9e6c5aSJose Abreu 
2311af8f3fb7SWeifeng Voon 	/* DMA CSR Channel configuration */
2312af8f3fb7SWeifeng Voon 	for (chan = 0; chan < dma_csr_ch; chan++)
2313af8f3fb7SWeifeng Voon 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2314af8f3fb7SWeifeng Voon 
231547f2a9ceSJoao Pinto 	/* DMA RX Channel Configuration */
231647f2a9ceSJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++) {
231754139cf3SJoao Pinto 		rx_q = &priv->rx_queue[chan];
231854139cf3SJoao Pinto 
231924aaed0cSJose Abreu 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
232024aaed0cSJose Abreu 				    rx_q->dma_rx_phy, chan);
232147f2a9ceSJoao Pinto 
232254139cf3SJoao Pinto 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2323f748be53SAlexandre TORGUE 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2324a4e887faSJose Abreu 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2325a4e887faSJose Abreu 				       rx_q->rx_tail_addr, chan);
232647f2a9ceSJoao Pinto 	}
232747f2a9ceSJoao Pinto 
232847f2a9ceSJoao Pinto 	/* DMA TX Channel Configuration */
232947f2a9ceSJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++) {
2330ce736788SJoao Pinto 		tx_q = &priv->tx_queue[chan];
2331ce736788SJoao Pinto 
233224aaed0cSJose Abreu 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
233324aaed0cSJose Abreu 				    tx_q->dma_tx_phy, chan);
2334f748be53SAlexandre TORGUE 
23350431100bSJose Abreu 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2336a4e887faSJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2337a4e887faSJose Abreu 				       tx_q->tx_tail_addr, chan);
233847f2a9ceSJoao Pinto 	}
233924aaed0cSJose Abreu 
2340495db273SGiuseppe Cavallaro 	return ret;
23410f1f88a8SGiuseppe CAVALLARO }
23420f1f88a8SGiuseppe CAVALLARO 
23438fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
23448fce3331SJose Abreu {
23458fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
23468fce3331SJose Abreu 
23478fce3331SJose Abreu 	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
23488fce3331SJose Abreu }
23498fce3331SJose Abreu 
2350bfab27a1SGiuseppe CAVALLARO /**
2351732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
23529125cdd1SGiuseppe CAVALLARO  * @t: timer_list pointer
23539125cdd1SGiuseppe CAVALLARO  * Description:
23549125cdd1SGiuseppe CAVALLARO  * This is the timer handler to directly invoke the stmmac_tx_clean.
23559125cdd1SGiuseppe CAVALLARO  */
2356e99e88a9SKees Cook static void stmmac_tx_timer(struct timer_list *t)
23579125cdd1SGiuseppe CAVALLARO {
23588fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
23598fce3331SJose Abreu 	struct stmmac_priv *priv = tx_q->priv_data;
23608fce3331SJose Abreu 	struct stmmac_channel *ch;
23619125cdd1SGiuseppe CAVALLARO 
23628fce3331SJose Abreu 	ch = &priv->channel[tx_q->queue_index];
23638fce3331SJose Abreu 
2364021bd5e3SJose Abreu 	if (likely(napi_schedule_prep(&ch->tx_napi))) {
2365021bd5e3SJose Abreu 		unsigned long flags;
2366021bd5e3SJose Abreu 
2367021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
2368021bd5e3SJose Abreu 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2369021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
23704ccb4585SJose Abreu 		__napi_schedule(&ch->tx_napi);
2371021bd5e3SJose Abreu 	}
23729125cdd1SGiuseppe CAVALLARO }
23739125cdd1SGiuseppe CAVALLARO 
23749125cdd1SGiuseppe CAVALLARO /**
2375d429b66eSJose Abreu  * stmmac_init_coalesce - init mitigation options.
237632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
23779125cdd1SGiuseppe CAVALLARO  * Description:
2378d429b66eSJose Abreu  * This inits the coalesce parameters: i.e. timer rate,
23799125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
23809125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
23819125cdd1SGiuseppe CAVALLARO  */
2382d429b66eSJose Abreu static void stmmac_init_coalesce(struct stmmac_priv *priv)
23839125cdd1SGiuseppe CAVALLARO {
23848fce3331SJose Abreu 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
23858fce3331SJose Abreu 	u32 chan;
23868fce3331SJose Abreu 
23879125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
23889125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2389d429b66eSJose Abreu 	priv->rx_coal_frames = STMMAC_RX_FRAMES;
23908fce3331SJose Abreu 
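	/* One SW mitigation timer is armed per TX queue */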
23918fce3331SJose Abreu 	for (chan = 0; chan < tx_channel_count; chan++) {
23928fce3331SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
23938fce3331SJose Abreu 
23948fce3331SJose Abreu 		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
23958fce3331SJose Abreu 	}
23969125cdd1SGiuseppe CAVALLARO }
23979125cdd1SGiuseppe CAVALLARO 
23984854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
23994854ab99SJoao Pinto {
24004854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
24014854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
24024854ab99SJoao Pinto 	u32 chan;
24034854ab99SJoao Pinto 
24044854ab99SJoao Pinto 	/* set TX ring length */
24054854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2406a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
24074854ab99SJoao Pinto 				(DMA_TX_SIZE - 1), chan);
24084854ab99SJoao Pinto 
24094854ab99SJoao Pinto 	/* set RX ring length */
24104854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2411a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
24124854ab99SJoao Pinto 				(DMA_RX_SIZE - 1), chan);
24134854ab99SJoao Pinto }
24144854ab99SJoao Pinto 
24159125cdd1SGiuseppe CAVALLARO /**
24166a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
24176a3a7193SJoao Pinto  *  @priv: driver private structure
24186a3a7193SJoao Pinto  *  Description: It is used for setting the TX queue weights
24196a3a7193SJoao Pinto  */
24206a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
24216a3a7193SJoao Pinto {
24226a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
24236a3a7193SJoao Pinto 	u32 weight;
24246a3a7193SJoao Pinto 	u32 queue;
24256a3a7193SJoao Pinto 
24266a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
24276a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
2428c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
24296a3a7193SJoao Pinto 	}
24306a3a7193SJoao Pinto }
24316a3a7193SJoao Pinto 
24326a3a7193SJoao Pinto /**
243319d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
243419d91873SJoao Pinto  *  @priv: driver private structure
243519d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
243619d91873SJoao Pinto  */
243719d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
243819d91873SJoao Pinto {
243919d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
244019d91873SJoao Pinto 	u32 mode_to_use;
244119d91873SJoao Pinto 	u32 queue;
244219d91873SJoao Pinto 
244344781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
244444781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
244519d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
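		/* CBS only applies to AVB queues: skip queues left in DCB mode */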
244619d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
244719d91873SJoao Pinto 			continue;
244819d91873SJoao Pinto 
2449c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
245019d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
245119d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
245219d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
245319d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
245419d91873SJoao Pinto 				queue);
245519d91873SJoao Pinto 	}
245619d91873SJoao Pinto }
245719d91873SJoao Pinto 
245819d91873SJoao Pinto /**
2459d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2460d43042f4SJoao Pinto  *  @priv: driver private structure
2461d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
2462d43042f4SJoao Pinto  */
2463d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2464d43042f4SJoao Pinto {
2465d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2466d43042f4SJoao Pinto 	u32 queue;
2467d43042f4SJoao Pinto 	u32 chan;
2468d43042f4SJoao Pinto 
2469d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2470d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
2471c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2472d43042f4SJoao Pinto 	}
2473d43042f4SJoao Pinto }
2474d43042f4SJoao Pinto 
2475d43042f4SJoao Pinto /**
2476a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2477a8f5102aSJoao Pinto  *  @priv: driver private structure
2478a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
2479a8f5102aSJoao Pinto  */
2480a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2481a8f5102aSJoao Pinto {
2482a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2483a8f5102aSJoao Pinto 	u32 queue;
2484a8f5102aSJoao Pinto 	u32 prio;
2485a8f5102aSJoao Pinto 
2486a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2487a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2488a8f5102aSJoao Pinto 			continue;
2489a8f5102aSJoao Pinto 
2490a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
2491c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2492a8f5102aSJoao Pinto 	}
2493a8f5102aSJoao Pinto }
2494a8f5102aSJoao Pinto 
2495a8f5102aSJoao Pinto /**
2496a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2497a8f5102aSJoao Pinto  *  @priv: driver private structure
2498a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
2499a8f5102aSJoao Pinto  */
2500a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2501a8f5102aSJoao Pinto {
2502a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2503a8f5102aSJoao Pinto 	u32 queue;
2504a8f5102aSJoao Pinto 	u32 prio;
2505a8f5102aSJoao Pinto 
2506a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
2507a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2508a8f5102aSJoao Pinto 			continue;
2509a8f5102aSJoao Pinto 
2510a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
2511c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2512a8f5102aSJoao Pinto 	}
2513a8f5102aSJoao Pinto }
2514a8f5102aSJoao Pinto 
2515a8f5102aSJoao Pinto /**
2516abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2517abe80fdcSJoao Pinto  *  @priv: driver private structure
2518abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
2519abe80fdcSJoao Pinto  */
2520abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2521abe80fdcSJoao Pinto {
2522abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2523abe80fdcSJoao Pinto 	u32 queue;
2524abe80fdcSJoao Pinto 	u8 packet;
2525abe80fdcSJoao Pinto 
2526abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2527abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
2528abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2529abe80fdcSJoao Pinto 			continue;
2530abe80fdcSJoao Pinto 
2531abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2532c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2533abe80fdcSJoao Pinto 	}
2534abe80fdcSJoao Pinto }
2535abe80fdcSJoao Pinto 
253676067459SJose Abreu static void stmmac_mac_config_rss(struct stmmac_priv *priv)
253776067459SJose Abreu {
253876067459SJose Abreu 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
253976067459SJose Abreu 		priv->rss.enable = false;
254076067459SJose Abreu 		return;
254176067459SJose Abreu 	}
254276067459SJose Abreu 
254376067459SJose Abreu 	if (priv->dev->features & NETIF_F_RXHASH)
254476067459SJose Abreu 		priv->rss.enable = true;
254576067459SJose Abreu 	else
254676067459SJose Abreu 		priv->rss.enable = false;
254776067459SJose Abreu 
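	/* Apply the resulting RSS state to all RX queues in use */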
254876067459SJose Abreu 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
254976067459SJose Abreu 			     priv->plat->rx_queues_to_use);
255076067459SJose Abreu }
255176067459SJose Abreu 
2552abe80fdcSJoao Pinto /**
2553d0a9c9f9SJoao Pinto  *  stmmac_mtl_configuration - Configure MTL
2554d0a9c9f9SJoao Pinto  *  @priv: driver private structure
2555d0a9c9f9SJoao Pinto  *  Description: It is used for configuring MTL
2556d0a9c9f9SJoao Pinto  */
2557d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2558d0a9c9f9SJoao Pinto {
2559d0a9c9f9SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2560d0a9c9f9SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2561d0a9c9f9SJoao Pinto 
2562c10d4c82SJose Abreu 	if (tx_queues_count > 1)
25636a3a7193SJoao Pinto 		stmmac_set_tx_queue_weight(priv);
25646a3a7193SJoao Pinto 
2565d0a9c9f9SJoao Pinto 	/* Configure MTL RX algorithms */
2566c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2567c10d4c82SJose Abreu 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2568d0a9c9f9SJoao Pinto 				priv->plat->rx_sched_algorithm);
2569d0a9c9f9SJoao Pinto 
2570d0a9c9f9SJoao Pinto 	/* Configure MTL TX algorithms */
2571c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2572c10d4c82SJose Abreu 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2573d0a9c9f9SJoao Pinto 				priv->plat->tx_sched_algorithm);
2574d0a9c9f9SJoao Pinto 
257519d91873SJoao Pinto 	/* Configure CBS in AVB TX queues */
2576c10d4c82SJose Abreu 	if (tx_queues_count > 1)
257719d91873SJoao Pinto 		stmmac_configure_cbs(priv);
257819d91873SJoao Pinto 
2579d43042f4SJoao Pinto 	/* Map RX MTL to DMA channels */
2580d43042f4SJoao Pinto 	stmmac_rx_queue_dma_chan_map(priv);
2581d43042f4SJoao Pinto 
2582d0a9c9f9SJoao Pinto 	/* Enable MAC RX Queues */
2583d0a9c9f9SJoao Pinto 	stmmac_mac_enable_rx_queues(priv);
25846deee222SJoao Pinto 
2585a8f5102aSJoao Pinto 	/* Set RX priorities */
2586c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2587a8f5102aSJoao Pinto 		stmmac_mac_config_rx_queues_prio(priv);
2588a8f5102aSJoao Pinto 
2589a8f5102aSJoao Pinto 	/* Set TX priorities */
2590c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2591a8f5102aSJoao Pinto 		stmmac_mac_config_tx_queues_prio(priv);
2592abe80fdcSJoao Pinto 
2593abe80fdcSJoao Pinto 	/* Set RX routing */
2594c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2595abe80fdcSJoao Pinto 		stmmac_mac_config_rx_queues_routing(priv);
259676067459SJose Abreu 
259776067459SJose Abreu 	/* Receive Side Scaling */
259876067459SJose Abreu 	if (rx_queues_count > 1)
259976067459SJose Abreu 		stmmac_mac_config_rss(priv);
2600d0a9c9f9SJoao Pinto }
2601d0a9c9f9SJoao Pinto 
26028bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
26038bf993a5SJose Abreu {
2604c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
26058bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
2606c10d4c82SJose Abreu 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
26078bf993a5SJose Abreu 	} else {
26088bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
26098bf993a5SJose Abreu 	}
26108bf993a5SJose Abreu }
26118bf993a5SJose Abreu 
2612d0a9c9f9SJoao Pinto /**
2613732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_setup - setup mac in a usable state.
2614523f11b5SSrinivas Kandagatla  *  @dev : pointer to the device structure.
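 *  @init_ptp : if set, enable the PTP reference clock and initialize timestamping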
2615523f11b5SSrinivas Kandagatla  *  Description:
2616732fdf0eSGiuseppe CAVALLARO  *  this is the main function to setup the HW in a usable state: the
2617732fdf0eSGiuseppe CAVALLARO  *  dma engine is reset, the core registers are configured (e.g. AXI,
2618732fdf0eSGiuseppe CAVALLARO  *  Checksum features, timers) and the DMA is ready to start receiving
2619732fdf0eSGiuseppe CAVALLARO  *  and transmitting.
2620523f11b5SSrinivas Kandagatla  *  Return value:
2621523f11b5SSrinivas Kandagatla  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2622523f11b5SSrinivas Kandagatla  *  file on failure.
2623523f11b5SSrinivas Kandagatla  */
2624fe131929SHuacai Chen static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2625523f11b5SSrinivas Kandagatla {
2626523f11b5SSrinivas Kandagatla 	struct stmmac_priv *priv = netdev_priv(dev);
26273c55d4d0SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2628146617b8SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2629146617b8SJoao Pinto 	u32 chan;
2630523f11b5SSrinivas Kandagatla 	int ret;
2631523f11b5SSrinivas Kandagatla 
2632523f11b5SSrinivas Kandagatla 	/* DMA initialization and SW reset */
2633523f11b5SSrinivas Kandagatla 	ret = stmmac_init_dma_engine(priv);
2634523f11b5SSrinivas Kandagatla 	if (ret < 0) {
263538ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
263638ddc59dSLABBE Corentin 			   __func__);
2637523f11b5SSrinivas Kandagatla 		return ret;
2638523f11b5SSrinivas Kandagatla 	}
2639523f11b5SSrinivas Kandagatla 
2640523f11b5SSrinivas Kandagatla 	/* Copy the MAC addr into the HW  */
2641c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2642523f11b5SSrinivas Kandagatla 
264302e57b9dSGiuseppe CAVALLARO 	/* PS and related bits will be programmed according to the speed */
264402e57b9dSGiuseppe CAVALLARO 	if (priv->hw->pcs) {
264502e57b9dSGiuseppe CAVALLARO 		int speed = priv->plat->mac_port_sel_speed;
264602e57b9dSGiuseppe CAVALLARO 
264702e57b9dSGiuseppe CAVALLARO 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
264802e57b9dSGiuseppe CAVALLARO 		    (speed == SPEED_1000)) {
264902e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = speed;
265002e57b9dSGiuseppe CAVALLARO 		} else {
265102e57b9dSGiuseppe CAVALLARO 			dev_warn(priv->device, "invalid port speed\n");
265202e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = 0;
265302e57b9dSGiuseppe CAVALLARO 		}
265402e57b9dSGiuseppe CAVALLARO 	}
265502e57b9dSGiuseppe CAVALLARO 
2656523f11b5SSrinivas Kandagatla 	/* Initialize the MAC Core */
2657c10d4c82SJose Abreu 	stmmac_core_init(priv, priv->hw, dev);
2658523f11b5SSrinivas Kandagatla 
2659d0a9c9f9SJoao Pinto 	/* Initialize MTL*/
2660d0a9c9f9SJoao Pinto 	stmmac_mtl_configuration(priv);
26619eb12474Sjpinto 
26628bf993a5SJose Abreu 	/* Initialize Safety Features */
26638bf993a5SJose Abreu 	stmmac_safety_feat_configuration(priv);
26648bf993a5SJose Abreu 
2665c10d4c82SJose Abreu 	ret = stmmac_rx_ipc(priv, priv->hw);
2666978aded4SGiuseppe CAVALLARO 	if (!ret) {
266738ddc59dSLABBE Corentin 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2668978aded4SGiuseppe CAVALLARO 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2669d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
2670978aded4SGiuseppe CAVALLARO 	}
2671978aded4SGiuseppe CAVALLARO 
2672523f11b5SSrinivas Kandagatla 	/* Enable the MAC Rx/Tx */
2673c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
2674523f11b5SSrinivas Kandagatla 
2675b4f0a661SJoao Pinto 	/* Set the HW DMA mode and the COE */
2676b4f0a661SJoao Pinto 	stmmac_dma_operation_mode(priv);
2677b4f0a661SJoao Pinto 
2678523f11b5SSrinivas Kandagatla 	stmmac_mmc_setup(priv);
2679523f11b5SSrinivas Kandagatla 
2680fe131929SHuacai Chen 	if (init_ptp) {
26810ad2be79SThierry Reding 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
26820ad2be79SThierry Reding 		if (ret < 0)
26830ad2be79SThierry Reding 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
26840ad2be79SThierry Reding 
2685523f11b5SSrinivas Kandagatla 		ret = stmmac_init_ptp(priv);
2686722eef28SHeiner Kallweit 		if (ret == -EOPNOTSUPP)
2687722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2688722eef28SHeiner Kallweit 		else if (ret)
2689722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP init failed\n");
2690fe131929SHuacai Chen 	}
2691523f11b5SSrinivas Kandagatla 
2692523f11b5SSrinivas Kandagatla 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2693523f11b5SSrinivas Kandagatla 
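	/* Program the RX interrupt watchdog (RX coalescing), using the
	 * default value when none has been configured yet.
	 */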
2694a4e887faSJose Abreu 	if (priv->use_riwt) {
26954e4337ccSJose Abreu 		if (!priv->rx_riwt)
26964e4337ccSJose Abreu 			priv->rx_riwt = DEF_DMA_RIWT;
26974e4337ccSJose Abreu 
26984e4337ccSJose Abreu 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
2699523f11b5SSrinivas Kandagatla 	}
2700523f11b5SSrinivas Kandagatla 
2701c10d4c82SJose Abreu 	if (priv->hw->pcs)
2702c9ad4c10SBen Dooks (Codethink) 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
2703523f11b5SSrinivas Kandagatla 
27044854ab99SJoao Pinto 	/* set TX and RX rings length */
27054854ab99SJoao Pinto 	stmmac_set_rings_length(priv);
27064854ab99SJoao Pinto 
2707f748be53SAlexandre TORGUE 	/* Enable TSO */
2708146617b8SJoao Pinto 	if (priv->tso) {
2709146617b8SJoao Pinto 		for (chan = 0; chan < tx_cnt; chan++)
2710a4e887faSJose Abreu 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2711146617b8SJoao Pinto 	}
2712f748be53SAlexandre TORGUE 
271367afd6d1SJose Abreu 	/* Enable Split Header */
271467afd6d1SJose Abreu 	if (priv->sph && priv->hw->rx_csum) {
271567afd6d1SJose Abreu 		for (chan = 0; chan < rx_cnt; chan++)
271667afd6d1SJose Abreu 			stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
271767afd6d1SJose Abreu 	}
271867afd6d1SJose Abreu 
271930d93227SJose Abreu 	/* VLAN Tag Insertion */
272030d93227SJose Abreu 	if (priv->dma_cap.vlins)
272130d93227SJose Abreu 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
272230d93227SJose Abreu 
2723579a25a8SJose Abreu 	/* TBS */
2724579a25a8SJose Abreu 	for (chan = 0; chan < tx_cnt; chan++) {
2725579a25a8SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2726579a25a8SJose Abreu 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2727579a25a8SJose Abreu 
2728579a25a8SJose Abreu 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2729579a25a8SJose Abreu 	}
2730579a25a8SJose Abreu 
27317d9e6c5aSJose Abreu 	/* Start the ball rolling... */
27327d9e6c5aSJose Abreu 	stmmac_start_all_dma(priv);
27337d9e6c5aSJose Abreu 
2734523f11b5SSrinivas Kandagatla 	return 0;
2735523f11b5SSrinivas Kandagatla }
2736523f11b5SSrinivas Kandagatla 
2737c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev)
2738c66f6c37SThierry Reding {
2739c66f6c37SThierry Reding 	struct stmmac_priv *priv = netdev_priv(dev);
2740c66f6c37SThierry Reding 
2741c66f6c37SThierry Reding 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2742c66f6c37SThierry Reding }
2743c66f6c37SThierry Reding 
2744523f11b5SSrinivas Kandagatla /**
27457ac6653aSJeff Kirsher  *  stmmac_open - open entry point of the driver
27467ac6653aSJeff Kirsher  *  @dev : pointer to the device structure.
27477ac6653aSJeff Kirsher  *  Description:
27487ac6653aSJeff Kirsher  *  This function is the open entry point of the driver.
27497ac6653aSJeff Kirsher  *  Return value:
27507ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
27517ac6653aSJeff Kirsher  *  file on failure.
27527ac6653aSJeff Kirsher  */
27537ac6653aSJeff Kirsher static int stmmac_open(struct net_device *dev)
27547ac6653aSJeff Kirsher {
27557ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
27565d626c87SJose Abreu 	int bfsize = 0;
27578fce3331SJose Abreu 	u32 chan;
27587ac6653aSJeff Kirsher 	int ret;
27597ac6653aSJeff Kirsher 
2760a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
2761f213bbe8SJose Abreu 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
2762f213bbe8SJose Abreu 	    priv->hw->xpcs == NULL) {
27637ac6653aSJeff Kirsher 		ret = stmmac_init_phy(dev);
2764e58bb43fSGiuseppe CAVALLARO 		if (ret) {
276538ddc59dSLABBE Corentin 			netdev_err(priv->dev,
276638ddc59dSLABBE Corentin 				   "%s: Cannot attach to PHY (error: %d)\n",
2767e58bb43fSGiuseppe CAVALLARO 				   __func__, ret);
276889df20d9SHans de Goede 			return ret;
27697ac6653aSJeff Kirsher 		}
2770e58bb43fSGiuseppe CAVALLARO 	}
27717ac6653aSJeff Kirsher 
2772523f11b5SSrinivas Kandagatla 	/* Extra statistics */
2773523f11b5SSrinivas Kandagatla 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2774523f11b5SSrinivas Kandagatla 	priv->xstats.threshold = tc;
2775523f11b5SSrinivas Kandagatla 
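	/* Pick the RX buffer size from the MTU: 16KiB buffers when the ring
	 * mode requires them, otherwise a smaller size derived from the MTU.
	 */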
27765d626c87SJose Abreu 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
27775d626c87SJose Abreu 	if (bfsize < 0)
27785d626c87SJose Abreu 		bfsize = 0;
27795d626c87SJose Abreu 
27805d626c87SJose Abreu 	if (bfsize < BUF_SIZE_16KiB)
27815d626c87SJose Abreu 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
27825d626c87SJose Abreu 
27835d626c87SJose Abreu 	priv->dma_buf_sz = bfsize;
27845d626c87SJose Abreu 	buf_sz = bfsize;
27855d626c87SJose Abreu 
278622ad3838SGiuseppe Cavallaro 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
278756329137SBartlomiej Zolnierkiewicz 
2788579a25a8SJose Abreu 	/* Earlier check for TBS */
2789579a25a8SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
2790579a25a8SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2791579a25a8SJose Abreu 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
2792579a25a8SJose Abreu 
2793579a25a8SJose Abreu 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
2794579a25a8SJose Abreu 		if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
2795579a25a8SJose Abreu 			tx_q->tbs &= ~STMMAC_TBS_AVAIL;
2796579a25a8SJose Abreu 	}
2797579a25a8SJose Abreu 
27985bacd778SLABBE Corentin 	ret = alloc_dma_desc_resources(priv);
27995bacd778SLABBE Corentin 	if (ret < 0) {
28005bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
28015bacd778SLABBE Corentin 			   __func__);
28025bacd778SLABBE Corentin 		goto dma_desc_error;
28035bacd778SLABBE Corentin 	}
28045bacd778SLABBE Corentin 
28055bacd778SLABBE Corentin 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
28065bacd778SLABBE Corentin 	if (ret < 0) {
28075bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
28085bacd778SLABBE Corentin 			   __func__);
28095bacd778SLABBE Corentin 		goto init_error;
28105bacd778SLABBE Corentin 	}
28115bacd778SLABBE Corentin 
2812fe131929SHuacai Chen 	ret = stmmac_hw_setup(dev, true);
281356329137SBartlomiej Zolnierkiewicz 	if (ret < 0) {
281438ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2815c9324d18SGiuseppe CAVALLARO 		goto init_error;
28167ac6653aSJeff Kirsher 	}
28177ac6653aSJeff Kirsher 
2818d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
2819777da230SGiuseppe CAVALLARO 
282074371272SJose Abreu 	phylink_start(priv->phylink);
28217ac6653aSJeff Kirsher 
28227ac6653aSJeff Kirsher 	/* Request the IRQ lines */
28237ac6653aSJeff Kirsher 	ret = request_irq(dev->irq, stmmac_interrupt,
28247ac6653aSJeff Kirsher 			  IRQF_SHARED, dev->name, dev);
28257ac6653aSJeff Kirsher 	if (unlikely(ret < 0)) {
282638ddc59dSLABBE Corentin 		netdev_err(priv->dev,
282738ddc59dSLABBE Corentin 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
28287ac6653aSJeff Kirsher 			   __func__, dev->irq, ret);
28296c1e5abeSThierry Reding 		goto irq_error;
28307ac6653aSJeff Kirsher 	}
28317ac6653aSJeff Kirsher 
28327a13f8f5SFrancesco Virlinzi 	/* Request the Wake IRQ in case of another line is used for WoL */
28337a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq) {
28347a13f8f5SFrancesco Virlinzi 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
28357a13f8f5SFrancesco Virlinzi 				  IRQF_SHARED, dev->name, dev);
28367a13f8f5SFrancesco Virlinzi 		if (unlikely(ret < 0)) {
283738ddc59dSLABBE Corentin 			netdev_err(priv->dev,
283838ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2839ceb69499SGiuseppe CAVALLARO 				   __func__, priv->wol_irq, ret);
2840c9324d18SGiuseppe CAVALLARO 			goto wolirq_error;
28417a13f8f5SFrancesco Virlinzi 		}
28427a13f8f5SFrancesco Virlinzi 	}
28437a13f8f5SFrancesco Virlinzi 
2844d765955dSGiuseppe CAVALLARO 	/* Request the IRQ lines */
2845d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0) {
2846d765955dSGiuseppe CAVALLARO 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2847d765955dSGiuseppe CAVALLARO 				  dev->name, dev);
2848d765955dSGiuseppe CAVALLARO 		if (unlikely(ret < 0)) {
284938ddc59dSLABBE Corentin 			netdev_err(priv->dev,
285038ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2851d765955dSGiuseppe CAVALLARO 				   __func__, priv->lpi_irq, ret);
2852c9324d18SGiuseppe CAVALLARO 			goto lpiirq_error;
2853d765955dSGiuseppe CAVALLARO 		}
2854d765955dSGiuseppe CAVALLARO 	}
2855d765955dSGiuseppe CAVALLARO 
2856c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
2857c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
28587ac6653aSJeff Kirsher 
28597ac6653aSJeff Kirsher 	return 0;
28607ac6653aSJeff Kirsher 
2861c9324d18SGiuseppe CAVALLARO lpiirq_error:
2862d765955dSGiuseppe CAVALLARO 	if (priv->wol_irq != dev->irq)
2863d765955dSGiuseppe CAVALLARO 		free_irq(priv->wol_irq, dev);
2864c9324d18SGiuseppe CAVALLARO wolirq_error:
28657a13f8f5SFrancesco Virlinzi 	free_irq(dev->irq, dev);
28666c1e5abeSThierry Reding irq_error:
286774371272SJose Abreu 	phylink_stop(priv->phylink);
28687a13f8f5SFrancesco Virlinzi 
28698fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
28708fce3331SJose Abreu 		del_timer_sync(&priv->tx_queue[chan].txtimer);
28718fce3331SJose Abreu 
2872c66f6c37SThierry Reding 	stmmac_hw_teardown(dev);
2873c9324d18SGiuseppe CAVALLARO init_error:
2874c9324d18SGiuseppe CAVALLARO 	free_dma_desc_resources(priv);
28755bacd778SLABBE Corentin dma_desc_error:
287674371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
28777ac6653aSJeff Kirsher 	return ret;
28787ac6653aSJeff Kirsher }
28797ac6653aSJeff Kirsher 
28807ac6653aSJeff Kirsher /**
28817ac6653aSJeff Kirsher  *  stmmac_release - close entry point of the driver
28827ac6653aSJeff Kirsher  *  @dev : device pointer.
28837ac6653aSJeff Kirsher  *  Description:
28847ac6653aSJeff Kirsher  *  This is the stop entry point of the driver.
28857ac6653aSJeff Kirsher  */
28867ac6653aSJeff Kirsher static int stmmac_release(struct net_device *dev)
28877ac6653aSJeff Kirsher {
28887ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
28898fce3331SJose Abreu 	u32 chan;
28907ac6653aSJeff Kirsher 
2891d765955dSGiuseppe CAVALLARO 	if (priv->eee_enabled)
2892d765955dSGiuseppe CAVALLARO 		del_timer_sync(&priv->eee_ctrl_timer);
2893d765955dSGiuseppe CAVALLARO 
28947ac6653aSJeff Kirsher 	/* Stop and disconnect the PHY */
289574371272SJose Abreu 	phylink_stop(priv->phylink);
289674371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
28977ac6653aSJeff Kirsher 
2898c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
28997ac6653aSJeff Kirsher 
2900c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
29017ac6653aSJeff Kirsher 
29028fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
29038fce3331SJose Abreu 		del_timer_sync(&priv->tx_queue[chan].txtimer);
29049125cdd1SGiuseppe CAVALLARO 
29057ac6653aSJeff Kirsher 	/* Free the IRQ lines */
29067ac6653aSJeff Kirsher 	free_irq(dev->irq, dev);
29077a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq)
29087a13f8f5SFrancesco Virlinzi 		free_irq(priv->wol_irq, dev);
2909d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0)
2910d765955dSGiuseppe CAVALLARO 		free_irq(priv->lpi_irq, dev);
29117ac6653aSJeff Kirsher 
29127ac6653aSJeff Kirsher 	/* Stop TX/RX DMA and clear the descriptors */
2913ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
29147ac6653aSJeff Kirsher 
29157ac6653aSJeff Kirsher 	/* Release and free the Rx/Tx resources */
29167ac6653aSJeff Kirsher 	free_dma_desc_resources(priv);
29177ac6653aSJeff Kirsher 
29187ac6653aSJeff Kirsher 	/* Disable the MAC Rx/Tx */
2919c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
29207ac6653aSJeff Kirsher 
29217ac6653aSJeff Kirsher 	netif_carrier_off(dev);
29227ac6653aSJeff Kirsher 
292392ba6888SRayagond Kokatanur 	stmmac_release_ptp(priv);
292492ba6888SRayagond Kokatanur 
29257ac6653aSJeff Kirsher 	return 0;
29267ac6653aSJeff Kirsher }
29277ac6653aSJeff Kirsher 
292830d93227SJose Abreu static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
292930d93227SJose Abreu 			       struct stmmac_tx_queue *tx_q)
293030d93227SJose Abreu {
293130d93227SJose Abreu 	u16 tag = 0x0, inner_tag = 0x0;
293230d93227SJose Abreu 	u32 inner_type = 0x0;
293330d93227SJose Abreu 	struct dma_desc *p;
293430d93227SJose Abreu 
293530d93227SJose Abreu 	if (!priv->dma_cap.vlins)
293630d93227SJose Abreu 		return false;
293730d93227SJose Abreu 	if (!skb_vlan_tag_present(skb))
293830d93227SJose Abreu 		return false;
293930d93227SJose Abreu 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
294030d93227SJose Abreu 		inner_tag = skb_vlan_tag_get(skb);
294130d93227SJose Abreu 		inner_type = STMMAC_VLAN_INSERT;
294230d93227SJose Abreu 	}
294330d93227SJose Abreu 
294430d93227SJose Abreu 	tag = skb_vlan_tag_get(skb);
294530d93227SJose Abreu 
2946579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
2947579a25a8SJose Abreu 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
2948579a25a8SJose Abreu 	else
2949579a25a8SJose Abreu 		p = &tx_q->dma_tx[tx_q->cur_tx];
2950579a25a8SJose Abreu 
295130d93227SJose Abreu 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
295230d93227SJose Abreu 		return false;
295330d93227SJose Abreu 
295430d93227SJose Abreu 	stmmac_set_tx_owner(priv, p);
295530d93227SJose Abreu 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
295630d93227SJose Abreu 	return true;
295730d93227SJose Abreu }
295830d93227SJose Abreu 
29597ac6653aSJeff Kirsher /**
2960f748be53SAlexandre TORGUE  *  stmmac_tso_allocator - fill the TX descriptors for a TSO payload
2961f748be53SAlexandre TORGUE  *  @priv: driver private structure
2962f748be53SAlexandre TORGUE  *  @des: buffer start address
2963f748be53SAlexandre TORGUE  *  @total_len: total length to fill in descriptors
2964f748be53SAlexandre TORGUE  *  @last_segment: condition for the last descriptor
2965ce736788SJoao Pinto  *  @queue: TX queue index
2966f748be53SAlexandre TORGUE  *  Description:
2967f748be53SAlexandre TORGUE  *  This function fills descriptors and requests new descriptors according to
2968f748be53SAlexandre TORGUE  *  the buffer length to fill
2969f748be53SAlexandre TORGUE  */
2970a993db88SJose Abreu static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
2971ce736788SJoao Pinto 				 int total_len, bool last_segment, u32 queue)
2972f748be53SAlexandre TORGUE {
2973ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2974f748be53SAlexandre TORGUE 	struct dma_desc *desc;
29755bacd778SLABBE Corentin 	u32 buff_size;
2976ce736788SJoao Pinto 	int tmp_len;
2977f748be53SAlexandre TORGUE 
2978f748be53SAlexandre TORGUE 	tmp_len = total_len;
2979f748be53SAlexandre TORGUE 
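	/* Split the payload across as many descriptors as needed, each
	 * carrying at most TSO_MAX_BUFF_SIZE bytes.
	 */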
2980f748be53SAlexandre TORGUE 	while (tmp_len > 0) {
2981a993db88SJose Abreu 		dma_addr_t curr_addr;
2982a993db88SJose Abreu 
2983ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2984b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2985579a25a8SJose Abreu 
2986579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
2987579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
2988579a25a8SJose Abreu 		else
2989579a25a8SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
2990f748be53SAlexandre TORGUE 
2991a993db88SJose Abreu 		curr_addr = des + (total_len - tmp_len);
2992a993db88SJose Abreu 		if (priv->dma_cap.addr64 <= 32)
2993a993db88SJose Abreu 			desc->des0 = cpu_to_le32(curr_addr);
2994a993db88SJose Abreu 		else
2995a993db88SJose Abreu 			stmmac_set_desc_addr(priv, desc, curr_addr);
2996a993db88SJose Abreu 
2997f748be53SAlexandre TORGUE 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2998f748be53SAlexandre TORGUE 			    TSO_MAX_BUFF_SIZE : tmp_len;
2999f748be53SAlexandre TORGUE 
300042de047dSJose Abreu 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3001f748be53SAlexandre TORGUE 				0, 1,
3002426849e6SNiklas Cassel 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3003f748be53SAlexandre TORGUE 				0, 0);
3004f748be53SAlexandre TORGUE 
3005f748be53SAlexandre TORGUE 		tmp_len -= TSO_MAX_BUFF_SIZE;
3006f748be53SAlexandre TORGUE 	}
3007f748be53SAlexandre TORGUE }
3008f748be53SAlexandre TORGUE 
3009f748be53SAlexandre TORGUE /**
3010f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3011f748be53SAlexandre TORGUE  *  @skb : the socket buffer
3012f748be53SAlexandre TORGUE  *  @dev : device pointer
3013f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
3014f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
3015f748be53SAlexandre TORGUE  *  The diagram below shows the ring programming in the case of TSO frames:
3016f748be53SAlexandre TORGUE  *
3017f748be53SAlexandre TORGUE  *  First Descriptor
3018f748be53SAlexandre TORGUE  *   --------
3019f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
3020f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
3021f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
3022f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3023f748be53SAlexandre TORGUE  *   --------
3024f748be53SAlexandre TORGUE  *	|
3025f748be53SAlexandre TORGUE  *     ...
3026f748be53SAlexandre TORGUE  *	|
3027f748be53SAlexandre TORGUE  *   --------
3028f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3029f748be53SAlexandre TORGUE  *   | DES1 | --|
3030f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
3031f748be53SAlexandre TORGUE  *   | DES3 |
3032f748be53SAlexandre TORGUE  *   --------
3033f748be53SAlexandre TORGUE  *
3034f748be53SAlexandre TORGUE  * mss is fixed when TSO is enabled, so there is no need to program the TDES3 ctx field.
3035f748be53SAlexandre TORGUE  */
3036f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3037f748be53SAlexandre TORGUE {
3038ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
3039f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
3040579a25a8SJose Abreu 	int desc_size, tmp_pay_len = 0, first_tx;
3041f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
3042ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
3043c2837423SJose Abreu 	unsigned int first_entry, tx_packets;
3044ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
3045c2837423SJose Abreu 	bool has_vlan, set_ic;
3046579a25a8SJose Abreu 	u8 proto_hdr_len, hdr;
3047ce736788SJoao Pinto 	u32 pay_len, mss;
3048a993db88SJose Abreu 	dma_addr_t des;
3049f748be53SAlexandre TORGUE 	int i;
3050f748be53SAlexandre TORGUE 
3051ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
3052c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
3053ce736788SJoao Pinto 
3054f748be53SAlexandre TORGUE 	/* Compute header lengths */
3055b7766206SJose Abreu 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3056b7766206SJose Abreu 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3057b7766206SJose Abreu 		hdr = sizeof(struct udphdr);
3058b7766206SJose Abreu 	} else {
3059f748be53SAlexandre TORGUE 		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3060b7766206SJose Abreu 		hdr = tcp_hdrlen(skb);
3061b7766206SJose Abreu 	}
3062f748be53SAlexandre TORGUE 
3063f748be53SAlexandre TORGUE 	/* Desc availability based on threshold should be enough safe */
3064ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
3065f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3066c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3067c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3068c22a3f48SJoao Pinto 								queue));
3069f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
307038ddc59dSLABBE Corentin 			netdev_err(priv->dev,
307138ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
307238ddc59dSLABBE Corentin 				   __func__);
3073f748be53SAlexandre TORGUE 		}
3074f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
3075f748be53SAlexandre TORGUE 	}
3076f748be53SAlexandre TORGUE 
3077f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3078f748be53SAlexandre TORGUE 
3079f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
3080f748be53SAlexandre TORGUE 
3081f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
30828d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
3083579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3084579a25a8SJose Abreu 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3085579a25a8SJose Abreu 		else
3086579a25a8SJose Abreu 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3087579a25a8SJose Abreu 
308842de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
30898d212a9eSNiklas Cassel 		tx_q->mss = mss;
3090ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3091b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3092f748be53SAlexandre TORGUE 	}
3093f748be53SAlexandre TORGUE 
3094f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
3095b7766206SJose Abreu 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3096b7766206SJose Abreu 			__func__, hdr, proto_hdr_len, pay_len, mss);
3097f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3098f748be53SAlexandre TORGUE 			skb->data_len);
3099f748be53SAlexandre TORGUE 	}
3100f748be53SAlexandre TORGUE 
310130d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
310230d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
310330d93227SJose Abreu 
3104ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
3105b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3106f748be53SAlexandre TORGUE 
3107579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3108579a25a8SJose Abreu 		desc = &tx_q->dma_entx[first_entry].basic;
3109579a25a8SJose Abreu 	else
3110579a25a8SJose Abreu 		desc = &tx_q->dma_tx[first_entry];
3111f748be53SAlexandre TORGUE 	first = desc;
3112f748be53SAlexandre TORGUE 
311330d93227SJose Abreu 	if (has_vlan)
311430d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
311530d93227SJose Abreu 
3116f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
3117f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
3118f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
3119f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
3120f748be53SAlexandre TORGUE 		goto dma_map_err;
3121f748be53SAlexandre TORGUE 
3122ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
3123ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
3124f748be53SAlexandre TORGUE 
3125a993db88SJose Abreu 	if (priv->dma_cap.addr64 <= 32) {
3126f8be0d78SMichael Weiser 		first->des0 = cpu_to_le32(des);
3127f748be53SAlexandre TORGUE 
3128f748be53SAlexandre TORGUE 		/* Fill start of payload in buff2 of first descriptor */
3129f748be53SAlexandre TORGUE 		if (pay_len)
3130f8be0d78SMichael Weiser 			first->des1 = cpu_to_le32(des + proto_hdr_len);
3131f748be53SAlexandre TORGUE 
3132f748be53SAlexandre TORGUE 		/* If needed take extra descriptors to fill the remaining payload */
3133f748be53SAlexandre TORGUE 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3134a993db88SJose Abreu 	} else {
3135a993db88SJose Abreu 		stmmac_set_desc_addr(priv, first, des);
3136a993db88SJose Abreu 		tmp_pay_len = pay_len;
313734c15202Syuqi jin 		des += proto_hdr_len;
3138b2f07199SJose Abreu 		pay_len = 0;
3139a993db88SJose Abreu 	}
3140f748be53SAlexandre TORGUE 
3141ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
3142f748be53SAlexandre TORGUE 
3143f748be53SAlexandre TORGUE 	/* Prepare fragments */
3144f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
3145f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3146f748be53SAlexandre TORGUE 
3147f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
3148f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
3149f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
3150937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
3151937071c1SThierry Reding 			goto dma_map_err;
3152f748be53SAlexandre TORGUE 
3153f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3154ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
3155f748be53SAlexandre TORGUE 
3156ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3157ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3158ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3159f748be53SAlexandre TORGUE 	}
3160f748be53SAlexandre TORGUE 
3161ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3162f748be53SAlexandre TORGUE 
316305cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
316405cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
316505cf0d1bSNiklas Cassel 
31667df4a3a7SJose Abreu 	/* Manage tx mitigation */
3167c2837423SJose Abreu 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
3168c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
3169c2837423SJose Abreu 
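	/* Decide whether to request a TX completion interrupt for this
	 * packet: always when a HW timestamp is requested, never when
	 * tx_coal_frames is zero (cleanup then relies on the TX timer),
	 * and otherwise whenever this packet makes the running frame
	 * counter cross a multiple of tx_coal_frames.
	 */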
3170c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3171c2837423SJose Abreu 		set_ic = true;
3172c2837423SJose Abreu 	else if (!priv->tx_coal_frames)
3173c2837423SJose Abreu 		set_ic = false;
3174c2837423SJose Abreu 	else if (tx_packets > priv->tx_coal_frames)
3175c2837423SJose Abreu 		set_ic = true;
3176c2837423SJose Abreu 	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3177c2837423SJose Abreu 		set_ic = true;
3178c2837423SJose Abreu 	else
3179c2837423SJose Abreu 		set_ic = false;
3180c2837423SJose Abreu 
3181c2837423SJose Abreu 	if (set_ic) {
3182579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3183579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3184579a25a8SJose Abreu 		else
31857df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
3186579a25a8SJose Abreu 
31877df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
31887df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
31897df4a3a7SJose Abreu 		priv->xstats.tx_set_ic_bit++;
31907df4a3a7SJose Abreu 	}
31917df4a3a7SJose Abreu 
319205cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
319305cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
319405cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
319505cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
319605cf0d1bSNiklas Cassel 	 */
3197ce736788SJoao Pinto 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3198f748be53SAlexandre TORGUE 
3199ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3200b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
320138ddc59dSLABBE Corentin 			  __func__);
3202c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3203f748be53SAlexandre TORGUE 	}
3204f748be53SAlexandre TORGUE 
3205f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
3206f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
3207f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
3208f748be53SAlexandre TORGUE 
32098000ddc0SJose Abreu 	if (priv->sarc_type)
32108000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
32118000ddc0SJose Abreu 
3212f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
3213f748be53SAlexandre TORGUE 
3214f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3215f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
3216f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
3217f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
321842de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
3219f748be53SAlexandre TORGUE 	}
3220f748be53SAlexandre TORGUE 
3221f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
322242de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
3223f748be53SAlexandre TORGUE 			proto_hdr_len,
3224f748be53SAlexandre TORGUE 			pay_len,
3225ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3226b7766206SJose Abreu 			hdr / 4, (skb->len - proto_hdr_len));
3227f748be53SAlexandre TORGUE 
3228f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
322915d2ee42SNiklas Cassel 	if (mss_desc) {
323015d2ee42SNiklas Cassel 		/* Make sure that the first descriptor has been completely
323115d2ee42SNiklas Cassel 		 * written, including its own bit. This is because the MSS
323215d2ee42SNiklas Cassel 		 * context descriptor sits before the first descriptor, so the
323315d2ee42SNiklas Cassel 		 * MSS descriptor's own bit must be the last thing written.
323415d2ee42SNiklas Cassel 		 */
323515d2ee42SNiklas Cassel 		dma_wmb();
323642de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
323715d2ee42SNiklas Cassel 	}
3238f748be53SAlexandre TORGUE 
3239f748be53SAlexandre TORGUE 	/* The own bit must be the latest setting done when preparing the
3240f748be53SAlexandre TORGUE 	 * descriptor, and then a barrier is needed to make sure that
3241f748be53SAlexandre TORGUE 	 * all is coherent before granting the DMA engine.
3242f748be53SAlexandre TORGUE 	 */
324395eb930aSNiklas Cassel 	wmb();
3244f748be53SAlexandre TORGUE 
3245f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
3246f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3247ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3248ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
3249f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
3250f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
3251f748be53SAlexandre TORGUE 	}
3252f748be53SAlexandre TORGUE 
3253c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3254f748be53SAlexandre TORGUE 
3255579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3256579a25a8SJose Abreu 		desc_size = sizeof(struct dma_edesc);
3257579a25a8SJose Abreu 	else
3258579a25a8SJose Abreu 		desc_size = sizeof(struct dma_desc);
3259579a25a8SJose Abreu 
3260579a25a8SJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3261a4e887faSJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
32624772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
3263f748be53SAlexandre TORGUE 
3264f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3265f748be53SAlexandre TORGUE 
3266f748be53SAlexandre TORGUE dma_map_err:
3267f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
3268f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
3269f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
3270f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3271f748be53SAlexandre TORGUE }
3272f748be53SAlexandre TORGUE 
3273f748be53SAlexandre TORGUE /**
3274732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
32757ac6653aSJeff Kirsher  *  @skb : the socket buffer
32767ac6653aSJeff Kirsher  *  @dev : device pointer
327732ceabcaSGiuseppe CAVALLARO  *  Description : this is the tx entry point of the driver.
327832ceabcaSGiuseppe CAVALLARO  *  It programs the descriptor chain or ring and supports oversized
327932ceabcaSGiuseppe CAVALLARO  *  frames and the SG feature.
32807ac6653aSJeff Kirsher  */
32817ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
32827ac6653aSJeff Kirsher {
3283c2837423SJose Abreu 	unsigned int first_entry, tx_packets, enh_desc;
32847ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
32850e80bdc9SGiuseppe Cavallaro 	unsigned int nopaged_len = skb_headlen(skb);
32864a7d666aSGiuseppe CAVALLARO 	int i, csum_insertion = 0, is_jumbo = 0;
3287ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
32887ac6653aSJeff Kirsher 	int nfrags = skb_shinfo(skb)->nr_frags;
3289b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
3290579a25a8SJose Abreu 	struct dma_edesc *tbs_desc = NULL;
3291579a25a8SJose Abreu 	int entry, desc_size, first_tx;
32927ac6653aSJeff Kirsher 	struct dma_desc *desc, *first;
3293ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
3294c2837423SJose Abreu 	bool has_vlan, set_ic;
3295a993db88SJose Abreu 	dma_addr_t des;
3296f748be53SAlexandre TORGUE 
3297ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
3298c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
3299ce736788SJoao Pinto 
3300e2cd682dSJose Abreu 	if (priv->tx_path_in_lpi_mode)
3301e2cd682dSJose Abreu 		stmmac_disable_eee_mode(priv);
3302e2cd682dSJose Abreu 
3303f748be53SAlexandre TORGUE 	/* Manage oversized TCP frames (and UDP frames on GMAC4) via TSO/USO */
3304f748be53SAlexandre TORGUE 	if (skb_is_gso(skb) && priv->tso) {
3305b7766206SJose Abreu 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3306b7766206SJose Abreu 			return stmmac_tso_xmit(skb, dev);
3307b7766206SJose Abreu 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
3308f748be53SAlexandre TORGUE 			return stmmac_tso_xmit(skb, dev);
3309f748be53SAlexandre TORGUE 	}
33107ac6653aSJeff Kirsher 
3311ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3312c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3313c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3314c22a3f48SJoao Pinto 								queue));
33157ac6653aSJeff Kirsher 			/* This is a hard error, log it. */
331638ddc59dSLABBE Corentin 			netdev_err(priv->dev,
331738ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
331838ddc59dSLABBE Corentin 				   __func__);
33197ac6653aSJeff Kirsher 		}
33207ac6653aSJeff Kirsher 		return NETDEV_TX_BUSY;
33217ac6653aSJeff Kirsher 	}
33227ac6653aSJeff Kirsher 
332330d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
332430d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
332530d93227SJose Abreu 
3326ce736788SJoao Pinto 	entry = tx_q->cur_tx;
33270e80bdc9SGiuseppe Cavallaro 	first_entry = entry;
3328b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
33297ac6653aSJeff Kirsher 
33307ac6653aSJeff Kirsher 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
33317ac6653aSJeff Kirsher 
33320e80bdc9SGiuseppe Cavallaro 	if (likely(priv->extend_desc))
3333ce736788SJoao Pinto 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3334579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3335579a25a8SJose Abreu 		desc = &tx_q->dma_entx[entry].basic;
3336c24602efSGiuseppe CAVALLARO 	else
3337ce736788SJoao Pinto 		desc = tx_q->dma_tx + entry;
3338c24602efSGiuseppe CAVALLARO 
33397ac6653aSJeff Kirsher 	first = desc;
33407ac6653aSJeff Kirsher 
334130d93227SJose Abreu 	if (has_vlan)
334230d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
334330d93227SJose Abreu 
33440e80bdc9SGiuseppe Cavallaro 	enh_desc = priv->plat->enh_desc;
33454a7d666aSGiuseppe CAVALLARO 	/* To program the descriptors according to the size of the frame */
334629896a67SGiuseppe CAVALLARO 	if (enh_desc)
33472c520b1cSJose Abreu 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
334829896a67SGiuseppe CAVALLARO 
334963a550fcSJose Abreu 	if (unlikely(is_jumbo)) {
33502c520b1cSJose Abreu 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
335163a550fcSJose Abreu 		if (unlikely(entry < 0) && (entry != -EINVAL))
3352362b37beSGiuseppe CAVALLARO 			goto dma_map_err;
335329896a67SGiuseppe CAVALLARO 	}
33547ac6653aSJeff Kirsher 
33557ac6653aSJeff Kirsher 	for (i = 0; i < nfrags; i++) {
33569e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
33579e903e08SEric Dumazet 		int len = skb_frag_size(frag);
3358be434d50SGiuseppe Cavallaro 		bool last_segment = (i == (nfrags - 1));
33597ac6653aSJeff Kirsher 
3360e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3361b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[entry]);
3362e3ad57c9SGiuseppe Cavallaro 
33630e80bdc9SGiuseppe Cavallaro 		if (likely(priv->extend_desc))
3364ce736788SJoao Pinto 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3365579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3366579a25a8SJose Abreu 			desc = &tx_q->dma_entx[entry].basic;
3367c24602efSGiuseppe CAVALLARO 		else
3368ce736788SJoao Pinto 			desc = tx_q->dma_tx + entry;
33697ac6653aSJeff Kirsher 
3370f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3371f722380dSIan Campbell 				       DMA_TO_DEVICE);
3372f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
3373362b37beSGiuseppe CAVALLARO 			goto dma_map_err; /* should reuse desc w/o issues */
3374362b37beSGiuseppe CAVALLARO 
3375ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].buf = des;
33766844171dSJose Abreu 
33776844171dSJose Abreu 		stmmac_set_desc_addr(priv, desc, des);
3378f748be53SAlexandre TORGUE 
3379ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3380ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].len = len;
3381ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
33820e80bdc9SGiuseppe Cavallaro 
33830e80bdc9SGiuseppe Cavallaro 		/* Prepare the descriptor and set the own bit too */
338442de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
338542de047dSJose Abreu 				priv->mode, 1, last_segment, skb->len);
33867ac6653aSJeff Kirsher 	}
33877ac6653aSJeff Kirsher 
338805cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
338905cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[entry] = skb;
3390e3ad57c9SGiuseppe Cavallaro 
33917df4a3a7SJose Abreu 	/* According to the coalesce parameter the IC bit for the latest
33927df4a3a7SJose Abreu 	 * segment is reset and the timer re-started to clean the tx status.
33937df4a3a7SJose Abreu 	 * This approach takes care of the fragments: desc is the first
33947df4a3a7SJose Abreu 	 * element in case of no SG.
33957df4a3a7SJose Abreu 	 */
3396c2837423SJose Abreu 	tx_packets = (entry + 1) - first_tx;
3397c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
3398c2837423SJose Abreu 
3399c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3400c2837423SJose Abreu 		set_ic = true;
3401c2837423SJose Abreu 	else if (!priv->tx_coal_frames)
3402c2837423SJose Abreu 		set_ic = false;
3403c2837423SJose Abreu 	else if (tx_packets > priv->tx_coal_frames)
3404c2837423SJose Abreu 		set_ic = true;
3405c2837423SJose Abreu 	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3406c2837423SJose Abreu 		set_ic = true;
3407c2837423SJose Abreu 	else
3408c2837423SJose Abreu 		set_ic = false;
3409c2837423SJose Abreu 
3410c2837423SJose Abreu 	if (set_ic) {
34117df4a3a7SJose Abreu 		if (likely(priv->extend_desc))
34127df4a3a7SJose Abreu 			desc = &tx_q->dma_etx[entry].basic;
3413579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3414579a25a8SJose Abreu 			desc = &tx_q->dma_entx[entry].basic;
34157df4a3a7SJose Abreu 		else
34167df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[entry];
34177df4a3a7SJose Abreu 
34187df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
34197df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
34207df4a3a7SJose Abreu 		priv->xstats.tx_set_ic_bit++;
34217df4a3a7SJose Abreu 	}
34227df4a3a7SJose Abreu 
342305cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
342405cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
342505cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
342605cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
342705cf0d1bSNiklas Cassel 	 */
342805cf0d1bSNiklas Cassel 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3429ce736788SJoao Pinto 	tx_q->cur_tx = entry;
34307ac6653aSJeff Kirsher 
34317ac6653aSJeff Kirsher 	if (netif_msg_pktdata(priv)) {
343238ddc59dSLABBE Corentin 		netdev_dbg(priv->dev,
343338ddc59dSLABBE Corentin 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3434ce736788SJoao Pinto 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
34350e80bdc9SGiuseppe Cavallaro 			   entry, first, nfrags);
343683d7af64SGiuseppe CAVALLARO 
343738ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
34387ac6653aSJeff Kirsher 		print_pkt(skb->data, skb->len);
34397ac6653aSJeff Kirsher 	}
34400e80bdc9SGiuseppe Cavallaro 
3441ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3442b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
3443b3e51069SLABBE Corentin 			  __func__);
3444c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
34457ac6653aSJeff Kirsher 	}
34467ac6653aSJeff Kirsher 
34477ac6653aSJeff Kirsher 	dev->stats.tx_bytes += skb->len;
34487ac6653aSJeff Kirsher 
34498000ddc0SJose Abreu 	if (priv->sarc_type)
34508000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
34518000ddc0SJose Abreu 
34520e80bdc9SGiuseppe Cavallaro 	skb_tx_timestamp(skb);
34530e80bdc9SGiuseppe Cavallaro 
34540e80bdc9SGiuseppe Cavallaro 	/* Ready to fill the first descriptor and set the OWN bit w/o any
34550e80bdc9SGiuseppe Cavallaro 	 * problems because all the descriptors are actually ready to be
34560e80bdc9SGiuseppe Cavallaro 	 * passed to the DMA engine.
34570e80bdc9SGiuseppe Cavallaro 	 */
34580e80bdc9SGiuseppe Cavallaro 	if (likely(!is_jumbo)) {
34590e80bdc9SGiuseppe Cavallaro 		bool last_segment = (nfrags == 0);
34600e80bdc9SGiuseppe Cavallaro 
3461f748be53SAlexandre TORGUE 		des = dma_map_single(priv->device, skb->data,
34620e80bdc9SGiuseppe Cavallaro 				     nopaged_len, DMA_TO_DEVICE);
3463f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
34640e80bdc9SGiuseppe Cavallaro 			goto dma_map_err;
34650e80bdc9SGiuseppe Cavallaro 
3466ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].buf = des;
34676844171dSJose Abreu 
34686844171dSJose Abreu 		stmmac_set_desc_addr(priv, first, des);
3469f748be53SAlexandre TORGUE 
3470ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3471ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
34720e80bdc9SGiuseppe Cavallaro 
3473891434b1SRayagond Kokatanur 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3474891434b1SRayagond Kokatanur 			     priv->hwts_tx_en)) {
3475891434b1SRayagond Kokatanur 			/* declare that device is doing timestamping */
3476891434b1SRayagond Kokatanur 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
347742de047dSJose Abreu 			stmmac_enable_tx_timestamp(priv, first);
3478891434b1SRayagond Kokatanur 		}
3479891434b1SRayagond Kokatanur 
34800e80bdc9SGiuseppe Cavallaro 		/* Prepare the first descriptor setting the OWN bit too */
348142de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3482579a25a8SJose Abreu 				csum_insertion, priv->mode, 0, last_segment,
348342de047dSJose Abreu 				skb->len);
348480acbed9SAaro Koskinen 	}
34850e80bdc9SGiuseppe Cavallaro 
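	/* With TBS enabled, program the launch time taken from skb->tstamp
	 * into the enhanced transmit descriptor.
	 */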
3486579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_EN) {
3487579a25a8SJose Abreu 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
3488579a25a8SJose Abreu 
3489579a25a8SJose Abreu 		tbs_desc = &tx_q->dma_entx[first_entry];
3490579a25a8SJose Abreu 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
3491579a25a8SJose Abreu 	}
3492579a25a8SJose Abreu 
3493579a25a8SJose Abreu 	stmmac_set_tx_owner(priv, first);
3494579a25a8SJose Abreu 
34950e80bdc9SGiuseppe Cavallaro 	/* The own bit must be the latest setting done when preparing the
34960e80bdc9SGiuseppe Cavallaro 	 * descriptor, and then a barrier is needed to make sure that
34970e80bdc9SGiuseppe Cavallaro 	 * all is coherent before granting the DMA engine.
34980e80bdc9SGiuseppe Cavallaro 	 */
349995eb930aSNiklas Cassel 	wmb();
35007ac6653aSJeff Kirsher 
3501c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3502f748be53SAlexandre TORGUE 
3503a4e887faSJose Abreu 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
35048fce3331SJose Abreu 
3505579a25a8SJose Abreu 	if (likely(priv->extend_desc))
3506579a25a8SJose Abreu 		desc_size = sizeof(struct dma_extended_desc);
3507579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3508579a25a8SJose Abreu 		desc_size = sizeof(struct dma_edesc);
3509579a25a8SJose Abreu 	else
3510579a25a8SJose Abreu 		desc_size = sizeof(struct dma_desc);
3511579a25a8SJose Abreu 
3512579a25a8SJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3513f1565c60SJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
35144772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
35157ac6653aSJeff Kirsher 
3516362b37beSGiuseppe CAVALLARO 	return NETDEV_TX_OK;
3517a9097a96SGiuseppe CAVALLARO 
3518362b37beSGiuseppe CAVALLARO dma_map_err:
351938ddc59dSLABBE Corentin 	netdev_err(priv->dev, "Tx DMA map failed\n");
3520362b37beSGiuseppe CAVALLARO 	dev_kfree_skb(skb);
3521362b37beSGiuseppe CAVALLARO 	priv->dev->stats.tx_dropped++;
35227ac6653aSJeff Kirsher 	return NETDEV_TX_OK;
35237ac6653aSJeff Kirsher }
35247ac6653aSJeff Kirsher 
3525b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3526b9381985SVince Bridgers {
3527ab188e8fSElad Nachman 	struct vlan_ethhdr *veth;
3528ab188e8fSElad Nachman 	__be16 vlan_proto;
3529b9381985SVince Bridgers 	u16 vlanid;
3530b9381985SVince Bridgers 
3531ab188e8fSElad Nachman 	veth = (struct vlan_ethhdr *)skb->data;
3532ab188e8fSElad Nachman 	vlan_proto = veth->h_vlan_proto;
3533ab188e8fSElad Nachman 
3534ab188e8fSElad Nachman 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3535ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3536ab188e8fSElad Nachman 	    (vlan_proto == htons(ETH_P_8021AD) &&
3537ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3538b9381985SVince Bridgers 		/* pop the vlan tag */
3539ab188e8fSElad Nachman 		vlanid = ntohs(veth->h_vlan_TCI);
3540ab188e8fSElad Nachman 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3541b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
3542ab188e8fSElad Nachman 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3543b9381985SVince Bridgers 	}
3544b9381985SVince Bridgers }
3545b9381985SVince Bridgers 
3546b9381985SVince Bridgers 
354754139cf3SJoao Pinto static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3548120e87f9SGiuseppe Cavallaro {
354954139cf3SJoao Pinto 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3550120e87f9SGiuseppe Cavallaro 		return 0;
3551120e87f9SGiuseppe Cavallaro 
3552120e87f9SGiuseppe Cavallaro 	return 1;
3553120e87f9SGiuseppe Cavallaro }
3554120e87f9SGiuseppe Cavallaro 
355532ceabcaSGiuseppe CAVALLARO /**
3556732fdf0eSGiuseppe CAVALLARO  * stmmac_rx_refill - refill the used RX buffers
355732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
355854139cf3SJoao Pinto  * @queue: RX queue index
355932ceabcaSGiuseppe CAVALLARO  * Description : this reallocates the RX buffers (page pool pages) used by
356032ceabcaSGiuseppe CAVALLARO  * the zero-copy reception process.
356132ceabcaSGiuseppe CAVALLARO  */
356254139cf3SJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
35637ac6653aSJeff Kirsher {
356454139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
35653caa61c2SJose Abreu 	int len, dirty = stmmac_rx_dirty(priv, queue);
356654139cf3SJoao Pinto 	unsigned int entry = rx_q->dirty_rx;
356754139cf3SJoao Pinto 
35683caa61c2SJose Abreu 	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
35693caa61c2SJose Abreu 
3570e3ad57c9SGiuseppe Cavallaro 	while (dirty-- > 0) {
35712af6106aSJose Abreu 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3572c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
3573d429b66eSJose Abreu 		bool use_rx_wd;
3574c24602efSGiuseppe CAVALLARO 
3575c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
357654139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3577c24602efSGiuseppe CAVALLARO 		else
357854139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3579c24602efSGiuseppe CAVALLARO 
35802af6106aSJose Abreu 		if (!buf->page) {
35812af6106aSJose Abreu 			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
35822af6106aSJose Abreu 			if (!buf->page)
35837ac6653aSJeff Kirsher 				break;
3584120e87f9SGiuseppe Cavallaro 		}
35857ac6653aSJeff Kirsher 
358667afd6d1SJose Abreu 		if (priv->sph && !buf->sec_page) {
358767afd6d1SJose Abreu 			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
358867afd6d1SJose Abreu 			if (!buf->sec_page)
358967afd6d1SJose Abreu 				break;
359067afd6d1SJose Abreu 
359167afd6d1SJose Abreu 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
359267afd6d1SJose Abreu 
359367afd6d1SJose Abreu 			dma_sync_single_for_device(priv->device, buf->sec_addr,
359467afd6d1SJose Abreu 						   len, DMA_FROM_DEVICE);
359567afd6d1SJose Abreu 		}
359667afd6d1SJose Abreu 
35972af6106aSJose Abreu 		buf->addr = page_pool_get_dma_addr(buf->page);
35983caa61c2SJose Abreu 
35993caa61c2SJose Abreu 		/* Sync whole allocation to device. This will invalidate old
36003caa61c2SJose Abreu 		 * data.
36013caa61c2SJose Abreu 		 */
36023caa61c2SJose Abreu 		dma_sync_single_for_device(priv->device, buf->addr, len,
36033caa61c2SJose Abreu 					   DMA_FROM_DEVICE);
36043caa61c2SJose Abreu 
36052af6106aSJose Abreu 		stmmac_set_desc_addr(priv, p, buf->addr);
360667afd6d1SJose Abreu 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
36072c520b1cSJose Abreu 		stmmac_refill_desc3(priv, rx_q, p);
3608286a8372SGiuseppe CAVALLARO 
3609d429b66eSJose Abreu 		rx_q->rx_count_frames++;
36106fa9d691SJose Abreu 		rx_q->rx_count_frames += priv->rx_coal_frames;
36116fa9d691SJose Abreu 		if (rx_q->rx_count_frames > priv->rx_coal_frames)
36126fa9d691SJose Abreu 			rx_q->rx_count_frames = 0;
361309146abeSJose Abreu 
361409146abeSJose Abreu 		use_rx_wd = !priv->rx_coal_frames;
361509146abeSJose Abreu 		use_rx_wd |= rx_q->rx_count_frames > 0;
361609146abeSJose Abreu 		if (!priv->use_riwt)
361709146abeSJose Abreu 			use_rx_wd = false;
3618d429b66eSJose Abreu 
3619ad688cdbSPavel Machek 		dma_wmb();
36202af6106aSJose Abreu 		stmmac_set_rx_owner(priv, p, use_rx_wd);
3621e3ad57c9SGiuseppe Cavallaro 
3622e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
36237ac6653aSJeff Kirsher 	}
362454139cf3SJoao Pinto 	rx_q->dirty_rx = entry;
3625858a31ffSJose Abreu 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3626858a31ffSJose Abreu 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
36274523a561SBiao Huang 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
36287ac6653aSJeff Kirsher }
36297ac6653aSJeff Kirsher 
363088ebe2cfSJose Abreu static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
363188ebe2cfSJose Abreu 				       struct dma_desc *p,
363288ebe2cfSJose Abreu 				       int status, unsigned int len)
363388ebe2cfSJose Abreu {
363488ebe2cfSJose Abreu 	int ret, coe = priv->hw->rx_csum;
363588ebe2cfSJose Abreu 	unsigned int plen = 0, hlen = 0;
363688ebe2cfSJose Abreu 
363788ebe2cfSJose Abreu 	/* Not first descriptor, buffer is always zero */
363888ebe2cfSJose Abreu 	if (priv->sph && len)
363988ebe2cfSJose Abreu 		return 0;
364088ebe2cfSJose Abreu 
364188ebe2cfSJose Abreu 	/* First descriptor, get split header length */
364288ebe2cfSJose Abreu 	ret = stmmac_get_rx_header_len(priv, p, &hlen);
364388ebe2cfSJose Abreu 	if (priv->sph && hlen) {
364488ebe2cfSJose Abreu 		priv->xstats.rx_split_hdr_pkt_n++;
364588ebe2cfSJose Abreu 		return hlen;
364688ebe2cfSJose Abreu 	}
364788ebe2cfSJose Abreu 
364888ebe2cfSJose Abreu 	/* First descriptor, not last descriptor and not split header */
364988ebe2cfSJose Abreu 	if (status & rx_not_ls)
365088ebe2cfSJose Abreu 		return priv->dma_buf_sz;
365188ebe2cfSJose Abreu 
365288ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
365388ebe2cfSJose Abreu 
365488ebe2cfSJose Abreu 	/* First descriptor and last descriptor and not split header */
365588ebe2cfSJose Abreu 	return min_t(unsigned int, priv->dma_buf_sz, plen);
365688ebe2cfSJose Abreu }
365788ebe2cfSJose Abreu 
365888ebe2cfSJose Abreu static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
365988ebe2cfSJose Abreu 				       struct dma_desc *p,
366088ebe2cfSJose Abreu 				       int status, unsigned int len)
366188ebe2cfSJose Abreu {
366288ebe2cfSJose Abreu 	int coe = priv->hw->rx_csum;
366388ebe2cfSJose Abreu 	unsigned int plen = 0;
366488ebe2cfSJose Abreu 
366588ebe2cfSJose Abreu 	/* Not split header, buffer is not available */
366688ebe2cfSJose Abreu 	if (!priv->sph)
366788ebe2cfSJose Abreu 		return 0;
366888ebe2cfSJose Abreu 
366988ebe2cfSJose Abreu 	/* Not last descriptor */
367088ebe2cfSJose Abreu 	if (status & rx_not_ls)
367188ebe2cfSJose Abreu 		return priv->dma_buf_sz;
367288ebe2cfSJose Abreu 
367388ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
367488ebe2cfSJose Abreu 
367588ebe2cfSJose Abreu 	/* Last descriptor */
367688ebe2cfSJose Abreu 	return plen - len;
367788ebe2cfSJose Abreu }
367888ebe2cfSJose Abreu 
367932ceabcaSGiuseppe CAVALLARO /**
3680732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
368132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
368254139cf3SJoao Pinto  * @limit: napi budget
368354139cf3SJoao Pinto  * @queue: RX queue index.
368432ceabcaSGiuseppe CAVALLARO  * Description :  this is the function called by the napi poll method.
368532ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
368632ceabcaSGiuseppe CAVALLARO  */
368754139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
36887ac6653aSJeff Kirsher {
368954139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
36908fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[queue];
3691ec222003SJose Abreu 	unsigned int count = 0, error = 0, len = 0;
3692ec222003SJose Abreu 	int status = 0, coe = priv->hw->rx_csum;
369307b39753SAaro Koskinen 	unsigned int next_entry = rx_q->cur_rx;
3694ec222003SJose Abreu 	struct sk_buff *skb = NULL;
36957ac6653aSJeff Kirsher 
369683d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
3697d0225e7dSAlexandre TORGUE 		void *rx_head;
3698d0225e7dSAlexandre TORGUE 
369938ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3700c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
370154139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
3702c24602efSGiuseppe CAVALLARO 		else
370354139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
3704d0225e7dSAlexandre TORGUE 
370542de047dSJose Abreu 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
37067ac6653aSJeff Kirsher 	}
3707c24602efSGiuseppe CAVALLARO 	while (count < limit) {
370888ebe2cfSJose Abreu 		unsigned int buf1_len = 0, buf2_len = 0;
3709ec222003SJose Abreu 		enum pkt_hash_types hash_type;
37102af6106aSJose Abreu 		struct stmmac_rx_buffer *buf;
37112af6106aSJose Abreu 		struct dma_desc *np, *p;
3712ec222003SJose Abreu 		int entry;
3713ec222003SJose Abreu 		u32 hash;
37147ac6653aSJeff Kirsher 
3715ec222003SJose Abreu 		if (!count && rx_q->state_saved) {
3716ec222003SJose Abreu 			skb = rx_q->state.skb;
3717ec222003SJose Abreu 			error = rx_q->state.error;
3718ec222003SJose Abreu 			len = rx_q->state.len;
3719ec222003SJose Abreu 		} else {
3720ec222003SJose Abreu 			rx_q->state_saved = false;
3721ec222003SJose Abreu 			skb = NULL;
3722ec222003SJose Abreu 			error = 0;
3723ec222003SJose Abreu 			len = 0;
3724ec222003SJose Abreu 		}
3725ec222003SJose Abreu 
3726ec222003SJose Abreu 		if (count >= limit)
3727ec222003SJose Abreu 			break;
3728ec222003SJose Abreu 
3729ec222003SJose Abreu read_again:
373088ebe2cfSJose Abreu 		buf1_len = 0;
373188ebe2cfSJose Abreu 		buf2_len = 0;
373207b39753SAaro Koskinen 		entry = next_entry;
37332af6106aSJose Abreu 		buf = &rx_q->buf_pool[entry];
373407b39753SAaro Koskinen 
3735c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
373654139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3737c24602efSGiuseppe CAVALLARO 		else
373854139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3739c24602efSGiuseppe CAVALLARO 
3740c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
374142de047dSJose Abreu 		status = stmmac_rx_status(priv, &priv->dev->stats,
3742c1fa3212SFabrice Gasnier 				&priv->xstats, p);
3743c1fa3212SFabrice Gasnier 		/* check if managed by the DMA otherwise go ahead */
3744c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
37457ac6653aSJeff Kirsher 			break;
37467ac6653aSJeff Kirsher 
374754139cf3SJoao Pinto 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
374854139cf3SJoao Pinto 		next_entry = rx_q->cur_rx;
3749e3ad57c9SGiuseppe Cavallaro 
3750c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
375154139cf3SJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3752c24602efSGiuseppe CAVALLARO 		else
375354139cf3SJoao Pinto 			np = rx_q->dma_rx + next_entry;
3754ba1ffd74SGiuseppe CAVALLARO 
3755ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
37567ac6653aSJeff Kirsher 
375742de047dSJose Abreu 		if (priv->extend_desc)
375842de047dSJose Abreu 			stmmac_rx_extended_status(priv, &priv->dev->stats,
375942de047dSJose Abreu 					&priv->xstats, rx_q->dma_erx + entry);
3760891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
37612af6106aSJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
37622af6106aSJose Abreu 			buf->page = NULL;
3763ec222003SJose Abreu 			error = 1;
37640b273ca4SJose Abreu 			if (!priv->hwts_rx_en)
37650b273ca4SJose Abreu 				priv->dev->stats.rx_errors++;
3766ec222003SJose Abreu 		}
3767f748be53SAlexandre TORGUE 
3768ec222003SJose Abreu 		if (unlikely(error && (status & rx_not_ls)))
3769ec222003SJose Abreu 			goto read_again;
3770ec222003SJose Abreu 		if (unlikely(error)) {
3771ec222003SJose Abreu 			dev_kfree_skb(skb);
377288ebe2cfSJose Abreu 			skb = NULL;
3773cda4985aSJose Abreu 			count++;
377407b39753SAaro Koskinen 			continue;
3775e527c4a7SGiuseppe CAVALLARO 		}
3776e527c4a7SGiuseppe CAVALLARO 
3777ec222003SJose Abreu 		/* Buffer is good. Go on. */
3778ec222003SJose Abreu 
377988ebe2cfSJose Abreu 		prefetch(page_address(buf->page));
378088ebe2cfSJose Abreu 		if (buf->sec_page)
378188ebe2cfSJose Abreu 			prefetch(page_address(buf->sec_page));
378288ebe2cfSJose Abreu 
378388ebe2cfSJose Abreu 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
378488ebe2cfSJose Abreu 		len += buf1_len;
378588ebe2cfSJose Abreu 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
378688ebe2cfSJose Abreu 		len += buf2_len;
3787ec222003SJose Abreu 
37887ac6653aSJeff Kirsher 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3789ceb69499SGiuseppe CAVALLARO 		 * Type frames (LLC/LLC-SNAP)
3790565020aaSJose Abreu 		 *
3791565020aaSJose Abreu 		 * llc_snap is never checked in GMAC >= 4, so this ACS
3792565020aaSJose Abreu 		 * feature is always disabled and packets need to be
3793565020aaSJose Abreu 		 * stripped manually.
3794ceb69499SGiuseppe CAVALLARO 		 */
379593b5dce4SJose Abreu 		if (likely(!(status & rx_not_ls)) &&
379693b5dce4SJose Abreu 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
379793b5dce4SJose Abreu 		     unlikely(status != llc_snap))) {
379888ebe2cfSJose Abreu 			if (buf2_len)
379988ebe2cfSJose Abreu 				buf2_len -= ETH_FCS_LEN;
380088ebe2cfSJose Abreu 			else
380188ebe2cfSJose Abreu 				buf1_len -= ETH_FCS_LEN;
380288ebe2cfSJose Abreu 
3803ec222003SJose Abreu 			len -= ETH_FCS_LEN;
380483d7af64SGiuseppe CAVALLARO 		}
380522ad3838SGiuseppe Cavallaro 
3806ec222003SJose Abreu 		if (!skb) {
380788ebe2cfSJose Abreu 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
3808ec222003SJose Abreu 			if (!skb) {
380922ad3838SGiuseppe Cavallaro 				priv->dev->stats.rx_dropped++;
3810cda4985aSJose Abreu 				count++;
381188ebe2cfSJose Abreu 				goto drain_data;
381222ad3838SGiuseppe Cavallaro 			}
381322ad3838SGiuseppe Cavallaro 
381488ebe2cfSJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
381588ebe2cfSJose Abreu 						buf1_len, DMA_FROM_DEVICE);
38162af6106aSJose Abreu 			skb_copy_to_linear_data(skb, page_address(buf->page),
381788ebe2cfSJose Abreu 						buf1_len);
381888ebe2cfSJose Abreu 			skb_put(skb, buf1_len);
381922ad3838SGiuseppe Cavallaro 
3820ec222003SJose Abreu 			/* Data payload copied into SKB, page ready for recycle */
3821ec222003SJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
3822ec222003SJose Abreu 			buf->page = NULL;
382388ebe2cfSJose Abreu 		} else if (buf1_len) {
3824ec222003SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
382588ebe2cfSJose Abreu 						buf1_len, DMA_FROM_DEVICE);
3826ec222003SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
382788ebe2cfSJose Abreu 					buf->page, 0, buf1_len,
3828ec222003SJose Abreu 					priv->dma_buf_sz);
3829ec222003SJose Abreu 
3830ec222003SJose Abreu 			/* Data payload appended into SKB */
3831ec222003SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->page);
3832ec222003SJose Abreu 			buf->page = NULL;
38337ac6653aSJeff Kirsher 		}
383483d7af64SGiuseppe CAVALLARO 
383588ebe2cfSJose Abreu 		if (buf2_len) {
383667afd6d1SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
383788ebe2cfSJose Abreu 						buf2_len, DMA_FROM_DEVICE);
383867afd6d1SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
383988ebe2cfSJose Abreu 					buf->sec_page, 0, buf2_len,
384067afd6d1SJose Abreu 					priv->dma_buf_sz);
384167afd6d1SJose Abreu 
384267afd6d1SJose Abreu 			/* Data payload appended into SKB */
384367afd6d1SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->sec_page);
384467afd6d1SJose Abreu 			buf->sec_page = NULL;
384567afd6d1SJose Abreu 		}
384667afd6d1SJose Abreu 
384788ebe2cfSJose Abreu drain_data:
3848ec222003SJose Abreu 		if (likely(status & rx_not_ls))
3849ec222003SJose Abreu 			goto read_again;
385088ebe2cfSJose Abreu 		if (!skb)
385188ebe2cfSJose Abreu 			continue;
3852ec222003SJose Abreu 
3853ec222003SJose Abreu 		/* Got entire packet into SKB. Finish it. */
3854ec222003SJose Abreu 
3855ba1ffd74SGiuseppe CAVALLARO 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
3856b9381985SVince Bridgers 		stmmac_rx_vlan(priv->dev, skb);
38577ac6653aSJeff Kirsher 		skb->protocol = eth_type_trans(skb, priv->dev);
38587ac6653aSJeff Kirsher 
3859ceb69499SGiuseppe CAVALLARO 		if (unlikely(!coe))
38607ac6653aSJeff Kirsher 			skb_checksum_none_assert(skb);
386162a2ab93SGiuseppe CAVALLARO 		else
38627ac6653aSJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
386362a2ab93SGiuseppe CAVALLARO 
386476067459SJose Abreu 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
386576067459SJose Abreu 			skb_set_hash(skb, hash, hash_type);
386676067459SJose Abreu 
386776067459SJose Abreu 		skb_record_rx_queue(skb, queue);
38684ccb4585SJose Abreu 		napi_gro_receive(&ch->rx_napi, skb);
386988ebe2cfSJose Abreu 		skb = NULL;
38707ac6653aSJeff Kirsher 
38717ac6653aSJeff Kirsher 		priv->dev->stats.rx_packets++;
3872ec222003SJose Abreu 		priv->dev->stats.rx_bytes += len;
3873cda4985aSJose Abreu 		count++;
38747ac6653aSJeff Kirsher 	}
3875ec222003SJose Abreu 
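	/* A frame can span several descriptors and NAPI polls: save the
	 * partially built skb and the running error/length state so the
	 * next poll resumes where this one stopped.
	 */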
387688ebe2cfSJose Abreu 	if (status & rx_not_ls || skb) {
3877ec222003SJose Abreu 		rx_q->state_saved = true;
3878ec222003SJose Abreu 		rx_q->state.skb = skb;
3879ec222003SJose Abreu 		rx_q->state.error = error;
3880ec222003SJose Abreu 		rx_q->state.len = len;
38817ac6653aSJeff Kirsher 	}
38827ac6653aSJeff Kirsher 
388354139cf3SJoao Pinto 	stmmac_rx_refill(priv, queue);
38847ac6653aSJeff Kirsher 
38857ac6653aSJeff Kirsher 	priv->xstats.rx_pkt_n += count;
38867ac6653aSJeff Kirsher 
38877ac6653aSJeff Kirsher 	return count;
38887ac6653aSJeff Kirsher }
38897ac6653aSJeff Kirsher 
38904ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
38917ac6653aSJeff Kirsher {
38928fce3331SJose Abreu 	struct stmmac_channel *ch =
38934ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, rx_napi);
38948fce3331SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
38958fce3331SJose Abreu 	u32 chan = ch->index;
38964ccb4585SJose Abreu 	int work_done;
38977ac6653aSJeff Kirsher 
38989125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
3899ce736788SJoao Pinto 
39004ccb4585SJose Abreu 	work_done = stmmac_rx(priv, budget, chan);
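	/* Complete NAPI only if the budget was not exhausted, then re-enable
	 * the RX DMA interrupt for this channel under ch->lock.
	 */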
3901021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
3902021bd5e3SJose Abreu 		unsigned long flags;
3903021bd5e3SJose Abreu 
3904021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
3905021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
3906021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
3907021bd5e3SJose Abreu 	}
3908021bd5e3SJose Abreu 
39094ccb4585SJose Abreu 	return work_done;
39104ccb4585SJose Abreu }
3911ce736788SJoao Pinto 
39124ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
39134ccb4585SJose Abreu {
39144ccb4585SJose Abreu 	struct stmmac_channel *ch =
39154ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, tx_napi);
39164ccb4585SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
39174ccb4585SJose Abreu 	u32 chan = ch->index;
39184ccb4585SJose Abreu 	int work_done;
39194ccb4585SJose Abreu 
39204ccb4585SJose Abreu 	priv->xstats.napi_poll++;
39214ccb4585SJose Abreu 
39224ccb4585SJose Abreu 	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3923fa0be0a4SJose Abreu 	work_done = min(work_done, budget);
39248fce3331SJose Abreu 
3925021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
3926021bd5e3SJose Abreu 		unsigned long flags;
39274ccb4585SJose Abreu 
3928021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
3929021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
3930021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
3931fa0be0a4SJose Abreu 	}
39328fce3331SJose Abreu 
39337ac6653aSJeff Kirsher 	return work_done;
39347ac6653aSJeff Kirsher }
39357ac6653aSJeff Kirsher 
39367ac6653aSJeff Kirsher /**
39377ac6653aSJeff Kirsher  *  stmmac_tx_timeout
39387ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
 *  @txqueue : index of the hanging transmit queue
39397ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
39407284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
39417ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
39427ac6653aSJeff Kirsher  *   in order to transmit a new packet.
39437ac6653aSJeff Kirsher  */
39440290bd29SMichael S. Tsirkin static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
39457ac6653aSJeff Kirsher {
39467ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
39477ac6653aSJeff Kirsher 
394834877a15SJose Abreu 	stmmac_global_err(priv);
39497ac6653aSJeff Kirsher }
39507ac6653aSJeff Kirsher 
39517ac6653aSJeff Kirsher /**
395201789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
39537ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
39547ac6653aSJeff Kirsher  *  Description:
39557ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
39567ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
39577ac6653aSJeff Kirsher  *  Return value:
39587ac6653aSJeff Kirsher  *  void.
39597ac6653aSJeff Kirsher  */
396001789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
39617ac6653aSJeff Kirsher {
39627ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
39637ac6653aSJeff Kirsher 
3964c10d4c82SJose Abreu 	stmmac_set_filter(priv, priv->hw, dev);
39657ac6653aSJeff Kirsher }
39667ac6653aSJeff Kirsher 
39677ac6653aSJeff Kirsher /**
39687ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
39697ac6653aSJeff Kirsher  *  @dev : device pointer.
39707ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
39717ac6653aSJeff Kirsher  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
39727ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
39737ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
39747ac6653aSJeff Kirsher  *  Return value:
39757ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
39767ac6653aSJeff Kirsher  *  file on failure.
39777ac6653aSJeff Kirsher  */
39787ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
39797ac6653aSJeff Kirsher {
398038ddc59dSLABBE Corentin 	struct stmmac_priv *priv = netdev_priv(dev);
3981eaf4fac4SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
3982eaf4fac4SJose Abreu 
3983eaf4fac4SJose Abreu 	if (txfifosz == 0)
3984eaf4fac4SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
3985eaf4fac4SJose Abreu 
3986eaf4fac4SJose Abreu 	txfifosz /= priv->plat->tx_queues_to_use;
398738ddc59dSLABBE Corentin 
39887ac6653aSJeff Kirsher 	if (netif_running(dev)) {
398938ddc59dSLABBE Corentin 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
39907ac6653aSJeff Kirsher 		return -EBUSY;
39917ac6653aSJeff Kirsher 	}
39927ac6653aSJeff Kirsher 
3993eaf4fac4SJose Abreu 	new_mtu = STMMAC_ALIGN(new_mtu);
3994eaf4fac4SJose Abreu 
3995eaf4fac4SJose Abreu 	/* Reject if the TX FIFO is too small or the MTU is too large */
3996eaf4fac4SJose Abreu 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
3997eaf4fac4SJose Abreu 		return -EINVAL;
3998eaf4fac4SJose Abreu 
39997ac6653aSJeff Kirsher 	dev->mtu = new_mtu;
4000f748be53SAlexandre TORGUE 
40017ac6653aSJeff Kirsher 	netdev_update_features(dev);
40027ac6653aSJeff Kirsher 
40037ac6653aSJeff Kirsher 	return 0;
40047ac6653aSJeff Kirsher }
40057ac6653aSJeff Kirsher 
4006c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
4007c8f44affSMichał Mirosław 					     netdev_features_t features)
40087ac6653aSJeff Kirsher {
40097ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
40107ac6653aSJeff Kirsher 
401138912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
40127ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
4013d2afb5bdSGiuseppe CAVALLARO 
40147ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
4015a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
40167ac6653aSJeff Kirsher 
40177ac6653aSJeff Kirsher 	/* Some GMAC devices have buggy Jumbo frame support that
40187ac6653aSJeff Kirsher 	 * needs the Tx COE to be disabled for oversized frames
40197ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
4020ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and do not use SF.
4021ceb69499SGiuseppe CAVALLARO 	 */
40227ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
4023a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
40247ac6653aSJeff Kirsher 
4025f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
4026f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4027f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
4028f748be53SAlexandre TORGUE 			priv->tso = true;
4029f748be53SAlexandre TORGUE 		else
4030f748be53SAlexandre TORGUE 			priv->tso = false;
4031f748be53SAlexandre TORGUE 	}
4032f748be53SAlexandre TORGUE 
40337ac6653aSJeff Kirsher 	return features;
40347ac6653aSJeff Kirsher }
40357ac6653aSJeff Kirsher 
4036d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
4037d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
4038d2afb5bdSGiuseppe CAVALLARO {
4039d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
404067afd6d1SJose Abreu 	bool sph_en;
404167afd6d1SJose Abreu 	u32 chan;
4042d2afb5bdSGiuseppe CAVALLARO 
4043d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type in case of csum is supporting */
4044d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
4045d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
4046d2afb5bdSGiuseppe CAVALLARO 	else
4047d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
4048d2afb5bdSGiuseppe CAVALLARO 	/* No check is needed because rx_coe has already been set and it will
4049d2afb5bdSGiuseppe CAVALLARO 	 * be fixed in case of issues.
4050d2afb5bdSGiuseppe CAVALLARO 	 */
4051c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
4052d2afb5bdSGiuseppe CAVALLARO 
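	/* Split Header (SPH) is only usable while RX checksum offload is
	 * active, so re-evaluate it and re-program every RX channel.
	 */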
405367afd6d1SJose Abreu 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
405467afd6d1SJose Abreu 	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
405567afd6d1SJose Abreu 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
405667afd6d1SJose Abreu 
4057d2afb5bdSGiuseppe CAVALLARO 	return 0;
4058d2afb5bdSGiuseppe CAVALLARO }
4059d2afb5bdSGiuseppe CAVALLARO 
406032ceabcaSGiuseppe CAVALLARO /**
406132ceabcaSGiuseppe CAVALLARO  *  stmmac_interrupt - main ISR
406232ceabcaSGiuseppe CAVALLARO  *  @irq: interrupt number.
4063f42234ffSMaxim Petrov  *  @dev_id: to pass the net device pointer (must be valid).
406432ceabcaSGiuseppe CAVALLARO  *  Description: this is the main driver interrupt service routine.
4065732fdf0eSGiuseppe CAVALLARO  *  It can call:
4066732fdf0eSGiuseppe CAVALLARO  *  o DMA service routine (to manage incoming frame reception and transmission
4067732fdf0eSGiuseppe CAVALLARO  *    status)
4068732fdf0eSGiuseppe CAVALLARO  *  o Core interrupts to manage: remote wake-up, management counter, LPI
406932ceabcaSGiuseppe CAVALLARO  *    interrupts.
407032ceabcaSGiuseppe CAVALLARO  */
40717ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
40727ac6653aSJeff Kirsher {
40737ac6653aSJeff Kirsher 	struct net_device *dev = (struct net_device *)dev_id;
40747ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
40757bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
40767bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
40777bac4e1eSJoao Pinto 	u32 queues_count;
40787bac4e1eSJoao Pinto 	u32 queue;
40797d9e6c5aSJose Abreu 	bool xmac;
40807bac4e1eSJoao Pinto 
40817d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
40827bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
40837ac6653aSJeff Kirsher 
408489f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
408589f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
408689f7f2cfSSrinivas Kandagatla 
408734877a15SJose Abreu 	/* Check if adapter is up */
408834877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
408934877a15SJose Abreu 		return IRQ_HANDLED;
40908bf993a5SJose Abreu 	/* Check if a fatal error happened */
40918bf993a5SJose Abreu 	if (stmmac_safety_feat_interrupt(priv))
40928bf993a5SJose Abreu 		return IRQ_HANDLED;
409334877a15SJose Abreu 
40947ac6653aSJeff Kirsher 	/* To handle GMAC own interrupts */
40957d9e6c5aSJose Abreu 	if ((priv->plat->has_gmac) || xmac) {
4096c10d4c82SJose Abreu 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
409761fac60aSJose Abreu 		int mtl_status;
40988f71a88dSJoao Pinto 
4099d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
4100d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
41010982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
4102d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
41030982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
4104d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
41057bac4e1eSJoao Pinto 		}
41067bac4e1eSJoao Pinto 
41077bac4e1eSJoao Pinto 		for (queue = 0; queue < queues_count; queue++) {
410861fac60aSJose Abreu 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
410954139cf3SJoao Pinto 
411061fac60aSJose Abreu 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
411161fac60aSJose Abreu 								queue);
411261fac60aSJose Abreu 			if (mtl_status != -EINVAL)
411361fac60aSJose Abreu 				status |= mtl_status;
41147bac4e1eSJoao Pinto 
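			/* On an MTL RX FIFO overflow, re-program the RX tail
			 * pointer so the DMA resumes refilling this queue.
			 */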
4115a4e887faSJose Abreu 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
411661fac60aSJose Abreu 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
411754139cf3SJoao Pinto 						       rx_q->rx_tail_addr,
41187bac4e1eSJoao Pinto 						       queue);
41197bac4e1eSJoao Pinto 		}
412070523e63SGiuseppe CAVALLARO 
412170523e63SGiuseppe CAVALLARO 		/* PCS link status */
41223fe5cadbSGiuseppe CAVALLARO 		if (priv->hw->pcs) {
412370523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
412470523e63SGiuseppe CAVALLARO 				netif_carrier_on(dev);
412570523e63SGiuseppe CAVALLARO 			else
412670523e63SGiuseppe CAVALLARO 				netif_carrier_off(dev);
412770523e63SGiuseppe CAVALLARO 		}
4128d765955dSGiuseppe CAVALLARO 	}
4129d765955dSGiuseppe CAVALLARO 
4130d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
41317ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
41327ac6653aSJeff Kirsher 
41337ac6653aSJeff Kirsher 	return IRQ_HANDLED;
41347ac6653aSJeff Kirsher }
41357ac6653aSJeff Kirsher 
41367ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
41377ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools
4138ceb69499SGiuseppe CAVALLARO  * to allow network I/O with interrupts disabled.
4139ceb69499SGiuseppe CAVALLARO  */
41407ac6653aSJeff Kirsher static void stmmac_poll_controller(struct net_device *dev)
41417ac6653aSJeff Kirsher {
41427ac6653aSJeff Kirsher 	disable_irq(dev->irq);
41437ac6653aSJeff Kirsher 	stmmac_interrupt(dev->irq, dev);
41447ac6653aSJeff Kirsher 	enable_irq(dev->irq);
41457ac6653aSJeff Kirsher }
41467ac6653aSJeff Kirsher #endif
41477ac6653aSJeff Kirsher 
41487ac6653aSJeff Kirsher /**
41497ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
41507ac6653aSJeff Kirsher  *  @dev: Device pointer.
41517ac6653aSJeff Kirsher  *  @rq: An IOCTL specific structure that can contain a pointer to
41527ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
41537ac6653aSJeff Kirsher  *  @cmd: IOCTL command
41547ac6653aSJeff Kirsher  *  Description:
415532ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
41567ac6653aSJeff Kirsher  */
41577ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
41587ac6653aSJeff Kirsher {
415974371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(dev);
4160891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
41617ac6653aSJeff Kirsher 
41627ac6653aSJeff Kirsher 	if (!netif_running(dev))
41637ac6653aSJeff Kirsher 		return -EINVAL;
41647ac6653aSJeff Kirsher 
4165891434b1SRayagond Kokatanur 	switch (cmd) {
4166891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
4167891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
4168891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
416974371272SJose Abreu 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
4170891434b1SRayagond Kokatanur 		break;
4171891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
4172d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_set(dev, rq);
4173d6228b7cSArtem Panfilov 		break;
4174d6228b7cSArtem Panfilov 	case SIOCGHWTSTAMP:
4175d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_get(dev, rq);
4176891434b1SRayagond Kokatanur 		break;
4177891434b1SRayagond Kokatanur 	default:
4178891434b1SRayagond Kokatanur 		break;
4179891434b1SRayagond Kokatanur 	}
41807ac6653aSJeff Kirsher 
41817ac6653aSJeff Kirsher 	return ret;
41827ac6653aSJeff Kirsher }
41837ac6653aSJeff Kirsher 
41844dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
41854dbbe8ddSJose Abreu 				    void *cb_priv)
41864dbbe8ddSJose Abreu {
41874dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
41884dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
41894dbbe8ddSJose Abreu 
4190425eabddSJose Abreu 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4191425eabddSJose Abreu 		return ret;
4192425eabddSJose Abreu 
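	/* Quiesce all queues while the classifier offload is updated, then
	 * re-enable them once the setup call below has finished.
	 */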
41934dbbe8ddSJose Abreu 	stmmac_disable_all_queues(priv);
41944dbbe8ddSJose Abreu 
41954dbbe8ddSJose Abreu 	switch (type) {
41964dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
41974dbbe8ddSJose Abreu 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
41984dbbe8ddSJose Abreu 		break;
4199425eabddSJose Abreu 	case TC_SETUP_CLSFLOWER:
4200425eabddSJose Abreu 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
4201425eabddSJose Abreu 		break;
42024dbbe8ddSJose Abreu 	default:
42034dbbe8ddSJose Abreu 		break;
42044dbbe8ddSJose Abreu 	}
42054dbbe8ddSJose Abreu 
42064dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
42074dbbe8ddSJose Abreu 	return ret;
42084dbbe8ddSJose Abreu }
42094dbbe8ddSJose Abreu 
4210955bcb6eSPablo Neira Ayuso static LIST_HEAD(stmmac_block_cb_list);
4211955bcb6eSPablo Neira Ayuso 
42124dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
42134dbbe8ddSJose Abreu 			   void *type_data)
42144dbbe8ddSJose Abreu {
42154dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
42164dbbe8ddSJose Abreu 
42174dbbe8ddSJose Abreu 	switch (type) {
42184dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
4219955bcb6eSPablo Neira Ayuso 		return flow_block_cb_setup_simple(type_data,
4220955bcb6eSPablo Neira Ayuso 						  &stmmac_block_cb_list,
42214e95bc26SPablo Neira Ayuso 						  stmmac_setup_tc_block_cb,
42224e95bc26SPablo Neira Ayuso 						  priv, priv, true);
42231f705bc6SJose Abreu 	case TC_SETUP_QDISC_CBS:
42241f705bc6SJose Abreu 		return stmmac_tc_setup_cbs(priv, priv, type_data);
4225b60189e0SJose Abreu 	case TC_SETUP_QDISC_TAPRIO:
4226b60189e0SJose Abreu 		return stmmac_tc_setup_taprio(priv, priv, type_data);
4227430b383cSJose Abreu 	case TC_SETUP_QDISC_ETF:
4228430b383cSJose Abreu 		return stmmac_tc_setup_etf(priv, priv, type_data);
42294dbbe8ddSJose Abreu 	default:
42304dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
42314dbbe8ddSJose Abreu 	}
42324dbbe8ddSJose Abreu }
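
/* A quick map of what stmmac_setup_tc() can offload: cls_u32 and flower
 * classifiers are delivered through the flow block callback above, while the
 * CBS, TAPRIO and ETF qdiscs are dispatched straight to the stmmac_tc_*
 * helpers. Anything else falls back to software with -EOPNOTSUPP.
 */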
42334dbbe8ddSJose Abreu 
42344993e5b3SJose Abreu static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
42354993e5b3SJose Abreu 			       struct net_device *sb_dev)
42364993e5b3SJose Abreu {
4237b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
4238b7766206SJose Abreu 
4239b7766206SJose Abreu 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
42404993e5b3SJose Abreu 		/*
4241b7766206SJose Abreu 		 * There is no way to determine the number of TSO/USO
42424993e5b3SJose Abreu 		 * capable queues. Always use queue 0 because, if
4243b7766206SJose Abreu 		 * TSO/USO is supported, at least this one will be
42444993e5b3SJose Abreu 		 * capable.
42454993e5b3SJose Abreu 		 */
42464993e5b3SJose Abreu 		return 0;
42474993e5b3SJose Abreu 	}
42484993e5b3SJose Abreu 
42494993e5b3SJose Abreu 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
42504993e5b3SJose Abreu }
42514993e5b3SJose Abreu 
4252a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4253a830405eSBhadram Varka {
4254a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
4255a830405eSBhadram Varka 	int ret = 0;
4256a830405eSBhadram Varka 
4257a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
4258a830405eSBhadram Varka 	if (ret)
4259a830405eSBhadram Varka 		return ret;
4260a830405eSBhadram Varka 
4261c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4262a830405eSBhadram Varka 
4263a830405eSBhadram Varka 	return ret;
4264a830405eSBhadram Varka }
4265a830405eSBhadram Varka 
426650fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
42677ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
42687ac29055SGiuseppe CAVALLARO 
4269c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
4270c24602efSGiuseppe CAVALLARO 			       struct seq_file *seq)
42717ac29055SGiuseppe CAVALLARO {
42727ac29055SGiuseppe CAVALLARO 	int i;
4273c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4274c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
42757ac29055SGiuseppe CAVALLARO 
4276c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
4277c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
4278c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4279c24602efSGiuseppe CAVALLARO 				   i, (unsigned int)virt_to_phys(ep),
4280f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
4281f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
4282f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
4283f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
4284c24602efSGiuseppe CAVALLARO 			ep++;
4285c24602efSGiuseppe CAVALLARO 		} else {
4286c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
428766c25f6eSNiklas Cassel 				   i, (unsigned int)virt_to_phys(p),
4288f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4289f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4290c24602efSGiuseppe CAVALLARO 			p++;
4291c24602efSGiuseppe CAVALLARO 		}
42927ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
42937ac29055SGiuseppe CAVALLARO 	}
4294c24602efSGiuseppe CAVALLARO }
42957ac29055SGiuseppe CAVALLARO 
4296fb0d9c63SYangtao Li static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4297c24602efSGiuseppe CAVALLARO {
4298c24602efSGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
4299c24602efSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
430054139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
4301ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
430254139cf3SJoao Pinto 	u32 queue;
430354139cf3SJoao Pinto 
43045f2b8b62SThierry Reding 	if ((dev->flags & IFF_UP) == 0)
43055f2b8b62SThierry Reding 		return 0;
43065f2b8b62SThierry Reding 
430754139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
430854139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
430954139cf3SJoao Pinto 
431054139cf3SJoao Pinto 		seq_printf(seq, "RX Queue %d:\n", queue);
43117ac29055SGiuseppe CAVALLARO 
4312c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc) {
431354139cf3SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
431454139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_erx,
431554139cf3SJoao Pinto 					   DMA_RX_SIZE, 1, seq);
431654139cf3SJoao Pinto 		} else {
431754139cf3SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
431854139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_rx,
431954139cf3SJoao Pinto 					   DMA_RX_SIZE, 0, seq);
432054139cf3SJoao Pinto 		}
432154139cf3SJoao Pinto 	}
432254139cf3SJoao Pinto 
4323ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
4324ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4325ce736788SJoao Pinto 
4326ce736788SJoao Pinto 		seq_printf(seq, "TX Queue %d:\n", queue);
4327ce736788SJoao Pinto 
432854139cf3SJoao Pinto 		if (priv->extend_desc) {
4329ce736788SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
4330ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_etx,
4331ce736788SJoao Pinto 					   DMA_TX_SIZE, 1, seq);
4332579a25a8SJose Abreu 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
4333ce736788SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
4334ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_tx,
4335ce736788SJoao Pinto 					   DMA_TX_SIZE, 0, seq);
4336ce736788SJoao Pinto 		}
43377ac29055SGiuseppe CAVALLARO 	}
43387ac29055SGiuseppe CAVALLARO 
43397ac29055SGiuseppe CAVALLARO 	return 0;
43407ac29055SGiuseppe CAVALLARO }
4341fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
43427ac29055SGiuseppe CAVALLARO 
4343fb0d9c63SYangtao Li static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4344e7434821SGiuseppe CAVALLARO {
4345e7434821SGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
4346e7434821SGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
4347e7434821SGiuseppe CAVALLARO 
434819e30c14SGiuseppe CAVALLARO 	if (!priv->hw_cap_support) {
4349e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "DMA HW features not supported\n");
4350e7434821SGiuseppe CAVALLARO 		return 0;
4351e7434821SGiuseppe CAVALLARO 	}
4352e7434821SGiuseppe CAVALLARO 
4353e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
4354e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tDMA HW features\n");
4355e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
4356e7434821SGiuseppe CAVALLARO 
435722d3efe5SPavel Machek 	seq_printf(seq, "\t10/100 Mbps: %s\n",
4358e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
435922d3efe5SPavel Machek 	seq_printf(seq, "\t1000 Mbps: %s\n",
4360e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
436122d3efe5SPavel Machek 	seq_printf(seq, "\tHalf duplex: %s\n",
4362e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
4363e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tHash Filter: %s\n",
4364e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
4365e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4366e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
43678d45e42bSLABBE Corentin 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4368e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pcs) ? "Y" : "N");
4369e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4370e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
4371e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
4372e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4373e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
4374e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4375e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRMON module: %s\n",
4376e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rmon) ? "Y" : "N");
4377e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4378e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
4379e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4380e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
438122d3efe5SPavel Machek 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4382e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.eee) ? "Y" : "N");
4383e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4384e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4385e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
4386f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4387f748be53SAlexandre TORGUE 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4388f748be53SAlexandre TORGUE 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
4389f748be53SAlexandre TORGUE 	} else {
4390e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4391e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4392e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4393e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4394f748be53SAlexandre TORGUE 	}
4395e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4396e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4397e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4398e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_rx_channel);
4399e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4400e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_tx_channel);
44017d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
44027d0b447aSJose Abreu 		   priv->dma_cap.number_rx_queues);
44037d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
44047d0b447aSJose Abreu 		   priv->dma_cap.number_tx_queues);
4405e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4406e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
44077d0b447aSJose Abreu 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
44087d0b447aSJose Abreu 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
44097d0b447aSJose Abreu 	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
44107d0b447aSJose Abreu 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
44117d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
44127d0b447aSJose Abreu 		   priv->dma_cap.pps_out_num);
44137d0b447aSJose Abreu 	seq_printf(seq, "\tSafety Features: %s\n",
44147d0b447aSJose Abreu 		   priv->dma_cap.asp ? "Y" : "N");
44157d0b447aSJose Abreu 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
44167d0b447aSJose Abreu 		   priv->dma_cap.frpsel ? "Y" : "N");
44177d0b447aSJose Abreu 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
44187d0b447aSJose Abreu 		   priv->dma_cap.addr64);
44197d0b447aSJose Abreu 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
44207d0b447aSJose Abreu 		   priv->dma_cap.rssen ? "Y" : "N");
44217d0b447aSJose Abreu 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
44227d0b447aSJose Abreu 		   priv->dma_cap.vlhash ? "Y" : "N");
44237d0b447aSJose Abreu 	seq_printf(seq, "\tSplit Header: %s\n",
44247d0b447aSJose Abreu 		   priv->dma_cap.sphen ? "Y" : "N");
44257d0b447aSJose Abreu 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
44267d0b447aSJose Abreu 		   priv->dma_cap.vlins ? "Y" : "N");
44277d0b447aSJose Abreu 	seq_printf(seq, "\tDouble VLAN: %s\n",
44287d0b447aSJose Abreu 		   priv->dma_cap.dvlan ? "Y" : "N");
44297d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
44307d0b447aSJose Abreu 		   priv->dma_cap.l3l4fnum);
44317d0b447aSJose Abreu 	seq_printf(seq, "\tARP Offloading: %s\n",
44327d0b447aSJose Abreu 		   priv->dma_cap.arpoffsel ? "Y" : "N");
443344e65475SJose Abreu 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
443444e65475SJose Abreu 		   priv->dma_cap.estsel ? "Y" : "N");
443544e65475SJose Abreu 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
443644e65475SJose Abreu 		   priv->dma_cap.fpesel ? "Y" : "N");
443744e65475SJose Abreu 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
443844e65475SJose Abreu 		   priv->dma_cap.tbssel ? "Y" : "N");
4439e7434821SGiuseppe CAVALLARO 	return 0;
4440e7434821SGiuseppe CAVALLARO }
4441fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4442e7434821SGiuseppe CAVALLARO 
4443481a7d15SJiping Ma /* Use network device events to rename debugfs file entries.
4444481a7d15SJiping Ma  */
4445481a7d15SJiping Ma static int stmmac_device_event(struct notifier_block *unused,
4446481a7d15SJiping Ma 			       unsigned long event, void *ptr)
4447481a7d15SJiping Ma {
4448481a7d15SJiping Ma 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4449481a7d15SJiping Ma 	struct stmmac_priv *priv = netdev_priv(dev);
4450481a7d15SJiping Ma 
4451481a7d15SJiping Ma 	if (dev->netdev_ops != &stmmac_netdev_ops)
4452481a7d15SJiping Ma 		goto done;
4453481a7d15SJiping Ma 
4454481a7d15SJiping Ma 	switch (event) {
4455481a7d15SJiping Ma 	case NETDEV_CHANGENAME:
4456481a7d15SJiping Ma 		if (priv->dbgfs_dir)
4457481a7d15SJiping Ma 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
4458481a7d15SJiping Ma 							 priv->dbgfs_dir,
4459481a7d15SJiping Ma 							 stmmac_fs_dir,
4460481a7d15SJiping Ma 							 dev->name);
4461481a7d15SJiping Ma 		break;
4462481a7d15SJiping Ma 	}
4463481a7d15SJiping Ma done:
4464481a7d15SJiping Ma 	return NOTIFY_DONE;
4465481a7d15SJiping Ma }
4466481a7d15SJiping Ma 
4467481a7d15SJiping Ma static struct notifier_block stmmac_notifier = {
4468481a7d15SJiping Ma 	.notifier_call = stmmac_device_event,
4469481a7d15SJiping Ma };
4470481a7d15SJiping Ma 
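/* With debugfs mounted at /sys/kernel/debug, the per-device entries created
 * below end up under /sys/kernel/debug/<STMMAC_RESOURCE_NAME>/<netdev name>/,
 * e.g. (the interface name is only an example):
 *
 *   cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *   cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 *
 * The directory is renamed by the NETDEV_CHANGENAME notifier above when the
 * interface itself is renamed.
 */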
44718d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev)
44727ac29055SGiuseppe CAVALLARO {
4473466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
44747ac29055SGiuseppe CAVALLARO 
4475474a31e1SAaro Koskinen 	rtnl_lock();
4476474a31e1SAaro Koskinen 
4477466c5ac8SMathieu Olivari 	/* Create per netdev entries */
4478466c5ac8SMathieu Olivari 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4479466c5ac8SMathieu Olivari 
44807ac29055SGiuseppe CAVALLARO 	/* Entry to report DMA RX/TX rings */
44818d72ab11SGreg Kroah-Hartman 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
44827ac29055SGiuseppe CAVALLARO 			    &stmmac_rings_status_fops);
44837ac29055SGiuseppe CAVALLARO 
4484e7434821SGiuseppe CAVALLARO 	/* Entry to report the DMA HW features */
44858d72ab11SGreg Kroah-Hartman 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
44868d72ab11SGreg Kroah-Hartman 			    &stmmac_dma_cap_fops);
4487481a7d15SJiping Ma 
4488474a31e1SAaro Koskinen 	rtnl_unlock();
44897ac29055SGiuseppe CAVALLARO }
44907ac29055SGiuseppe CAVALLARO 
4491466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev)
44927ac29055SGiuseppe CAVALLARO {
4493466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
4494466c5ac8SMathieu Olivari 
4495466c5ac8SMathieu Olivari 	debugfs_remove_recursive(priv->dbgfs_dir);
44967ac29055SGiuseppe CAVALLARO }
449750fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
44987ac29055SGiuseppe CAVALLARO 
44993cd1cfcbSJose Abreu static u32 stmmac_vid_crc32_le(__le16 vid_le)
45003cd1cfcbSJose Abreu {
45013cd1cfcbSJose Abreu 	unsigned char *data = (unsigned char *)&vid_le;
45023cd1cfcbSJose Abreu 	unsigned char data_byte = 0;
45033cd1cfcbSJose Abreu 	u32 crc = ~0x0;
45043cd1cfcbSJose Abreu 	u32 temp = 0;
45053cd1cfcbSJose Abreu 	int i, bits;
45063cd1cfcbSJose Abreu 
45073cd1cfcbSJose Abreu 	bits = get_bitmask_order(VLAN_VID_MASK);
45083cd1cfcbSJose Abreu 	for (i = 0; i < bits; i++) {
45093cd1cfcbSJose Abreu 		if ((i % 8) == 0)
45103cd1cfcbSJose Abreu 			data_byte = data[i / 8];
45113cd1cfcbSJose Abreu 
45123cd1cfcbSJose Abreu 		temp = ((crc & 1) ^ data_byte) & 1;
45133cd1cfcbSJose Abreu 		crc >>= 1;
45143cd1cfcbSJose Abreu 		data_byte >>= 1;
45153cd1cfcbSJose Abreu 
45163cd1cfcbSJose Abreu 		if (temp)
45173cd1cfcbSJose Abreu 			crc ^= 0xedb88320;
45183cd1cfcbSJose Abreu 	}
45193cd1cfcbSJose Abreu 
45203cd1cfcbSJose Abreu 	return crc;
45213cd1cfcbSJose Abreu }
45223cd1cfcbSJose Abreu 
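/* stmmac_vid_crc32_le() above computes the standard little-endian (reflected,
 * polynomial 0xEDB88320) CRC-32 over the 12 VID bits. stmmac_vlan_update()
 * below uses the upper four bits of the bit-reversed, inverted CRC as the bin
 * index, so "hash" is a 16-bit bitmap with one bit set per active VID bin.
 * When the HW has no VLAN hash filter, a single VID is programmed as a
 * perfect match instead, and more than two active VIDs cannot be offloaded.
 */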
45233cd1cfcbSJose Abreu static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
45243cd1cfcbSJose Abreu {
45253cd1cfcbSJose Abreu 	u32 crc, hash = 0;
4526a24cae70SJose Abreu 	__le16 pmatch = 0;
4527c7ab0b80SJose Abreu 	int count = 0;
4528c7ab0b80SJose Abreu 	u16 vid = 0;
45293cd1cfcbSJose Abreu 
45303cd1cfcbSJose Abreu 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
45313cd1cfcbSJose Abreu 		__le16 vid_le = cpu_to_le16(vid);
45323cd1cfcbSJose Abreu 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
45333cd1cfcbSJose Abreu 		hash |= (1 << crc);
4534c7ab0b80SJose Abreu 		count++;
45353cd1cfcbSJose Abreu 	}
45363cd1cfcbSJose Abreu 
4537c7ab0b80SJose Abreu 	if (!priv->dma_cap.vlhash) {
4538c7ab0b80SJose Abreu 		if (count > 2) /* VID = 0 always passes filter */
4539c7ab0b80SJose Abreu 			return -EOPNOTSUPP;
4540c7ab0b80SJose Abreu 
4541a24cae70SJose Abreu 		pmatch = cpu_to_le16(vid);
4542c7ab0b80SJose Abreu 		hash = 0;
4543c7ab0b80SJose Abreu 	}
4544c7ab0b80SJose Abreu 
4545a24cae70SJose Abreu 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
45463cd1cfcbSJose Abreu }
45473cd1cfcbSJose Abreu 
45483cd1cfcbSJose Abreu static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
45493cd1cfcbSJose Abreu {
45503cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
45513cd1cfcbSJose Abreu 	bool is_double = false;
45523cd1cfcbSJose Abreu 	int ret;
45533cd1cfcbSJose Abreu 
45543cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
45553cd1cfcbSJose Abreu 		is_double = true;
45563cd1cfcbSJose Abreu 
45573cd1cfcbSJose Abreu 	set_bit(vid, priv->active_vlans);
45583cd1cfcbSJose Abreu 	ret = stmmac_vlan_update(priv, is_double);
45593cd1cfcbSJose Abreu 	if (ret) {
45603cd1cfcbSJose Abreu 		clear_bit(vid, priv->active_vlans);
45613cd1cfcbSJose Abreu 		return ret;
45623cd1cfcbSJose Abreu 	}
45633cd1cfcbSJose Abreu 
4564dd6a4998SJose Abreu 	if (priv->hw->num_vlan) {
4565ed64639bSWong Vee Khee 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4566dd6a4998SJose Abreu 		if (ret)
45673cd1cfcbSJose Abreu 			return ret;
45683cd1cfcbSJose Abreu 	}
45693cd1cfcbSJose Abreu 
4570dd6a4998SJose Abreu 	return 0;
4571dd6a4998SJose Abreu }
4572dd6a4998SJose Abreu 
45733cd1cfcbSJose Abreu static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
45743cd1cfcbSJose Abreu {
45753cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
45763cd1cfcbSJose Abreu 	bool is_double = false;
4577ed64639bSWong Vee Khee 	int ret;
45783cd1cfcbSJose Abreu 
45793cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
45803cd1cfcbSJose Abreu 		is_double = true;
45813cd1cfcbSJose Abreu 
45823cd1cfcbSJose Abreu 	clear_bit(vid, priv->active_vlans);
4583dd6a4998SJose Abreu 
4584dd6a4998SJose Abreu 	if (priv->hw->num_vlan) {
4585ed64639bSWong Vee Khee 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4586ed64639bSWong Vee Khee 		if (ret)
4587ed64639bSWong Vee Khee 			return ret;
4588dd6a4998SJose Abreu 	}
4589ed64639bSWong Vee Khee 
45903cd1cfcbSJose Abreu 	return stmmac_vlan_update(priv, is_double);
45913cd1cfcbSJose Abreu }
45923cd1cfcbSJose Abreu 
45937ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = {
45947ac6653aSJeff Kirsher 	.ndo_open = stmmac_open,
45957ac6653aSJeff Kirsher 	.ndo_start_xmit = stmmac_xmit,
45967ac6653aSJeff Kirsher 	.ndo_stop = stmmac_release,
45977ac6653aSJeff Kirsher 	.ndo_change_mtu = stmmac_change_mtu,
45987ac6653aSJeff Kirsher 	.ndo_fix_features = stmmac_fix_features,
4599d2afb5bdSGiuseppe CAVALLARO 	.ndo_set_features = stmmac_set_features,
460001789349SJiri Pirko 	.ndo_set_rx_mode = stmmac_set_rx_mode,
46017ac6653aSJeff Kirsher 	.ndo_tx_timeout = stmmac_tx_timeout,
46027ac6653aSJeff Kirsher 	.ndo_do_ioctl = stmmac_ioctl,
46034dbbe8ddSJose Abreu 	.ndo_setup_tc = stmmac_setup_tc,
46044993e5b3SJose Abreu 	.ndo_select_queue = stmmac_select_queue,
46057ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
46067ac6653aSJeff Kirsher 	.ndo_poll_controller = stmmac_poll_controller,
46077ac6653aSJeff Kirsher #endif
4608a830405eSBhadram Varka 	.ndo_set_mac_address = stmmac_set_mac_address,
46093cd1cfcbSJose Abreu 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
46103cd1cfcbSJose Abreu 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
46117ac6653aSJeff Kirsher };
46127ac6653aSJeff Kirsher 
461334877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv)
461434877a15SJose Abreu {
461534877a15SJose Abreu 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
461634877a15SJose Abreu 		return;
461734877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
461834877a15SJose Abreu 		return;
461934877a15SJose Abreu 
462034877a15SJose Abreu 	netdev_err(priv->dev, "Reset adapter.\n");
462134877a15SJose Abreu 
462234877a15SJose Abreu 	rtnl_lock();
462334877a15SJose Abreu 	netif_trans_update(priv->dev);
462434877a15SJose Abreu 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
462534877a15SJose Abreu 		usleep_range(1000, 2000);
462634877a15SJose Abreu 
462734877a15SJose Abreu 	set_bit(STMMAC_DOWN, &priv->state);
462834877a15SJose Abreu 	dev_close(priv->dev);
462900f54e68SPetr Machata 	dev_open(priv->dev, NULL);
463034877a15SJose Abreu 	clear_bit(STMMAC_DOWN, &priv->state);
463134877a15SJose Abreu 	clear_bit(STMMAC_RESETING, &priv->state);
463234877a15SJose Abreu 	rtnl_unlock();
463334877a15SJose Abreu }
463434877a15SJose Abreu 
463534877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
463634877a15SJose Abreu {
463734877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
463834877a15SJose Abreu 			service_task);
463934877a15SJose Abreu 
464034877a15SJose Abreu 	stmmac_reset_subtask(priv);
464134877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
464234877a15SJose Abreu }
464334877a15SJose Abreu 
46447ac6653aSJeff Kirsher /**
4645cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
464632ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
4647732fdf0eSGiuseppe CAVALLARO  *  Description: this function configures the MAC device according to
4648732fdf0eSGiuseppe CAVALLARO  *  the platform parameters and the HW capability register. It prepares the
4649732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain mode and to set up either enhanced or
4650732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
4651cf3f047bSGiuseppe CAVALLARO  */
4652cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
4653cf3f047bSGiuseppe CAVALLARO {
46545f0456b4SJose Abreu 	int ret;
4655cf3f047bSGiuseppe CAVALLARO 
46569f93ac8dSLABBE Corentin 	/* dwmac-sun8i only works in chain mode */
46579f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i)
46589f93ac8dSLABBE Corentin 		chain_mode = 1;
46595f0456b4SJose Abreu 	priv->chain_mode = chain_mode;
46609f93ac8dSLABBE Corentin 
46615f0456b4SJose Abreu 	/* Initialize HW Interface */
46625f0456b4SJose Abreu 	ret = stmmac_hwif_init(priv);
46635f0456b4SJose Abreu 	if (ret)
46645f0456b4SJose Abreu 		return ret;
46654a7d666aSGiuseppe CAVALLARO 
4666cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capability (GMAC cores newer than 3.50a) */
4667cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4668cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
466938ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
4670cf3f047bSGiuseppe CAVALLARO 
4671cf3f047bSGiuseppe CAVALLARO 		/* We can override some gmac/dma configuration fields: e.g.
4672cf3f047bSGiuseppe CAVALLARO 		 * enh_desc, tx_coe (e.g. that are passed through the
4673cf3f047bSGiuseppe CAVALLARO 		 * platform) with the values from the HW capability
4674cf3f047bSGiuseppe CAVALLARO 		 * register (if supported).
4675cf3f047bSGiuseppe CAVALLARO 		 */
4676cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4677cf3f047bSGiuseppe CAVALLARO 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
46783fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
4679b8ef7020SBiao Huang 		if (priv->dma_cap.hash_tb_sz) {
4680b8ef7020SBiao Huang 			priv->hw->multicast_filter_bins =
4681b8ef7020SBiao Huang 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
4682b8ef7020SBiao Huang 			priv->hw->mcast_bits_log2 =
4683b8ef7020SBiao Huang 					ilog2(priv->hw->multicast_filter_bins);
4684b8ef7020SBiao Huang 		}
468538912bdbSDeepak SIKRI 
4686a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
4687a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
4688a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
4689a8df35d4SEzequiel Garcia 		else
469038912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4691a8df35d4SEzequiel Garcia 
4692f748be53SAlexandre TORGUE 		/* In case of GMAC4 rx_coe is from HW cap register. */
4693f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
469438912bdbSDeepak SIKRI 
469538912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
469638912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
469738912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
469838912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
469938912bdbSDeepak SIKRI 
470038ddc59dSLABBE Corentin 	} else {
470138ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
470238ddc59dSLABBE Corentin 	}
4703cf3f047bSGiuseppe CAVALLARO 
4704d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
4705d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
470638ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4707f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
470838ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4709d2afb5bdSGiuseppe CAVALLARO 	}
4710cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
471138ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
4712cf3f047bSGiuseppe CAVALLARO 
4713cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
471438ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On LAN supported\n");
4715cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
4716cf3f047bSGiuseppe CAVALLARO 	}
4717cf3f047bSGiuseppe CAVALLARO 
4718f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
471938ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
4720f748be53SAlexandre TORGUE 
47217cfde0afSJose Abreu 	/* Run HW quirks, if any */
47227cfde0afSJose Abreu 	if (priv->hwif_quirks) {
47237cfde0afSJose Abreu 		ret = priv->hwif_quirks(priv);
47247cfde0afSJose Abreu 		if (ret)
47257cfde0afSJose Abreu 			return ret;
47267cfde0afSJose Abreu 	}
47277cfde0afSJose Abreu 
47283b509466SJose Abreu 	/* Rx Watchdog is available in cores newer than 3.40.
47293b509466SJose Abreu 	 * In some cases, for example on buggy HW, this feature
47303b509466SJose Abreu 	 * has to be disabled; this can be done by passing the
47313b509466SJose Abreu 	 * riwt_off field from the platform.
47323b509466SJose Abreu 	 */
47333b509466SJose Abreu 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
47343b509466SJose Abreu 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
47353b509466SJose Abreu 		priv->use_riwt = 1;
47363b509466SJose Abreu 		dev_info(priv->device,
47373b509466SJose Abreu 			 "Enable RX Mitigation via HW Watchdog Timer\n");
47383b509466SJose Abreu 	}
47393b509466SJose Abreu 
4740c24602efSGiuseppe CAVALLARO 	return 0;
4741cf3f047bSGiuseppe CAVALLARO }
4742cf3f047bSGiuseppe CAVALLARO 
4743cf3f047bSGiuseppe CAVALLARO /**
4744bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
4745bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
4746ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
4747e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
4748bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function: it allocates the
4749bfab27a1SGiuseppe CAVALLARO  * net_device and the driver private structure, and initializes the HW.
47509afec6efSAndy Shevchenko  * Return:
475115ffac73SJoachim Eastwood  * 0 on success, otherwise a negative errno.
47527ac6653aSJeff Kirsher  */
475315ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
4754cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
4755e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
47567ac6653aSJeff Kirsher {
4757bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
4758bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
475976067459SJose Abreu 	u32 queue, rxq, maxq;
476076067459SJose Abreu 	int i, ret = 0;
47617ac6653aSJeff Kirsher 
47629737070cSJisheng Zhang 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
47639737070cSJisheng Zhang 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
476441de8d4cSJoe Perches 	if (!ndev)
476515ffac73SJoachim Eastwood 		return -ENOMEM;
47667ac6653aSJeff Kirsher 
4767bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
47687ac6653aSJeff Kirsher 
4769bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
4770bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
4771bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
4772bfab27a1SGiuseppe CAVALLARO 
4773bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
4774cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
4775cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
4776e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
4777e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
4778e56788cfSJoachim Eastwood 
4779e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
4780e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
4781e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
4782e56788cfSJoachim Eastwood 
4783a51645f7SPetr Štetiar 	if (!IS_ERR_OR_NULL(res->mac))
4784e56788cfSJoachim Eastwood 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4785bfab27a1SGiuseppe CAVALLARO 
4786a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
4787803f8fc4SJoachim Eastwood 
4788cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
4789cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
4790cf3f047bSGiuseppe CAVALLARO 
479134877a15SJose Abreu 	/* Allocate workqueue */
479234877a15SJose Abreu 	priv->wq = create_singlethread_workqueue("stmmac_wq");
479334877a15SJose Abreu 	if (!priv->wq) {
479434877a15SJose Abreu 		dev_err(priv->device, "failed to create workqueue\n");
47959737070cSJisheng Zhang 		return -ENOMEM;
479634877a15SJose Abreu 	}
479734877a15SJose Abreu 
479834877a15SJose Abreu 	INIT_WORK(&priv->service_task, stmmac_service_task);
479934877a15SJose Abreu 
4800cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied XXX CRS XXX
4801ceb69499SGiuseppe CAVALLARO 	 * this needs to have multiple instances
4802ceb69499SGiuseppe CAVALLARO 	 */
4803cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
4804cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
4805cf3f047bSGiuseppe CAVALLARO 
480690f522a2SEugeniy Paltsev 	if (priv->plat->stmmac_rst) {
480790f522a2SEugeniy Paltsev 		ret = reset_control_assert(priv->plat->stmmac_rst);
4808f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
480990f522a2SEugeniy Paltsev 		/* Some reset controllers have only a reset callback instead of
481090f522a2SEugeniy Paltsev 		 * an assert + deassert callback pair.
481190f522a2SEugeniy Paltsev 		 */
481290f522a2SEugeniy Paltsev 		if (ret == -ENOTSUPP)
481390f522a2SEugeniy Paltsev 			reset_control_reset(priv->plat->stmmac_rst);
481490f522a2SEugeniy Paltsev 	}
4815c5e4ddbdSChen-Yu Tsai 
4816cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
4817c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
4818c24602efSGiuseppe CAVALLARO 	if (ret)
481962866e98SChen-Yu Tsai 		goto error_hw_init;
4820cf3f047bSGiuseppe CAVALLARO 
4821b561af36SVinod Koul 	stmmac_check_ether_addr(priv);
4822b561af36SVinod Koul 
4823c22a3f48SJoao Pinto 	/* Configure real RX and TX queues */
4824c02b7a91SJoao Pinto 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4825c02b7a91SJoao Pinto 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4826c22a3f48SJoao Pinto 
4827cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
4828cf3f047bSGiuseppe CAVALLARO 
4829cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4830cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
4831f748be53SAlexandre TORGUE 
48324dbbe8ddSJose Abreu 	ret = stmmac_tc_init(priv, priv);
48334dbbe8ddSJose Abreu 	if (!ret) {
48344dbbe8ddSJose Abreu 		ndev->hw_features |= NETIF_F_HW_TC;
48354dbbe8ddSJose Abreu 	}
48364dbbe8ddSJose Abreu 
4837f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
48389edfa7daSNiklas Cassel 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4839b7766206SJose Abreu 		if (priv->plat->has_gmac4)
4840b7766206SJose Abreu 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
4841f748be53SAlexandre TORGUE 		priv->tso = true;
484238ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
4843f748be53SAlexandre TORGUE 	}
4844a993db88SJose Abreu 
484567afd6d1SJose Abreu 	if (priv->dma_cap.sphen) {
484667afd6d1SJose Abreu 		ndev->hw_features |= NETIF_F_GRO;
484767afd6d1SJose Abreu 		priv->sph = true;
484867afd6d1SJose Abreu 		dev_info(priv->device, "SPH feature enabled\n");
484967afd6d1SJose Abreu 	}
485067afd6d1SJose Abreu 
4851a993db88SJose Abreu 	if (priv->dma_cap.addr64) {
4852a993db88SJose Abreu 		ret = dma_set_mask_and_coherent(device,
4853a993db88SJose Abreu 				DMA_BIT_MASK(priv->dma_cap.addr64));
4854a993db88SJose Abreu 		if (!ret) {
4855a993db88SJose Abreu 			dev_info(priv->device, "Using %d bits DMA width\n",
4856a993db88SJose Abreu 				 priv->dma_cap.addr64);
4857968a2978SThierry Reding 
4858968a2978SThierry Reding 			/*
4859968a2978SThierry Reding 			 * If more than 32 bits can be addressed, make sure to
4860968a2978SThierry Reding 			 * enable enhanced addressing mode.
4861968a2978SThierry Reding 			 */
4862968a2978SThierry Reding 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
4863968a2978SThierry Reding 				priv->plat->dma_cfg->eame = true;
4864a993db88SJose Abreu 		} else {
4865a993db88SJose Abreu 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
4866a993db88SJose Abreu 			if (ret) {
4867a993db88SJose Abreu 				dev_err(priv->device, "Failed to set DMA Mask\n");
4868a993db88SJose Abreu 				goto error_hw_init;
4869a993db88SJose Abreu 			}
4870a993db88SJose Abreu 
4871a993db88SJose Abreu 			priv->dma_cap.addr64 = 32;
4872a993db88SJose Abreu 		}
4873a993db88SJose Abreu 	}
4874a993db88SJose Abreu 
4875bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4876bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
48777ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
48787ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
4879ab188e8fSElad Nachman 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
48803cd1cfcbSJose Abreu 	if (priv->dma_cap.vlhash) {
48813cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
48823cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
48833cd1cfcbSJose Abreu 	}
488430d93227SJose Abreu 	if (priv->dma_cap.vlins) {
488530d93227SJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
488630d93227SJose Abreu 		if (priv->dma_cap.dvlan)
488730d93227SJose Abreu 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
488830d93227SJose Abreu 	}
48897ac6653aSJeff Kirsher #endif
48907ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
48917ac6653aSJeff Kirsher 
489276067459SJose Abreu 	/* Initialize RSS */
489376067459SJose Abreu 	rxq = priv->plat->rx_queues_to_use;
489476067459SJose Abreu 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
489576067459SJose Abreu 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
489676067459SJose Abreu 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
489776067459SJose Abreu 
489876067459SJose Abreu 	if (priv->dma_cap.rssen && priv->plat->rss_en)
489976067459SJose Abreu 		ndev->features |= NETIF_F_RXHASH;
490076067459SJose Abreu 
490144770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
490244770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
490356bcd591SJose Abreu 	if (priv->plat->has_xgmac)
49047d9e6c5aSJose Abreu 		ndev->max_mtu = XGMAC_JUMBO_LEN;
490556bcd591SJose Abreu 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
490656bcd591SJose Abreu 		ndev->max_mtu = JUMBO_LEN;
490744770e11SJarod Wilson 	else
490844770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4909a2cd64f3SKweh, Hock Leong 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4910a2cd64f3SKweh, Hock Leong 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4911a2cd64f3SKweh, Hock Leong 	 */
4912a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4913a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
491444770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
4915a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
4916b618ab45SHeiner Kallweit 		dev_warn(priv->device,
4917a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu having invalid value (%d)\n",
4918a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
491944770e11SJarod Wilson 
49207ac6653aSJeff Kirsher 	if (flow_ctrl)
49217ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
49227ac6653aSJeff Kirsher 
49238fce3331SJose Abreu 	/* Setup channels NAPI */
49248fce3331SJose Abreu 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4925c22a3f48SJoao Pinto 
49268fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
49278fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
49288fce3331SJose Abreu 
4929021bd5e3SJose Abreu 		spin_lock_init(&ch->lock);
49308fce3331SJose Abreu 		ch->priv_data = priv;
49318fce3331SJose Abreu 		ch->index = queue;
49328fce3331SJose Abreu 
49334ccb4585SJose Abreu 		if (queue < priv->plat->rx_queues_to_use) {
49344ccb4585SJose Abreu 			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
49358fce3331SJose Abreu 				       NAPI_POLL_WEIGHT);
4936c22a3f48SJoao Pinto 		}
49374ccb4585SJose Abreu 		if (queue < priv->plat->tx_queues_to_use) {
49384d97972bSFrode Isaksen 			netif_tx_napi_add(ndev, &ch->tx_napi,
49394d97972bSFrode Isaksen 					  stmmac_napi_poll_tx,
49404ccb4585SJose Abreu 					  NAPI_POLL_WEIGHT);
49414ccb4585SJose Abreu 		}
49424ccb4585SJose Abreu 	}
49437ac6653aSJeff Kirsher 
494429555fa3SThierry Reding 	mutex_init(&priv->lock);
49457ac6653aSJeff Kirsher 
4946cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform,
4947cd7201f4SGiuseppe CAVALLARO 	 * this means that the CSR Clock Range selection cannot be
4948cd7201f4SGiuseppe CAVALLARO 	 * changed at run-time and is fixed. Otherwise, the driver will
4949cd7201f4SGiuseppe CAVALLARO 	 * try to set the MDC clock dynamically according to the actual
4950cd7201f4SGiuseppe CAVALLARO 	 * csr clock input.
4951cd7201f4SGiuseppe CAVALLARO 	 */
49525e7f7fc5SBiao Huang 	if (priv->plat->clk_csr >= 0)
4953cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
49545e7f7fc5SBiao Huang 	else
49555e7f7fc5SBiao Huang 		stmmac_clk_csr_set(priv);
4956cd7201f4SGiuseppe CAVALLARO 
4957e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
4958e58bb43fSGiuseppe CAVALLARO 
4959a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
49603fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
49614bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
49624bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
49634bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
4964b618ab45SHeiner Kallweit 			dev_err(priv->device,
496538ddc59dSLABBE Corentin 				"%s: MDIO bus (id: %d) registration failed",
49664bfcbd7aSFrancesco Virlinzi 				__func__, priv->plat->bus_id);
49676a81c26fSViresh Kumar 			goto error_mdio_register;
49684bfcbd7aSFrancesco Virlinzi 		}
4969e58bb43fSGiuseppe CAVALLARO 	}
49704bfcbd7aSFrancesco Virlinzi 
497174371272SJose Abreu 	ret = stmmac_phy_setup(priv);
497274371272SJose Abreu 	if (ret) {
497374371272SJose Abreu 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
497474371272SJose Abreu 		goto error_phy_setup;
497574371272SJose Abreu 	}
497674371272SJose Abreu 
497757016590SFlorian Fainelli 	ret = register_netdev(ndev);
4978b2eb09afSFlorian Fainelli 	if (ret) {
4979b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
498057016590SFlorian Fainelli 			__func__, ret);
4981b2eb09afSFlorian Fainelli 		goto error_netdev_register;
4982b2eb09afSFlorian Fainelli 	}
49837ac6653aSJeff Kirsher 
4984b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerup) {
4985b9663b7cSVoon Weifeng 		ret = priv->plat->serdes_powerup(ndev,
4986b9663b7cSVoon Weifeng 						 priv->plat->bsp_priv);
4987b9663b7cSVoon Weifeng 
4988b9663b7cSVoon Weifeng 		if (ret < 0)
4989ab1c637cSAndy Shevchenko 			goto error_serdes_powerup;
4990b9663b7cSVoon Weifeng 	}
4991b9663b7cSVoon Weifeng 
49925f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
49938d72ab11SGreg Kroah-Hartman 	stmmac_init_fs(ndev);
49945f2b8b62SThierry Reding #endif
49955f2b8b62SThierry Reding 
499657016590SFlorian Fainelli 	return ret;
49977ac6653aSJeff Kirsher 
4998ab1c637cSAndy Shevchenko error_serdes_powerup:
4999ab1c637cSAndy Shevchenko 	unregister_netdev(ndev);
50006a81c26fSViresh Kumar error_netdev_register:
500174371272SJose Abreu 	phylink_destroy(priv->phylink);
500274371272SJose Abreu error_phy_setup:
5003a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
5004b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
5005b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
50067ac6653aSJeff Kirsher error_mdio_register:
50078fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
50088fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
5009c22a3f48SJoao Pinto 
50104ccb4585SJose Abreu 		if (queue < priv->plat->rx_queues_to_use)
50114ccb4585SJose Abreu 			netif_napi_del(&ch->rx_napi);
50124ccb4585SJose Abreu 		if (queue < priv->plat->tx_queues_to_use)
50134ccb4585SJose Abreu 			netif_napi_del(&ch->tx_napi);
5014c22a3f48SJoao Pinto 	}
501562866e98SChen-Yu Tsai error_hw_init:
501634877a15SJose Abreu 	destroy_workqueue(priv->wq);
50177ac6653aSJeff Kirsher 
501815ffac73SJoachim Eastwood 	return ret;
50197ac6653aSJeff Kirsher }
5020b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
50217ac6653aSJeff Kirsher 
50227ac6653aSJeff Kirsher /**
50237ac6653aSJeff Kirsher  * stmmac_dvr_remove
5024f4e7bd81SJoachim Eastwood  * @dev: device pointer
50257ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
5026bfab27a1SGiuseppe CAVALLARO  * changes the link status and releases the DMA descriptor rings.
50277ac6653aSJeff Kirsher  */
5028f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev)
50297ac6653aSJeff Kirsher {
5030f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
50317ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
50327ac6653aSJeff Kirsher 
503338ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
50347ac6653aSJeff Kirsher 
5035ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
50367ac6653aSJeff Kirsher 
5037b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerdown)
5038b9663b7cSVoon Weifeng 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5039b9663b7cSVoon Weifeng 
5040c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
50417ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
50427ac6653aSJeff Kirsher 	unregister_netdev(ndev);
5043474a31e1SAaro Koskinen #ifdef CONFIG_DEBUG_FS
5044474a31e1SAaro Koskinen 	stmmac_exit_fs(ndev);
5045474a31e1SAaro Koskinen #endif
504674371272SJose Abreu 	phylink_destroy(priv->phylink);
5047f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
5048f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
5049f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->pclk);
5050f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->stmmac_clk);
5051a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
50523fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
5053e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
505434877a15SJose Abreu 	destroy_workqueue(priv->wq);
505529555fa3SThierry Reding 	mutex_destroy(&priv->lock);
50567ac6653aSJeff Kirsher 
50577ac6653aSJeff Kirsher 	return 0;
50587ac6653aSJeff Kirsher }
5059b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
50607ac6653aSJeff Kirsher 
5061732fdf0eSGiuseppe CAVALLARO /**
5062732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
5063f4e7bd81SJoachim Eastwood  * @dev: device pointer
5064732fdf0eSGiuseppe CAVALLARO  * Description: this function suspends the device and is called by the
5065732fdf0eSGiuseppe CAVALLARO  * platform driver to stop the network queues, release the resources,
5066732fdf0eSGiuseppe CAVALLARO  * program the PMT register (for WoL) and clean up driver resources.
5067732fdf0eSGiuseppe CAVALLARO  */
5068f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
50697ac6653aSJeff Kirsher {
5070f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
50717ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
507214b41a29SNicolin Chen 	u32 chan;
50737ac6653aSJeff Kirsher 
50747ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
50757ac6653aSJeff Kirsher 		return 0;
50767ac6653aSJeff Kirsher 
50773e2bf04fSJose Abreu 	phylink_mac_change(priv->phylink, false);
50787ac6653aSJeff Kirsher 
5079134cc4ceSThierry Reding 	mutex_lock(&priv->lock);
508019e13cb2SJose Abreu 
50817ac6653aSJeff Kirsher 	netif_device_detach(ndev);
5082c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
50837ac6653aSJeff Kirsher 
5084c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
50857ac6653aSJeff Kirsher 
508614b41a29SNicolin Chen 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
508714b41a29SNicolin Chen 		del_timer_sync(&priv->tx_queue[chan].txtimer);
508814b41a29SNicolin Chen 
50897ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
5090ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
5091c24602efSGiuseppe CAVALLARO 
5092b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerdown)
5093b9663b7cSVoon Weifeng 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5094b9663b7cSVoon Weifeng 
50957ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
509689f7f2cfSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
5097c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, priv->wolopts);
509889f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
509989f7f2cfSSrinivas Kandagatla 	} else {
5100134cc4ceSThierry Reding 		mutex_unlock(&priv->lock);
51013e2bf04fSJose Abreu 		rtnl_lock();
51023e2bf04fSJose Abreu 		phylink_stop(priv->phylink);
51033e2bf04fSJose Abreu 		rtnl_unlock();
5104134cc4ceSThierry Reding 		mutex_lock(&priv->lock);
51053e2bf04fSJose Abreu 
5106c10d4c82SJose Abreu 		stmmac_mac_set(priv, priv->ioaddr, false);
5107db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
5108ba1377ffSGiuseppe CAVALLARO 		/* Disable clock in case of PWM is off */
5109e497c20eSBiao Huang 		if (priv->plat->clk_ptp_ref)
5110e497c20eSBiao Huang 			clk_disable_unprepare(priv->plat->clk_ptp_ref);
5111e497c20eSBiao Huang 		clk_disable_unprepare(priv->plat->pclk);
5112e497c20eSBiao Huang 		clk_disable_unprepare(priv->plat->stmmac_clk);
5113ba1377ffSGiuseppe CAVALLARO 	}
511429555fa3SThierry Reding 	mutex_unlock(&priv->lock);
51152d871aa0SVince Bridgers 
5116bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
51177ac6653aSJeff Kirsher 	return 0;
51187ac6653aSJeff Kirsher }
5119b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
51207ac6653aSJeff Kirsher 
5121732fdf0eSGiuseppe CAVALLARO /**
512254139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
512354139cf3SJoao Pinto  * @priv: driver private structure
512454139cf3SJoao Pinto  */
512554139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
512654139cf3SJoao Pinto {
512754139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5128ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
512954139cf3SJoao Pinto 	u32 queue;
513054139cf3SJoao Pinto 
513154139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
513254139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
513354139cf3SJoao Pinto 
513454139cf3SJoao Pinto 		rx_q->cur_rx = 0;
513554139cf3SJoao Pinto 		rx_q->dirty_rx = 0;
513654139cf3SJoao Pinto 	}
513754139cf3SJoao Pinto 
5138ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
5139ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5140ce736788SJoao Pinto 
5141ce736788SJoao Pinto 		tx_q->cur_tx = 0;
5142ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
51438d212a9eSNiklas Cassel 		tx_q->mss = 0;
5144ce736788SJoao Pinto 	}
514554139cf3SJoao Pinto }
514654139cf3SJoao Pinto 
514754139cf3SJoao Pinto /**
5148732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
5149f4e7bd81SJoachim Eastwood  * @dev: device pointer
5150732fdf0eSGiuseppe CAVALLARO  * Description: on resume this function is invoked to set up the DMA and the
5151732fdf0eSGiuseppe CAVALLARO  * core in a usable state.
5152732fdf0eSGiuseppe CAVALLARO  */
5153f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
51547ac6653aSJeff Kirsher {
5155f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
51567ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
5157b9663b7cSVoon Weifeng 	int ret;
51587ac6653aSJeff Kirsher 
51597ac6653aSJeff Kirsher 	if (!netif_running(ndev))
51607ac6653aSJeff Kirsher 		return 0;
51617ac6653aSJeff Kirsher 
51627ac6653aSJeff Kirsher 	/* The Power Down bit in the PM register is cleared
51637ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
51647ac6653aSJeff Kirsher 	 * is received. Anyway, it's better to clear this bit manually
51657ac6653aSJeff Kirsher 	 * because it can cause problems while resuming
5166ceb69499SGiuseppe CAVALLARO 	 * from other devices (e.g. a serial console).
5167ceb69499SGiuseppe CAVALLARO 	 */
5168623997fbSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
516929555fa3SThierry Reding 		mutex_lock(&priv->lock);
5170c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, 0);
517129555fa3SThierry Reding 		mutex_unlock(&priv->lock);
517289f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
5173623997fbSSrinivas Kandagatla 	} else {
5174db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
51758d45e42bSLABBE Corentin 		/* enable the clocks that were previously disabled */
5176e497c20eSBiao Huang 		clk_prepare_enable(priv->plat->stmmac_clk);
5177e497c20eSBiao Huang 		clk_prepare_enable(priv->plat->pclk);
5178e497c20eSBiao Huang 		if (priv->plat->clk_ptp_ref)
5179e497c20eSBiao Huang 			clk_prepare_enable(priv->plat->clk_ptp_ref);
5180623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
5181623997fbSSrinivas Kandagatla 		if (priv->mii)
5182623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
5183623997fbSSrinivas Kandagatla 	}
51847ac6653aSJeff Kirsher 
5185b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerup) {
5186b9663b7cSVoon Weifeng 		ret = priv->plat->serdes_powerup(ndev,
5187b9663b7cSVoon Weifeng 						 priv->plat->bsp_priv);
5188b9663b7cSVoon Weifeng 
5189b9663b7cSVoon Weifeng 		if (ret < 0)
5190b9663b7cSVoon Weifeng 			return ret;
5191b9663b7cSVoon Weifeng 	}
5192b9663b7cSVoon Weifeng 
519329555fa3SThierry Reding 	mutex_lock(&priv->lock);
5194f55d84b0SVincent Palatin 
519554139cf3SJoao Pinto 	stmmac_reset_queues_param(priv);
519654139cf3SJoao Pinto 
5197ae79a639SGiuseppe CAVALLARO 	stmmac_clear_descriptors(priv);
5198ae79a639SGiuseppe CAVALLARO 
5199fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
5200d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
5201ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
52027ac6653aSJeff Kirsher 
5203ed64639bSWong Vee Khee 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
5204ed64639bSWong Vee Khee 
5205c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
52067ac6653aSJeff Kirsher 
5207c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
52087ac6653aSJeff Kirsher 
5209134cc4ceSThierry Reding 	mutex_unlock(&priv->lock);
5210134cc4ceSThierry Reding 
52113e2bf04fSJose Abreu 	if (!device_may_wakeup(priv->device)) {
521219e13cb2SJose Abreu 		rtnl_lock();
521374371272SJose Abreu 		phylink_start(priv->phylink);
521419e13cb2SJose Abreu 		rtnl_unlock();
52153e2bf04fSJose Abreu 	}
521619e13cb2SJose Abreu 
52173e2bf04fSJose Abreu 	phylink_mac_change(priv->phylink, true);
5218102463b1SFrancesco Virlinzi 
521931096c3eSLeon Yu 	netif_device_attach(ndev);
522031096c3eSLeon Yu 
52217ac6653aSJeff Kirsher 	return 0;
52227ac6653aSJeff Kirsher }
5223b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
5224ba27ec66SGiuseppe CAVALLARO 
52257ac6653aSJeff Kirsher #ifndef MODULE
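/* When the driver is built in, the options parsed below can be passed on the
 * kernel command line as a comma-separated list, for example (the values are
 * illustrative only):
 *
 *   stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
 *
 * The recognized option names and the "stmmaceth=" prefix come from
 * stmmac_cmdline_opt() and the __setup() registration below.
 */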
52267ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
52277ac6653aSJeff Kirsher {
52287ac6653aSJeff Kirsher 	char *opt;
52297ac6653aSJeff Kirsher 
52307ac6653aSJeff Kirsher 	if (!str || !*str)
52317ac6653aSJeff Kirsher 		return -EINVAL;
52327ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
52337ac6653aSJeff Kirsher 		if (!strncmp(opt, "debug:", 6)) {
5234ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
52357ac6653aSJeff Kirsher 				goto err;
52367ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "phyaddr:", 8)) {
5237ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
52387ac6653aSJeff Kirsher 				goto err;
52397ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "buf_sz:", 7)) {
5240ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
52417ac6653aSJeff Kirsher 				goto err;
52427ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "tc:", 3)) {
5243ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
52447ac6653aSJeff Kirsher 				goto err;
52457ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "watchdog:", 9)) {
5246ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
52477ac6653aSJeff Kirsher 				goto err;
52487ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
5249ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
52507ac6653aSJeff Kirsher 				goto err;
52517ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "pause:", 6)) {
5252ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
52537ac6653aSJeff Kirsher 				goto err;
5254506f669cSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "eee_timer:", 10)) {
5255d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
5256d765955dSGiuseppe CAVALLARO 				goto err;
52574a7d666aSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "chain_mode:", 11)) {
52584a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
52594a7d666aSGiuseppe CAVALLARO 				goto err;
52607ac6653aSJeff Kirsher 		}
52617ac6653aSJeff Kirsher 	}
52627ac6653aSJeff Kirsher 	return 0;
52637ac6653aSJeff Kirsher 
52647ac6653aSJeff Kirsher err:
52657ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
52667ac6653aSJeff Kirsher 	return -EINVAL;
52677ac6653aSJeff Kirsher }
52687ac6653aSJeff Kirsher 
52697ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
5270ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
52716fc0d0f2SGiuseppe Cavallaro 
5272466c5ac8SMathieu Olivari static int __init stmmac_init(void)
5273466c5ac8SMathieu Olivari {
5274466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
5275466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
52768d72ab11SGreg Kroah-Hartman 	if (!stmmac_fs_dir)
5277466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
5278474a31e1SAaro Koskinen 	register_netdevice_notifier(&stmmac_notifier);
5279466c5ac8SMathieu Olivari #endif
5280466c5ac8SMathieu Olivari 
5281466c5ac8SMathieu Olivari 	return 0;
5282466c5ac8SMathieu Olivari }
5283466c5ac8SMathieu Olivari 
5284466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
5285466c5ac8SMathieu Olivari {
5286466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
5287474a31e1SAaro Koskinen 	unregister_netdevice_notifier(&stmmac_notifier);
5288466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
5289466c5ac8SMathieu Olivari #endif
5290466c5ac8SMathieu Olivari }
5291466c5ac8SMathieu Olivari 
5292466c5ac8SMathieu Olivari module_init(stmmac_init)
5293466c5ac8SMathieu Olivari module_exit(stmmac_exit)
5294466c5ac8SMathieu Olivari 
52956fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
52966fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
52976fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
5298