// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow the user to force the use of chain mode instead of ring mode.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
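
/* Usage sketch: when the driver is built as a module, the parameters above can
 * typically be overridden at load time, for example
 *	modprobe stmmac eee_timer=2000 chain_mode=1 buf_sz=4096
 * The exact module name depends on how the core is packaged for the platform
 * (PCI or platform glue), so the command above is only illustrative.
 */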

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case
 * of errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

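/* Schedule the deferred service task unless the interface is being taken down
 * or a service run has already been scheduled.
 */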
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

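/* Handle a fatal error: drop the carrier, request a full reset and let the
 * service task perform the actual recovery.
 */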
static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider because the frequency
	 * of clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

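/* Dump a packet buffer to the log at debug level (length and hex bytes). */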
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

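/**
 * stmmac_tx_avail - Get the number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 */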
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

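/**
 * stmmac_lpi_entry_timer_config - configure the HW LPI entry timer
 * @priv: driver private structure
 * @en: true to use the HW LPI entry timer, false to fall back to the SW timer
 * Description: when enabled, the hardware timer is programmed with
 * priv->tx_lpi_timer and the SW EEE timer path is disabled; otherwise the
 * hardware timer is cleared and the SW timer path is used instead.
 */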
static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}

/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function checks whether all TX queues have completed
 * their work and, if so, puts the MAC into LPI mode (EEE).
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables EEE when the TX path is in
 * the LPI state. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in the LPI state,
 *  the MAC transmitter can be moved to the LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot deal with the PHY registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
	}

	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor and passes it to
 * the stack. It also performs some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet timestamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 sec_inc = 0;
	u32 value = 0;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping.
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.1AS any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id != DWMAC_CORE_5_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.1AS, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.1AS, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				xmac, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

		/* calculate default added value:
		 * formula is:
		 * addend = (2^32)/freq_div_ratio;
		 * where freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				(u32)now.tv_sec, now.tv_nsec);
	}

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}
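
/* Usage sketch: these handlers are normally reached through the
 * SIOCSHWTSTAMP / SIOCGHWTSTAMP ioctls; userspace fills a struct
 * hwtstamp_config (for example tx_type = HWTSTAMP_TX_ON and
 * rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT) and passes it through
 * ifr->ifr_data on the network interface.
 */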

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex passed to the next function
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			priv->pause, tx_cnt);
}

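/* phylink .validate callback: build the set of link modes this MAC (and the
 * XPCS, when present) can support, based on the platform max_speed and the
 * number of TX queues, then mask the supported/advertising masks accordingly.
 */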
static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_xgmac) {
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
	}

	/* Half-duplex can only work with a single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);

	/* If PCS is supported, check which modes it supports. */
	stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
}

static void stmmac_mac_pcs_get_state(struct phylink_config *config,
				     struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	state->link = 0;
	stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
}

static void stmmac_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}

static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);
}

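/* phylink .mac_link_up callback: program the negotiated speed and duplex into
 * MAC_CTRL_REG, apply any platform fix_mac_speed hook, configure flow control
 * when both pause directions are granted, enable the MAC and, if the PHY and
 * the MAC support it, (re)enable EEE.
 */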
static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (tx_pause && rx_pause)
		stmmac_mac_flow_ctrl(priv, duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_pcs_get_state = stmmac_mac_pcs_get_state,
	.mac_config = stmmac_mac_config,
	.mac_an_restart = stmmac_mac_an_restart,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

104429feff39SJoao Pinto /**
1045732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
104632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
104732ceabcaSGiuseppe CAVALLARO  * Description: this verifies if the HW supports the Physical Coding
104832ceabcaSGiuseppe CAVALLARO  * Sublayer (PCS) interface, which can be used when the MAC is configured
104932ceabcaSGiuseppe CAVALLARO  * for the TBI, RTBI, or SGMII PHY interface.
105032ceabcaSGiuseppe CAVALLARO  */
1051e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1052e58bb43fSGiuseppe CAVALLARO {
1053e58bb43fSGiuseppe CAVALLARO 	int interface = priv->plat->interface;
1054e58bb43fSGiuseppe CAVALLARO 
1055e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
10560d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
10570d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
10580d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
10590d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
106038ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
10613fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
10620d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
106338ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
10643fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
1065e58bb43fSGiuseppe CAVALLARO 		}
1066e58bb43fSGiuseppe CAVALLARO 	}
1067e58bb43fSGiuseppe CAVALLARO }
1068e58bb43fSGiuseppe CAVALLARO 
10697ac6653aSJeff Kirsher /**
10707ac6653aSJeff Kirsher  * stmmac_init_phy - PHY initialization
10717ac6653aSJeff Kirsher  * @dev: net device structure
10727ac6653aSJeff Kirsher  * Description: it initializes the driver's PHY state and attaches the PHY
10737ac6653aSJeff Kirsher  * to the MAC driver.
10747ac6653aSJeff Kirsher  *  Return value:
10757ac6653aSJeff Kirsher  *  0 on success
10767ac6653aSJeff Kirsher  */
10777ac6653aSJeff Kirsher static int stmmac_init_phy(struct net_device *dev)
10787ac6653aSJeff Kirsher {
10791d8e5b0fSJisheng Zhang 	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
10807ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
108174371272SJose Abreu 	struct device_node *node;
108274371272SJose Abreu 	int ret;
10837ac6653aSJeff Kirsher 
10844838a540SJose Abreu 	node = priv->plat->phylink_node;
108574371272SJose Abreu 
108642e87024SJose Abreu 	if (node)
108774371272SJose Abreu 		ret = phylink_of_phy_connect(priv->phylink, node, 0);
108842e87024SJose Abreu 
108942e87024SJose Abreu 	/* Some DT bindings do not set-up the PHY handle. Let's try to
109042e87024SJose Abreu 	 * manually parse it
109142e87024SJose Abreu 	 */
109242e87024SJose Abreu 	if (!node || ret) {
109374371272SJose Abreu 		int addr = priv->plat->phy_addr;
109474371272SJose Abreu 		struct phy_device *phydev;
1095f142af2eSSrinivas Kandagatla 
109674371272SJose Abreu 		phydev = mdiobus_get_phy(priv->mii, addr);
109774371272SJose Abreu 		if (!phydev) {
109874371272SJose Abreu 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
10997ac6653aSJeff Kirsher 			return -ENODEV;
11007ac6653aSJeff Kirsher 		}
11018e99fc5fSGiuseppe Cavallaro 
110274371272SJose Abreu 		ret = phylink_connect_phy(priv->phylink, phydev);
110374371272SJose Abreu 	}
1104c51e424dSFlorian Fainelli 
11051d8e5b0fSJisheng Zhang 	phylink_ethtool_get_wol(priv->phylink, &wol);
11061d8e5b0fSJisheng Zhang 	device_set_wakeup_capable(priv->device, !!wol.supported);
11071d8e5b0fSJisheng Zhang 
110874371272SJose Abreu 	return ret;
110974371272SJose Abreu }
111074371272SJose Abreu 
111174371272SJose Abreu static int stmmac_phy_setup(struct stmmac_priv *priv)
111274371272SJose Abreu {
1113c63d1e5cSArnd Bergmann 	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
11140060c878SAlexandru Ardelean 	int mode = priv->plat->phy_interface;
111574371272SJose Abreu 	struct phylink *phylink;
111674371272SJose Abreu 
111774371272SJose Abreu 	priv->phylink_config.dev = &priv->dev->dev;
111874371272SJose Abreu 	priv->phylink_config.type = PHYLINK_NETDEV;
1119f213bbe8SJose Abreu 	priv->phylink_config.pcs_poll = true;
1120e5e5b771SOng Boon Leong 	priv->phylink_config.ovr_an_inband =
1121e5e5b771SOng Boon Leong 		priv->plat->mdio_bus_data->xpcs_an_inband;
112274371272SJose Abreu 
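	/* Fall back to the device's own fwnode when no phylink node is given in DT */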
11238dc6051cSJose Abreu 	if (!fwnode)
11248dc6051cSJose Abreu 		fwnode = dev_fwnode(priv->device);
11258dc6051cSJose Abreu 
1126c63d1e5cSArnd Bergmann 	phylink = phylink_create(&priv->phylink_config, fwnode,
112774371272SJose Abreu 				 mode, &stmmac_phylink_mac_ops);
112874371272SJose Abreu 	if (IS_ERR(phylink))
112974371272SJose Abreu 		return PTR_ERR(phylink);
113074371272SJose Abreu 
113174371272SJose Abreu 	priv->phylink = phylink;
11327ac6653aSJeff Kirsher 	return 0;
11337ac6653aSJeff Kirsher }
11347ac6653aSJeff Kirsher 
113571fedb01SJoao Pinto static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1136c24602efSGiuseppe CAVALLARO {
113754139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1138bfaf91caSJoakim Zhang 	unsigned int desc_size;
113971fedb01SJoao Pinto 	void *head_rx;
114054139cf3SJoao Pinto 	u32 queue;
114154139cf3SJoao Pinto 
114254139cf3SJoao Pinto 	/* Display RX rings */
114354139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
114454139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
114554139cf3SJoao Pinto 
114654139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1147d0225e7dSAlexandre TORGUE 
1148bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
114954139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
1150bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
1151bfaf91caSJoakim Zhang 		} else {
115254139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
1153bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
1154bfaf91caSJoakim Zhang 		}
115571fedb01SJoao Pinto 
115671fedb01SJoao Pinto 		/* Display RX ring */
1157bfaf91caSJoakim Zhang 		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
1158bfaf91caSJoakim Zhang 				    rx_q->dma_rx_phy, desc_size);
11595bacd778SLABBE Corentin 	}
116054139cf3SJoao Pinto }
1161d0225e7dSAlexandre TORGUE 
116271fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv)
116371fedb01SJoao Pinto {
1164ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1165bfaf91caSJoakim Zhang 	unsigned int desc_size;
116671fedb01SJoao Pinto 	void *head_tx;
1167ce736788SJoao Pinto 	u32 queue;
1168ce736788SJoao Pinto 
1169ce736788SJoao Pinto 	/* Display TX rings */
1170ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1171ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1172ce736788SJoao Pinto 
1173ce736788SJoao Pinto 		pr_info("\tTX Queue %u rings\n", queue);
117471fedb01SJoao Pinto 
1175bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
1176ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
1177bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
1178bfaf91caSJoakim Zhang 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1179579a25a8SJose Abreu 			head_tx = (void *)tx_q->dma_entx;
1180bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_edesc);
1181bfaf91caSJoakim Zhang 		} else {
1182ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
1183bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
1184bfaf91caSJoakim Zhang 		}
118571fedb01SJoao Pinto 
1186bfaf91caSJoakim Zhang 		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
1187bfaf91caSJoakim Zhang 				    tx_q->dma_tx_phy, desc_size);
1188c24602efSGiuseppe CAVALLARO 	}
1189ce736788SJoao Pinto }
1190c24602efSGiuseppe CAVALLARO 
119171fedb01SJoao Pinto static void stmmac_display_rings(struct stmmac_priv *priv)
119271fedb01SJoao Pinto {
119371fedb01SJoao Pinto 	/* Display RX ring */
119471fedb01SJoao Pinto 	stmmac_display_rx_rings(priv);
119571fedb01SJoao Pinto 
119671fedb01SJoao Pinto 	/* Display TX ring */
119771fedb01SJoao Pinto 	stmmac_display_tx_rings(priv);
119871fedb01SJoao Pinto }
119971fedb01SJoao Pinto 
1200286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1201286a8372SGiuseppe CAVALLARO {
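	/* Map the MTU to the smallest supported DMA buffer size that can hold it */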
1202286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1203286a8372SGiuseppe CAVALLARO 
1204b2f3a481SJose Abreu 	if (mtu >= BUF_SIZE_8KiB)
1205b2f3a481SJose Abreu 		ret = BUF_SIZE_16KiB;
1206b2f3a481SJose Abreu 	else if (mtu >= BUF_SIZE_4KiB)
1207286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1208286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1209286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1210d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1211286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1212286a8372SGiuseppe CAVALLARO 	else
1213d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1214286a8372SGiuseppe CAVALLARO 
1215286a8372SGiuseppe CAVALLARO 	return ret;
1216286a8372SGiuseppe CAVALLARO }
1217286a8372SGiuseppe CAVALLARO 
121832ceabcaSGiuseppe CAVALLARO /**
121971fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
122032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
122154139cf3SJoao Pinto  * @queue: RX queue index
122271fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors
122332ceabcaSGiuseppe CAVALLARO  * whether basic or extended descriptors are in use.
122432ceabcaSGiuseppe CAVALLARO  */
122554139cf3SJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1226c24602efSGiuseppe CAVALLARO {
122754139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
12285bacd778SLABBE Corentin 	int i;
1229c24602efSGiuseppe CAVALLARO 
123071fedb01SJoao Pinto 	/* Clear the RX descriptors */
1231aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_rx_size; i++)
12325bacd778SLABBE Corentin 		if (priv->extend_desc)
123342de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
12345bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1235aa042f60SSong, Yoong Siang 					(i == priv->dma_rx_size - 1),
1236583e6361SAaro Koskinen 					priv->dma_buf_sz);
12375bacd778SLABBE Corentin 		else
123842de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
12395bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1240aa042f60SSong, Yoong Siang 					(i == priv->dma_rx_size - 1),
1241583e6361SAaro Koskinen 					priv->dma_buf_sz);
124271fedb01SJoao Pinto }
124371fedb01SJoao Pinto 
124471fedb01SJoao Pinto /**
124571fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
124671fedb01SJoao Pinto  * @priv: driver private structure
1247ce736788SJoao Pinto  * @queue: TX queue index.
124871fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors
124971fedb01SJoao Pinto  * whether basic or extended descriptors are in use.
125071fedb01SJoao Pinto  */
1251ce736788SJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
125271fedb01SJoao Pinto {
1253ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
125471fedb01SJoao Pinto 	int i;
125571fedb01SJoao Pinto 
125671fedb01SJoao Pinto 	/* Clear the TX descriptors */
1257aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_tx_size; i++) {
1258aa042f60SSong, Yoong Siang 		int last = (i == (priv->dma_tx_size - 1));
1259579a25a8SJose Abreu 		struct dma_desc *p;
1260579a25a8SJose Abreu 
12615bacd778SLABBE Corentin 		if (priv->extend_desc)
1262579a25a8SJose Abreu 			p = &tx_q->dma_etx[i].basic;
1263579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1264579a25a8SJose Abreu 			p = &tx_q->dma_entx[i].basic;
12655bacd778SLABBE Corentin 		else
1266579a25a8SJose Abreu 			p = &tx_q->dma_tx[i];
1267579a25a8SJose Abreu 
1268579a25a8SJose Abreu 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1269579a25a8SJose Abreu 	}
1270c24602efSGiuseppe CAVALLARO }
1271c24602efSGiuseppe CAVALLARO 
1272732fdf0eSGiuseppe CAVALLARO /**
127371fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
127471fedb01SJoao Pinto  * @priv: driver private structure
127571fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors
127671fedb01SJoao Pinto  * whether basic or extended descriptors are in use.
127771fedb01SJoao Pinto  */
127871fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv)
127971fedb01SJoao Pinto {
128054139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1281ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
128254139cf3SJoao Pinto 	u32 queue;
128354139cf3SJoao Pinto 
128471fedb01SJoao Pinto 	/* Clear the RX descriptors */
128554139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
128654139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
128771fedb01SJoao Pinto 
128871fedb01SJoao Pinto 	/* Clear the TX descriptors */
1289ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1290ce736788SJoao Pinto 		stmmac_clear_tx_descriptors(priv, queue);
129171fedb01SJoao Pinto }
129271fedb01SJoao Pinto 
129371fedb01SJoao Pinto /**
1294732fdf0eSGiuseppe CAVALLARO  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1295732fdf0eSGiuseppe CAVALLARO  * @priv: driver private structure
1296732fdf0eSGiuseppe CAVALLARO  * @p: descriptor pointer
1297732fdf0eSGiuseppe CAVALLARO  * @i: descriptor index
129854139cf3SJoao Pinto  * @flags: gfp flag
129954139cf3SJoao Pinto  * @queue: RX queue index
1300732fdf0eSGiuseppe CAVALLARO  * Description: this function is called to allocate a receive buffer, perform
1301732fdf0eSGiuseppe CAVALLARO  * the DMA mapping and init the descriptor.
1302732fdf0eSGiuseppe CAVALLARO  */
1303c24602efSGiuseppe CAVALLARO static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
130454139cf3SJoao Pinto 				  int i, gfp_t flags, u32 queue)
1305c24602efSGiuseppe CAVALLARO {
130654139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
13072af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1308c24602efSGiuseppe CAVALLARO 
13092af6106aSJose Abreu 	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
13102af6106aSJose Abreu 	if (!buf->page)
131156329137SBartlomiej Zolnierkiewicz 		return -ENOMEM;
1312c24602efSGiuseppe CAVALLARO 
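	/* Split Header (SPH) needs a secondary buffer; allocate it from the same page pool */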
131367afd6d1SJose Abreu 	if (priv->sph) {
131467afd6d1SJose Abreu 		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
131567afd6d1SJose Abreu 		if (!buf->sec_page)
131667afd6d1SJose Abreu 			return -ENOMEM;
131767afd6d1SJose Abreu 
131867afd6d1SJose Abreu 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1319396e13e1SJoakim Zhang 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
132067afd6d1SJose Abreu 	} else {
132167afd6d1SJose Abreu 		buf->sec_page = NULL;
1322396e13e1SJoakim Zhang 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
132367afd6d1SJose Abreu 	}
132467afd6d1SJose Abreu 
13252af6106aSJose Abreu 	buf->addr = page_pool_get_dma_addr(buf->page);
13262af6106aSJose Abreu 	stmmac_set_desc_addr(priv, p, buf->addr);
13272c520b1cSJose Abreu 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
13282c520b1cSJose Abreu 		stmmac_init_desc3(priv, p);
1329c24602efSGiuseppe CAVALLARO 
1330c24602efSGiuseppe CAVALLARO 	return 0;
1331c24602efSGiuseppe CAVALLARO }
1332c24602efSGiuseppe CAVALLARO 
133371fedb01SJoao Pinto /**
133471fedb01SJoao Pinto  * stmmac_free_rx_buffer - free RX dma buffers
133571fedb01SJoao Pinto  * @priv: private structure
133654139cf3SJoao Pinto  * @queue: RX queue index
133771fedb01SJoao Pinto  * @i: buffer index.
133871fedb01SJoao Pinto  */
133954139cf3SJoao Pinto static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
134056329137SBartlomiej Zolnierkiewicz {
134154139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
13422af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
134354139cf3SJoao Pinto 
13442af6106aSJose Abreu 	if (buf->page)
1345458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
13462af6106aSJose Abreu 	buf->page = NULL;
134767afd6d1SJose Abreu 
134867afd6d1SJose Abreu 	if (buf->sec_page)
1349458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
135067afd6d1SJose Abreu 	buf->sec_page = NULL;
135156329137SBartlomiej Zolnierkiewicz }
135256329137SBartlomiej Zolnierkiewicz 
13537ac6653aSJeff Kirsher /**
135471fedb01SJoao Pinto  * stmmac_free_tx_buffer - free TX dma buffers
135571fedb01SJoao Pinto  * @priv: private structure
1356ce736788SJoao Pinto  * @queue: TX queue index
135771fedb01SJoao Pinto  * @i: buffer index.
135871fedb01SJoao Pinto  */
1359ce736788SJoao Pinto static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
136071fedb01SJoao Pinto {
1361ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1362ce736788SJoao Pinto 
1363ce736788SJoao Pinto 	if (tx_q->tx_skbuff_dma[i].buf) {
1364ce736788SJoao Pinto 		if (tx_q->tx_skbuff_dma[i].map_as_page)
136571fedb01SJoao Pinto 			dma_unmap_page(priv->device,
1366ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].buf,
1367ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].len,
136871fedb01SJoao Pinto 				       DMA_TO_DEVICE);
136971fedb01SJoao Pinto 		else
137071fedb01SJoao Pinto 			dma_unmap_single(priv->device,
1371ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].buf,
1372ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].len,
137371fedb01SJoao Pinto 					 DMA_TO_DEVICE);
137471fedb01SJoao Pinto 	}
137571fedb01SJoao Pinto 
1376ce736788SJoao Pinto 	if (tx_q->tx_skbuff[i]) {
1377ce736788SJoao Pinto 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1378ce736788SJoao Pinto 		tx_q->tx_skbuff[i] = NULL;
1379ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].buf = 0;
1380ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].map_as_page = false;
138171fedb01SJoao Pinto 	}
138271fedb01SJoao Pinto }
138371fedb01SJoao Pinto 
138471fedb01SJoao Pinto /**
13859c63faaaSJoakim Zhang  * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
13869c63faaaSJoakim Zhang  * @priv: driver private structure
13879c63faaaSJoakim Zhang  * Description: this function is called to re-allocate the receive buffers,
13889c63faaaSJoakim Zhang  * perform the DMA mapping and init the descriptors.
13899c63faaaSJoakim Zhang  */
13909c63faaaSJoakim Zhang static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
13919c63faaaSJoakim Zhang {
13929c63faaaSJoakim Zhang 	u32 rx_count = priv->plat->rx_queues_to_use;
13939c63faaaSJoakim Zhang 	u32 queue;
13949c63faaaSJoakim Zhang 	int i;
13959c63faaaSJoakim Zhang 
13969c63faaaSJoakim Zhang 	for (queue = 0; queue < rx_count; queue++) {
13979c63faaaSJoakim Zhang 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
13989c63faaaSJoakim Zhang 
13999c63faaaSJoakim Zhang 		for (i = 0; i < priv->dma_rx_size; i++) {
14009c63faaaSJoakim Zhang 			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
14019c63faaaSJoakim Zhang 
14029c63faaaSJoakim Zhang 			if (buf->page) {
14039c63faaaSJoakim Zhang 				page_pool_recycle_direct(rx_q->page_pool, buf->page);
14049c63faaaSJoakim Zhang 				buf->page = NULL;
14059c63faaaSJoakim Zhang 			}
14069c63faaaSJoakim Zhang 
14079c63faaaSJoakim Zhang 			if (priv->sph && buf->sec_page) {
14089c63faaaSJoakim Zhang 				page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
14099c63faaaSJoakim Zhang 				buf->sec_page = NULL;
14109c63faaaSJoakim Zhang 			}
14119c63faaaSJoakim Zhang 		}
14129c63faaaSJoakim Zhang 	}
14139c63faaaSJoakim Zhang 
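	/* Second pass: re-allocate the pages and re-program each RX descriptor */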
14149c63faaaSJoakim Zhang 	for (queue = 0; queue < rx_count; queue++) {
14159c63faaaSJoakim Zhang 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
14169c63faaaSJoakim Zhang 
14179c63faaaSJoakim Zhang 		for (i = 0; i < priv->dma_rx_size; i++) {
14189c63faaaSJoakim Zhang 			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
14199c63faaaSJoakim Zhang 			struct dma_desc *p;
14209c63faaaSJoakim Zhang 
14219c63faaaSJoakim Zhang 			if (priv->extend_desc)
14229c63faaaSJoakim Zhang 				p = &((rx_q->dma_erx + i)->basic);
14239c63faaaSJoakim Zhang 			else
14249c63faaaSJoakim Zhang 				p = rx_q->dma_rx + i;
14259c63faaaSJoakim Zhang 
14269c63faaaSJoakim Zhang 			if (!buf->page) {
14279c63faaaSJoakim Zhang 				buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
14289c63faaaSJoakim Zhang 				if (!buf->page)
14299c63faaaSJoakim Zhang 					goto err_reinit_rx_buffers;
14309c63faaaSJoakim Zhang 
14319c63faaaSJoakim Zhang 				buf->addr = page_pool_get_dma_addr(buf->page);
14329c63faaaSJoakim Zhang 			}
14339c63faaaSJoakim Zhang 
14349c63faaaSJoakim Zhang 			if (priv->sph && !buf->sec_page) {
14359c63faaaSJoakim Zhang 				buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
14369c63faaaSJoakim Zhang 				if (!buf->sec_page)
14379c63faaaSJoakim Zhang 					goto err_reinit_rx_buffers;
14389c63faaaSJoakim Zhang 
14399c63faaaSJoakim Zhang 				buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
14409c63faaaSJoakim Zhang 			}
14419c63faaaSJoakim Zhang 
14429c63faaaSJoakim Zhang 			stmmac_set_desc_addr(priv, p, buf->addr);
14439c63faaaSJoakim Zhang 			if (priv->sph)
14449c63faaaSJoakim Zhang 				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
14459c63faaaSJoakim Zhang 			else
14469c63faaaSJoakim Zhang 				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
14479c63faaaSJoakim Zhang 			if (priv->dma_buf_sz == BUF_SIZE_16KiB)
14489c63faaaSJoakim Zhang 				stmmac_init_desc3(priv, p);
14499c63faaaSJoakim Zhang 		}
14509c63faaaSJoakim Zhang 	}
14519c63faaaSJoakim Zhang 
14529c63faaaSJoakim Zhang 	return;
14539c63faaaSJoakim Zhang 
14549c63faaaSJoakim Zhang err_reinit_rx_buffers:
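	/* Free what has been allocated so far, walking the queues in reverse */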
14559c63faaaSJoakim Zhang 	do {
14569c63faaaSJoakim Zhang 		while (--i >= 0)
14579c63faaaSJoakim Zhang 			stmmac_free_rx_buffer(priv, queue, i);
14589c63faaaSJoakim Zhang 
14599c63faaaSJoakim Zhang 		if (queue == 0)
14609c63faaaSJoakim Zhang 			break;
14619c63faaaSJoakim Zhang 
14629c63faaaSJoakim Zhang 		i = priv->dma_rx_size;
14639c63faaaSJoakim Zhang 	} while (queue-- > 0);
14649c63faaaSJoakim Zhang }
14659c63faaaSJoakim Zhang 
14669c63faaaSJoakim Zhang /**
146771fedb01SJoao Pinto  * init_dma_rx_desc_rings - init the RX descriptor rings
14687ac6653aSJeff Kirsher  * @dev: net device structure
14695bacd778SLABBE Corentin  * @flags: gfp flag.
147071fedb01SJoao Pinto  * Description: this function initializes the DMA RX descriptors
14715bacd778SLABBE Corentin  * and allocates the socket buffers. It supports the chained and ring
1472286a8372SGiuseppe CAVALLARO  * modes.
14737ac6653aSJeff Kirsher  */
147471fedb01SJoao Pinto static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
14757ac6653aSJeff Kirsher {
14767ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
147754139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
14785bacd778SLABBE Corentin 	int ret = -ENOMEM;
14791d3028f4SColin Ian King 	int queue;
148054139cf3SJoao Pinto 	int i;
14817ac6653aSJeff Kirsher 
148254139cf3SJoao Pinto 	/* RX INITIALIZATION */
14835bacd778SLABBE Corentin 	netif_dbg(priv, probe, priv->dev,
14845bacd778SLABBE Corentin 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
14855bacd778SLABBE Corentin 
148654139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
148754139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
148854139cf3SJoao Pinto 
148954139cf3SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
149054139cf3SJoao Pinto 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
149154139cf3SJoao Pinto 			  (u32)rx_q->dma_rx_phy);
149254139cf3SJoao Pinto 
1493cbcf0999SJose Abreu 		stmmac_clear_rx_descriptors(priv, queue);
1494cbcf0999SJose Abreu 
1495aa042f60SSong, Yoong Siang 		for (i = 0; i < priv->dma_rx_size; i++) {
14965bacd778SLABBE Corentin 			struct dma_desc *p;
14975bacd778SLABBE Corentin 
149854139cf3SJoao Pinto 			if (priv->extend_desc)
149954139cf3SJoao Pinto 				p = &((rx_q->dma_erx + i)->basic);
150054139cf3SJoao Pinto 			else
150154139cf3SJoao Pinto 				p = rx_q->dma_rx + i;
150254139cf3SJoao Pinto 
150354139cf3SJoao Pinto 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
150454139cf3SJoao Pinto 						     queue);
15055bacd778SLABBE Corentin 			if (ret)
15065bacd778SLABBE Corentin 				goto err_init_rx_buffers;
15075bacd778SLABBE Corentin 		}
150854139cf3SJoao Pinto 
150954139cf3SJoao Pinto 		rx_q->cur_rx = 0;
1510aa042f60SSong, Yoong Siang 		rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
151154139cf3SJoao Pinto 
1512c24602efSGiuseppe CAVALLARO 		/* Setup the chained descriptor addresses */
1513c24602efSGiuseppe CAVALLARO 		if (priv->mode == STMMAC_CHAIN_MODE) {
151471fedb01SJoao Pinto 			if (priv->extend_desc)
15152c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_erx,
1516aa042f60SSong, Yoong Siang 						 rx_q->dma_rx_phy,
1517aa042f60SSong, Yoong Siang 						 priv->dma_rx_size, 1);
151871fedb01SJoao Pinto 			else
15192c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_rx,
1520aa042f60SSong, Yoong Siang 						 rx_q->dma_rx_phy,
1521aa042f60SSong, Yoong Siang 						 priv->dma_rx_size, 0);
152271fedb01SJoao Pinto 		}
152354139cf3SJoao Pinto 	}
152454139cf3SJoao Pinto 
152571fedb01SJoao Pinto 	return 0;
152654139cf3SJoao Pinto 
152771fedb01SJoao Pinto err_init_rx_buffers:
152854139cf3SJoao Pinto 	while (queue >= 0) {
152971fedb01SJoao Pinto 		while (--i >= 0)
153054139cf3SJoao Pinto 			stmmac_free_rx_buffer(priv, queue, i);
153154139cf3SJoao Pinto 
153254139cf3SJoao Pinto 		if (queue == 0)
153354139cf3SJoao Pinto 			break;
153454139cf3SJoao Pinto 
1535aa042f60SSong, Yoong Siang 		i = priv->dma_rx_size;
153654139cf3SJoao Pinto 		queue--;
153754139cf3SJoao Pinto 	}
153854139cf3SJoao Pinto 
153971fedb01SJoao Pinto 	return ret;
154071fedb01SJoao Pinto }
154171fedb01SJoao Pinto 
154271fedb01SJoao Pinto /**
154371fedb01SJoao Pinto  * init_dma_tx_desc_rings - init the TX descriptor rings
154471fedb01SJoao Pinto  * @dev: net device structure.
154571fedb01SJoao Pinto  * Description: this function initializes the DMA TX descriptors
154671fedb01SJoao Pinto  * and resets the per-entry TX bookkeeping. It supports the chained and ring
154771fedb01SJoao Pinto  * modes.
154871fedb01SJoao Pinto  */
154971fedb01SJoao Pinto static int init_dma_tx_desc_rings(struct net_device *dev)
155071fedb01SJoao Pinto {
155171fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
1552ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1553ce736788SJoao Pinto 	u32 queue;
155471fedb01SJoao Pinto 	int i;
155571fedb01SJoao Pinto 
1556ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1557ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1558ce736788SJoao Pinto 
155971fedb01SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
1560ce736788SJoao Pinto 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1561ce736788SJoao Pinto 			 (u32)tx_q->dma_tx_phy);
156271fedb01SJoao Pinto 
156371fedb01SJoao Pinto 		/* Setup the chained descriptor addresses */
156471fedb01SJoao Pinto 		if (priv->mode == STMMAC_CHAIN_MODE) {
156571fedb01SJoao Pinto 			if (priv->extend_desc)
15662c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_etx,
1567aa042f60SSong, Yoong Siang 						 tx_q->dma_tx_phy,
1568aa042f60SSong, Yoong Siang 						 priv->dma_tx_size, 1);
1569579a25a8SJose Abreu 			else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
15702c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_tx,
1571aa042f60SSong, Yoong Siang 						 tx_q->dma_tx_phy,
1572aa042f60SSong, Yoong Siang 						 priv->dma_tx_size, 0);
1573c24602efSGiuseppe CAVALLARO 		}
1574286a8372SGiuseppe CAVALLARO 
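		/* Clear each TX descriptor and reset the per-entry bookkeeping */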
1575aa042f60SSong, Yoong Siang 		for (i = 0; i < priv->dma_tx_size; i++) {
1576c24602efSGiuseppe CAVALLARO 			struct dma_desc *p;
1577c24602efSGiuseppe CAVALLARO 			if (priv->extend_desc)
1578ce736788SJoao Pinto 				p = &((tx_q->dma_etx + i)->basic);
1579579a25a8SJose Abreu 			else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1580579a25a8SJose Abreu 				p = &((tx_q->dma_entx + i)->basic);
1581c24602efSGiuseppe CAVALLARO 			else
1582ce736788SJoao Pinto 				p = tx_q->dma_tx + i;
1583f748be53SAlexandre TORGUE 
158444c67f85SJose Abreu 			stmmac_clear_desc(priv, p);
1585f748be53SAlexandre TORGUE 
1586ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].buf = 0;
1587ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1588ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].len = 0;
1589ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].last_segment = false;
1590ce736788SJoao Pinto 			tx_q->tx_skbuff[i] = NULL;
15914a7d666aSGiuseppe CAVALLARO 		}
1592c24602efSGiuseppe CAVALLARO 
1593ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
1594ce736788SJoao Pinto 		tx_q->cur_tx = 0;
15958d212a9eSNiklas Cassel 		tx_q->mss = 0;
1596ce736788SJoao Pinto 
1597c22a3f48SJoao Pinto 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1598c22a3f48SJoao Pinto 	}
15997ac6653aSJeff Kirsher 
160071fedb01SJoao Pinto 	return 0;
160171fedb01SJoao Pinto }
160271fedb01SJoao Pinto 
160371fedb01SJoao Pinto /**
160471fedb01SJoao Pinto  * init_dma_desc_rings - init the RX/TX descriptor rings
160571fedb01SJoao Pinto  * @dev: net device structure
160671fedb01SJoao Pinto  * @flags: gfp flag.
160771fedb01SJoao Pinto  * Description: this function initializes the DMA RX/TX descriptors
160871fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
160971fedb01SJoao Pinto  * modes.
161071fedb01SJoao Pinto  */
161171fedb01SJoao Pinto static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
161271fedb01SJoao Pinto {
161371fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
161471fedb01SJoao Pinto 	int ret;
161571fedb01SJoao Pinto 
161671fedb01SJoao Pinto 	ret = init_dma_rx_desc_rings(dev, flags);
161771fedb01SJoao Pinto 	if (ret)
161871fedb01SJoao Pinto 		return ret;
161971fedb01SJoao Pinto 
162071fedb01SJoao Pinto 	ret = init_dma_tx_desc_rings(dev);
162171fedb01SJoao Pinto 
16225bacd778SLABBE Corentin 	stmmac_clear_descriptors(priv);
16237ac6653aSJeff Kirsher 
1624c24602efSGiuseppe CAVALLARO 	if (netif_msg_hw(priv))
1625c24602efSGiuseppe CAVALLARO 		stmmac_display_rings(priv);
162656329137SBartlomiej Zolnierkiewicz 
162756329137SBartlomiej Zolnierkiewicz 	return ret;
16287ac6653aSJeff Kirsher }
16297ac6653aSJeff Kirsher 
163071fedb01SJoao Pinto /**
163171fedb01SJoao Pinto  * dma_free_rx_skbufs - free RX dma buffers
163271fedb01SJoao Pinto  * @priv: private structure
163354139cf3SJoao Pinto  * @queue: RX queue index
163471fedb01SJoao Pinto  */
163554139cf3SJoao Pinto static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
16367ac6653aSJeff Kirsher {
16377ac6653aSJeff Kirsher 	int i;
16387ac6653aSJeff Kirsher 
1639aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_rx_size; i++)
164054139cf3SJoao Pinto 		stmmac_free_rx_buffer(priv, queue, i);
16417ac6653aSJeff Kirsher }
16427ac6653aSJeff Kirsher 
164371fedb01SJoao Pinto /**
164471fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
164571fedb01SJoao Pinto  * @priv: private structure
1646ce736788SJoao Pinto  * @queue: TX queue index
164771fedb01SJoao Pinto  */
1648ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
16497ac6653aSJeff Kirsher {
16507ac6653aSJeff Kirsher 	int i;
16517ac6653aSJeff Kirsher 
1652aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_tx_size; i++)
1653ce736788SJoao Pinto 		stmmac_free_tx_buffer(priv, queue, i);
16547ac6653aSJeff Kirsher }
16557ac6653aSJeff Kirsher 
1656732fdf0eSGiuseppe CAVALLARO /**
16574ec236c7SFugang Duan  * stmmac_free_tx_skbufs - free TX skb buffers
16584ec236c7SFugang Duan  * @priv: private structure
16594ec236c7SFugang Duan  */
16604ec236c7SFugang Duan static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
16614ec236c7SFugang Duan {
16624ec236c7SFugang Duan 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
16634ec236c7SFugang Duan 	u32 queue;
16644ec236c7SFugang Duan 
16654ec236c7SFugang Duan 	for (queue = 0; queue < tx_queue_cnt; queue++)
16664ec236c7SFugang Duan 		dma_free_tx_skbufs(priv, queue);
16674ec236c7SFugang Duan }
16684ec236c7SFugang Duan 
16694ec236c7SFugang Duan /**
167054139cf3SJoao Pinto  * free_dma_rx_desc_resources - free RX dma desc resources
167154139cf3SJoao Pinto  * @priv: private structure
167254139cf3SJoao Pinto  */
167354139cf3SJoao Pinto static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
167454139cf3SJoao Pinto {
167554139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
167654139cf3SJoao Pinto 	u32 queue;
167754139cf3SJoao Pinto 
167854139cf3SJoao Pinto 	/* Free RX queue resources */
167954139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
168054139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
168154139cf3SJoao Pinto 
168254139cf3SJoao Pinto 		/* Release the DMA RX socket buffers */
168354139cf3SJoao Pinto 		dma_free_rx_skbufs(priv, queue);
168454139cf3SJoao Pinto 
168554139cf3SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
168654139cf3SJoao Pinto 		if (!priv->extend_desc)
1687aa042f60SSong, Yoong Siang 			dma_free_coherent(priv->device, priv->dma_rx_size *
1688aa042f60SSong, Yoong Siang 					  sizeof(struct dma_desc),
168954139cf3SJoao Pinto 					  rx_q->dma_rx, rx_q->dma_rx_phy);
169054139cf3SJoao Pinto 		else
1691aa042f60SSong, Yoong Siang 			dma_free_coherent(priv->device, priv->dma_rx_size *
169254139cf3SJoao Pinto 					  sizeof(struct dma_extended_desc),
169354139cf3SJoao Pinto 					  rx_q->dma_erx, rx_q->dma_rx_phy);
169454139cf3SJoao Pinto 
16952af6106aSJose Abreu 		kfree(rx_q->buf_pool);
1696c3f812ceSJonathan Lemon 		if (rx_q->page_pool)
16972af6106aSJose Abreu 			page_pool_destroy(rx_q->page_pool);
16982af6106aSJose Abreu 	}
169954139cf3SJoao Pinto }
170054139cf3SJoao Pinto 
170154139cf3SJoao Pinto /**
1702ce736788SJoao Pinto  * free_dma_tx_desc_resources - free TX dma desc resources
1703ce736788SJoao Pinto  * @priv: private structure
1704ce736788SJoao Pinto  */
1705ce736788SJoao Pinto static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1706ce736788SJoao Pinto {
1707ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
170862242260SChristophe Jaillet 	u32 queue;
1709ce736788SJoao Pinto 
1710ce736788SJoao Pinto 	/* Free TX queue resources */
1711ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1712ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1713579a25a8SJose Abreu 		size_t size;
1714579a25a8SJose Abreu 		void *addr;
1715ce736788SJoao Pinto 
1716ce736788SJoao Pinto 		/* Release the DMA TX socket buffers */
1717ce736788SJoao Pinto 		dma_free_tx_skbufs(priv, queue);
1718ce736788SJoao Pinto 
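		/* Pick the descriptor size and base address that match the ring layout in use */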
1719579a25a8SJose Abreu 		if (priv->extend_desc) {
1720579a25a8SJose Abreu 			size = sizeof(struct dma_extended_desc);
1721579a25a8SJose Abreu 			addr = tx_q->dma_etx;
1722579a25a8SJose Abreu 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1723579a25a8SJose Abreu 			size = sizeof(struct dma_edesc);
1724579a25a8SJose Abreu 			addr = tx_q->dma_entx;
1725579a25a8SJose Abreu 		} else {
1726579a25a8SJose Abreu 			size = sizeof(struct dma_desc);
1727579a25a8SJose Abreu 			addr = tx_q->dma_tx;
1728579a25a8SJose Abreu 		}
1729579a25a8SJose Abreu 
1730aa042f60SSong, Yoong Siang 		size *= priv->dma_tx_size;
1731579a25a8SJose Abreu 
1732579a25a8SJose Abreu 		dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1733ce736788SJoao Pinto 
1734ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff_dma);
1735ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff);
1736ce736788SJoao Pinto 	}
1737ce736788SJoao Pinto }
1738ce736788SJoao Pinto 
1739ce736788SJoao Pinto /**
174071fedb01SJoao Pinto  * alloc_dma_rx_desc_resources - alloc RX resources.
1741732fdf0eSGiuseppe CAVALLARO  * @priv: private structure
1742732fdf0eSGiuseppe CAVALLARO  * Description: according to which descriptor can be used (extended or basic)
1743732fdf0eSGiuseppe CAVALLARO  * this function allocates the resources for the RX path. It pre-allocates
1744732fdf0eSGiuseppe CAVALLARO  * the RX buffers in order to allow a zero-copy mechanism.
1746732fdf0eSGiuseppe CAVALLARO  */
174771fedb01SJoao Pinto static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
174809f8d696SSrinivas Kandagatla {
174954139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
17505bacd778SLABBE Corentin 	int ret = -ENOMEM;
175154139cf3SJoao Pinto 	u32 queue;
175209f8d696SSrinivas Kandagatla 
175354139cf3SJoao Pinto 	/* RX queues buffers and DMA */
175454139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
175554139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
17562af6106aSJose Abreu 		struct page_pool_params pp_params = { 0 };
17574f28bd95SThierry Reding 		unsigned int num_pages;
175854139cf3SJoao Pinto 
175954139cf3SJoao Pinto 		rx_q->queue_index = queue;
176054139cf3SJoao Pinto 		rx_q->priv_data = priv;
176154139cf3SJoao Pinto 
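		/* One page_pool per RX queue; the page order is sized to fit a full DMA buffer */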
17622af6106aSJose Abreu 		pp_params.flags = PP_FLAG_DMA_MAP;
1763aa042f60SSong, Yoong Siang 		pp_params.pool_size = priv->dma_rx_size;
17644f28bd95SThierry Reding 		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
17654f28bd95SThierry Reding 		pp_params.order = ilog2(num_pages);
17662af6106aSJose Abreu 		pp_params.nid = dev_to_node(priv->device);
17672af6106aSJose Abreu 		pp_params.dev = priv->device;
17682af6106aSJose Abreu 		pp_params.dma_dir = DMA_FROM_DEVICE;
17695bacd778SLABBE Corentin 
17702af6106aSJose Abreu 		rx_q->page_pool = page_pool_create(&pp_params);
17712af6106aSJose Abreu 		if (IS_ERR(rx_q->page_pool)) {
17722af6106aSJose Abreu 			ret = PTR_ERR(rx_q->page_pool);
17732af6106aSJose Abreu 			rx_q->page_pool = NULL;
17742af6106aSJose Abreu 			goto err_dma;
17752af6106aSJose Abreu 		}
17762af6106aSJose Abreu 
1777aa042f60SSong, Yoong Siang 		rx_q->buf_pool = kcalloc(priv->dma_rx_size,
1778aa042f60SSong, Yoong Siang 					 sizeof(*rx_q->buf_pool),
17795bacd778SLABBE Corentin 					 GFP_KERNEL);
17802af6106aSJose Abreu 		if (!rx_q->buf_pool)
178154139cf3SJoao Pinto 			goto err_dma;
17825bacd778SLABBE Corentin 
17835bacd778SLABBE Corentin 		if (priv->extend_desc) {
1784750afb08SLuis Chamberlain 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1785aa042f60SSong, Yoong Siang 							   priv->dma_rx_size *
1786aa042f60SSong, Yoong Siang 							   sizeof(struct dma_extended_desc),
178754139cf3SJoao Pinto 							   &rx_q->dma_rx_phy,
17885bacd778SLABBE Corentin 							   GFP_KERNEL);
178954139cf3SJoao Pinto 			if (!rx_q->dma_erx)
17905bacd778SLABBE Corentin 				goto err_dma;
17915bacd778SLABBE Corentin 
179271fedb01SJoao Pinto 		} else {
1793750afb08SLuis Chamberlain 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1794aa042f60SSong, Yoong Siang 							  priv->dma_rx_size *
1795aa042f60SSong, Yoong Siang 							  sizeof(struct dma_desc),
179654139cf3SJoao Pinto 							  &rx_q->dma_rx_phy,
179771fedb01SJoao Pinto 							  GFP_KERNEL);
179854139cf3SJoao Pinto 			if (!rx_q->dma_rx)
179971fedb01SJoao Pinto 				goto err_dma;
180071fedb01SJoao Pinto 		}
180154139cf3SJoao Pinto 	}
180271fedb01SJoao Pinto 
180371fedb01SJoao Pinto 	return 0;
180471fedb01SJoao Pinto 
180571fedb01SJoao Pinto err_dma:
180654139cf3SJoao Pinto 	free_dma_rx_desc_resources(priv);
180754139cf3SJoao Pinto 
180871fedb01SJoao Pinto 	return ret;
180971fedb01SJoao Pinto }
181071fedb01SJoao Pinto 
181171fedb01SJoao Pinto /**
181271fedb01SJoao Pinto  * alloc_dma_tx_desc_resources - alloc TX resources.
181371fedb01SJoao Pinto  * @priv: private structure
181471fedb01SJoao Pinto  * Description: according to which descriptor can be used (extended or basic)
181571fedb01SJoao Pinto  * this function allocates the resources for the TX path: the TX descriptor
181671fedb01SJoao Pinto  * rings and the per-entry bookkeeping (skb pointers and DMA mapping
181771fedb01SJoao Pinto  * info).
181871fedb01SJoao Pinto  */
181971fedb01SJoao Pinto static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
182071fedb01SJoao Pinto {
1821ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
182271fedb01SJoao Pinto 	int ret = -ENOMEM;
1823ce736788SJoao Pinto 	u32 queue;
182471fedb01SJoao Pinto 
1825ce736788SJoao Pinto 	/* TX queues buffers and DMA */
1826ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1827ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1828579a25a8SJose Abreu 		size_t size;
1829579a25a8SJose Abreu 		void *addr;
1830ce736788SJoao Pinto 
1831ce736788SJoao Pinto 		tx_q->queue_index = queue;
1832ce736788SJoao Pinto 		tx_q->priv_data = priv;
1833ce736788SJoao Pinto 
1834aa042f60SSong, Yoong Siang 		tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
1835ce736788SJoao Pinto 					      sizeof(*tx_q->tx_skbuff_dma),
183671fedb01SJoao Pinto 					      GFP_KERNEL);
1837ce736788SJoao Pinto 		if (!tx_q->tx_skbuff_dma)
183862242260SChristophe Jaillet 			goto err_dma;
183971fedb01SJoao Pinto 
1840aa042f60SSong, Yoong Siang 		tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
1841ce736788SJoao Pinto 					  sizeof(struct sk_buff *),
184271fedb01SJoao Pinto 					  GFP_KERNEL);
1843ce736788SJoao Pinto 		if (!tx_q->tx_skbuff)
184462242260SChristophe Jaillet 			goto err_dma;
184571fedb01SJoao Pinto 
1846579a25a8SJose Abreu 		if (priv->extend_desc)
1847579a25a8SJose Abreu 			size = sizeof(struct dma_extended_desc);
1848579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1849579a25a8SJose Abreu 			size = sizeof(struct dma_edesc);
1850579a25a8SJose Abreu 		else
1851579a25a8SJose Abreu 			size = sizeof(struct dma_desc);
1852579a25a8SJose Abreu 
1853aa042f60SSong, Yoong Siang 		size *= priv->dma_tx_size;
1854579a25a8SJose Abreu 
1855579a25a8SJose Abreu 		addr = dma_alloc_coherent(priv->device, size,
1856579a25a8SJose Abreu 					  &tx_q->dma_tx_phy, GFP_KERNEL);
1857579a25a8SJose Abreu 		if (!addr)
185862242260SChristophe Jaillet 			goto err_dma;
1859579a25a8SJose Abreu 
1860579a25a8SJose Abreu 		if (priv->extend_desc)
1861579a25a8SJose Abreu 			tx_q->dma_etx = addr;
1862579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1863579a25a8SJose Abreu 			tx_q->dma_entx = addr;
1864579a25a8SJose Abreu 		else
1865579a25a8SJose Abreu 			tx_q->dma_tx = addr;
18665bacd778SLABBE Corentin 	}
18675bacd778SLABBE Corentin 
18685bacd778SLABBE Corentin 	return 0;
18695bacd778SLABBE Corentin 
187062242260SChristophe Jaillet err_dma:
1871ce736788SJoao Pinto 	free_dma_tx_desc_resources(priv);
187209f8d696SSrinivas Kandagatla 	return ret;
18735bacd778SLABBE Corentin }
187409f8d696SSrinivas Kandagatla 
187571fedb01SJoao Pinto /**
187671fedb01SJoao Pinto  * alloc_dma_desc_resources - alloc TX/RX resources.
187771fedb01SJoao Pinto  * @priv: private structure
187871fedb01SJoao Pinto  * Description: according to which descriptor can be used (extended or basic)
187971fedb01SJoao Pinto  * this function allocates the resources for the TX and RX paths. In case of
188071fedb01SJoao Pinto  * reception, for example, it pre-allocates the RX buffers in order to
188171fedb01SJoao Pinto  * allow a zero-copy mechanism.
188271fedb01SJoao Pinto  */
188371fedb01SJoao Pinto static int alloc_dma_desc_resources(struct stmmac_priv *priv)
18845bacd778SLABBE Corentin {
188554139cf3SJoao Pinto 	/* RX Allocation */
188671fedb01SJoao Pinto 	int ret = alloc_dma_rx_desc_resources(priv);
188771fedb01SJoao Pinto 
188871fedb01SJoao Pinto 	if (ret)
188971fedb01SJoao Pinto 		return ret;
189071fedb01SJoao Pinto 
189171fedb01SJoao Pinto 	ret = alloc_dma_tx_desc_resources(priv);
189271fedb01SJoao Pinto 
189371fedb01SJoao Pinto 	return ret;
189471fedb01SJoao Pinto }
189571fedb01SJoao Pinto 
189671fedb01SJoao Pinto /**
189771fedb01SJoao Pinto  * free_dma_desc_resources - free dma desc resources
189871fedb01SJoao Pinto  * @priv: private structure
189971fedb01SJoao Pinto  */
190071fedb01SJoao Pinto static void free_dma_desc_resources(struct stmmac_priv *priv)
190171fedb01SJoao Pinto {
190271fedb01SJoao Pinto 	/* Release the DMA RX socket buffers */
190371fedb01SJoao Pinto 	free_dma_rx_desc_resources(priv);
190471fedb01SJoao Pinto 
190571fedb01SJoao Pinto 	/* Release the DMA TX socket buffers */
190671fedb01SJoao Pinto 	free_dma_tx_desc_resources(priv);
190771fedb01SJoao Pinto }
190871fedb01SJoao Pinto 
190971fedb01SJoao Pinto /**
19109eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
19119eb12474Sjpinto  *  @priv: driver private structure
19129eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
19139eb12474Sjpinto  */
19149eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
19159eb12474Sjpinto {
19164f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
19174f6046f5SJoao Pinto 	int queue;
19184f6046f5SJoao Pinto 	u8 mode;
19199eb12474Sjpinto 
19204f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
19214f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1922c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
19234f6046f5SJoao Pinto 	}
19249eb12474Sjpinto }
19259eb12474Sjpinto 
19269eb12474Sjpinto /**
1927ae4f0d46SJoao Pinto  * stmmac_start_rx_dma - start RX DMA channel
1928ae4f0d46SJoao Pinto  * @priv: driver private structure
1929ae4f0d46SJoao Pinto  * @chan: RX channel index
1930ae4f0d46SJoao Pinto  * Description:
1931ae4f0d46SJoao Pinto  * This starts a RX DMA channel
1932ae4f0d46SJoao Pinto  */
1933ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1934ae4f0d46SJoao Pinto {
1935ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1936a4e887faSJose Abreu 	stmmac_start_rx(priv, priv->ioaddr, chan);
1937ae4f0d46SJoao Pinto }
1938ae4f0d46SJoao Pinto 
1939ae4f0d46SJoao Pinto /**
1940ae4f0d46SJoao Pinto  * stmmac_start_tx_dma - start TX DMA channel
1941ae4f0d46SJoao Pinto  * @priv: driver private structure
1942ae4f0d46SJoao Pinto  * @chan: TX channel index
1943ae4f0d46SJoao Pinto  * Description:
1944ae4f0d46SJoao Pinto  * This starts a TX DMA channel
1945ae4f0d46SJoao Pinto  */
1946ae4f0d46SJoao Pinto static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1947ae4f0d46SJoao Pinto {
1948ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1949a4e887faSJose Abreu 	stmmac_start_tx(priv, priv->ioaddr, chan);
1950ae4f0d46SJoao Pinto }
1951ae4f0d46SJoao Pinto 
1952ae4f0d46SJoao Pinto /**
1953ae4f0d46SJoao Pinto  * stmmac_stop_rx_dma - stop RX DMA channel
1954ae4f0d46SJoao Pinto  * @priv: driver private structure
1955ae4f0d46SJoao Pinto  * @chan: RX channel index
1956ae4f0d46SJoao Pinto  * Description:
1957ae4f0d46SJoao Pinto  * This stops a RX DMA channel
1958ae4f0d46SJoao Pinto  */
1959ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1960ae4f0d46SJoao Pinto {
1961ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1962a4e887faSJose Abreu 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1963ae4f0d46SJoao Pinto }
1964ae4f0d46SJoao Pinto 
1965ae4f0d46SJoao Pinto /**
1966ae4f0d46SJoao Pinto  * stmmac_stop_tx_dma - stop TX DMA channel
1967ae4f0d46SJoao Pinto  * @priv: driver private structure
1968ae4f0d46SJoao Pinto  * @chan: TX channel index
1969ae4f0d46SJoao Pinto  * Description:
1970ae4f0d46SJoao Pinto  * This stops a TX DMA channel
1971ae4f0d46SJoao Pinto  */
1972ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1973ae4f0d46SJoao Pinto {
1974ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1975a4e887faSJose Abreu 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1976ae4f0d46SJoao Pinto }
1977ae4f0d46SJoao Pinto 
1978ae4f0d46SJoao Pinto /**
1979ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
1980ae4f0d46SJoao Pinto  * @priv: driver private structure
1981ae4f0d46SJoao Pinto  * Description:
1982ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
1983ae4f0d46SJoao Pinto  */
1984ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
1985ae4f0d46SJoao Pinto {
1986ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1987ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1988ae4f0d46SJoao Pinto 	u32 chan = 0;
1989ae4f0d46SJoao Pinto 
1990ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1991ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
1992ae4f0d46SJoao Pinto 
1993ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1994ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
1995ae4f0d46SJoao Pinto }
1996ae4f0d46SJoao Pinto 
1997ae4f0d46SJoao Pinto /**
1998ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1999ae4f0d46SJoao Pinto  * @priv: driver private structure
2000ae4f0d46SJoao Pinto  * Description:
2001ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
2002ae4f0d46SJoao Pinto  */
2003ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2004ae4f0d46SJoao Pinto {
2005ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2006ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2007ae4f0d46SJoao Pinto 	u32 chan = 0;
2008ae4f0d46SJoao Pinto 
2009ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2010ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
2011ae4f0d46SJoao Pinto 
2012ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2013ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
2014ae4f0d46SJoao Pinto }
2015ae4f0d46SJoao Pinto 
2016ae4f0d46SJoao Pinto /**
20177ac6653aSJeff Kirsher  *  stmmac_dma_operation_mode - HW DMA operation mode
201832ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
2019732fdf0eSGiuseppe CAVALLARO  *  Description: it is used for configuring the DMA operation mode register in
2020732fdf0eSGiuseppe CAVALLARO  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
20217ac6653aSJeff Kirsher  */
20227ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
20237ac6653aSJeff Kirsher {
20246deee222SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
20256deee222SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2026f88203a2SVince Bridgers 	int rxfifosz = priv->plat->rx_fifo_size;
202752a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
20286deee222SJoao Pinto 	u32 txmode = 0;
20296deee222SJoao Pinto 	u32 rxmode = 0;
20306deee222SJoao Pinto 	u32 chan = 0;
2031a0daae13SJose Abreu 	u8 qmode = 0;
2032f88203a2SVince Bridgers 
203311fbf811SThierry Reding 	if (rxfifosz == 0)
203411fbf811SThierry Reding 		rxfifosz = priv->dma_cap.rx_fifo_size;
203552a76235SJose Abreu 	if (txfifosz == 0)
203652a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
203752a76235SJose Abreu 
203852a76235SJose Abreu 	/* Adjust for real per queue fifo size */
203952a76235SJose Abreu 	rxfifosz /= rx_channels_count;
204052a76235SJose Abreu 	txfifosz /= tx_channels_count;
204111fbf811SThierry Reding 
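	/* Select threshold or Store-And-Forward operation for the TX/RX DMA */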
20426deee222SJoao Pinto 	if (priv->plat->force_thresh_dma_mode) {
20436deee222SJoao Pinto 		txmode = tc;
20446deee222SJoao Pinto 		rxmode = tc;
20456deee222SJoao Pinto 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
20467ac6653aSJeff Kirsher 		/*
20477ac6653aSJeff Kirsher 		 * In case of GMAC, SF mode can be enabled
20487ac6653aSJeff Kirsher 		 * to perform the TX COE in HW. This depends on:
20497ac6653aSJeff Kirsher 		 * 1) TX COE if actually supported
20507ac6653aSJeff Kirsher 		 * 2) There is no buggy Jumbo frame support
20517ac6653aSJeff Kirsher 		 *    that requires the csum not to be inserted in the TDES.
20527ac6653aSJeff Kirsher 		 */
20536deee222SJoao Pinto 		txmode = SF_DMA_MODE;
20546deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
2055b2dec116SSonic Zhang 		priv->xstats.threshold = SF_DMA_MODE;
20566deee222SJoao Pinto 	} else {
20576deee222SJoao Pinto 		txmode = tc;
20586deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
20596deee222SJoao Pinto 	}
20606deee222SJoao Pinto 
20616deee222SJoao Pinto 	/* configure all channels */
2062a0daae13SJose Abreu 	for (chan = 0; chan < rx_channels_count; chan++) {
2063a0daae13SJose Abreu 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
20646deee222SJoao Pinto 
2065a4e887faSJose Abreu 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2066a0daae13SJose Abreu 				rxfifosz, qmode);
20674205c88eSJose Abreu 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
20684205c88eSJose Abreu 				chan);
2069a0daae13SJose Abreu 	}
2070a0daae13SJose Abreu 
2071a0daae13SJose Abreu 	for (chan = 0; chan < tx_channels_count; chan++) {
2072a0daae13SJose Abreu 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2073a0daae13SJose Abreu 
2074a4e887faSJose Abreu 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2075a0daae13SJose Abreu 				txfifosz, qmode);
2076a0daae13SJose Abreu 	}
20777ac6653aSJeff Kirsher }
20787ac6653aSJeff Kirsher 
20797ac6653aSJeff Kirsher /**
2080732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_clean - to manage the transmission completion
208132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
2082d0ea5cbdSJesse Brandeburg  * @budget: napi budget limiting this functions packet handling
2083ce736788SJoao Pinto  * @queue: TX queue index
2084732fdf0eSGiuseppe CAVALLARO  * Description: it reclaims the transmit resources after transmission completes.
20857ac6653aSJeff Kirsher  */
20868fce3331SJose Abreu static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
20877ac6653aSJeff Kirsher {
2088ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
208938979574SBeniamino Galvani 	unsigned int bytes_compl = 0, pkts_compl = 0;
20908fce3331SJose Abreu 	unsigned int entry, count = 0;
20917ac6653aSJeff Kirsher 
20928fce3331SJose Abreu 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2093a9097a96SGiuseppe CAVALLARO 
20949125cdd1SGiuseppe CAVALLARO 	priv->xstats.tx_clean++;
20959125cdd1SGiuseppe CAVALLARO 
20968d5f4b07SBernd Edlinger 	entry = tx_q->dirty_tx;
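	/* Reclaim completed descriptors from dirty_tx, up to the NAPI budget */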
20978fce3331SJose Abreu 	while ((entry != tx_q->cur_tx) && (count < budget)) {
2098ce736788SJoao Pinto 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
2099c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
2100c363b658SFabrice Gasnier 		int status;
2101c24602efSGiuseppe CAVALLARO 
2102c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
2103ce736788SJoao Pinto 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2104579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2105579a25a8SJose Abreu 			p = &tx_q->dma_entx[entry].basic;
2106c24602efSGiuseppe CAVALLARO 		else
2107ce736788SJoao Pinto 			p = tx_q->dma_tx + entry;
21087ac6653aSJeff Kirsher 
210942de047dSJose Abreu 		status = stmmac_tx_status(priv, &priv->dev->stats,
211042de047dSJose Abreu 				&priv->xstats, p, priv->ioaddr);
2111c363b658SFabrice Gasnier 		/* Check if the descriptor is owned by the DMA */
2112c363b658SFabrice Gasnier 		if (unlikely(status & tx_dma_own))
2113c363b658SFabrice Gasnier 			break;
2114c363b658SFabrice Gasnier 
21158fce3331SJose Abreu 		count++;
21168fce3331SJose Abreu 
2117a6b25da5SNiklas Cassel 		/* Make sure descriptor fields are read after reading
2118a6b25da5SNiklas Cassel 		 * the own bit.
2119a6b25da5SNiklas Cassel 		 */
2120a6b25da5SNiklas Cassel 		dma_rmb();
2121a6b25da5SNiklas Cassel 
2122c363b658SFabrice Gasnier 		/* Just consider the last segment and ...*/
2123c363b658SFabrice Gasnier 		if (likely(!(status & tx_not_ls))) {
2124c363b658SFabrice Gasnier 			/* ... verify the status error condition */
2125c363b658SFabrice Gasnier 			if (unlikely(status & tx_err)) {
2126c363b658SFabrice Gasnier 				priv->dev->stats.tx_errors++;
2127c363b658SFabrice Gasnier 			} else {
21287ac6653aSJeff Kirsher 				priv->dev->stats.tx_packets++;
21297ac6653aSJeff Kirsher 				priv->xstats.tx_pkt_n++;
2130c363b658SFabrice Gasnier 			}
2131ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_tx_hwtstamp(priv, p, skb);
21327ac6653aSJeff Kirsher 		}
21337ac6653aSJeff Kirsher 
2134ce736788SJoao Pinto 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
2135ce736788SJoao Pinto 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2136362b37beSGiuseppe CAVALLARO 				dma_unmap_page(priv->device,
2137ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].buf,
2138ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].len,
21397ac6653aSJeff Kirsher 					       DMA_TO_DEVICE);
2140362b37beSGiuseppe CAVALLARO 			else
2141362b37beSGiuseppe CAVALLARO 				dma_unmap_single(priv->device,
2142ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].buf,
2143ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].len,
2144362b37beSGiuseppe CAVALLARO 						 DMA_TO_DEVICE);
2145ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].buf = 0;
2146ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].len = 0;
2147ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2148cf32deecSRayagond Kokatanur 		}
2149f748be53SAlexandre TORGUE 
21502c520b1cSJose Abreu 		stmmac_clean_desc3(priv, tx_q, p);
2151f748be53SAlexandre TORGUE 
2152ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2153ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
21547ac6653aSJeff Kirsher 
21557ac6653aSJeff Kirsher 		if (likely(skb != NULL)) {
215638979574SBeniamino Galvani 			pkts_compl++;
215738979574SBeniamino Galvani 			bytes_compl += skb->len;
21587c565c33SEric W. Biederman 			dev_consume_skb_any(skb);
2159ce736788SJoao Pinto 			tx_q->tx_skbuff[entry] = NULL;
21607ac6653aSJeff Kirsher 		}
21617ac6653aSJeff Kirsher 
216242de047dSJose Abreu 		stmmac_release_tx_desc(priv, p, priv->mode);
21637ac6653aSJeff Kirsher 
2164aa042f60SSong, Yoong Siang 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
21657ac6653aSJeff Kirsher 	}
2166ce736788SJoao Pinto 	tx_q->dirty_tx = entry;
216738979574SBeniamino Galvani 
2168c22a3f48SJoao Pinto 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2169c22a3f48SJoao Pinto 				  pkts_compl, bytes_compl);
217038979574SBeniamino Galvani 
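	/* Wake the TX queue only if it was stopped and enough descriptors are free */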
2171c22a3f48SJoao Pinto 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2172c22a3f48SJoao Pinto 								queue))) &&
2173aa042f60SSong, Yoong Siang 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2174c22a3f48SJoao Pinto 
2175b3e51069SLABBE Corentin 		netif_dbg(priv, tx_done, priv->dev,
2176b3e51069SLABBE Corentin 			  "%s: restart transmit\n", __func__);
2177c22a3f48SJoao Pinto 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
21787ac6653aSJeff Kirsher 	}
2179d765955dSGiuseppe CAVALLARO 
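	/* With SW-timed EEE, re-enter LPI mode and re-arm the LPI timer */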
2180be1c7eaeSVineetha G. Jaya Kumaran 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2181be1c7eaeSVineetha G. Jaya Kumaran 	    priv->eee_sw_timer_en) {
2182d765955dSGiuseppe CAVALLARO 		stmmac_enable_eee_mode(priv);
2183388e201dSVineetha G. Jaya Kumaran 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2184d765955dSGiuseppe CAVALLARO 	}
21858fce3331SJose Abreu 
21864ccb4585SJose Abreu 	/* We still have pending packets, let's re-arm the TX timer for another pass */
21874ccb4585SJose Abreu 	if (tx_q->dirty_tx != tx_q->cur_tx)
2188d5a05e69SVincent Whitchurch 		hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer),
2189d5a05e69SVincent Whitchurch 			      HRTIMER_MODE_REL);
21904ccb4585SJose Abreu 
21918fce3331SJose Abreu 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
21928fce3331SJose Abreu 
21938fce3331SJose Abreu 	return count;
21947ac6653aSJeff Kirsher }
21957ac6653aSJeff Kirsher 
21967ac6653aSJeff Kirsher /**
2197732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
219832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
21995bacd778SLABBE Corentin  * @chan: channel index
22007ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
2201732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
22027ac6653aSJeff Kirsher  */
22035bacd778SLABBE Corentin static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
22047ac6653aSJeff Kirsher {
2205ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2206ce736788SJoao Pinto 
2207c22a3f48SJoao Pinto 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
22087ac6653aSJeff Kirsher 
2209ae4f0d46SJoao Pinto 	stmmac_stop_tx_dma(priv, chan);
2210ce736788SJoao Pinto 	dma_free_tx_skbufs(priv, chan);
2211579a25a8SJose Abreu 	stmmac_clear_tx_descriptors(priv, chan);
2212ce736788SJoao Pinto 	tx_q->dirty_tx = 0;
2213ce736788SJoao Pinto 	tx_q->cur_tx = 0;
22148d212a9eSNiklas Cassel 	tx_q->mss = 0;
2215c22a3f48SJoao Pinto 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
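	/* Reprogram the channel with the ring base address before restarting DMA */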
2216f421031eSJongsung Kim 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2217f421031eSJongsung Kim 			    tx_q->dma_tx_phy, chan);
2218ae4f0d46SJoao Pinto 	stmmac_start_tx_dma(priv, chan);
22197ac6653aSJeff Kirsher 
22207ac6653aSJeff Kirsher 	priv->dev->stats.tx_errors++;
2221c22a3f48SJoao Pinto 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
22227ac6653aSJeff Kirsher }
22237ac6653aSJeff Kirsher 
222432ceabcaSGiuseppe CAVALLARO /**
22256deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
22266deee222SJoao Pinto  *  @priv: driver private structure
22276deee222SJoao Pinto  *  @txmode: TX operating mode
22286deee222SJoao Pinto  *  @rxmode: RX operating mode
22296deee222SJoao Pinto  *  @chan: channel index
22306deee222SJoao Pinto  *  Description: it is used for configuring the DMA operation mode at
22316deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
22326deee222SJoao Pinto  *  mode.
22336deee222SJoao Pinto  */
22346deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
22356deee222SJoao Pinto 					  u32 rxmode, u32 chan)
22366deee222SJoao Pinto {
2237a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2238a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
223952a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
224052a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
22416deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
224252a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
22436deee222SJoao Pinto 
22446deee222SJoao Pinto 	if (rxfifosz == 0)
22456deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
224652a76235SJose Abreu 	if (txfifosz == 0)
224752a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
224852a76235SJose Abreu 
224952a76235SJose Abreu 	/* Adjust for real per queue fifo size */
225052a76235SJose Abreu 	rxfifosz /= rx_channels_count;
225152a76235SJose Abreu 	txfifosz /= tx_channels_count;
22526deee222SJoao Pinto 
2253ab0204e3SJose Abreu 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2254ab0204e3SJose Abreu 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
22556deee222SJoao Pinto }
22566deee222SJoao Pinto 
22578bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
22588bf993a5SJose Abreu {
225963a550fcSJose Abreu 	int ret;
22608bf993a5SJose Abreu 
2261c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
22628bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2263c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
22648bf993a5SJose Abreu 		stmmac_global_err(priv);
2265c10d4c82SJose Abreu 		return true;
2266c10d4c82SJose Abreu 	}
2267c10d4c82SJose Abreu 
2268c10d4c82SJose Abreu 	return false;
22698bf993a5SJose Abreu }
22708bf993a5SJose Abreu 
22718fce3331SJose Abreu static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
22728fce3331SJose Abreu {
22738fce3331SJose Abreu 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
22748fce3331SJose Abreu 						 &priv->xstats, chan);
22758fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[chan];
2276021bd5e3SJose Abreu 	unsigned long flags;
22778fce3331SJose Abreu 
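	/* Mask the per-channel DMA interrupt while its NAPI context is scheduled */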
22784ccb4585SJose Abreu 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
22793ba07debSJose Abreu 		if (napi_schedule_prep(&ch->rx_napi)) {
2280021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2281021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2282021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
22831f02efd1SSeb Laveze 			__napi_schedule(&ch->rx_napi);
22843ba07debSJose Abreu 		}
22854ccb4585SJose Abreu 	}
22864ccb4585SJose Abreu 
2287021bd5e3SJose Abreu 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2288021bd5e3SJose Abreu 		if (napi_schedule_prep(&ch->tx_napi)) {
2289021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2290021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2291021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
22921f02efd1SSeb Laveze 			__napi_schedule(&ch->tx_napi);
2293021bd5e3SJose Abreu 		}
2294021bd5e3SJose Abreu 	}
22958fce3331SJose Abreu 
22968fce3331SJose Abreu 	return status;
22978fce3331SJose Abreu }
22988fce3331SJose Abreu 
22996deee222SJoao Pinto /**
2300732fdf0eSGiuseppe CAVALLARO  * stmmac_dma_interrupt - DMA ISR
230132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
230232ceabcaSGiuseppe CAVALLARO  * Description: this is the DMA ISR. It is called by the main ISR.
2303732fdf0eSGiuseppe CAVALLARO  * It calls the dwmac dma routine and schedules the poll method in case
2304732fdf0eSGiuseppe CAVALLARO  * some work can be done.
230532ceabcaSGiuseppe CAVALLARO  */
23067ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv)
23077ac6653aSJeff Kirsher {
2308d62a107aSJoao Pinto 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
23095a6a0445SNiklas Cassel 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
23105a6a0445SNiklas Cassel 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
23115a6a0445SNiklas Cassel 				tx_channel_count : rx_channel_count;
2312d62a107aSJoao Pinto 	u32 chan;
23138ac60ffbSKees Cook 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
23148ac60ffbSKees Cook 
23158ac60ffbSKees Cook 	/* Make sure we never check beyond our status buffer. */
23168ac60ffbSKees Cook 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
23178ac60ffbSKees Cook 		channels_to_check = ARRAY_SIZE(status);
231868e5cfafSJoao Pinto 
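	/* Collect the DMA status of each channel and schedule NAPI where needed */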
23195a6a0445SNiklas Cassel 	for (chan = 0; chan < channels_to_check; chan++)
23208fce3331SJose Abreu 		status[chan] = stmmac_napi_check(priv, chan);
2321d62a107aSJoao Pinto 
23225a6a0445SNiklas Cassel 	for (chan = 0; chan < tx_channel_count; chan++) {
23235a6a0445SNiklas Cassel 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
23247ac6653aSJeff Kirsher 			/* Try to bump up the dma threshold on this failure */
2325b2dec116SSonic Zhang 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2326b2dec116SSonic Zhang 			    (tc <= 256)) {
23277ac6653aSJeff Kirsher 				tc += 64;
2328c405abe2SSonic Zhang 				if (priv->plat->force_thresh_dma_mode)
2329d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2330d62a107aSJoao Pinto 								      tc,
2331d62a107aSJoao Pinto 								      tc,
2332d62a107aSJoao Pinto 								      chan);
2333c405abe2SSonic Zhang 				else
2334d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2335d62a107aSJoao Pinto 								    tc,
2336d62a107aSJoao Pinto 								    SF_DMA_MODE,
2337d62a107aSJoao Pinto 								    chan);
23387ac6653aSJeff Kirsher 				priv->xstats.threshold = tc;
23397ac6653aSJeff Kirsher 			}
23405a6a0445SNiklas Cassel 		} else if (unlikely(status[chan] == tx_hard_error)) {
23414e593262SJoao Pinto 			stmmac_tx_err(priv, chan);
23427ac6653aSJeff Kirsher 		}
2343d62a107aSJoao Pinto 	}
2344d62a107aSJoao Pinto }
23457ac6653aSJeff Kirsher 
234632ceabcaSGiuseppe CAVALLARO /**
234732ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
234832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
234932ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
235032ceabcaSGiuseppe CAVALLARO  */
23511c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
23521c901a46SGiuseppe CAVALLARO {
23531c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
23541c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
23551c901a46SGiuseppe CAVALLARO 
23563b1dd2c5SJose Abreu 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
23574f795b25SGiuseppe CAVALLARO 
23584f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
23593b1dd2c5SJose Abreu 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
23601c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
23614f795b25SGiuseppe CAVALLARO 	} else
236238ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
23631c901a46SGiuseppe CAVALLARO }
23641c901a46SGiuseppe CAVALLARO 
2365732fdf0eSGiuseppe CAVALLARO /**
2366732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
236732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
236819e30c14SGiuseppe CAVALLARO  * Description:
236919e30c14SGiuseppe CAVALLARO  *  new GMAC chip generations have a register to indicate the
2370e7434821SGiuseppe CAVALLARO  *  presence of the optional features/functions.
237119e30c14SGiuseppe CAVALLARO  *  This can also be used to override the value passed through the
237219e30c14SGiuseppe CAVALLARO  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2373e7434821SGiuseppe CAVALLARO  */
2374e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2375e7434821SGiuseppe CAVALLARO {
2376a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2377e7434821SGiuseppe CAVALLARO }
2378e7434821SGiuseppe CAVALLARO 
237932ceabcaSGiuseppe CAVALLARO /**
2380732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
238132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
238232ceabcaSGiuseppe CAVALLARO  * Description:
238332ceabcaSGiuseppe CAVALLARO  * it verifies that the MAC address is valid; in case of failure it
238432ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
238532ceabcaSGiuseppe CAVALLARO  */
2386bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2387bfab27a1SGiuseppe CAVALLARO {
2388bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2389c10d4c82SJose Abreu 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2390bfab27a1SGiuseppe CAVALLARO 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2391f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
2392af649352SJisheng Zhang 		dev_info(priv->device, "device MAC address %pM\n",
2393bfab27a1SGiuseppe CAVALLARO 			 priv->dev->dev_addr);
2394bfab27a1SGiuseppe CAVALLARO 	}
2395c88460b7SHans de Goede }
2396bfab27a1SGiuseppe CAVALLARO 
239732ceabcaSGiuseppe CAVALLARO /**
2398732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
239932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
240032ceabcaSGiuseppe CAVALLARO  * Description:
240132ceabcaSGiuseppe CAVALLARO  * It inits the DMA by invoking the specific MAC/GMAC callback.
240232ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
240332ceabcaSGiuseppe CAVALLARO  * if these are not passed, a default is kept for the MAC or GMAC.
240432ceabcaSGiuseppe CAVALLARO  */
24050f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv)
24060f1f88a8SGiuseppe CAVALLARO {
240747f2a9ceSJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
240847f2a9ceSJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
240924aaed0cSJose Abreu 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
241054139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q;
2411ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
241247f2a9ceSJoao Pinto 	u32 chan = 0;
2413c24602efSGiuseppe CAVALLARO 	int atds = 0;
2414495db273SGiuseppe Cavallaro 	int ret = 0;
24150f1f88a8SGiuseppe CAVALLARO 
2416a332e2faSNiklas Cassel 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2417a332e2faSNiklas Cassel 		dev_err(priv->device, "Invalid DMA configuration\n");
241889ab75bfSNiklas Cassel 		return -EINVAL;
24190f1f88a8SGiuseppe CAVALLARO 	}
24200f1f88a8SGiuseppe CAVALLARO 
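	/* The alternate descriptor size is only needed for extended descriptors in ring mode */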
2421c24602efSGiuseppe CAVALLARO 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2422c24602efSGiuseppe CAVALLARO 		atds = 1;
2423c24602efSGiuseppe CAVALLARO 
2424a4e887faSJose Abreu 	ret = stmmac_reset(priv, priv->ioaddr);
2425495db273SGiuseppe Cavallaro 	if (ret) {
2426495db273SGiuseppe Cavallaro 		dev_err(priv->device, "Failed to reset the dma\n");
2427495db273SGiuseppe Cavallaro 		return ret;
2428495db273SGiuseppe Cavallaro 	}
2429495db273SGiuseppe Cavallaro 
24307d9e6c5aSJose Abreu 	/* DMA Configuration */
24317d9e6c5aSJose Abreu 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
24327d9e6c5aSJose Abreu 
24337d9e6c5aSJose Abreu 	if (priv->plat->axi)
24347d9e6c5aSJose Abreu 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
24357d9e6c5aSJose Abreu 
2436af8f3fb7SWeifeng Voon 	/* DMA CSR Channel configuration */
2437af8f3fb7SWeifeng Voon 	for (chan = 0; chan < dma_csr_ch; chan++)
2438af8f3fb7SWeifeng Voon 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2439af8f3fb7SWeifeng Voon 
244047f2a9ceSJoao Pinto 	/* DMA RX Channel Configuration */
244147f2a9ceSJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++) {
244254139cf3SJoao Pinto 		rx_q = &priv->rx_queue[chan];
244354139cf3SJoao Pinto 
244424aaed0cSJose Abreu 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
244524aaed0cSJose Abreu 				    rx_q->dma_rx_phy, chan);
244647f2a9ceSJoao Pinto 
244754139cf3SJoao Pinto 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2448aa042f60SSong, Yoong Siang 				     (priv->dma_rx_size *
2449aa042f60SSong, Yoong Siang 				      sizeof(struct dma_desc));
2450a4e887faSJose Abreu 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2451a4e887faSJose Abreu 				       rx_q->rx_tail_addr, chan);
245247f2a9ceSJoao Pinto 	}
245347f2a9ceSJoao Pinto 
245447f2a9ceSJoao Pinto 	/* DMA TX Channel Configuration */
245547f2a9ceSJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++) {
2456ce736788SJoao Pinto 		tx_q = &priv->tx_queue[chan];
2457ce736788SJoao Pinto 
245824aaed0cSJose Abreu 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
245924aaed0cSJose Abreu 				    tx_q->dma_tx_phy, chan);
2460f748be53SAlexandre TORGUE 
24610431100bSJose Abreu 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2462a4e887faSJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2463a4e887faSJose Abreu 				       tx_q->tx_tail_addr, chan);
246447f2a9ceSJoao Pinto 	}
246524aaed0cSJose Abreu 
2466495db273SGiuseppe Cavallaro 	return ret;
24670f1f88a8SGiuseppe CAVALLARO }
24680f1f88a8SGiuseppe CAVALLARO 
24698fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
24708fce3331SJose Abreu {
24718fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
24728fce3331SJose Abreu 
2473d5a05e69SVincent Whitchurch 	hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer),
2474d5a05e69SVincent Whitchurch 		      HRTIMER_MODE_REL);
24758fce3331SJose Abreu }
24768fce3331SJose Abreu 
2477bfab27a1SGiuseppe CAVALLARO /**
2478732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
2479d0ea5cbdSJesse Brandeburg  * @t: pointer to the hrtimer embedded in the TX queue
24809125cdd1SGiuseppe CAVALLARO  * Description:
24819125cdd1SGiuseppe CAVALLARO  * This is the timer handler to directly invoke the stmmac_tx_clean.
24829125cdd1SGiuseppe CAVALLARO  */
2483d5a05e69SVincent Whitchurch static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
24849125cdd1SGiuseppe CAVALLARO {
2485d5a05e69SVincent Whitchurch 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
24868fce3331SJose Abreu 	struct stmmac_priv *priv = tx_q->priv_data;
24878fce3331SJose Abreu 	struct stmmac_channel *ch;
24889125cdd1SGiuseppe CAVALLARO 
24898fce3331SJose Abreu 	ch = &priv->channel[tx_q->queue_index];
24908fce3331SJose Abreu 
2491021bd5e3SJose Abreu 	if (likely(napi_schedule_prep(&ch->tx_napi))) {
2492021bd5e3SJose Abreu 		unsigned long flags;
2493021bd5e3SJose Abreu 
2494021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
2495021bd5e3SJose Abreu 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2496021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
24974ccb4585SJose Abreu 		__napi_schedule(&ch->tx_napi);
2498021bd5e3SJose Abreu 	}
2499d5a05e69SVincent Whitchurch 
2500d5a05e69SVincent Whitchurch 	return HRTIMER_NORESTART;
25019125cdd1SGiuseppe CAVALLARO }
25029125cdd1SGiuseppe CAVALLARO 
25039125cdd1SGiuseppe CAVALLARO /**
2504d429b66eSJose Abreu  * stmmac_init_coalesce - init mitigation options.
250532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
25069125cdd1SGiuseppe CAVALLARO  * Description:
2507d429b66eSJose Abreu  * This inits the coalesce parameters: i.e. timer rate,
25089125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
25099125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
25109125cdd1SGiuseppe CAVALLARO  */
2511d429b66eSJose Abreu static void stmmac_init_coalesce(struct stmmac_priv *priv)
25129125cdd1SGiuseppe CAVALLARO {
25138fce3331SJose Abreu 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
25148fce3331SJose Abreu 	u32 chan;
25158fce3331SJose Abreu 
25169125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
25179125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2518d429b66eSJose Abreu 	priv->rx_coal_frames = STMMAC_RX_FRAMES;
25198fce3331SJose Abreu 
25208fce3331SJose Abreu 	for (chan = 0; chan < tx_channel_count; chan++) {
25218fce3331SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
25228fce3331SJose Abreu 
2523d5a05e69SVincent Whitchurch 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2524d5a05e69SVincent Whitchurch 		tx_q->txtimer.function = stmmac_tx_timer;
25258fce3331SJose Abreu 	}
25269125cdd1SGiuseppe CAVALLARO }
25279125cdd1SGiuseppe CAVALLARO 
25284854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
25294854ab99SJoao Pinto {
25304854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
25314854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
25324854ab99SJoao Pinto 	u32 chan;
25334854ab99SJoao Pinto 
25344854ab99SJoao Pinto 	/* set TX ring length */
25354854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2536a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2537aa042f60SSong, Yoong Siang 				       (priv->dma_tx_size - 1), chan);
25384854ab99SJoao Pinto 
25394854ab99SJoao Pinto 	/* set RX ring length */
25404854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2541a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2542aa042f60SSong, Yoong Siang 				       (priv->dma_rx_size - 1), chan);
25434854ab99SJoao Pinto }
25444854ab99SJoao Pinto 
25459125cdd1SGiuseppe CAVALLARO /**
25466a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
25476a3a7193SJoao Pinto  *  @priv: driver private structure
25486a3a7193SJoao Pinto  *  Description: It is used for setting the TX queue weights
25496a3a7193SJoao Pinto  */
25506a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
25516a3a7193SJoao Pinto {
25526a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
25536a3a7193SJoao Pinto 	u32 weight;
25546a3a7193SJoao Pinto 	u32 queue;
25556a3a7193SJoao Pinto 
25566a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
25576a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
2558c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
25596a3a7193SJoao Pinto 	}
25606a3a7193SJoao Pinto }
25616a3a7193SJoao Pinto 
25626a3a7193SJoao Pinto /**
256319d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
256419d91873SJoao Pinto  *  @priv: driver private structure
256519d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
256619d91873SJoao Pinto  */
256719d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
256819d91873SJoao Pinto {
256919d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
257019d91873SJoao Pinto 	u32 mode_to_use;
257119d91873SJoao Pinto 	u32 queue;
257219d91873SJoao Pinto 
257344781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
257444781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
257519d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
257619d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
257719d91873SJoao Pinto 			continue;
257819d91873SJoao Pinto 
2579c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
258019d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
258119d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
258219d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
258319d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
258419d91873SJoao Pinto 				queue);
258519d91873SJoao Pinto 	}
258619d91873SJoao Pinto }
258719d91873SJoao Pinto 
258819d91873SJoao Pinto /**
2589d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2590d43042f4SJoao Pinto  *  @priv: driver private structure
2591d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
2592d43042f4SJoao Pinto  */
2593d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2594d43042f4SJoao Pinto {
2595d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2596d43042f4SJoao Pinto 	u32 queue;
2597d43042f4SJoao Pinto 	u32 chan;
2598d43042f4SJoao Pinto 
2599d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2600d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
2601c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2602d43042f4SJoao Pinto 	}
2603d43042f4SJoao Pinto }
2604d43042f4SJoao Pinto 
2605d43042f4SJoao Pinto /**
2606a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2607a8f5102aSJoao Pinto  *  @priv: driver private structure
2608a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
2609a8f5102aSJoao Pinto  */
2610a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2611a8f5102aSJoao Pinto {
2612a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2613a8f5102aSJoao Pinto 	u32 queue;
2614a8f5102aSJoao Pinto 	u32 prio;
2615a8f5102aSJoao Pinto 
2616a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2617a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2618a8f5102aSJoao Pinto 			continue;
2619a8f5102aSJoao Pinto 
2620a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
2621c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2622a8f5102aSJoao Pinto 	}
2623a8f5102aSJoao Pinto }
2624a8f5102aSJoao Pinto 
2625a8f5102aSJoao Pinto /**
2626a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2627a8f5102aSJoao Pinto  *  @priv: driver private structure
2628a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
2629a8f5102aSJoao Pinto  */
2630a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2631a8f5102aSJoao Pinto {
2632a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2633a8f5102aSJoao Pinto 	u32 queue;
2634a8f5102aSJoao Pinto 	u32 prio;
2635a8f5102aSJoao Pinto 
2636a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
2637a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2638a8f5102aSJoao Pinto 			continue;
2639a8f5102aSJoao Pinto 
2640a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
2641c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2642a8f5102aSJoao Pinto 	}
2643a8f5102aSJoao Pinto }
2644a8f5102aSJoao Pinto 
2645a8f5102aSJoao Pinto /**
2646abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2647abe80fdcSJoao Pinto  *  @priv: driver private structure
2648abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
2649abe80fdcSJoao Pinto  */
2650abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2651abe80fdcSJoao Pinto {
2652abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2653abe80fdcSJoao Pinto 	u32 queue;
2654abe80fdcSJoao Pinto 	u8 packet;
2655abe80fdcSJoao Pinto 
2656abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2657abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
2658abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2659abe80fdcSJoao Pinto 			continue;
2660abe80fdcSJoao Pinto 
2661abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2662c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2663abe80fdcSJoao Pinto 	}
2664abe80fdcSJoao Pinto }
2665abe80fdcSJoao Pinto 
266676067459SJose Abreu static void stmmac_mac_config_rss(struct stmmac_priv *priv)
266776067459SJose Abreu {
266876067459SJose Abreu 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
266976067459SJose Abreu 		priv->rss.enable = false;
267076067459SJose Abreu 		return;
267176067459SJose Abreu 	}
267276067459SJose Abreu 
267376067459SJose Abreu 	if (priv->dev->features & NETIF_F_RXHASH)
267476067459SJose Abreu 		priv->rss.enable = true;
267576067459SJose Abreu 	else
267676067459SJose Abreu 		priv->rss.enable = false;
267776067459SJose Abreu 
267876067459SJose Abreu 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
267976067459SJose Abreu 			     priv->plat->rx_queues_to_use);
268076067459SJose Abreu }
268176067459SJose Abreu 
2682abe80fdcSJoao Pinto /**
2683d0a9c9f9SJoao Pinto  *  stmmac_mtl_configuration - Configure MTL
2684d0a9c9f9SJoao Pinto  *  @priv: driver private structure
2685d0a9c9f9SJoao Pinto  *  Description: It is used for configuring MTL
2686d0a9c9f9SJoao Pinto  */
2687d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2688d0a9c9f9SJoao Pinto {
2689d0a9c9f9SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2690d0a9c9f9SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2691d0a9c9f9SJoao Pinto 
2692c10d4c82SJose Abreu 	if (tx_queues_count > 1)
26936a3a7193SJoao Pinto 		stmmac_set_tx_queue_weight(priv);
26946a3a7193SJoao Pinto 
2695d0a9c9f9SJoao Pinto 	/* Configure MTL RX algorithms */
2696c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2697c10d4c82SJose Abreu 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2698d0a9c9f9SJoao Pinto 				priv->plat->rx_sched_algorithm);
2699d0a9c9f9SJoao Pinto 
2700d0a9c9f9SJoao Pinto 	/* Configure MTL TX algorithms */
2701c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2702c10d4c82SJose Abreu 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2703d0a9c9f9SJoao Pinto 				priv->plat->tx_sched_algorithm);
2704d0a9c9f9SJoao Pinto 
270519d91873SJoao Pinto 	/* Configure CBS in AVB TX queues */
2706c10d4c82SJose Abreu 	if (tx_queues_count > 1)
270719d91873SJoao Pinto 		stmmac_configure_cbs(priv);
270819d91873SJoao Pinto 
2709d43042f4SJoao Pinto 	/* Map RX MTL to DMA channels */
2710d43042f4SJoao Pinto 	stmmac_rx_queue_dma_chan_map(priv);
2711d43042f4SJoao Pinto 
2712d0a9c9f9SJoao Pinto 	/* Enable MAC RX Queues */
2713d0a9c9f9SJoao Pinto 	stmmac_mac_enable_rx_queues(priv);
27146deee222SJoao Pinto 
2715a8f5102aSJoao Pinto 	/* Set RX priorities */
2716c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2717a8f5102aSJoao Pinto 		stmmac_mac_config_rx_queues_prio(priv);
2718a8f5102aSJoao Pinto 
2719a8f5102aSJoao Pinto 	/* Set TX priorities */
2720c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2721a8f5102aSJoao Pinto 		stmmac_mac_config_tx_queues_prio(priv);
2722abe80fdcSJoao Pinto 
2723abe80fdcSJoao Pinto 	/* Set RX routing */
2724c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2725abe80fdcSJoao Pinto 		stmmac_mac_config_rx_queues_routing(priv);
272676067459SJose Abreu 
272776067459SJose Abreu 	/* Receive Side Scaling */
272876067459SJose Abreu 	if (rx_queues_count > 1)
272976067459SJose Abreu 		stmmac_mac_config_rss(priv);
2730d0a9c9f9SJoao Pinto }
2731d0a9c9f9SJoao Pinto 
27328bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
27338bf993a5SJose Abreu {
2734c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
27358bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
2736c10d4c82SJose Abreu 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
27378bf993a5SJose Abreu 	} else {
27388bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
27398bf993a5SJose Abreu 	}
27408bf993a5SJose Abreu }
27418bf993a5SJose Abreu 
2742d0a9c9f9SJoao Pinto /**
2743732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_setup - setup mac in a usable state.
2744523f11b5SSrinivas Kandagatla  *  @dev : pointer to the device structure.
2745d0ea5cbdSJesse Brandeburg  *  @init_ptp: initialize PTP if set
2746523f11b5SSrinivas Kandagatla  *  Description:
2747732fdf0eSGiuseppe CAVALLARO  *  this is the main function to set up the HW in a usable state: the
2748732fdf0eSGiuseppe CAVALLARO  *  dma engine is reset, the core registers are configured (e.g. AXI,
2749732fdf0eSGiuseppe CAVALLARO  *  Checksum features, timers) and the DMA is ready to start receiving and
2750732fdf0eSGiuseppe CAVALLARO  *  transmitting.
2751523f11b5SSrinivas Kandagatla  *  Return value:
2752523f11b5SSrinivas Kandagatla  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2753523f11b5SSrinivas Kandagatla  *  file on failure.
2754523f11b5SSrinivas Kandagatla  */
2755fe131929SHuacai Chen static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2756523f11b5SSrinivas Kandagatla {
2757523f11b5SSrinivas Kandagatla 	struct stmmac_priv *priv = netdev_priv(dev);
27583c55d4d0SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2759146617b8SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2760146617b8SJoao Pinto 	u32 chan;
2761523f11b5SSrinivas Kandagatla 	int ret;
2762523f11b5SSrinivas Kandagatla 
2763523f11b5SSrinivas Kandagatla 	/* DMA initialization and SW reset */
2764523f11b5SSrinivas Kandagatla 	ret = stmmac_init_dma_engine(priv);
2765523f11b5SSrinivas Kandagatla 	if (ret < 0) {
276638ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
276738ddc59dSLABBE Corentin 			   __func__);
2768523f11b5SSrinivas Kandagatla 		return ret;
2769523f11b5SSrinivas Kandagatla 	}
2770523f11b5SSrinivas Kandagatla 
2771523f11b5SSrinivas Kandagatla 	/* Copy the MAC addr into the HW  */
2772c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2773523f11b5SSrinivas Kandagatla 
277402e57b9dSGiuseppe CAVALLARO 	/* PS and related bits will be programmed according to the speed */
277502e57b9dSGiuseppe CAVALLARO 	if (priv->hw->pcs) {
277602e57b9dSGiuseppe CAVALLARO 		int speed = priv->plat->mac_port_sel_speed;
277702e57b9dSGiuseppe CAVALLARO 
277802e57b9dSGiuseppe CAVALLARO 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
277902e57b9dSGiuseppe CAVALLARO 		    (speed == SPEED_1000)) {
278002e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = speed;
278102e57b9dSGiuseppe CAVALLARO 		} else {
278202e57b9dSGiuseppe CAVALLARO 			dev_warn(priv->device, "invalid port speed\n");
278302e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = 0;
278402e57b9dSGiuseppe CAVALLARO 		}
278502e57b9dSGiuseppe CAVALLARO 	}
278602e57b9dSGiuseppe CAVALLARO 
2787523f11b5SSrinivas Kandagatla 	/* Initialize the MAC Core */
2788c10d4c82SJose Abreu 	stmmac_core_init(priv, priv->hw, dev);
2789523f11b5SSrinivas Kandagatla 
2790d0a9c9f9SJoao Pinto 	/* Initialize MTL*/
2791d0a9c9f9SJoao Pinto 	stmmac_mtl_configuration(priv);
27929eb12474Sjpinto 
27938bf993a5SJose Abreu 	/* Initialize Safety Features */
27948bf993a5SJose Abreu 	stmmac_safety_feat_configuration(priv);
27958bf993a5SJose Abreu 
2796c10d4c82SJose Abreu 	ret = stmmac_rx_ipc(priv, priv->hw);
2797978aded4SGiuseppe CAVALLARO 	if (!ret) {
279838ddc59dSLABBE Corentin 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2799978aded4SGiuseppe CAVALLARO 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2800d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
2801978aded4SGiuseppe CAVALLARO 	}
2802978aded4SGiuseppe CAVALLARO 
2803523f11b5SSrinivas Kandagatla 	/* Enable the MAC Rx/Tx */
2804c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
2805523f11b5SSrinivas Kandagatla 
2806b4f0a661SJoao Pinto 	/* Set the HW DMA mode and the COE */
2807b4f0a661SJoao Pinto 	stmmac_dma_operation_mode(priv);
2808b4f0a661SJoao Pinto 
2809523f11b5SSrinivas Kandagatla 	stmmac_mmc_setup(priv);
2810523f11b5SSrinivas Kandagatla 
2811fe131929SHuacai Chen 	if (init_ptp) {
28120ad2be79SThierry Reding 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
28130ad2be79SThierry Reding 		if (ret < 0)
28140ad2be79SThierry Reding 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
28150ad2be79SThierry Reding 
2816523f11b5SSrinivas Kandagatla 		ret = stmmac_init_ptp(priv);
2817722eef28SHeiner Kallweit 		if (ret == -EOPNOTSUPP)
2818722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2819722eef28SHeiner Kallweit 		else if (ret)
2820722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP init failed\n");
2821fe131929SHuacai Chen 	}
2822523f11b5SSrinivas Kandagatla 
2823388e201dSVineetha G. Jaya Kumaran 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
2824388e201dSVineetha G. Jaya Kumaran 
2825388e201dSVineetha G. Jaya Kumaran 	/* Convert the timer from msec to usec */
2826388e201dSVineetha G. Jaya Kumaran 	if (!priv->tx_lpi_timer)
2827388e201dSVineetha G. Jaya Kumaran 		priv->tx_lpi_timer = eee_timer * 1000;
2828523f11b5SSrinivas Kandagatla 
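	/* Program the RX watchdog (RIWT) for RX interrupt coalescing when in use */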
2829a4e887faSJose Abreu 	if (priv->use_riwt) {
28304e4337ccSJose Abreu 		if (!priv->rx_riwt)
28314e4337ccSJose Abreu 			priv->rx_riwt = DEF_DMA_RIWT;
28324e4337ccSJose Abreu 
28334e4337ccSJose Abreu 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
2834523f11b5SSrinivas Kandagatla 	}
2835523f11b5SSrinivas Kandagatla 
2836c10d4c82SJose Abreu 	if (priv->hw->pcs)
2837c9ad4c10SBen Dooks (Codethink) 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
2838523f11b5SSrinivas Kandagatla 
28394854ab99SJoao Pinto 	/* set TX and RX rings length */
28404854ab99SJoao Pinto 	stmmac_set_rings_length(priv);
28414854ab99SJoao Pinto 
2842f748be53SAlexandre TORGUE 	/* Enable TSO */
2843146617b8SJoao Pinto 	if (priv->tso) {
2844146617b8SJoao Pinto 		for (chan = 0; chan < tx_cnt; chan++)
2845a4e887faSJose Abreu 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2846146617b8SJoao Pinto 	}
2847f748be53SAlexandre TORGUE 
284867afd6d1SJose Abreu 	/* Enable Split Header */
284967afd6d1SJose Abreu 	if (priv->sph && priv->hw->rx_csum) {
285067afd6d1SJose Abreu 		for (chan = 0; chan < rx_cnt; chan++)
285167afd6d1SJose Abreu 			stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
285267afd6d1SJose Abreu 	}
285367afd6d1SJose Abreu 
285430d93227SJose Abreu 	/* VLAN Tag Insertion */
285530d93227SJose Abreu 	if (priv->dma_cap.vlins)
285630d93227SJose Abreu 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
285730d93227SJose Abreu 
2858579a25a8SJose Abreu 	/* TBS */
2859579a25a8SJose Abreu 	for (chan = 0; chan < tx_cnt; chan++) {
2860579a25a8SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2861579a25a8SJose Abreu 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2862579a25a8SJose Abreu 
2863579a25a8SJose Abreu 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2864579a25a8SJose Abreu 	}
2865579a25a8SJose Abreu 
2866686cff3dSAashish Verma 	/* Configure real RX and TX queues */
2867686cff3dSAashish Verma 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
2868686cff3dSAashish Verma 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
2869686cff3dSAashish Verma 
28707d9e6c5aSJose Abreu 	/* Start the ball rolling... */
28717d9e6c5aSJose Abreu 	stmmac_start_all_dma(priv);
28727d9e6c5aSJose Abreu 
2873523f11b5SSrinivas Kandagatla 	return 0;
2874523f11b5SSrinivas Kandagatla }
2875523f11b5SSrinivas Kandagatla 
2876c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev)
2877c66f6c37SThierry Reding {
2878c66f6c37SThierry Reding 	struct stmmac_priv *priv = netdev_priv(dev);
2879c66f6c37SThierry Reding 
2880c66f6c37SThierry Reding 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2881c66f6c37SThierry Reding }
2882c66f6c37SThierry Reding 
2883523f11b5SSrinivas Kandagatla /**
28847ac6653aSJeff Kirsher  *  stmmac_open - open entry point of the driver
28857ac6653aSJeff Kirsher  *  @dev : pointer to the device structure.
28867ac6653aSJeff Kirsher  *  Description:
28877ac6653aSJeff Kirsher  *  This function is the open entry point of the driver.
28887ac6653aSJeff Kirsher  *  Return value:
28897ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
28907ac6653aSJeff Kirsher  *  file on failure.
28917ac6653aSJeff Kirsher  */
28927ac6653aSJeff Kirsher static int stmmac_open(struct net_device *dev)
28937ac6653aSJeff Kirsher {
28947ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
28955d626c87SJose Abreu 	int bfsize = 0;
28968fce3331SJose Abreu 	u32 chan;
28977ac6653aSJeff Kirsher 	int ret;
28987ac6653aSJeff Kirsher 
2899a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
2900f213bbe8SJose Abreu 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
2901*c62808e8SOng Boon Leong 	    priv->hw->xpcs_args.an_mode != DW_AN_C73) {
29027ac6653aSJeff Kirsher 		ret = stmmac_init_phy(dev);
2903e58bb43fSGiuseppe CAVALLARO 		if (ret) {
290438ddc59dSLABBE Corentin 			netdev_err(priv->dev,
290538ddc59dSLABBE Corentin 				   "%s: Cannot attach to PHY (error: %d)\n",
2906e58bb43fSGiuseppe CAVALLARO 				   __func__, ret);
290789df20d9SHans de Goede 			return ret;
29087ac6653aSJeff Kirsher 		}
2909e58bb43fSGiuseppe CAVALLARO 	}
29107ac6653aSJeff Kirsher 
2911523f11b5SSrinivas Kandagatla 	/* Extra statistics */
2912523f11b5SSrinivas Kandagatla 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2913523f11b5SSrinivas Kandagatla 	priv->xstats.threshold = tc;
2914523f11b5SSrinivas Kandagatla 
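	/* Select the RX buffer size based on the MTU (16KiB buffers if required) */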
29155d626c87SJose Abreu 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
29165d626c87SJose Abreu 	if (bfsize < 0)
29175d626c87SJose Abreu 		bfsize = 0;
29185d626c87SJose Abreu 
29195d626c87SJose Abreu 	if (bfsize < BUF_SIZE_16KiB)
29205d626c87SJose Abreu 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
29215d626c87SJose Abreu 
29225d626c87SJose Abreu 	priv->dma_buf_sz = bfsize;
29235d626c87SJose Abreu 	buf_sz = bfsize;
29245d626c87SJose Abreu 
292522ad3838SGiuseppe Cavallaro 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
292656329137SBartlomiej Zolnierkiewicz 
2927aa042f60SSong, Yoong Siang 	if (!priv->dma_tx_size)
2928aa042f60SSong, Yoong Siang 		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
2929aa042f60SSong, Yoong Siang 	if (!priv->dma_rx_size)
2930aa042f60SSong, Yoong Siang 		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
2931aa042f60SSong, Yoong Siang 
2932579a25a8SJose Abreu 	/* Earlier check for TBS */
2933579a25a8SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
2934579a25a8SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2935579a25a8SJose Abreu 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
2936579a25a8SJose Abreu 
2937579a25a8SJose Abreu 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
2938579a25a8SJose Abreu 		if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
2939579a25a8SJose Abreu 			tx_q->tbs &= ~STMMAC_TBS_AVAIL;
2940579a25a8SJose Abreu 	}
2941579a25a8SJose Abreu 
29425bacd778SLABBE Corentin 	ret = alloc_dma_desc_resources(priv);
29435bacd778SLABBE Corentin 	if (ret < 0) {
29445bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
29455bacd778SLABBE Corentin 			   __func__);
29465bacd778SLABBE Corentin 		goto dma_desc_error;
29475bacd778SLABBE Corentin 	}
29485bacd778SLABBE Corentin 
29495bacd778SLABBE Corentin 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
29505bacd778SLABBE Corentin 	if (ret < 0) {
29515bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
29525bacd778SLABBE Corentin 			   __func__);
29535bacd778SLABBE Corentin 		goto init_error;
29545bacd778SLABBE Corentin 	}
29555bacd778SLABBE Corentin 
2956fe131929SHuacai Chen 	ret = stmmac_hw_setup(dev, true);
295756329137SBartlomiej Zolnierkiewicz 	if (ret < 0) {
295838ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2959c9324d18SGiuseppe CAVALLARO 		goto init_error;
29607ac6653aSJeff Kirsher 	}
29617ac6653aSJeff Kirsher 
2962d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
2963777da230SGiuseppe CAVALLARO 
296474371272SJose Abreu 	phylink_start(priv->phylink);
296577b28983SJisheng Zhang 	/* We may have called phylink_speed_down before */
296677b28983SJisheng Zhang 	phylink_speed_up(priv->phylink);
29677ac6653aSJeff Kirsher 
29687ac6653aSJeff Kirsher 	/* Request the IRQ lines */
29697ac6653aSJeff Kirsher 	ret = request_irq(dev->irq, stmmac_interrupt,
29707ac6653aSJeff Kirsher 			  IRQF_SHARED, dev->name, dev);
29717ac6653aSJeff Kirsher 	if (unlikely(ret < 0)) {
297238ddc59dSLABBE Corentin 		netdev_err(priv->dev,
297338ddc59dSLABBE Corentin 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
29747ac6653aSJeff Kirsher 			   __func__, dev->irq, ret);
29756c1e5abeSThierry Reding 		goto irq_error;
29767ac6653aSJeff Kirsher 	}
29777ac6653aSJeff Kirsher 
29787a13f8f5SFrancesco Virlinzi 	/* Request the Wake IRQ in case another line is used for WoL */
29797a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq) {
29807a13f8f5SFrancesco Virlinzi 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
29817a13f8f5SFrancesco Virlinzi 				  IRQF_SHARED, dev->name, dev);
29827a13f8f5SFrancesco Virlinzi 		if (unlikely(ret < 0)) {
298338ddc59dSLABBE Corentin 			netdev_err(priv->dev,
298438ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2985ceb69499SGiuseppe CAVALLARO 				   __func__, priv->wol_irq, ret);
2986c9324d18SGiuseppe CAVALLARO 			goto wolirq_error;
29877a13f8f5SFrancesco Virlinzi 		}
29887a13f8f5SFrancesco Virlinzi 	}
29897a13f8f5SFrancesco Virlinzi 
2990d765955dSGiuseppe CAVALLARO 	/* Request the LPI IRQ in case a separate line is used */
2991d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0) {
2992d765955dSGiuseppe CAVALLARO 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2993d765955dSGiuseppe CAVALLARO 				  dev->name, dev);
2994d765955dSGiuseppe CAVALLARO 		if (unlikely(ret < 0)) {
299538ddc59dSLABBE Corentin 			netdev_err(priv->dev,
299638ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2997d765955dSGiuseppe CAVALLARO 				   __func__, priv->lpi_irq, ret);
2998c9324d18SGiuseppe CAVALLARO 			goto lpiirq_error;
2999d765955dSGiuseppe CAVALLARO 		}
3000d765955dSGiuseppe CAVALLARO 	}
3001d765955dSGiuseppe CAVALLARO 
3002c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
30039f19306dSOng Boon Leong 	netif_tx_start_all_queues(priv->dev);
30047ac6653aSJeff Kirsher 
30057ac6653aSJeff Kirsher 	return 0;
30067ac6653aSJeff Kirsher 
3007c9324d18SGiuseppe CAVALLARO lpiirq_error:
3008d765955dSGiuseppe CAVALLARO 	if (priv->wol_irq != dev->irq)
3009d765955dSGiuseppe CAVALLARO 		free_irq(priv->wol_irq, dev);
3010c9324d18SGiuseppe CAVALLARO wolirq_error:
30117a13f8f5SFrancesco Virlinzi 	free_irq(dev->irq, dev);
30126c1e5abeSThierry Reding irq_error:
301374371272SJose Abreu 	phylink_stop(priv->phylink);
30147a13f8f5SFrancesco Virlinzi 
30158fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3016d5a05e69SVincent Whitchurch 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
30178fce3331SJose Abreu 
3018c66f6c37SThierry Reding 	stmmac_hw_teardown(dev);
3019c9324d18SGiuseppe CAVALLARO init_error:
3020c9324d18SGiuseppe CAVALLARO 	free_dma_desc_resources(priv);
30215bacd778SLABBE Corentin dma_desc_error:
302274371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
30237ac6653aSJeff Kirsher 	return ret;
30247ac6653aSJeff Kirsher }
30257ac6653aSJeff Kirsher 
30267ac6653aSJeff Kirsher /**
30277ac6653aSJeff Kirsher  *  stmmac_release - close entry point of the driver
30287ac6653aSJeff Kirsher  *  @dev : device pointer.
30297ac6653aSJeff Kirsher  *  Description:
30307ac6653aSJeff Kirsher  *  This is the stop entry point of the driver.
30317ac6653aSJeff Kirsher  */
30327ac6653aSJeff Kirsher static int stmmac_release(struct net_device *dev)
30337ac6653aSJeff Kirsher {
30347ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
30358fce3331SJose Abreu 	u32 chan;
30367ac6653aSJeff Kirsher 
303777b28983SJisheng Zhang 	if (device_may_wakeup(priv->device))
303877b28983SJisheng Zhang 		phylink_speed_down(priv->phylink, false);
30397ac6653aSJeff Kirsher 	/* Stop and disconnect the PHY */
304074371272SJose Abreu 	phylink_stop(priv->phylink);
304174371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
30427ac6653aSJeff Kirsher 
3043c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
30447ac6653aSJeff Kirsher 
30458fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3046d5a05e69SVincent Whitchurch 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
30479125cdd1SGiuseppe CAVALLARO 
30487ac6653aSJeff Kirsher 	/* Free the IRQ lines */
30497ac6653aSJeff Kirsher 	free_irq(dev->irq, dev);
30507a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq)
30517a13f8f5SFrancesco Virlinzi 		free_irq(priv->wol_irq, dev);
3052d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0)
3053d765955dSGiuseppe CAVALLARO 		free_irq(priv->lpi_irq, dev);
30547ac6653aSJeff Kirsher 
30555f585913SFugang Duan 	if (priv->eee_enabled) {
30565f585913SFugang Duan 		priv->tx_path_in_lpi_mode = false;
30575f585913SFugang Duan 		del_timer_sync(&priv->eee_ctrl_timer);
30585f585913SFugang Duan 	}
30595f585913SFugang Duan 
30607ac6653aSJeff Kirsher 	/* Stop TX/RX DMA and clear the descriptors */
3061ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
30627ac6653aSJeff Kirsher 
30637ac6653aSJeff Kirsher 	/* Release and free the Rx/Tx resources */
30647ac6653aSJeff Kirsher 	free_dma_desc_resources(priv);
30657ac6653aSJeff Kirsher 
30667ac6653aSJeff Kirsher 	/* Disable the MAC Rx/Tx */
3067c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
30687ac6653aSJeff Kirsher 
30697ac6653aSJeff Kirsher 	netif_carrier_off(dev);
30707ac6653aSJeff Kirsher 
307192ba6888SRayagond Kokatanur 	stmmac_release_ptp(priv);
307292ba6888SRayagond Kokatanur 
30737ac6653aSJeff Kirsher 	return 0;
30747ac6653aSJeff Kirsher }
30757ac6653aSJeff Kirsher 
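/* Insert the VLAN tag through the TX descriptor when the MAC supports it */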
307630d93227SJose Abreu static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
307730d93227SJose Abreu 			       struct stmmac_tx_queue *tx_q)
307830d93227SJose Abreu {
307930d93227SJose Abreu 	u16 tag = 0x0, inner_tag = 0x0;
308030d93227SJose Abreu 	u32 inner_type = 0x0;
308130d93227SJose Abreu 	struct dma_desc *p;
308230d93227SJose Abreu 
308330d93227SJose Abreu 	if (!priv->dma_cap.vlins)
308430d93227SJose Abreu 		return false;
308530d93227SJose Abreu 	if (!skb_vlan_tag_present(skb))
308630d93227SJose Abreu 		return false;
308730d93227SJose Abreu 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
308830d93227SJose Abreu 		inner_tag = skb_vlan_tag_get(skb);
308930d93227SJose Abreu 		inner_type = STMMAC_VLAN_INSERT;
309030d93227SJose Abreu 	}
309130d93227SJose Abreu 
309230d93227SJose Abreu 	tag = skb_vlan_tag_get(skb);
309330d93227SJose Abreu 
3094579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3095579a25a8SJose Abreu 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3096579a25a8SJose Abreu 	else
3097579a25a8SJose Abreu 		p = &tx_q->dma_tx[tx_q->cur_tx];
3098579a25a8SJose Abreu 
309930d93227SJose Abreu 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
310030d93227SJose Abreu 		return false;
310130d93227SJose Abreu 
310230d93227SJose Abreu 	stmmac_set_tx_owner(priv, p);
3103aa042f60SSong, Yoong Siang 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
310430d93227SJose Abreu 	return true;
310530d93227SJose Abreu }
310630d93227SJose Abreu 
31077ac6653aSJeff Kirsher /**
3108f748be53SAlexandre TORGUE  *  stmmac_tso_allocator - allocate and fill TSO descriptors
3109f748be53SAlexandre TORGUE  *  @priv: driver private structure
3110f748be53SAlexandre TORGUE  *  @des: buffer start address
3111f748be53SAlexandre TORGUE  *  @total_len: total length to fill in descriptors
3112d0ea5cbdSJesse Brandeburg  *  @last_segment: condition for the last descriptor
3113ce736788SJoao Pinto  *  @queue: TX queue index
3114f748be53SAlexandre TORGUE  *  Description:
3115f748be53SAlexandre TORGUE  *  This function fills descriptors and requests new descriptors according to
3116f748be53SAlexandre TORGUE  *  the buffer length to fill
3117f748be53SAlexandre TORGUE  */
3118a993db88SJose Abreu static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3119ce736788SJoao Pinto 				 int total_len, bool last_segment, u32 queue)
3120f748be53SAlexandre TORGUE {
3121ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3122f748be53SAlexandre TORGUE 	struct dma_desc *desc;
31235bacd778SLABBE Corentin 	u32 buff_size;
3124ce736788SJoao Pinto 	int tmp_len;
3125f748be53SAlexandre TORGUE 
3126f748be53SAlexandre TORGUE 	tmp_len = total_len;
3127f748be53SAlexandre TORGUE 
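	/* Split the payload across descriptors of at most TSO_MAX_BUFF_SIZE bytes */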
3128f748be53SAlexandre TORGUE 	while (tmp_len > 0) {
3129a993db88SJose Abreu 		dma_addr_t curr_addr;
3130a993db88SJose Abreu 
3131aa042f60SSong, Yoong Siang 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3132aa042f60SSong, Yoong Siang 						priv->dma_tx_size);
3133b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3134579a25a8SJose Abreu 
3135579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3136579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3137579a25a8SJose Abreu 		else
3138579a25a8SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
3139f748be53SAlexandre TORGUE 
3140a993db88SJose Abreu 		curr_addr = des + (total_len - tmp_len);
3141a993db88SJose Abreu 		if (priv->dma_cap.addr64 <= 32)
3142a993db88SJose Abreu 			desc->des0 = cpu_to_le32(curr_addr);
3143a993db88SJose Abreu 		else
3144a993db88SJose Abreu 			stmmac_set_desc_addr(priv, desc, curr_addr);
3145a993db88SJose Abreu 
3146f748be53SAlexandre TORGUE 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3147f748be53SAlexandre TORGUE 			    TSO_MAX_BUFF_SIZE : tmp_len;
3148f748be53SAlexandre TORGUE 
314942de047dSJose Abreu 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3150f748be53SAlexandre TORGUE 				0, 1,
3151426849e6SNiklas Cassel 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3152f748be53SAlexandre TORGUE 				0, 0);
3153f748be53SAlexandre TORGUE 
3154f748be53SAlexandre TORGUE 		tmp_len -= TSO_MAX_BUFF_SIZE;
3155f748be53SAlexandre TORGUE 	}
3156f748be53SAlexandre TORGUE }
3157f748be53SAlexandre TORGUE 
3158f748be53SAlexandre TORGUE /**
3159f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3160f748be53SAlexandre TORGUE  *  @skb : the socket buffer
3161f748be53SAlexandre TORGUE  *  @dev : device pointer
3162f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
3163f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
3164f748be53SAlexandre TORGUE  *  The diagram below shows the ring programming in case of TSO frames:
3165f748be53SAlexandre TORGUE  *
3166f748be53SAlexandre TORGUE  *  First Descriptor
3167f748be53SAlexandre TORGUE  *   --------
3168f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
3169f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
3170f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
3171f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3172f748be53SAlexandre TORGUE  *   --------
3173f748be53SAlexandre TORGUE  *	|
3174f748be53SAlexandre TORGUE  *     ...
3175f748be53SAlexandre TORGUE  *	|
3176f748be53SAlexandre TORGUE  *   --------
3177f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3178f748be53SAlexandre TORGUE  *   | DES1 | --|
3179f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
3180f748be53SAlexandre TORGUE  *   | DES3 |
3181f748be53SAlexandre TORGUE  *   --------
3182f748be53SAlexandre TORGUE  *
3183f748be53SAlexandre TORGUE  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only written when it changes.
3184f748be53SAlexandre TORGUE  */
3185f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3186f748be53SAlexandre TORGUE {
3187ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
3188f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
3189579a25a8SJose Abreu 	int desc_size, tmp_pay_len = 0, first_tx;
3190f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
3191ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
3192c2837423SJose Abreu 	unsigned int first_entry, tx_packets;
3193ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
3194c2837423SJose Abreu 	bool has_vlan, set_ic;
3195579a25a8SJose Abreu 	u8 proto_hdr_len, hdr;
3196ce736788SJoao Pinto 	u32 pay_len, mss;
3197a993db88SJose Abreu 	dma_addr_t des;
3198f748be53SAlexandre TORGUE 	int i;
3199f748be53SAlexandre TORGUE 
3200ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
3201c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
3202ce736788SJoao Pinto 
3203f748be53SAlexandre TORGUE 	/* Compute header lengths */
3204b7766206SJose Abreu 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3205b7766206SJose Abreu 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3206b7766206SJose Abreu 		hdr = sizeof(struct udphdr);
3207b7766206SJose Abreu 	} else {
3208f748be53SAlexandre TORGUE 		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3209b7766206SJose Abreu 		hdr = tcp_hdrlen(skb);
3210b7766206SJose Abreu 	}
3211f748be53SAlexandre TORGUE 
3212f748be53SAlexandre TORGUE 	/* Descriptor availability based on the threshold should be safe enough */
3213ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
3214f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3215c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3216c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3217c22a3f48SJoao Pinto 								queue));
3218f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
321938ddc59dSLABBE Corentin 			netdev_err(priv->dev,
322038ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
322138ddc59dSLABBE Corentin 				   __func__);
3222f748be53SAlexandre TORGUE 		}
3223f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
3224f748be53SAlexandre TORGUE 	}
3225f748be53SAlexandre TORGUE 
3226f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3227f748be53SAlexandre TORGUE 
3228f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
3229f748be53SAlexandre TORGUE 
3230f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
32318d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
3232579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3233579a25a8SJose Abreu 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3234579a25a8SJose Abreu 		else
3235579a25a8SJose Abreu 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3236579a25a8SJose Abreu 
323742de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
32388d212a9eSNiklas Cassel 		tx_q->mss = mss;
3239aa042f60SSong, Yoong Siang 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3240aa042f60SSong, Yoong Siang 						priv->dma_tx_size);
3241b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3242f748be53SAlexandre TORGUE 	}
3243f748be53SAlexandre TORGUE 
3244f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
3245b7766206SJose Abreu 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3246b7766206SJose Abreu 			__func__, hdr, proto_hdr_len, pay_len, mss);
3247f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3248f748be53SAlexandre TORGUE 			skb->data_len);
3249f748be53SAlexandre TORGUE 	}
3250f748be53SAlexandre TORGUE 
325130d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
325230d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
325330d93227SJose Abreu 
3254ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
3255b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3256f748be53SAlexandre TORGUE 
3257579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3258579a25a8SJose Abreu 		desc = &tx_q->dma_entx[first_entry].basic;
3259579a25a8SJose Abreu 	else
3260579a25a8SJose Abreu 		desc = &tx_q->dma_tx[first_entry];
3261f748be53SAlexandre TORGUE 	first = desc;
3262f748be53SAlexandre TORGUE 
326330d93227SJose Abreu 	if (has_vlan)
326430d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
326530d93227SJose Abreu 
3266f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
3267f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
3268f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
3269f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
3270f748be53SAlexandre TORGUE 		goto dma_map_err;
3271f748be53SAlexandre TORGUE 
3272ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
3273ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
3274f748be53SAlexandre TORGUE 
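	/* With a DMA address width of 32 bits or less, DES0 holds the header
	 * address and DES1 can point at the start of the payload in the same
	 * descriptor; with wider addresses both words are needed for the
	 * buffer address, so the whole payload is handed to
	 * stmmac_tso_allocator() below.
	 */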
3275a993db88SJose Abreu 	if (priv->dma_cap.addr64 <= 32) {
3276f8be0d78SMichael Weiser 		first->des0 = cpu_to_le32(des);
3277f748be53SAlexandre TORGUE 
3278f748be53SAlexandre TORGUE 		/* Fill start of payload in buff2 of first descriptor */
3279f748be53SAlexandre TORGUE 		if (pay_len)
3280f8be0d78SMichael Weiser 			first->des1 = cpu_to_le32(des + proto_hdr_len);
3281f748be53SAlexandre TORGUE 
3282f748be53SAlexandre TORGUE 		/* If needed take extra descriptors to fill the remaining payload */
3283f748be53SAlexandre TORGUE 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3284a993db88SJose Abreu 	} else {
3285a993db88SJose Abreu 		stmmac_set_desc_addr(priv, first, des);
3286a993db88SJose Abreu 		tmp_pay_len = pay_len;
328734c15202Syuqi jin 		des += proto_hdr_len;
3288b2f07199SJose Abreu 		pay_len = 0;
3289a993db88SJose Abreu 	}
3290f748be53SAlexandre TORGUE 
3291ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
3292f748be53SAlexandre TORGUE 
3293f748be53SAlexandre TORGUE 	/* Prepare fragments */
3294f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
3295f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3296f748be53SAlexandre TORGUE 
3297f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
3298f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
3299f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
3300937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
3301937071c1SThierry Reding 			goto dma_map_err;
3302f748be53SAlexandre TORGUE 
3303f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3304ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
3305f748be53SAlexandre TORGUE 
3306ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3307ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3308ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3309f748be53SAlexandre TORGUE 	}
3310f748be53SAlexandre TORGUE 
3311ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3312f748be53SAlexandre TORGUE 
331305cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
331405cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
331505cf0d1bSNiklas Cassel 
33167df4a3a7SJose Abreu 	/* Manage tx mitigation */
3317c2837423SJose Abreu 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
3318c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
3319c2837423SJose Abreu 
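	/* Decide whether this frame requests a Tx completion interrupt:
	 * always when a hardware timestamp is pending, otherwise only about
	 * every tx_coal_frames packets, letting the coalesce timer clean the
	 * ring in between.
	 */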
3320c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3321c2837423SJose Abreu 		set_ic = true;
3322c2837423SJose Abreu 	else if (!priv->tx_coal_frames)
3323c2837423SJose Abreu 		set_ic = false;
3324c2837423SJose Abreu 	else if (tx_packets > priv->tx_coal_frames)
3325c2837423SJose Abreu 		set_ic = true;
3326c2837423SJose Abreu 	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3327c2837423SJose Abreu 		set_ic = true;
3328c2837423SJose Abreu 	else
3329c2837423SJose Abreu 		set_ic = false;
3330c2837423SJose Abreu 
3331c2837423SJose Abreu 	if (set_ic) {
3332579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3333579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3334579a25a8SJose Abreu 		else
33357df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
3336579a25a8SJose Abreu 
33377df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
33387df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
33397df4a3a7SJose Abreu 		priv->xstats.tx_set_ic_bit++;
33407df4a3a7SJose Abreu 	}
33417df4a3a7SJose Abreu 
334205cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
334305cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
334405cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
334505cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
334605cf0d1bSNiklas Cassel 	 */
3347aa042f60SSong, Yoong Siang 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3348f748be53SAlexandre TORGUE 
3349ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3350b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
335138ddc59dSLABBE Corentin 			  __func__);
3352c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3353f748be53SAlexandre TORGUE 	}
3354f748be53SAlexandre TORGUE 
3355f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
3356f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
3357f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
3358f748be53SAlexandre TORGUE 
33598000ddc0SJose Abreu 	if (priv->sarc_type)
33608000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
33618000ddc0SJose Abreu 
3362f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
3363f748be53SAlexandre TORGUE 
3364f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3365f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
3366f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
3367f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
336842de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
3369f748be53SAlexandre TORGUE 	}
3370f748be53SAlexandre TORGUE 
3371f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
337242de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
3373f748be53SAlexandre TORGUE 			proto_hdr_len,
3374f748be53SAlexandre TORGUE 			pay_len,
3375ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3376b7766206SJose Abreu 			hdr / 4, (skb->len - proto_hdr_len));
3377f748be53SAlexandre TORGUE 
3378f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
337915d2ee42SNiklas Cassel 	if (mss_desc) {
338015d2ee42SNiklas Cassel 		/* Make sure that first descriptor has been completely
338115d2ee42SNiklas Cassel 		 * written, including its own bit. This is because MSS is
338215d2ee42SNiklas Cassel 		 * actually before first descriptor, so we need to make
338315d2ee42SNiklas Cassel 		 * sure that MSS's own bit is the last thing written.
338415d2ee42SNiklas Cassel 		 */
338515d2ee42SNiklas Cassel 		dma_wmb();
338642de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
338715d2ee42SNiklas Cassel 	}
3388f748be53SAlexandre TORGUE 
3389f748be53SAlexandre TORGUE 	/* The own bit must be the last setting done when preparing the
3390f748be53SAlexandre TORGUE 	 * descriptor; a barrier is then needed to make sure that
3391f748be53SAlexandre TORGUE 	 * everything is coherent before granting the DMA engine.
3392f748be53SAlexandre TORGUE 	 */
339395eb930aSNiklas Cassel 	wmb();
3394f748be53SAlexandre TORGUE 
3395f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
3396f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3397ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3398ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
3399f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
3400f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
3401f748be53SAlexandre TORGUE 	}
3402f748be53SAlexandre TORGUE 
3403c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3404f748be53SAlexandre TORGUE 
3405579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3406579a25a8SJose Abreu 		desc_size = sizeof(struct dma_edesc);
3407579a25a8SJose Abreu 	else
3408579a25a8SJose Abreu 		desc_size = sizeof(struct dma_desc);
3409579a25a8SJose Abreu 
3410579a25a8SJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3411a4e887faSJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
34124772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
3413f748be53SAlexandre TORGUE 
3414f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3415f748be53SAlexandre TORGUE 
3416f748be53SAlexandre TORGUE dma_map_err:
3417f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
3418f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
3419f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
3420f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3421f748be53SAlexandre TORGUE }
3422f748be53SAlexandre TORGUE 
3423f748be53SAlexandre TORGUE /**
3424732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
34257ac6653aSJeff Kirsher  *  @skb : the socket buffer
34267ac6653aSJeff Kirsher  *  @dev : device pointer
342732ceabcaSGiuseppe CAVALLARO  *  Description : this is the tx entry point of the driver.
342832ceabcaSGiuseppe CAVALLARO  *  It programs the chain or the ring and supports oversized frames
342932ceabcaSGiuseppe CAVALLARO  *  and the SG feature.
34307ac6653aSJeff Kirsher  */
34317ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
34327ac6653aSJeff Kirsher {
3433c2837423SJose Abreu 	unsigned int first_entry, tx_packets, enh_desc;
34347ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
34350e80bdc9SGiuseppe Cavallaro 	unsigned int nopaged_len = skb_headlen(skb);
34364a7d666aSGiuseppe CAVALLARO 	int i, csum_insertion = 0, is_jumbo = 0;
3437ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
34387ac6653aSJeff Kirsher 	int nfrags = skb_shinfo(skb)->nr_frags;
3439b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
3440579a25a8SJose Abreu 	struct dma_edesc *tbs_desc = NULL;
3441579a25a8SJose Abreu 	int entry, desc_size, first_tx;
34427ac6653aSJeff Kirsher 	struct dma_desc *desc, *first;
3443ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
3444c2837423SJose Abreu 	bool has_vlan, set_ic;
3445a993db88SJose Abreu 	dma_addr_t des;
3446f748be53SAlexandre TORGUE 
3447ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
3448c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
3449ce736788SJoao Pinto 
3450be1c7eaeSVineetha G. Jaya Kumaran 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
3451e2cd682dSJose Abreu 		stmmac_disable_eee_mode(priv);
3452e2cd682dSJose Abreu 
3453f748be53SAlexandre TORGUE 	/* Manage oversized TCP (and UDP L4 GSO) frames for GMAC4 devices */
3454f748be53SAlexandre TORGUE 	if (skb_is_gso(skb) && priv->tso) {
3455b7766206SJose Abreu 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3456b7766206SJose Abreu 			return stmmac_tso_xmit(skb, dev);
3457b7766206SJose Abreu 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
3458f748be53SAlexandre TORGUE 			return stmmac_tso_xmit(skb, dev);
3459f748be53SAlexandre TORGUE 	}
34607ac6653aSJeff Kirsher 
3461ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3462c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3463c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3464c22a3f48SJoao Pinto 								queue));
34657ac6653aSJeff Kirsher 			/* This is a hard error, log it. */
346638ddc59dSLABBE Corentin 			netdev_err(priv->dev,
346738ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
346838ddc59dSLABBE Corentin 				   __func__);
34697ac6653aSJeff Kirsher 		}
34707ac6653aSJeff Kirsher 		return NETDEV_TX_BUSY;
34717ac6653aSJeff Kirsher 	}
34727ac6653aSJeff Kirsher 
347330d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
347430d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
347530d93227SJose Abreu 
3476ce736788SJoao Pinto 	entry = tx_q->cur_tx;
34770e80bdc9SGiuseppe Cavallaro 	first_entry = entry;
3478b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
34797ac6653aSJeff Kirsher 
34807ac6653aSJeff Kirsher 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
34817ac6653aSJeff Kirsher 
34820e80bdc9SGiuseppe Cavallaro 	if (likely(priv->extend_desc))
3483ce736788SJoao Pinto 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3484579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3485579a25a8SJose Abreu 		desc = &tx_q->dma_entx[entry].basic;
3486c24602efSGiuseppe CAVALLARO 	else
3487ce736788SJoao Pinto 		desc = tx_q->dma_tx + entry;
3488c24602efSGiuseppe CAVALLARO 
34897ac6653aSJeff Kirsher 	first = desc;
34907ac6653aSJeff Kirsher 
349130d93227SJose Abreu 	if (has_vlan)
349230d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
349330d93227SJose Abreu 
34940e80bdc9SGiuseppe Cavallaro 	enh_desc = priv->plat->enh_desc;
34954a7d666aSGiuseppe CAVALLARO 	/* To program the descriptors according to the size of the frame */
349629896a67SGiuseppe CAVALLARO 	if (enh_desc)
34972c520b1cSJose Abreu 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
349829896a67SGiuseppe CAVALLARO 
349963a550fcSJose Abreu 	if (unlikely(is_jumbo)) {
35002c520b1cSJose Abreu 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
350163a550fcSJose Abreu 		if (unlikely(entry < 0) && (entry != -EINVAL))
3502362b37beSGiuseppe CAVALLARO 			goto dma_map_err;
350329896a67SGiuseppe CAVALLARO 	}
35047ac6653aSJeff Kirsher 
35057ac6653aSJeff Kirsher 	for (i = 0; i < nfrags; i++) {
35069e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
35079e903e08SEric Dumazet 		int len = skb_frag_size(frag);
3508be434d50SGiuseppe Cavallaro 		bool last_segment = (i == (nfrags - 1));
35097ac6653aSJeff Kirsher 
3510aa042f60SSong, Yoong Siang 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3511b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[entry]);
3512e3ad57c9SGiuseppe Cavallaro 
35130e80bdc9SGiuseppe Cavallaro 		if (likely(priv->extend_desc))
3514ce736788SJoao Pinto 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3515579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3516579a25a8SJose Abreu 			desc = &tx_q->dma_entx[entry].basic;
3517c24602efSGiuseppe CAVALLARO 		else
3518ce736788SJoao Pinto 			desc = tx_q->dma_tx + entry;
35197ac6653aSJeff Kirsher 
3520f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3521f722380dSIan Campbell 				       DMA_TO_DEVICE);
3522f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
3523362b37beSGiuseppe CAVALLARO 			goto dma_map_err; /* should reuse desc w/o issues */
3524362b37beSGiuseppe CAVALLARO 
3525ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].buf = des;
35266844171dSJose Abreu 
35276844171dSJose Abreu 		stmmac_set_desc_addr(priv, desc, des);
3528f748be53SAlexandre TORGUE 
3529ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3530ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].len = len;
3531ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
35320e80bdc9SGiuseppe Cavallaro 
35330e80bdc9SGiuseppe Cavallaro 		/* Prepare the descriptor and set the own bit too */
353442de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
353542de047dSJose Abreu 				priv->mode, 1, last_segment, skb->len);
35367ac6653aSJeff Kirsher 	}
35377ac6653aSJeff Kirsher 
353805cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
353905cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[entry] = skb;
3540e3ad57c9SGiuseppe Cavallaro 
35417df4a3a7SJose Abreu 	/* According to the coalesce parameter the IC bit for the latest
35427df4a3a7SJose Abreu 	 * segment is reset and the timer re-started to clean the tx status.
35437df4a3a7SJose Abreu 	 * This approach takes care about the fragments: desc is the first
35447df4a3a7SJose Abreu 	 * element in case of no SG.
35457df4a3a7SJose Abreu 	 */
3546c2837423SJose Abreu 	tx_packets = (entry + 1) - first_tx;
3547c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
3548c2837423SJose Abreu 
3549c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3550c2837423SJose Abreu 		set_ic = true;
3551c2837423SJose Abreu 	else if (!priv->tx_coal_frames)
3552c2837423SJose Abreu 		set_ic = false;
3553c2837423SJose Abreu 	else if (tx_packets > priv->tx_coal_frames)
3554c2837423SJose Abreu 		set_ic = true;
3555c2837423SJose Abreu 	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3556c2837423SJose Abreu 		set_ic = true;
3557c2837423SJose Abreu 	else
3558c2837423SJose Abreu 		set_ic = false;
3559c2837423SJose Abreu 
3560c2837423SJose Abreu 	if (set_ic) {
35617df4a3a7SJose Abreu 		if (likely(priv->extend_desc))
35627df4a3a7SJose Abreu 			desc = &tx_q->dma_etx[entry].basic;
3563579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3564579a25a8SJose Abreu 			desc = &tx_q->dma_entx[entry].basic;
35657df4a3a7SJose Abreu 		else
35667df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[entry];
35677df4a3a7SJose Abreu 
35687df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
35697df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
35707df4a3a7SJose Abreu 		priv->xstats.tx_set_ic_bit++;
35717df4a3a7SJose Abreu 	}
35727df4a3a7SJose Abreu 
357305cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
357405cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
357505cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
357605cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
357705cf0d1bSNiklas Cassel 	 */
3578aa042f60SSong, Yoong Siang 	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3579ce736788SJoao Pinto 	tx_q->cur_tx = entry;
35807ac6653aSJeff Kirsher 
35817ac6653aSJeff Kirsher 	if (netif_msg_pktdata(priv)) {
358238ddc59dSLABBE Corentin 		netdev_dbg(priv->dev,
358338ddc59dSLABBE Corentin 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3584ce736788SJoao Pinto 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
35850e80bdc9SGiuseppe Cavallaro 			   entry, first, nfrags);
358683d7af64SGiuseppe CAVALLARO 
358738ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
35887ac6653aSJeff Kirsher 		print_pkt(skb->data, skb->len);
35897ac6653aSJeff Kirsher 	}
35900e80bdc9SGiuseppe Cavallaro 
3591ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3592b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3593b3e51069SLABBE Corentin 			  __func__);
3594c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
35957ac6653aSJeff Kirsher 	}
35967ac6653aSJeff Kirsher 
35977ac6653aSJeff Kirsher 	dev->stats.tx_bytes += skb->len;
35987ac6653aSJeff Kirsher 
35998000ddc0SJose Abreu 	if (priv->sarc_type)
36008000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
36018000ddc0SJose Abreu 
36020e80bdc9SGiuseppe Cavallaro 	skb_tx_timestamp(skb);
36030e80bdc9SGiuseppe Cavallaro 
36040e80bdc9SGiuseppe Cavallaro 	/* Ready to fill the first descriptor and set the OWN bit w/o any
36050e80bdc9SGiuseppe Cavallaro 	 * problems because all the descriptors are actually ready to be
36060e80bdc9SGiuseppe Cavallaro 	 * passed to the DMA engine.
36070e80bdc9SGiuseppe Cavallaro 	 */
36080e80bdc9SGiuseppe Cavallaro 	if (likely(!is_jumbo)) {
36090e80bdc9SGiuseppe Cavallaro 		bool last_segment = (nfrags == 0);
36100e80bdc9SGiuseppe Cavallaro 
3611f748be53SAlexandre TORGUE 		des = dma_map_single(priv->device, skb->data,
36120e80bdc9SGiuseppe Cavallaro 				     nopaged_len, DMA_TO_DEVICE);
3613f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
36140e80bdc9SGiuseppe Cavallaro 			goto dma_map_err;
36150e80bdc9SGiuseppe Cavallaro 
3616ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].buf = des;
36176844171dSJose Abreu 
36186844171dSJose Abreu 		stmmac_set_desc_addr(priv, first, des);
3619f748be53SAlexandre TORGUE 
3620ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3621ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
36220e80bdc9SGiuseppe Cavallaro 
3623891434b1SRayagond Kokatanur 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3624891434b1SRayagond Kokatanur 			     priv->hwts_tx_en)) {
3625891434b1SRayagond Kokatanur 			/* declare that device is doing timestamping */
3626891434b1SRayagond Kokatanur 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
362742de047dSJose Abreu 			stmmac_enable_tx_timestamp(priv, first);
3628891434b1SRayagond Kokatanur 		}
3629891434b1SRayagond Kokatanur 
36300e80bdc9SGiuseppe Cavallaro 		/* Prepare the first descriptor setting the OWN bit too */
363142de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3632579a25a8SJose Abreu 				csum_insertion, priv->mode, 0, last_segment,
363342de047dSJose Abreu 				skb->len);
363480acbed9SAaro Koskinen 	}
36350e80bdc9SGiuseppe Cavallaro 
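	/* With TBS enabled, program the launch time taken from skb->tstamp
	 * into the enhanced descriptor so the frame is transmitted at the
	 * requested time.
	 */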
3636579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_EN) {
3637579a25a8SJose Abreu 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
3638579a25a8SJose Abreu 
3639579a25a8SJose Abreu 		tbs_desc = &tx_q->dma_entx[first_entry];
3640579a25a8SJose Abreu 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
3641579a25a8SJose Abreu 	}
3642579a25a8SJose Abreu 
3643579a25a8SJose Abreu 	stmmac_set_tx_owner(priv, first);
3644579a25a8SJose Abreu 
36450e80bdc9SGiuseppe Cavallaro 	/* The own bit must be the last setting done when preparing the
36460e80bdc9SGiuseppe Cavallaro 	 * descriptor; a barrier is then needed to make sure that
36470e80bdc9SGiuseppe Cavallaro 	 * everything is coherent before granting the DMA engine.
36480e80bdc9SGiuseppe Cavallaro 	 */
364995eb930aSNiklas Cassel 	wmb();
36507ac6653aSJeff Kirsher 
3651c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3652f748be53SAlexandre TORGUE 
3653a4e887faSJose Abreu 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
36548fce3331SJose Abreu 
3655579a25a8SJose Abreu 	if (likely(priv->extend_desc))
3656579a25a8SJose Abreu 		desc_size = sizeof(struct dma_extended_desc);
3657579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3658579a25a8SJose Abreu 		desc_size = sizeof(struct dma_edesc);
3659579a25a8SJose Abreu 	else
3660579a25a8SJose Abreu 		desc_size = sizeof(struct dma_desc);
3661579a25a8SJose Abreu 
3662579a25a8SJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3663f1565c60SJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
36644772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
36657ac6653aSJeff Kirsher 
3666362b37beSGiuseppe CAVALLARO 	return NETDEV_TX_OK;
3667a9097a96SGiuseppe CAVALLARO 
3668362b37beSGiuseppe CAVALLARO dma_map_err:
366938ddc59dSLABBE Corentin 	netdev_err(priv->dev, "Tx DMA map failed\n");
3670362b37beSGiuseppe CAVALLARO 	dev_kfree_skb(skb);
3671362b37beSGiuseppe CAVALLARO 	priv->dev->stats.tx_dropped++;
36727ac6653aSJeff Kirsher 	return NETDEV_TX_OK;
36737ac6653aSJeff Kirsher }
36747ac6653aSJeff Kirsher 
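/* Pop a VLAN tag from the received frame in software and hand it to the
 * stack through the VLAN acceleration API, but only when the matching RX
 * VLAN offload feature is enabled.
 */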
3675b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3676b9381985SVince Bridgers {
3677ab188e8fSElad Nachman 	struct vlan_ethhdr *veth;
3678ab188e8fSElad Nachman 	__be16 vlan_proto;
3679b9381985SVince Bridgers 	u16 vlanid;
3680b9381985SVince Bridgers 
3681ab188e8fSElad Nachman 	veth = (struct vlan_ethhdr *)skb->data;
3682ab188e8fSElad Nachman 	vlan_proto = veth->h_vlan_proto;
3683ab188e8fSElad Nachman 
3684ab188e8fSElad Nachman 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3685ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3686ab188e8fSElad Nachman 	    (vlan_proto == htons(ETH_P_8021AD) &&
3687ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3688b9381985SVince Bridgers 		/* pop the vlan tag */
3689ab188e8fSElad Nachman 		vlanid = ntohs(veth->h_vlan_TCI);
3690ab188e8fSElad Nachman 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3691b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
3692ab188e8fSElad Nachman 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3693b9381985SVince Bridgers 	}
3694b9381985SVince Bridgers }
3695b9381985SVince Bridgers 
369632ceabcaSGiuseppe CAVALLARO /**
3697732fdf0eSGiuseppe CAVALLARO  * stmmac_rx_refill - refill the RX queue with new buffers
369832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
369954139cf3SJoao Pinto  * @queue: RX queue index
370032ceabcaSGiuseppe CAVALLARO  * Description : this reallocates the RX buffers (page pool based) used by
370132ceabcaSGiuseppe CAVALLARO  * the zero-copy reception process.
370232ceabcaSGiuseppe CAVALLARO  */
370354139cf3SJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
37047ac6653aSJeff Kirsher {
370554139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
37063caa61c2SJose Abreu 	int len, dirty = stmmac_rx_dirty(priv, queue);
370754139cf3SJoao Pinto 	unsigned int entry = rx_q->dirty_rx;
370854139cf3SJoao Pinto 
37093caa61c2SJose Abreu 	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
37103caa61c2SJose Abreu 
3711e3ad57c9SGiuseppe Cavallaro 	while (dirty-- > 0) {
37122af6106aSJose Abreu 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3713c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
3714d429b66eSJose Abreu 		bool use_rx_wd;
3715c24602efSGiuseppe CAVALLARO 
3716c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
371754139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3718c24602efSGiuseppe CAVALLARO 		else
371954139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3720c24602efSGiuseppe CAVALLARO 
37212af6106aSJose Abreu 		if (!buf->page) {
37222af6106aSJose Abreu 			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
37232af6106aSJose Abreu 			if (!buf->page)
37247ac6653aSJeff Kirsher 				break;
3725120e87f9SGiuseppe Cavallaro 		}
37267ac6653aSJeff Kirsher 
372767afd6d1SJose Abreu 		if (priv->sph && !buf->sec_page) {
372867afd6d1SJose Abreu 			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
372967afd6d1SJose Abreu 			if (!buf->sec_page)
373067afd6d1SJose Abreu 				break;
373167afd6d1SJose Abreu 
373267afd6d1SJose Abreu 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
373367afd6d1SJose Abreu 
373467afd6d1SJose Abreu 			dma_sync_single_for_device(priv->device, buf->sec_addr,
373567afd6d1SJose Abreu 						   len, DMA_FROM_DEVICE);
373667afd6d1SJose Abreu 		}
373767afd6d1SJose Abreu 
37382af6106aSJose Abreu 		buf->addr = page_pool_get_dma_addr(buf->page);
37393caa61c2SJose Abreu 
37403caa61c2SJose Abreu 		/* Sync whole allocation to device. This will invalidate old
37413caa61c2SJose Abreu 		 * data.
37423caa61c2SJose Abreu 		 */
37433caa61c2SJose Abreu 		dma_sync_single_for_device(priv->device, buf->addr, len,
37443caa61c2SJose Abreu 					   DMA_FROM_DEVICE);
37453caa61c2SJose Abreu 
37462af6106aSJose Abreu 		stmmac_set_desc_addr(priv, p, buf->addr);
3747396e13e1SJoakim Zhang 		if (priv->sph)
3748396e13e1SJoakim Zhang 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
3749396e13e1SJoakim Zhang 		else
3750396e13e1SJoakim Zhang 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
37512c520b1cSJose Abreu 		stmmac_refill_desc3(priv, rx_q, p);
3752286a8372SGiuseppe CAVALLARO 
3753d429b66eSJose Abreu 		rx_q->rx_count_frames++;
37546fa9d691SJose Abreu 		rx_q->rx_count_frames += priv->rx_coal_frames;
37556fa9d691SJose Abreu 		if (rx_q->rx_count_frames > priv->rx_coal_frames)
37566fa9d691SJose Abreu 			rx_q->rx_count_frames = 0;
375709146abeSJose Abreu 
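		/* Request an interrupt on completion for this descriptor only
		 * when the frame counter has just wrapped or RIWT is not in
		 * use; otherwise rely on the RX watchdog to bound the
		 * interrupt latency.
		 */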
375809146abeSJose Abreu 		use_rx_wd = !priv->rx_coal_frames;
375909146abeSJose Abreu 		use_rx_wd |= rx_q->rx_count_frames > 0;
376009146abeSJose Abreu 		if (!priv->use_riwt)
376109146abeSJose Abreu 			use_rx_wd = false;
3762d429b66eSJose Abreu 
3763ad688cdbSPavel Machek 		dma_wmb();
37642af6106aSJose Abreu 		stmmac_set_rx_owner(priv, p, use_rx_wd);
3765e3ad57c9SGiuseppe Cavallaro 
3766aa042f60SSong, Yoong Siang 		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
37677ac6653aSJeff Kirsher 	}
376854139cf3SJoao Pinto 	rx_q->dirty_rx = entry;
3769858a31ffSJose Abreu 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3770858a31ffSJose Abreu 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
37714523a561SBiao Huang 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
37727ac6653aSJeff Kirsher }
37737ac6653aSJeff Kirsher 
377488ebe2cfSJose Abreu static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
377588ebe2cfSJose Abreu 				       struct dma_desc *p,
377688ebe2cfSJose Abreu 				       int status, unsigned int len)
377788ebe2cfSJose Abreu {
377888ebe2cfSJose Abreu 	unsigned int plen = 0, hlen = 0;
377931f2760eSLuo Jiaxing 	int coe = priv->hw->rx_csum;
378088ebe2cfSJose Abreu 
378188ebe2cfSJose Abreu 	/* Not first descriptor, buffer is always zero */
378288ebe2cfSJose Abreu 	if (priv->sph && len)
378388ebe2cfSJose Abreu 		return 0;
378488ebe2cfSJose Abreu 
378588ebe2cfSJose Abreu 	/* First descriptor, get split header length */
378631f2760eSLuo Jiaxing 	stmmac_get_rx_header_len(priv, p, &hlen);
378788ebe2cfSJose Abreu 	if (priv->sph && hlen) {
378888ebe2cfSJose Abreu 		priv->xstats.rx_split_hdr_pkt_n++;
378988ebe2cfSJose Abreu 		return hlen;
379088ebe2cfSJose Abreu 	}
379188ebe2cfSJose Abreu 
379288ebe2cfSJose Abreu 	/* First descriptor, not last descriptor and not split header */
379388ebe2cfSJose Abreu 	if (status & rx_not_ls)
379488ebe2cfSJose Abreu 		return priv->dma_buf_sz;
379588ebe2cfSJose Abreu 
379688ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
379788ebe2cfSJose Abreu 
379888ebe2cfSJose Abreu 	/* First descriptor and last descriptor and not split header */
379988ebe2cfSJose Abreu 	return min_t(unsigned int, priv->dma_buf_sz, plen);
380088ebe2cfSJose Abreu }
380188ebe2cfSJose Abreu 
380288ebe2cfSJose Abreu static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
380388ebe2cfSJose Abreu 				       struct dma_desc *p,
380488ebe2cfSJose Abreu 				       int status, unsigned int len)
380588ebe2cfSJose Abreu {
380688ebe2cfSJose Abreu 	int coe = priv->hw->rx_csum;
380788ebe2cfSJose Abreu 	unsigned int plen = 0;
380888ebe2cfSJose Abreu 
380988ebe2cfSJose Abreu 	/* Not split header, buffer is not available */
381088ebe2cfSJose Abreu 	if (!priv->sph)
381188ebe2cfSJose Abreu 		return 0;
381288ebe2cfSJose Abreu 
381388ebe2cfSJose Abreu 	/* Not last descriptor */
381488ebe2cfSJose Abreu 	if (status & rx_not_ls)
381588ebe2cfSJose Abreu 		return priv->dma_buf_sz;
381688ebe2cfSJose Abreu 
381788ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
381888ebe2cfSJose Abreu 
381988ebe2cfSJose Abreu 	/* Last descriptor */
382088ebe2cfSJose Abreu 	return plen - len;
382188ebe2cfSJose Abreu }
382288ebe2cfSJose Abreu 
382332ceabcaSGiuseppe CAVALLARO /**
3824732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
382532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
382654139cf3SJoao Pinto  * @limit: napi budget
382754139cf3SJoao Pinto  * @queue: RX queue index.
382832ceabcaSGiuseppe CAVALLARO  * Description :  this is the function called by the napi poll method.
382932ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
383032ceabcaSGiuseppe CAVALLARO  */
383154139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
38327ac6653aSJeff Kirsher {
383354139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
38348fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[queue];
3835ec222003SJose Abreu 	unsigned int count = 0, error = 0, len = 0;
3836ec222003SJose Abreu 	int status = 0, coe = priv->hw->rx_csum;
383707b39753SAaro Koskinen 	unsigned int next_entry = rx_q->cur_rx;
3838bfaf91caSJoakim Zhang 	unsigned int desc_size;
3839ec222003SJose Abreu 	struct sk_buff *skb = NULL;
38407ac6653aSJeff Kirsher 
384183d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
3842d0225e7dSAlexandre TORGUE 		void *rx_head;
3843d0225e7dSAlexandre TORGUE 
384438ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3845bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
384654139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
3847bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
3848bfaf91caSJoakim Zhang 		} else {
384954139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
3850bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
3851bfaf91caSJoakim Zhang 		}
3852d0225e7dSAlexandre TORGUE 
3853bfaf91caSJoakim Zhang 		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
3854bfaf91caSJoakim Zhang 				    rx_q->dma_rx_phy, desc_size);
38557ac6653aSJeff Kirsher 	}
3856c24602efSGiuseppe CAVALLARO 	while (count < limit) {
385788ebe2cfSJose Abreu 		unsigned int buf1_len = 0, buf2_len = 0;
3858ec222003SJose Abreu 		enum pkt_hash_types hash_type;
38592af6106aSJose Abreu 		struct stmmac_rx_buffer *buf;
38602af6106aSJose Abreu 		struct dma_desc *np, *p;
3861ec222003SJose Abreu 		int entry;
3862ec222003SJose Abreu 		u32 hash;
38637ac6653aSJeff Kirsher 
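		/* A packet may span more descriptors than the remaining NAPI
		 * budget allows; in that case its partially built skb, error
		 * and length state were saved at the end of the previous poll
		 * and are restored here.
		 */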
3864ec222003SJose Abreu 		if (!count && rx_q->state_saved) {
3865ec222003SJose Abreu 			skb = rx_q->state.skb;
3866ec222003SJose Abreu 			error = rx_q->state.error;
3867ec222003SJose Abreu 			len = rx_q->state.len;
3868ec222003SJose Abreu 		} else {
3869ec222003SJose Abreu 			rx_q->state_saved = false;
3870ec222003SJose Abreu 			skb = NULL;
3871ec222003SJose Abreu 			error = 0;
3872ec222003SJose Abreu 			len = 0;
3873ec222003SJose Abreu 		}
3874ec222003SJose Abreu 
3875ec222003SJose Abreu 		if (count >= limit)
3876ec222003SJose Abreu 			break;
3877ec222003SJose Abreu 
3878ec222003SJose Abreu read_again:
387988ebe2cfSJose Abreu 		buf1_len = 0;
388088ebe2cfSJose Abreu 		buf2_len = 0;
388107b39753SAaro Koskinen 		entry = next_entry;
38822af6106aSJose Abreu 		buf = &rx_q->buf_pool[entry];
388307b39753SAaro Koskinen 
3884c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
388554139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3886c24602efSGiuseppe CAVALLARO 		else
388754139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3888c24602efSGiuseppe CAVALLARO 
3889c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
389042de047dSJose Abreu 		status = stmmac_rx_status(priv, &priv->dev->stats,
3891c1fa3212SFabrice Gasnier 				&priv->xstats, p);
3892c1fa3212SFabrice Gasnier 		/* check if managed by the DMA otherwise go ahead */
3893c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
38947ac6653aSJeff Kirsher 			break;
38957ac6653aSJeff Kirsher 
3896aa042f60SSong, Yoong Siang 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
3897aa042f60SSong, Yoong Siang 						priv->dma_rx_size);
389854139cf3SJoao Pinto 		next_entry = rx_q->cur_rx;
3899e3ad57c9SGiuseppe Cavallaro 
3900c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
390154139cf3SJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3902c24602efSGiuseppe CAVALLARO 		else
390354139cf3SJoao Pinto 			np = rx_q->dma_rx + next_entry;
3904ba1ffd74SGiuseppe CAVALLARO 
3905ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
39067ac6653aSJeff Kirsher 
390742de047dSJose Abreu 		if (priv->extend_desc)
390842de047dSJose Abreu 			stmmac_rx_extended_status(priv, &priv->dev->stats,
390942de047dSJose Abreu 					&priv->xstats, rx_q->dma_erx + entry);
3910891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
39112af6106aSJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
39122af6106aSJose Abreu 			buf->page = NULL;
3913ec222003SJose Abreu 			error = 1;
39140b273ca4SJose Abreu 			if (!priv->hwts_rx_en)
39150b273ca4SJose Abreu 				priv->dev->stats.rx_errors++;
3916ec222003SJose Abreu 		}
3917f748be53SAlexandre TORGUE 
3918ec222003SJose Abreu 		if (unlikely(error && (status & rx_not_ls)))
3919ec222003SJose Abreu 			goto read_again;
3920ec222003SJose Abreu 		if (unlikely(error)) {
3921ec222003SJose Abreu 			dev_kfree_skb(skb);
392288ebe2cfSJose Abreu 			skb = NULL;
3923cda4985aSJose Abreu 			count++;
392407b39753SAaro Koskinen 			continue;
3925e527c4a7SGiuseppe CAVALLARO 		}
3926e527c4a7SGiuseppe CAVALLARO 
3927ec222003SJose Abreu 		/* Buffer is good. Go on. */
3928ec222003SJose Abreu 
392988ebe2cfSJose Abreu 		prefetch(page_address(buf->page));
393088ebe2cfSJose Abreu 		if (buf->sec_page)
393188ebe2cfSJose Abreu 			prefetch(page_address(buf->sec_page));
393288ebe2cfSJose Abreu 
393388ebe2cfSJose Abreu 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
393488ebe2cfSJose Abreu 		len += buf1_len;
393588ebe2cfSJose Abreu 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
393688ebe2cfSJose Abreu 		len += buf2_len;
3937ec222003SJose Abreu 
39387ac6653aSJeff Kirsher 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3939ceb69499SGiuseppe CAVALLARO 		 * Type frames (LLC/LLC-SNAP)
3940565020aaSJose Abreu 		 *
3941565020aaSJose Abreu 		 * llc_snap is never checked in GMAC >= 4, so this ACS
3942565020aaSJose Abreu 		 * feature is always disabled and packets need to be
3943565020aaSJose Abreu 		 * stripped manually.
3944ceb69499SGiuseppe CAVALLARO 		 */
394593b5dce4SJose Abreu 		if (likely(!(status & rx_not_ls)) &&
394693b5dce4SJose Abreu 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
394793b5dce4SJose Abreu 		     unlikely(status != llc_snap))) {
394888ebe2cfSJose Abreu 			if (buf2_len)
394988ebe2cfSJose Abreu 				buf2_len -= ETH_FCS_LEN;
395088ebe2cfSJose Abreu 			else
395188ebe2cfSJose Abreu 				buf1_len -= ETH_FCS_LEN;
395288ebe2cfSJose Abreu 
3953ec222003SJose Abreu 			len -= ETH_FCS_LEN;
395483d7af64SGiuseppe CAVALLARO 		}
395522ad3838SGiuseppe Cavallaro 
3956ec222003SJose Abreu 		if (!skb) {
395788ebe2cfSJose Abreu 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
3958ec222003SJose Abreu 			if (!skb) {
395922ad3838SGiuseppe Cavallaro 				priv->dev->stats.rx_dropped++;
3960cda4985aSJose Abreu 				count++;
396188ebe2cfSJose Abreu 				goto drain_data;
396222ad3838SGiuseppe Cavallaro 			}
396322ad3838SGiuseppe Cavallaro 
396488ebe2cfSJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
396588ebe2cfSJose Abreu 						buf1_len, DMA_FROM_DEVICE);
39662af6106aSJose Abreu 			skb_copy_to_linear_data(skb, page_address(buf->page),
396788ebe2cfSJose Abreu 						buf1_len);
396888ebe2cfSJose Abreu 			skb_put(skb, buf1_len);
396922ad3838SGiuseppe Cavallaro 
3970ec222003SJose Abreu 			/* Data payload copied into SKB, page ready for recycle */
3971ec222003SJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
3972ec222003SJose Abreu 			buf->page = NULL;
397388ebe2cfSJose Abreu 		} else if (buf1_len) {
3974ec222003SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
397588ebe2cfSJose Abreu 						buf1_len, DMA_FROM_DEVICE);
3976ec222003SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
397788ebe2cfSJose Abreu 					buf->page, 0, buf1_len,
3978ec222003SJose Abreu 					priv->dma_buf_sz);
3979ec222003SJose Abreu 
3980ec222003SJose Abreu 			/* Data payload appended into SKB */
3981ec222003SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->page);
3982ec222003SJose Abreu 			buf->page = NULL;
39837ac6653aSJeff Kirsher 		}
398483d7af64SGiuseppe CAVALLARO 
398588ebe2cfSJose Abreu 		if (buf2_len) {
398667afd6d1SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
398788ebe2cfSJose Abreu 						buf2_len, DMA_FROM_DEVICE);
398867afd6d1SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
398988ebe2cfSJose Abreu 					buf->sec_page, 0, buf2_len,
399067afd6d1SJose Abreu 					priv->dma_buf_sz);
399167afd6d1SJose Abreu 
399267afd6d1SJose Abreu 			/* Data payload appended into SKB */
399367afd6d1SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->sec_page);
399467afd6d1SJose Abreu 			buf->sec_page = NULL;
399567afd6d1SJose Abreu 		}
399667afd6d1SJose Abreu 
399788ebe2cfSJose Abreu drain_data:
3998ec222003SJose Abreu 		if (likely(status & rx_not_ls))
3999ec222003SJose Abreu 			goto read_again;
400088ebe2cfSJose Abreu 		if (!skb)
400188ebe2cfSJose Abreu 			continue;
4002ec222003SJose Abreu 
4003ec222003SJose Abreu 		/* Got entire packet into SKB. Finish it. */
4004ec222003SJose Abreu 
4005ba1ffd74SGiuseppe CAVALLARO 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
4006b9381985SVince Bridgers 		stmmac_rx_vlan(priv->dev, skb);
40077ac6653aSJeff Kirsher 		skb->protocol = eth_type_trans(skb, priv->dev);
40087ac6653aSJeff Kirsher 
4009ceb69499SGiuseppe CAVALLARO 		if (unlikely(!coe))
40107ac6653aSJeff Kirsher 			skb_checksum_none_assert(skb);
401162a2ab93SGiuseppe CAVALLARO 		else
40127ac6653aSJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
401362a2ab93SGiuseppe CAVALLARO 
401476067459SJose Abreu 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
401576067459SJose Abreu 			skb_set_hash(skb, hash, hash_type);
401676067459SJose Abreu 
401776067459SJose Abreu 		skb_record_rx_queue(skb, queue);
40184ccb4585SJose Abreu 		napi_gro_receive(&ch->rx_napi, skb);
401988ebe2cfSJose Abreu 		skb = NULL;
40207ac6653aSJeff Kirsher 
40217ac6653aSJeff Kirsher 		priv->dev->stats.rx_packets++;
4022ec222003SJose Abreu 		priv->dev->stats.rx_bytes += len;
4023cda4985aSJose Abreu 		count++;
40247ac6653aSJeff Kirsher 	}
4025ec222003SJose Abreu 
402688ebe2cfSJose Abreu 	if (status & rx_not_ls || skb) {
4027ec222003SJose Abreu 		rx_q->state_saved = true;
4028ec222003SJose Abreu 		rx_q->state.skb = skb;
4029ec222003SJose Abreu 		rx_q->state.error = error;
4030ec222003SJose Abreu 		rx_q->state.len = len;
40317ac6653aSJeff Kirsher 	}
40327ac6653aSJeff Kirsher 
403354139cf3SJoao Pinto 	stmmac_rx_refill(priv, queue);
40347ac6653aSJeff Kirsher 
40357ac6653aSJeff Kirsher 	priv->xstats.rx_pkt_n += count;
40367ac6653aSJeff Kirsher 
40377ac6653aSJeff Kirsher 	return count;
40387ac6653aSJeff Kirsher }
40397ac6653aSJeff Kirsher 
40404ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
40417ac6653aSJeff Kirsher {
40428fce3331SJose Abreu 	struct stmmac_channel *ch =
40434ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, rx_napi);
40448fce3331SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
40458fce3331SJose Abreu 	u32 chan = ch->index;
40464ccb4585SJose Abreu 	int work_done;
40477ac6653aSJeff Kirsher 
40489125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
4049ce736788SJoao Pinto 
40504ccb4585SJose Abreu 	work_done = stmmac_rx(priv, budget, chan);
4051021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
4052021bd5e3SJose Abreu 		unsigned long flags;
4053021bd5e3SJose Abreu 
4054021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
4055021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
4056021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
4057021bd5e3SJose Abreu 	}
4058021bd5e3SJose Abreu 
40594ccb4585SJose Abreu 	return work_done;
40604ccb4585SJose Abreu }
4061ce736788SJoao Pinto 
40624ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
40634ccb4585SJose Abreu {
40644ccb4585SJose Abreu 	struct stmmac_channel *ch =
40654ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, tx_napi);
40664ccb4585SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
40674ccb4585SJose Abreu 	u32 chan = ch->index;
40684ccb4585SJose Abreu 	int work_done;
40694ccb4585SJose Abreu 
40704ccb4585SJose Abreu 	priv->xstats.napi_poll++;
40714ccb4585SJose Abreu 
4072aa042f60SSong, Yoong Siang 	work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
4073fa0be0a4SJose Abreu 	work_done = min(work_done, budget);
40748fce3331SJose Abreu 
4075021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
4076021bd5e3SJose Abreu 		unsigned long flags;
40774ccb4585SJose Abreu 
4078021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
4079021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
4080021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
4081fa0be0a4SJose Abreu 	}
40828fce3331SJose Abreu 
40837ac6653aSJeff Kirsher 	return work_done;
40847ac6653aSJeff Kirsher }
40857ac6653aSJeff Kirsher 
40867ac6653aSJeff Kirsher /**
40877ac6653aSJeff Kirsher  *  stmmac_tx_timeout
40887ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
4089d0ea5cbdSJesse Brandeburg  *  @txqueue: the index of the hanging transmit queue
40907ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
40917284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
40927ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
40937ac6653aSJeff Kirsher  *   in order to transmit a new packet.
40947ac6653aSJeff Kirsher  */
40950290bd29SMichael S. Tsirkin static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
40967ac6653aSJeff Kirsher {
40977ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
40987ac6653aSJeff Kirsher 
409934877a15SJose Abreu 	stmmac_global_err(priv);
41007ac6653aSJeff Kirsher }
41017ac6653aSJeff Kirsher 
41027ac6653aSJeff Kirsher /**
410301789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
41047ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
41057ac6653aSJeff Kirsher  *  Description:
41067ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
41077ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
41087ac6653aSJeff Kirsher  *  Return value:
41097ac6653aSJeff Kirsher  *  void.
41107ac6653aSJeff Kirsher  */
411101789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
41127ac6653aSJeff Kirsher {
41137ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
41147ac6653aSJeff Kirsher 
4115c10d4c82SJose Abreu 	stmmac_set_filter(priv, priv->hw, dev);
41167ac6653aSJeff Kirsher }
41177ac6653aSJeff Kirsher 
41187ac6653aSJeff Kirsher /**
41197ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
41207ac6653aSJeff Kirsher  *  @dev : device pointer.
41217ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
41227ac6653aSJeff Kirsher  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
41237ac6653aSJeff Kirsher  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
41247ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
41257ac6653aSJeff Kirsher  *  Return value:
41267ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
41277ac6653aSJeff Kirsher  *  file on failure.
41287ac6653aSJeff Kirsher  */
41297ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
41307ac6653aSJeff Kirsher {
413138ddc59dSLABBE Corentin 	struct stmmac_priv *priv = netdev_priv(dev);
4132eaf4fac4SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
41335b55299eSDavid Wu 	const int mtu = new_mtu;
4134eaf4fac4SJose Abreu 
4135eaf4fac4SJose Abreu 	if (txfifosz == 0)
4136eaf4fac4SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
4137eaf4fac4SJose Abreu 
4138eaf4fac4SJose Abreu 	txfifosz /= priv->plat->tx_queues_to_use;
413938ddc59dSLABBE Corentin 
41407ac6653aSJeff Kirsher 	if (netif_running(dev)) {
414138ddc59dSLABBE Corentin 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
41427ac6653aSJeff Kirsher 		return -EBUSY;
41437ac6653aSJeff Kirsher 	}
41447ac6653aSJeff Kirsher 
4145eaf4fac4SJose Abreu 	new_mtu = STMMAC_ALIGN(new_mtu);
4146eaf4fac4SJose Abreu 
4147eaf4fac4SJose Abreu 	/* If condition true, FIFO is too small or MTU too large */
4148eaf4fac4SJose Abreu 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
4149eaf4fac4SJose Abreu 		return -EINVAL;
4150eaf4fac4SJose Abreu 
41515b55299eSDavid Wu 	dev->mtu = mtu;
4152f748be53SAlexandre TORGUE 
41537ac6653aSJeff Kirsher 	netdev_update_features(dev);
41547ac6653aSJeff Kirsher 
41557ac6653aSJeff Kirsher 	return 0;
41567ac6653aSJeff Kirsher }
41577ac6653aSJeff Kirsher 
4158c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
4159c8f44affSMichał Mirosław 					     netdev_features_t features)
41607ac6653aSJeff Kirsher {
41617ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
41627ac6653aSJeff Kirsher 
416338912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
41647ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
4165d2afb5bdSGiuseppe CAVALLARO 
41667ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
4167a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
41687ac6653aSJeff Kirsher 
41697ac6653aSJeff Kirsher 	/* Some GMAC devices have a bugged Jumbo frame support that
41707ac6653aSJeff Kirsher 	 * needs to have the Tx COE disabled for oversized frames
41717ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
4172ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and do not use SF.
4173ceb69499SGiuseppe CAVALLARO 	 */
41747ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
4175a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
41767ac6653aSJeff Kirsher 
4177f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
4178f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4179f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
4180f748be53SAlexandre TORGUE 			priv->tso = true;
4181f748be53SAlexandre TORGUE 		else
4182f748be53SAlexandre TORGUE 			priv->tso = false;
4183f748be53SAlexandre TORGUE 	}
4184f748be53SAlexandre TORGUE 
41857ac6653aSJeff Kirsher 	return features;
41867ac6653aSJeff Kirsher }
41877ac6653aSJeff Kirsher 
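/* Apply the new feature set: program the RX checksum engine and re-evaluate
 * Split Header (SPH) on every RX channel.
 */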
4188d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
4189d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
4190d2afb5bdSGiuseppe CAVALLARO {
4191d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
419267afd6d1SJose Abreu 	bool sph_en;
419367afd6d1SJose Abreu 	u32 chan;
4194d2afb5bdSGiuseppe CAVALLARO 
4195d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type if RX checksum offload is supported */
4196d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
4197d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
4198d2afb5bdSGiuseppe CAVALLARO 	else
4199d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
4200d2afb5bdSGiuseppe CAVALLARO 	/* No return value check is needed because rx_coe has already been
4201d2afb5bdSGiuseppe CAVALLARO 	 * set and will be fixed up if there is an issue.
4202d2afb5bdSGiuseppe CAVALLARO 	 */
4203c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
4204d2afb5bdSGiuseppe CAVALLARO 
420567afd6d1SJose Abreu 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
420667afd6d1SJose Abreu 	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
420767afd6d1SJose Abreu 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
420867afd6d1SJose Abreu 
4209d2afb5bdSGiuseppe CAVALLARO 	return 0;
4210d2afb5bdSGiuseppe CAVALLARO }
4211d2afb5bdSGiuseppe CAVALLARO 
421232ceabcaSGiuseppe CAVALLARO /**
421332ceabcaSGiuseppe CAVALLARO  *  stmmac_interrupt - main ISR
421432ceabcaSGiuseppe CAVALLARO  *  @irq: interrupt number.
4215f42234ffSMaxim Petrov  *  @dev_id: to pass the net device pointer (must be valid).
421632ceabcaSGiuseppe CAVALLARO  *  Description: this is the main driver interrupt service routine.
4217732fdf0eSGiuseppe CAVALLARO  *  It can call:
4218732fdf0eSGiuseppe CAVALLARO  *  o DMA service routine (to manage incoming frame reception and transmission
4219732fdf0eSGiuseppe CAVALLARO  *    status)
4220732fdf0eSGiuseppe CAVALLARO  *  o Core interrupts to manage: remote wake-up, management counter, LPI
422132ceabcaSGiuseppe CAVALLARO  *    interrupts.
422232ceabcaSGiuseppe CAVALLARO  */
42237ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
42247ac6653aSJeff Kirsher {
42257ac6653aSJeff Kirsher 	struct net_device *dev = (struct net_device *)dev_id;
42267ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
42277bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
42287bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
42297bac4e1eSJoao Pinto 	u32 queues_count;
42307bac4e1eSJoao Pinto 	u32 queue;
42317d9e6c5aSJose Abreu 	bool xmac;
42327bac4e1eSJoao Pinto 
42337d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
42347bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
42357ac6653aSJeff Kirsher 
423689f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
423789f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
423889f7f2cfSSrinivas Kandagatla 
423934877a15SJose Abreu 	/* Check if adapter is up */
424034877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
424134877a15SJose Abreu 		return IRQ_HANDLED;
42428bf993a5SJose Abreu 	/* Check if a fatal error happened */
42438bf993a5SJose Abreu 	if (stmmac_safety_feat_interrupt(priv))
42448bf993a5SJose Abreu 		return IRQ_HANDLED;
424534877a15SJose Abreu 
42467ac6653aSJeff Kirsher 	/* To handle GMAC own interrupts */
42477d9e6c5aSJose Abreu 	if ((priv->plat->has_gmac) || xmac) {
4248c10d4c82SJose Abreu 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
424961fac60aSJose Abreu 		int mtl_status;
42508f71a88dSJoao Pinto 
4251d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
4252d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
42530982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
4254d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
42550982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
4256d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
42577bac4e1eSJoao Pinto 		}
42587bac4e1eSJoao Pinto 
42597bac4e1eSJoao Pinto 		for (queue = 0; queue < queues_count; queue++) {
426061fac60aSJose Abreu 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
426154139cf3SJoao Pinto 
426261fac60aSJose Abreu 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
426361fac60aSJose Abreu 								queue);
426461fac60aSJose Abreu 			if (mtl_status != -EINVAL)
426561fac60aSJose Abreu 				status |= mtl_status;
42667bac4e1eSJoao Pinto 
4267a4e887faSJose Abreu 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
426861fac60aSJose Abreu 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
426954139cf3SJoao Pinto 						       rx_q->rx_tail_addr,
42707bac4e1eSJoao Pinto 						       queue);
42717bac4e1eSJoao Pinto 		}
427270523e63SGiuseppe CAVALLARO 
427370523e63SGiuseppe CAVALLARO 		/* PCS link status */
42743fe5cadbSGiuseppe CAVALLARO 		if (priv->hw->pcs) {
427570523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
427670523e63SGiuseppe CAVALLARO 				netif_carrier_on(dev);
427770523e63SGiuseppe CAVALLARO 			else
427870523e63SGiuseppe CAVALLARO 				netif_carrier_off(dev);
427970523e63SGiuseppe CAVALLARO 		}
4280d765955dSGiuseppe CAVALLARO 	}
4281d765955dSGiuseppe CAVALLARO 
4282d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
42837ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
42847ac6653aSJeff Kirsher 
42857ac6653aSJeff Kirsher 	return IRQ_HANDLED;
42867ac6653aSJeff Kirsher }
42877ac6653aSJeff Kirsher 
42887ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
42897ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools
4290ceb69499SGiuseppe CAVALLARO  * to allow network I/O with interrupts disabled.
4291ceb69499SGiuseppe CAVALLARO  */
42927ac6653aSJeff Kirsher static void stmmac_poll_controller(struct net_device *dev)
42937ac6653aSJeff Kirsher {
42947ac6653aSJeff Kirsher 	disable_irq(dev->irq);
42957ac6653aSJeff Kirsher 	stmmac_interrupt(dev->irq, dev);
42967ac6653aSJeff Kirsher 	enable_irq(dev->irq);
42977ac6653aSJeff Kirsher }
42987ac6653aSJeff Kirsher #endif
42997ac6653aSJeff Kirsher 
43007ac6653aSJeff Kirsher /**
43017ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
43027ac6653aSJeff Kirsher  *  @dev: Device pointer.
43037ac6653aSJeff Kirsher  *  @rq: An IOCTL specific structure, that can contain a pointer to
43047ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
43057ac6653aSJeff Kirsher  *  @cmd: IOCTL command
43067ac6653aSJeff Kirsher  *  Description:
430732ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
43087ac6653aSJeff Kirsher  */
43097ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
43107ac6653aSJeff Kirsher {
431174371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(dev);
4312891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
43137ac6653aSJeff Kirsher 
43147ac6653aSJeff Kirsher 	if (!netif_running(dev))
43157ac6653aSJeff Kirsher 		return -EINVAL;
43167ac6653aSJeff Kirsher 
4317891434b1SRayagond Kokatanur 	switch (cmd) {
4318891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
4319891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
4320891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
432174371272SJose Abreu 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
4322891434b1SRayagond Kokatanur 		break;
4323891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
4324d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_set(dev, rq);
4325d6228b7cSArtem Panfilov 		break;
4326d6228b7cSArtem Panfilov 	case SIOCGHWTSTAMP:
4327d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_get(dev, rq);
4328891434b1SRayagond Kokatanur 		break;
4329891434b1SRayagond Kokatanur 	default:
4330891434b1SRayagond Kokatanur 		break;
4331891434b1SRayagond Kokatanur 	}
43327ac6653aSJeff Kirsher 
43337ac6653aSJeff Kirsher 	return ret;
43347ac6653aSJeff Kirsher }
43357ac6653aSJeff Kirsher 
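/* Flow block callback used for tc offload: all queues are quiesced while a
 * CLSU32 or CLSFLOWER rule is programmed into the hardware.
 */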
43364dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
43374dbbe8ddSJose Abreu 				    void *cb_priv)
43384dbbe8ddSJose Abreu {
43394dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
43404dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
43414dbbe8ddSJose Abreu 
4342425eabddSJose Abreu 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4343425eabddSJose Abreu 		return ret;
4344425eabddSJose Abreu 
43454dbbe8ddSJose Abreu 	stmmac_disable_all_queues(priv);
43464dbbe8ddSJose Abreu 
43474dbbe8ddSJose Abreu 	switch (type) {
43484dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
43494dbbe8ddSJose Abreu 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
43504dbbe8ddSJose Abreu 		break;
4351425eabddSJose Abreu 	case TC_SETUP_CLSFLOWER:
4352425eabddSJose Abreu 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
4353425eabddSJose Abreu 		break;
43544dbbe8ddSJose Abreu 	default:
43554dbbe8ddSJose Abreu 		break;
43564dbbe8ddSJose Abreu 	}
43574dbbe8ddSJose Abreu 
43584dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
43594dbbe8ddSJose Abreu 	return ret;
43604dbbe8ddSJose Abreu }
43614dbbe8ddSJose Abreu 
4362955bcb6eSPablo Neira Ayuso static LIST_HEAD(stmmac_block_cb_list);
4363955bcb6eSPablo Neira Ayuso 
43644dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
43654dbbe8ddSJose Abreu 			   void *type_data)
43664dbbe8ddSJose Abreu {
43674dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
43684dbbe8ddSJose Abreu 
43694dbbe8ddSJose Abreu 	switch (type) {
43704dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
4371955bcb6eSPablo Neira Ayuso 		return flow_block_cb_setup_simple(type_data,
4372955bcb6eSPablo Neira Ayuso 						  &stmmac_block_cb_list,
43734e95bc26SPablo Neira Ayuso 						  stmmac_setup_tc_block_cb,
43744e95bc26SPablo Neira Ayuso 						  priv, priv, true);
43751f705bc6SJose Abreu 	case TC_SETUP_QDISC_CBS:
43761f705bc6SJose Abreu 		return stmmac_tc_setup_cbs(priv, priv, type_data);
4377b60189e0SJose Abreu 	case TC_SETUP_QDISC_TAPRIO:
4378b60189e0SJose Abreu 		return stmmac_tc_setup_taprio(priv, priv, type_data);
4379430b383cSJose Abreu 	case TC_SETUP_QDISC_ETF:
4380430b383cSJose Abreu 		return stmmac_tc_setup_etf(priv, priv, type_data);
43814dbbe8ddSJose Abreu 	default:
43824dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
43834dbbe8ddSJose Abreu 	}
43844dbbe8ddSJose Abreu }
43854dbbe8ddSJose Abreu 
43864993e5b3SJose Abreu static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
43874993e5b3SJose Abreu 			       struct net_device *sb_dev)
43884993e5b3SJose Abreu {
4389b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
4390b7766206SJose Abreu 
4391b7766206SJose Abreu 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
43924993e5b3SJose Abreu 		/*
4393b7766206SJose Abreu 		 * There is no way to determine the number of TSO/USO
43944993e5b3SJose Abreu 		 * capable Queues. Let's always use Queue 0
4395b7766206SJose Abreu 		 * because if TSO/USO is supported then at least this
43964993e5b3SJose Abreu 		 * one will be capable.
43974993e5b3SJose Abreu 		 */
43984993e5b3SJose Abreu 		return 0;
43994993e5b3SJose Abreu 	}
44004993e5b3SJose Abreu 
44014993e5b3SJose Abreu 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
44024993e5b3SJose Abreu }
44034993e5b3SJose Abreu 
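/* Set a new MAC address: update the net_device copy and program it into
 * MAC address register 0 of the controller.
 */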
4404a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4405a830405eSBhadram Varka {
4406a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
4407a830405eSBhadram Varka 	int ret = 0;
4408a830405eSBhadram Varka 
4409a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
4410a830405eSBhadram Varka 	if (ret)
4411a830405eSBhadram Varka 		return ret;
4412a830405eSBhadram Varka 
4413c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4414a830405eSBhadram Varka 
4415a830405eSBhadram Varka 	return ret;
4416a830405eSBhadram Varka }
4417a830405eSBhadram Varka 
441850fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
44197ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
44207ac29055SGiuseppe CAVALLARO 
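/* Dump 'size' descriptors (basic or extended) together with their DMA bus
 * addresses to the given seq_file; used by the debugfs ring dump below.
 */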
4421c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
4422bfaf91caSJoakim Zhang 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
44237ac29055SGiuseppe CAVALLARO {
44247ac29055SGiuseppe CAVALLARO 	int i;
4425c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4426c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
4427bfaf91caSJoakim Zhang 	dma_addr_t dma_addr;
44287ac29055SGiuseppe CAVALLARO 
4429c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
4430c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
4431bfaf91caSJoakim Zhang 			dma_addr = dma_phy_addr + i * sizeof(*ep);
4432bfaf91caSJoakim Zhang 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4433bfaf91caSJoakim Zhang 				   i, &dma_addr,
4434f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
4435f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
4436f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
4437f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
4438c24602efSGiuseppe CAVALLARO 			ep++;
4439c24602efSGiuseppe CAVALLARO 		} else {
4440bfaf91caSJoakim Zhang 			dma_addr = dma_phy_addr + i * sizeof(*p);
4441bfaf91caSJoakim Zhang 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4442bfaf91caSJoakim Zhang 				   i, &dma_addr,
4443f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4444f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4445c24602efSGiuseppe CAVALLARO 			p++;
4446c24602efSGiuseppe CAVALLARO 		}
44477ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
44487ac29055SGiuseppe CAVALLARO 	}
4449c24602efSGiuseppe CAVALLARO }
44507ac29055SGiuseppe CAVALLARO 
4451fb0d9c63SYangtao Li static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4452c24602efSGiuseppe CAVALLARO {
4453c24602efSGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
4454c24602efSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
445554139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
4456ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
445754139cf3SJoao Pinto 	u32 queue;
445854139cf3SJoao Pinto 
44595f2b8b62SThierry Reding 	if ((dev->flags & IFF_UP) == 0)
44605f2b8b62SThierry Reding 		return 0;
44615f2b8b62SThierry Reding 
446254139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
446354139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
446454139cf3SJoao Pinto 
446554139cf3SJoao Pinto 		seq_printf(seq, "RX Queue %d:\n", queue);
44667ac29055SGiuseppe CAVALLARO 
4467c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc) {
446854139cf3SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
446954139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_erx,
4470bfaf91caSJoakim Zhang 					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
447154139cf3SJoao Pinto 		} else {
447254139cf3SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
447354139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_rx,
4474bfaf91caSJoakim Zhang 					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
447554139cf3SJoao Pinto 		}
447654139cf3SJoao Pinto 	}
447754139cf3SJoao Pinto 
4478ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
4479ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4480ce736788SJoao Pinto 
4481ce736788SJoao Pinto 		seq_printf(seq, "TX Queue %d:\n", queue);
4482ce736788SJoao Pinto 
448354139cf3SJoao Pinto 		if (priv->extend_desc) {
4484ce736788SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
4485ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_etx,
4486bfaf91caSJoakim Zhang 					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
4487579a25a8SJose Abreu 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
4488ce736788SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
4489ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_tx,
4490bfaf91caSJoakim Zhang 					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
4491ce736788SJoao Pinto 		}
44927ac29055SGiuseppe CAVALLARO 	}
44937ac29055SGiuseppe CAVALLARO 
44947ac29055SGiuseppe CAVALLARO 	return 0;
44957ac29055SGiuseppe CAVALLARO }
4496fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
44977ac29055SGiuseppe CAVALLARO 
4498fb0d9c63SYangtao Li static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4499e7434821SGiuseppe CAVALLARO {
4500e7434821SGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
4501e7434821SGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
4502e7434821SGiuseppe CAVALLARO 
450319e30c14SGiuseppe CAVALLARO 	if (!priv->hw_cap_support) {
4504e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "DMA HW features not supported\n");
4505e7434821SGiuseppe CAVALLARO 		return 0;
4506e7434821SGiuseppe CAVALLARO 	}
4507e7434821SGiuseppe CAVALLARO 
4508e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
4509e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tDMA HW features\n");
4510e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
4511e7434821SGiuseppe CAVALLARO 
451222d3efe5SPavel Machek 	seq_printf(seq, "\t10/100 Mbps: %s\n",
4513e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
451422d3efe5SPavel Machek 	seq_printf(seq, "\t1000 Mbps: %s\n",
4515e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
451622d3efe5SPavel Machek 	seq_printf(seq, "\tHalf duplex: %s\n",
4517e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
4518e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tHash Filter: %s\n",
4519e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
4520e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4521e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
45228d45e42bSLABBE Corentin 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4523e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pcs) ? "Y" : "N");
4524e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4525e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
4526e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
4527e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4528e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
4529e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4530e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRMON module: %s\n",
4531e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rmon) ? "Y" : "N");
4532e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4533e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
4534e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4535e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
453622d3efe5SPavel Machek 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4537e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.eee) ? "Y" : "N");
4538e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4539e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4540e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
4541f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4542f748be53SAlexandre TORGUE 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4543f748be53SAlexandre TORGUE 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
4544f748be53SAlexandre TORGUE 	} else {
4545e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4546e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4547e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4548e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4549f748be53SAlexandre TORGUE 	}
4550e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4551e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4552e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4553e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_rx_channel);
4554e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4555e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_tx_channel);
45567d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
45577d0b447aSJose Abreu 		   priv->dma_cap.number_rx_queues);
45587d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
45597d0b447aSJose Abreu 		   priv->dma_cap.number_tx_queues);
4560e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4561e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
45627d0b447aSJose Abreu 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
45637d0b447aSJose Abreu 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
45647d0b447aSJose Abreu 	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
45657d0b447aSJose Abreu 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
45667d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
45677d0b447aSJose Abreu 		   priv->dma_cap.pps_out_num);
45687d0b447aSJose Abreu 	seq_printf(seq, "\tSafety Features: %s\n",
45697d0b447aSJose Abreu 		   priv->dma_cap.asp ? "Y" : "N");
45707d0b447aSJose Abreu 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
45717d0b447aSJose Abreu 		   priv->dma_cap.frpsel ? "Y" : "N");
45727d0b447aSJose Abreu 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
45737d0b447aSJose Abreu 		   priv->dma_cap.addr64);
45747d0b447aSJose Abreu 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
45757d0b447aSJose Abreu 		   priv->dma_cap.rssen ? "Y" : "N");
45767d0b447aSJose Abreu 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
45777d0b447aSJose Abreu 		   priv->dma_cap.vlhash ? "Y" : "N");
45787d0b447aSJose Abreu 	seq_printf(seq, "\tSplit Header: %s\n",
45797d0b447aSJose Abreu 		   priv->dma_cap.sphen ? "Y" : "N");
45807d0b447aSJose Abreu 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
45817d0b447aSJose Abreu 		   priv->dma_cap.vlins ? "Y" : "N");
45827d0b447aSJose Abreu 	seq_printf(seq, "\tDouble VLAN: %s\n",
45837d0b447aSJose Abreu 		   priv->dma_cap.dvlan ? "Y" : "N");
45847d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
45857d0b447aSJose Abreu 		   priv->dma_cap.l3l4fnum);
45867d0b447aSJose Abreu 	seq_printf(seq, "\tARP Offloading: %s\n",
45877d0b447aSJose Abreu 		   priv->dma_cap.arpoffsel ? "Y" : "N");
458844e65475SJose Abreu 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
458944e65475SJose Abreu 		   priv->dma_cap.estsel ? "Y" : "N");
459044e65475SJose Abreu 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
459144e65475SJose Abreu 		   priv->dma_cap.fpesel ? "Y" : "N");
459244e65475SJose Abreu 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
459344e65475SJose Abreu 		   priv->dma_cap.tbssel ? "Y" : "N");
4594e7434821SGiuseppe CAVALLARO 	return 0;
4595e7434821SGiuseppe CAVALLARO }
4596fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4597e7434821SGiuseppe CAVALLARO 
4598481a7d15SJiping Ma /* Use network device events to rename debugfs file entries.
4599481a7d15SJiping Ma  */
4600481a7d15SJiping Ma static int stmmac_device_event(struct notifier_block *unused,
4601481a7d15SJiping Ma 			       unsigned long event, void *ptr)
4602481a7d15SJiping Ma {
4603481a7d15SJiping Ma 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4604481a7d15SJiping Ma 	struct stmmac_priv *priv = netdev_priv(dev);
4605481a7d15SJiping Ma 
4606481a7d15SJiping Ma 	if (dev->netdev_ops != &stmmac_netdev_ops)
4607481a7d15SJiping Ma 		goto done;
4608481a7d15SJiping Ma 
4609481a7d15SJiping Ma 	switch (event) {
4610481a7d15SJiping Ma 	case NETDEV_CHANGENAME:
4611481a7d15SJiping Ma 		if (priv->dbgfs_dir)
4612481a7d15SJiping Ma 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
4613481a7d15SJiping Ma 							 priv->dbgfs_dir,
4614481a7d15SJiping Ma 							 stmmac_fs_dir,
4615481a7d15SJiping Ma 							 dev->name);
4616481a7d15SJiping Ma 		break;
4617481a7d15SJiping Ma 	}
4618481a7d15SJiping Ma done:
4619481a7d15SJiping Ma 	return NOTIFY_DONE;
4620481a7d15SJiping Ma }
4621481a7d15SJiping Ma 
4622481a7d15SJiping Ma static struct notifier_block stmmac_notifier = {
4623481a7d15SJiping Ma 	.notifier_call = stmmac_device_event,
4624481a7d15SJiping Ma };
4625481a7d15SJiping Ma 
46268d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev)
46277ac29055SGiuseppe CAVALLARO {
4628466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
46297ac29055SGiuseppe CAVALLARO 
4630474a31e1SAaro Koskinen 	rtnl_lock();
4631474a31e1SAaro Koskinen 
4632466c5ac8SMathieu Olivari 	/* Create per netdev entries */
4633466c5ac8SMathieu Olivari 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4634466c5ac8SMathieu Olivari 
46357ac29055SGiuseppe CAVALLARO 	/* Entry to report DMA RX/TX rings */
46368d72ab11SGreg Kroah-Hartman 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
46377ac29055SGiuseppe CAVALLARO 			    &stmmac_rings_status_fops);
46387ac29055SGiuseppe CAVALLARO 
4639e7434821SGiuseppe CAVALLARO 	/* Entry to report the DMA HW features */
46408d72ab11SGreg Kroah-Hartman 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
46418d72ab11SGreg Kroah-Hartman 			    &stmmac_dma_cap_fops);
4642481a7d15SJiping Ma 
4643474a31e1SAaro Koskinen 	rtnl_unlock();
46447ac29055SGiuseppe CAVALLARO }
46457ac29055SGiuseppe CAVALLARO 
4646466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev)
46477ac29055SGiuseppe CAVALLARO {
4648466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
4649466c5ac8SMathieu Olivari 
4650466c5ac8SMathieu Olivari 	debugfs_remove_recursive(priv->dbgfs_dir);
46517ac29055SGiuseppe CAVALLARO }
465250fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
46537ac29055SGiuseppe CAVALLARO 
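/* Compute a little-endian CRC-32 (polynomial 0xedb88320) over the 12 valid
 * bits of the VLAN ID; stmmac_vlan_update() reduces the result to a 4-bit
 * bin of the VLAN hash filter.
 */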
46543cd1cfcbSJose Abreu static u32 stmmac_vid_crc32_le(__le16 vid_le)
46553cd1cfcbSJose Abreu {
46563cd1cfcbSJose Abreu 	unsigned char *data = (unsigned char *)&vid_le;
46573cd1cfcbSJose Abreu 	unsigned char data_byte = 0;
46583cd1cfcbSJose Abreu 	u32 crc = ~0x0;
46593cd1cfcbSJose Abreu 	u32 temp = 0;
46603cd1cfcbSJose Abreu 	int i, bits;
46613cd1cfcbSJose Abreu 
46623cd1cfcbSJose Abreu 	bits = get_bitmask_order(VLAN_VID_MASK);
46633cd1cfcbSJose Abreu 	for (i = 0; i < bits; i++) {
46643cd1cfcbSJose Abreu 		if ((i % 8) == 0)
46653cd1cfcbSJose Abreu 			data_byte = data[i / 8];
46663cd1cfcbSJose Abreu 
46673cd1cfcbSJose Abreu 		temp = ((crc & 1) ^ data_byte) & 1;
46683cd1cfcbSJose Abreu 		crc >>= 1;
46693cd1cfcbSJose Abreu 		data_byte >>= 1;
46703cd1cfcbSJose Abreu 
46713cd1cfcbSJose Abreu 		if (temp)
46723cd1cfcbSJose Abreu 			crc ^= 0xedb88320;
46733cd1cfcbSJose Abreu 	}
46743cd1cfcbSJose Abreu 
46753cd1cfcbSJose Abreu 	return crc;
46763cd1cfcbSJose Abreu }
46773cd1cfcbSJose Abreu 
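/* Rebuild the VLAN filter from the active_vlans bitmap. If the MAC lacks
 * VLAN hash filtering, fall back to a single perfect-match VID and reject
 * configurations with more than two active VIDs (VID 0 always passes).
 */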
46783cd1cfcbSJose Abreu static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
46793cd1cfcbSJose Abreu {
46803cd1cfcbSJose Abreu 	u32 crc, hash = 0;
4681a24cae70SJose Abreu 	__le16 pmatch = 0;
4682c7ab0b80SJose Abreu 	int count = 0;
4683c7ab0b80SJose Abreu 	u16 vid = 0;
46843cd1cfcbSJose Abreu 
46853cd1cfcbSJose Abreu 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
46863cd1cfcbSJose Abreu 		__le16 vid_le = cpu_to_le16(vid);
46873cd1cfcbSJose Abreu 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
46883cd1cfcbSJose Abreu 		hash |= (1 << crc);
4689c7ab0b80SJose Abreu 		count++;
46903cd1cfcbSJose Abreu 	}
46913cd1cfcbSJose Abreu 
4692c7ab0b80SJose Abreu 	if (!priv->dma_cap.vlhash) {
4693c7ab0b80SJose Abreu 		if (count > 2) /* VID = 0 always passes filter */
4694c7ab0b80SJose Abreu 			return -EOPNOTSUPP;
4695c7ab0b80SJose Abreu 
4696a24cae70SJose Abreu 		pmatch = cpu_to_le16(vid);
4697c7ab0b80SJose Abreu 		hash = 0;
4698c7ab0b80SJose Abreu 	}
4699c7ab0b80SJose Abreu 
4700a24cae70SJose Abreu 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
47013cd1cfcbSJose Abreu }
47023cd1cfcbSJose Abreu 
47033cd1cfcbSJose Abreu static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
47043cd1cfcbSJose Abreu {
47053cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
47063cd1cfcbSJose Abreu 	bool is_double = false;
47073cd1cfcbSJose Abreu 	int ret;
47083cd1cfcbSJose Abreu 
47093cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
47103cd1cfcbSJose Abreu 		is_double = true;
47113cd1cfcbSJose Abreu 
47123cd1cfcbSJose Abreu 	set_bit(vid, priv->active_vlans);
47133cd1cfcbSJose Abreu 	ret = stmmac_vlan_update(priv, is_double);
47143cd1cfcbSJose Abreu 	if (ret) {
47153cd1cfcbSJose Abreu 		clear_bit(vid, priv->active_vlans);
47163cd1cfcbSJose Abreu 		return ret;
47173cd1cfcbSJose Abreu 	}
47183cd1cfcbSJose Abreu 
4719dd6a4998SJose Abreu 	if (priv->hw->num_vlan) {
4720ed64639bSWong Vee Khee 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4721dd6a4998SJose Abreu 		if (ret)
47223cd1cfcbSJose Abreu 			return ret;
47233cd1cfcbSJose Abreu 	}
47243cd1cfcbSJose Abreu 
4725dd6a4998SJose Abreu 	return 0;
4726dd6a4998SJose Abreu }
4727dd6a4998SJose Abreu 
47283cd1cfcbSJose Abreu static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
47293cd1cfcbSJose Abreu {
47303cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
47313cd1cfcbSJose Abreu 	bool is_double = false;
4732ed64639bSWong Vee Khee 	int ret;
47333cd1cfcbSJose Abreu 
47343cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
47353cd1cfcbSJose Abreu 		is_double = true;
47363cd1cfcbSJose Abreu 
47373cd1cfcbSJose Abreu 	clear_bit(vid, priv->active_vlans);
4738dd6a4998SJose Abreu 
4739dd6a4998SJose Abreu 	if (priv->hw->num_vlan) {
4740ed64639bSWong Vee Khee 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4741ed64639bSWong Vee Khee 		if (ret)
4742ed64639bSWong Vee Khee 			return ret;
4743dd6a4998SJose Abreu 	}
4744ed64639bSWong Vee Khee 
47453cd1cfcbSJose Abreu 	return stmmac_vlan_update(priv, is_double);
47463cd1cfcbSJose Abreu }
47473cd1cfcbSJose Abreu 
47487ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = {
47497ac6653aSJeff Kirsher 	.ndo_open = stmmac_open,
47507ac6653aSJeff Kirsher 	.ndo_start_xmit = stmmac_xmit,
47517ac6653aSJeff Kirsher 	.ndo_stop = stmmac_release,
47527ac6653aSJeff Kirsher 	.ndo_change_mtu = stmmac_change_mtu,
47537ac6653aSJeff Kirsher 	.ndo_fix_features = stmmac_fix_features,
4754d2afb5bdSGiuseppe CAVALLARO 	.ndo_set_features = stmmac_set_features,
475501789349SJiri Pirko 	.ndo_set_rx_mode = stmmac_set_rx_mode,
47567ac6653aSJeff Kirsher 	.ndo_tx_timeout = stmmac_tx_timeout,
47577ac6653aSJeff Kirsher 	.ndo_do_ioctl = stmmac_ioctl,
47584dbbe8ddSJose Abreu 	.ndo_setup_tc = stmmac_setup_tc,
47594993e5b3SJose Abreu 	.ndo_select_queue = stmmac_select_queue,
47607ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
47617ac6653aSJeff Kirsher 	.ndo_poll_controller = stmmac_poll_controller,
47627ac6653aSJeff Kirsher #endif
4763a830405eSBhadram Varka 	.ndo_set_mac_address = stmmac_set_mac_address,
47643cd1cfcbSJose Abreu 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
47653cd1cfcbSJose Abreu 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
47667ac6653aSJeff Kirsher };
47677ac6653aSJeff Kirsher 
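/* Deferred reset: when STMMAC_RESET_REQUESTED has been set, the service task
 * below closes and re-opens the device under rtnl to recover the adapter.
 */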
476834877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv)
476934877a15SJose Abreu {
477034877a15SJose Abreu 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
477134877a15SJose Abreu 		return;
477234877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
477334877a15SJose Abreu 		return;
477434877a15SJose Abreu 
477534877a15SJose Abreu 	netdev_err(priv->dev, "Reset adapter.\n");
477634877a15SJose Abreu 
477734877a15SJose Abreu 	rtnl_lock();
477834877a15SJose Abreu 	netif_trans_update(priv->dev);
477934877a15SJose Abreu 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
478034877a15SJose Abreu 		usleep_range(1000, 2000);
478134877a15SJose Abreu 
478234877a15SJose Abreu 	set_bit(STMMAC_DOWN, &priv->state);
478334877a15SJose Abreu 	dev_close(priv->dev);
478400f54e68SPetr Machata 	dev_open(priv->dev, NULL);
478534877a15SJose Abreu 	clear_bit(STMMAC_DOWN, &priv->state);
478634877a15SJose Abreu 	clear_bit(STMMAC_RESETING, &priv->state);
478734877a15SJose Abreu 	rtnl_unlock();
478834877a15SJose Abreu }
478934877a15SJose Abreu 
479034877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
479134877a15SJose Abreu {
479234877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
479334877a15SJose Abreu 			service_task);
479434877a15SJose Abreu 
479534877a15SJose Abreu 	stmmac_reset_subtask(priv);
479634877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
479734877a15SJose Abreu }
479834877a15SJose Abreu 
47997ac6653aSJeff Kirsher /**
4800cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
480132ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
4802732fdf0eSGiuseppe CAVALLARO  *  Description: this function configures the MAC device according to
4803732fdf0eSGiuseppe CAVALLARO  *  some platform parameters or the HW capability register. It prepares the
4804732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain modes and to set up either enhanced or
4805732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
4806cf3f047bSGiuseppe CAVALLARO  */
4807cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
4808cf3f047bSGiuseppe CAVALLARO {
48095f0456b4SJose Abreu 	int ret;
4810cf3f047bSGiuseppe CAVALLARO 
48119f93ac8dSLABBE Corentin 	/* dwmac-sun8i only work in chain mode */
48129f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i)
48139f93ac8dSLABBE Corentin 		chain_mode = 1;
48145f0456b4SJose Abreu 	priv->chain_mode = chain_mode;
48159f93ac8dSLABBE Corentin 
48165f0456b4SJose Abreu 	/* Initialize HW Interface */
48175f0456b4SJose Abreu 	ret = stmmac_hwif_init(priv);
48185f0456b4SJose Abreu 	if (ret)
48195f0456b4SJose Abreu 		return ret;
48204a7d666aSGiuseppe CAVALLARO 
4821cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capability (new GMAC newer than 3.50a) */
4822cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4823cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
482438ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
4825cf3f047bSGiuseppe CAVALLARO 
4826cf3f047bSGiuseppe CAVALLARO 		/* We can override some gmac/dma configuration fields: e.g.
4827cf3f047bSGiuseppe CAVALLARO 		 * enh_desc, tx_coe (e.g. that are passed through the
4828cf3f047bSGiuseppe CAVALLARO 		 * platform) with the values from the HW capability
4829cf3f047bSGiuseppe CAVALLARO 		 * register (if supported).
4830cf3f047bSGiuseppe CAVALLARO 		 */
4831cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4832cf3f047bSGiuseppe CAVALLARO 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
48333fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
4834b8ef7020SBiao Huang 		if (priv->dma_cap.hash_tb_sz) {
4835b8ef7020SBiao Huang 			priv->hw->multicast_filter_bins =
4836b8ef7020SBiao Huang 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
4837b8ef7020SBiao Huang 			priv->hw->mcast_bits_log2 =
4838b8ef7020SBiao Huang 					ilog2(priv->hw->multicast_filter_bins);
4839b8ef7020SBiao Huang 		}
484038912bdbSDeepak SIKRI 
4841a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
4842a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
4843a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
4844a8df35d4SEzequiel Garcia 		else
484538912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4846a8df35d4SEzequiel Garcia 
4847f748be53SAlexandre TORGUE 		/* In case of GMAC4 rx_coe is from HW cap register. */
4848f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
484938912bdbSDeepak SIKRI 
485038912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
485138912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
485238912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
485338912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
485438912bdbSDeepak SIKRI 
485538ddc59dSLABBE Corentin 	} else {
485638ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
485738ddc59dSLABBE Corentin 	}
4858cf3f047bSGiuseppe CAVALLARO 
4859d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
4860d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
486138ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4862f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
486338ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4864d2afb5bdSGiuseppe CAVALLARO 	}
4865cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
486638ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
4867cf3f047bSGiuseppe CAVALLARO 
4868cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
486938ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4870cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
4871cf3f047bSGiuseppe CAVALLARO 	}
4872cf3f047bSGiuseppe CAVALLARO 
4873f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
487438ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
4875f748be53SAlexandre TORGUE 
4876e0f9956aSChuah, Kim Tatt 	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
4877e0f9956aSChuah, Kim Tatt 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
4878e0f9956aSChuah, Kim Tatt 
48797cfde0afSJose Abreu 	/* Run HW quirks, if any */
48807cfde0afSJose Abreu 	if (priv->hwif_quirks) {
48817cfde0afSJose Abreu 		ret = priv->hwif_quirks(priv);
48827cfde0afSJose Abreu 		if (ret)
48837cfde0afSJose Abreu 			return ret;
48847cfde0afSJose Abreu 	}
48857cfde0afSJose Abreu 
48863b509466SJose Abreu 	/* Rx Watchdog is available in cores newer than 3.40.
48873b509466SJose Abreu 	 * In some cases, for example on buggy HW, this feature
48883b509466SJose Abreu 	 * has to be disabled; this can be done by passing the
48893b509466SJose Abreu 	 * riwt_off field from the platform.
48903b509466SJose Abreu 	 */
48913b509466SJose Abreu 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
48923b509466SJose Abreu 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
48933b509466SJose Abreu 		priv->use_riwt = 1;
48943b509466SJose Abreu 		dev_info(priv->device,
48953b509466SJose Abreu 			 "Enable RX Mitigation via HW Watchdog Timer\n");
48963b509466SJose Abreu 	}
48973b509466SJose Abreu 
4898c24602efSGiuseppe CAVALLARO 	return 0;
4899cf3f047bSGiuseppe CAVALLARO }
4900cf3f047bSGiuseppe CAVALLARO 
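/* Register one RX and/or TX NAPI instance per channel, covering
 * max(rx_queues_to_use, tx_queues_to_use) channels in total;
 * stmmac_napi_del() undoes this.
 */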
49010366f7e0SOng Boon Leong static void stmmac_napi_add(struct net_device *dev)
49020366f7e0SOng Boon Leong {
49030366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
49040366f7e0SOng Boon Leong 	u32 queue, maxq;
49050366f7e0SOng Boon Leong 
49060366f7e0SOng Boon Leong 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
49070366f7e0SOng Boon Leong 
49080366f7e0SOng Boon Leong 	for (queue = 0; queue < maxq; queue++) {
49090366f7e0SOng Boon Leong 		struct stmmac_channel *ch = &priv->channel[queue];
49100366f7e0SOng Boon Leong 
49110366f7e0SOng Boon Leong 		ch->priv_data = priv;
49120366f7e0SOng Boon Leong 		ch->index = queue;
49132b94f526SMarek Szyprowski 		spin_lock_init(&ch->lock);
49140366f7e0SOng Boon Leong 
49150366f7e0SOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use) {
49160366f7e0SOng Boon Leong 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
49170366f7e0SOng Boon Leong 				       NAPI_POLL_WEIGHT);
49180366f7e0SOng Boon Leong 		}
49190366f7e0SOng Boon Leong 		if (queue < priv->plat->tx_queues_to_use) {
49200366f7e0SOng Boon Leong 			netif_tx_napi_add(dev, &ch->tx_napi,
49210366f7e0SOng Boon Leong 					  stmmac_napi_poll_tx,
49220366f7e0SOng Boon Leong 					  NAPI_POLL_WEIGHT);
49230366f7e0SOng Boon Leong 		}
49240366f7e0SOng Boon Leong 	}
49250366f7e0SOng Boon Leong }
49260366f7e0SOng Boon Leong 
49270366f7e0SOng Boon Leong static void stmmac_napi_del(struct net_device *dev)
49280366f7e0SOng Boon Leong {
49290366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
49300366f7e0SOng Boon Leong 	u32 queue, maxq;
49310366f7e0SOng Boon Leong 
49320366f7e0SOng Boon Leong 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
49330366f7e0SOng Boon Leong 
49340366f7e0SOng Boon Leong 	for (queue = 0; queue < maxq; queue++) {
49350366f7e0SOng Boon Leong 		struct stmmac_channel *ch = &priv->channel[queue];
49360366f7e0SOng Boon Leong 
49370366f7e0SOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use)
49380366f7e0SOng Boon Leong 			netif_napi_del(&ch->rx_napi);
49390366f7e0SOng Boon Leong 		if (queue < priv->plat->tx_queues_to_use)
49400366f7e0SOng Boon Leong 			netif_napi_del(&ch->tx_napi);
49410366f7e0SOng Boon Leong 	}
49420366f7e0SOng Boon Leong }
49430366f7e0SOng Boon Leong 
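/* Change the number of RX/TX queues at run time (used e.g. when changing
 * ethtool channel counts): the interface is released first if it is running,
 * the NAPI instances are re-created for the new counts and the interface is
 * re-opened.
 */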
49440366f7e0SOng Boon Leong int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
49450366f7e0SOng Boon Leong {
49460366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
49470366f7e0SOng Boon Leong 	int ret = 0;
49480366f7e0SOng Boon Leong 
49490366f7e0SOng Boon Leong 	if (netif_running(dev))
49500366f7e0SOng Boon Leong 		stmmac_release(dev);
49510366f7e0SOng Boon Leong 
49520366f7e0SOng Boon Leong 	stmmac_napi_del(dev);
49530366f7e0SOng Boon Leong 
49540366f7e0SOng Boon Leong 	priv->plat->rx_queues_to_use = rx_cnt;
49550366f7e0SOng Boon Leong 	priv->plat->tx_queues_to_use = tx_cnt;
49560366f7e0SOng Boon Leong 
49570366f7e0SOng Boon Leong 	stmmac_napi_add(dev);
49580366f7e0SOng Boon Leong 
49590366f7e0SOng Boon Leong 	if (netif_running(dev))
49600366f7e0SOng Boon Leong 		ret = stmmac_open(dev);
49610366f7e0SOng Boon Leong 
49620366f7e0SOng Boon Leong 	return ret;
49630366f7e0SOng Boon Leong }
49640366f7e0SOng Boon Leong 
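/* Change the RX/TX descriptor ring sizes at run time (used e.g. when
 * changing ethtool ring parameters); the interface is restarted if it
 * was running.
 */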
4965aa042f60SSong, Yoong Siang int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
4966aa042f60SSong, Yoong Siang {
4967aa042f60SSong, Yoong Siang 	struct stmmac_priv *priv = netdev_priv(dev);
4968aa042f60SSong, Yoong Siang 	int ret = 0;
4969aa042f60SSong, Yoong Siang 
4970aa042f60SSong, Yoong Siang 	if (netif_running(dev))
4971aa042f60SSong, Yoong Siang 		stmmac_release(dev);
4972aa042f60SSong, Yoong Siang 
4973aa042f60SSong, Yoong Siang 	priv->dma_rx_size = rx_size;
4974aa042f60SSong, Yoong Siang 	priv->dma_tx_size = tx_size;
4975aa042f60SSong, Yoong Siang 
4976aa042f60SSong, Yoong Siang 	if (netif_running(dev))
4977aa042f60SSong, Yoong Siang 		ret = stmmac_open(dev);
4978aa042f60SSong, Yoong Siang 
4979aa042f60SSong, Yoong Siang 	return ret;
4980aa042f60SSong, Yoong Siang }
4981aa042f60SSong, Yoong Siang 
4982cf3f047bSGiuseppe CAVALLARO /**
4983bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
4984bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
4985ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
4986e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
4987bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function used to
4988bfab27a1SGiuseppe CAVALLARO  * allocate the network device and the driver private structure.
49899afec6efSAndy Shevchenko  * Return:
499015ffac73SJoachim Eastwood  * 0 on success, otherwise a negative errno.
49917ac6653aSJeff Kirsher  */
499215ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
4993cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
4994e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
49957ac6653aSJeff Kirsher {
4996bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
4997bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
49980366f7e0SOng Boon Leong 	u32 rxq;
499976067459SJose Abreu 	int i, ret = 0;
50007ac6653aSJeff Kirsher 
50019737070cSJisheng Zhang 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
50029737070cSJisheng Zhang 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
500341de8d4cSJoe Perches 	if (!ndev)
500415ffac73SJoachim Eastwood 		return -ENOMEM;
50057ac6653aSJeff Kirsher 
5006bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
50077ac6653aSJeff Kirsher 
5008bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
5009bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
5010bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
5011bfab27a1SGiuseppe CAVALLARO 
5012bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
5013cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
5014cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
5015e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
5016e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
5017e56788cfSJoachim Eastwood 
5018e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
5019e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
5020e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
5021e56788cfSJoachim Eastwood 
5022a51645f7SPetr Štetiar 	if (!IS_ERR_OR_NULL(res->mac))
5023e56788cfSJoachim Eastwood 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
5024bfab27a1SGiuseppe CAVALLARO 
5025a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
5026803f8fc4SJoachim Eastwood 
5027cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
5028cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
5029cf3f047bSGiuseppe CAVALLARO 
503034877a15SJose Abreu 	/* Allocate workqueue */
503134877a15SJose Abreu 	priv->wq = create_singlethread_workqueue("stmmac_wq");
503234877a15SJose Abreu 	if (!priv->wq) {
503334877a15SJose Abreu 		dev_err(priv->device, "failed to create workqueue\n");
50349737070cSJisheng Zhang 		return -ENOMEM;
503534877a15SJose Abreu 	}
503634877a15SJose Abreu 
503734877a15SJose Abreu 	INIT_WORK(&priv->service_task, stmmac_service_task);
503834877a15SJose Abreu 
5039cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied XXX CRS XXX
5040ceb69499SGiuseppe CAVALLARO 	 * this needs to have multiple instances
5041ceb69499SGiuseppe CAVALLARO 	 */
5042cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
5043cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
5044cf3f047bSGiuseppe CAVALLARO 
504590f522a2SEugeniy Paltsev 	if (priv->plat->stmmac_rst) {
504690f522a2SEugeniy Paltsev 		ret = reset_control_assert(priv->plat->stmmac_rst);
5047f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
504890f522a2SEugeniy Paltsev 		/* Some reset controllers have only a reset callback instead of
504990f522a2SEugeniy Paltsev 		 * an assert + deassert callback pair.
505090f522a2SEugeniy Paltsev 		 */
505190f522a2SEugeniy Paltsev 		if (ret == -ENOTSUPP)
505290f522a2SEugeniy Paltsev 			reset_control_reset(priv->plat->stmmac_rst);
505390f522a2SEugeniy Paltsev 	}
5054c5e4ddbdSChen-Yu Tsai 
5055cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
5056c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
5057c24602efSGiuseppe CAVALLARO 	if (ret)
505862866e98SChen-Yu Tsai 		goto error_hw_init;
5059cf3f047bSGiuseppe CAVALLARO 
5060b561af36SVinod Koul 	stmmac_check_ether_addr(priv);
5061b561af36SVinod Koul 
5062cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
5063cf3f047bSGiuseppe CAVALLARO 
5064cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5065cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
5066f748be53SAlexandre TORGUE 
50674dbbe8ddSJose Abreu 	ret = stmmac_tc_init(priv, priv);
50684dbbe8ddSJose Abreu 	if (!ret) {
50694dbbe8ddSJose Abreu 		ndev->hw_features |= NETIF_F_HW_TC;
50704dbbe8ddSJose Abreu 	}
50714dbbe8ddSJose Abreu 
5072f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
50739edfa7daSNiklas Cassel 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
5074b7766206SJose Abreu 		if (priv->plat->has_gmac4)
5075b7766206SJose Abreu 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
5076f748be53SAlexandre TORGUE 		priv->tso = true;
507738ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
5078f748be53SAlexandre TORGUE 	}
5079a993db88SJose Abreu 
508067afd6d1SJose Abreu 	if (priv->dma_cap.sphen) {
508167afd6d1SJose Abreu 		ndev->hw_features |= NETIF_F_GRO;
508267afd6d1SJose Abreu 		priv->sph = true;
508367afd6d1SJose Abreu 		dev_info(priv->device, "SPH feature enabled\n");
508467afd6d1SJose Abreu 	}
508567afd6d1SJose Abreu 
5086f119cc98SFugang Duan 	/* The current IP register MAC_HW_Feature1[ADDR64] only defines
5087f119cc98SFugang Duan 	 * 32/40/64 bit widths, but some SoCs support other widths: for example
5088f119cc98SFugang Duan 	 * i.MX8MP supports 34 bits, which maps to a 40 bit width in MAC_HW_Feature1[ADDR64].
5089f119cc98SFugang Duan 	 * So overwrite dma_cap.addr64 according to the real HW design.
5090f119cc98SFugang Duan 	 */
5091f119cc98SFugang Duan 	if (priv->plat->addr64)
5092f119cc98SFugang Duan 		priv->dma_cap.addr64 = priv->plat->addr64;
5093f119cc98SFugang Duan 
5094a993db88SJose Abreu 	if (priv->dma_cap.addr64) {
5095a993db88SJose Abreu 		ret = dma_set_mask_and_coherent(device,
5096a993db88SJose Abreu 				DMA_BIT_MASK(priv->dma_cap.addr64));
5097a993db88SJose Abreu 		if (!ret) {
5098a993db88SJose Abreu 			dev_info(priv->device, "Using %d bits DMA width\n",
5099a993db88SJose Abreu 				 priv->dma_cap.addr64);
5100968a2978SThierry Reding 
5101968a2978SThierry Reding 			/*
5102968a2978SThierry Reding 			 * If more than 32 bits can be addressed, make sure to
5103968a2978SThierry Reding 			 * enable enhanced addressing mode.
5104968a2978SThierry Reding 			 */
5105968a2978SThierry Reding 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
5106968a2978SThierry Reding 				priv->plat->dma_cfg->eame = true;
5107a993db88SJose Abreu 		} else {
5108a993db88SJose Abreu 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
5109a993db88SJose Abreu 			if (ret) {
5110a993db88SJose Abreu 				dev_err(priv->device, "Failed to set DMA Mask\n");
5111a993db88SJose Abreu 				goto error_hw_init;
5112a993db88SJose Abreu 			}
5113a993db88SJose Abreu 
5114a993db88SJose Abreu 			priv->dma_cap.addr64 = 32;
5115a993db88SJose Abreu 		}
5116a993db88SJose Abreu 	}
5117a993db88SJose Abreu 
5118bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
5119bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
51207ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
51217ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
5122ab188e8fSElad Nachman 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
51233cd1cfcbSJose Abreu 	if (priv->dma_cap.vlhash) {
51243cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
51253cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
51263cd1cfcbSJose Abreu 	}
512730d93227SJose Abreu 	if (priv->dma_cap.vlins) {
512830d93227SJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
512930d93227SJose Abreu 		if (priv->dma_cap.dvlan)
513030d93227SJose Abreu 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
513130d93227SJose Abreu 	}
51327ac6653aSJeff Kirsher #endif
51337ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
51347ac6653aSJeff Kirsher 
513576067459SJose Abreu 	/* Initialize RSS */
513676067459SJose Abreu 	rxq = priv->plat->rx_queues_to_use;
513776067459SJose Abreu 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
513876067459SJose Abreu 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
513976067459SJose Abreu 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
514076067459SJose Abreu 
514176067459SJose Abreu 	if (priv->dma_cap.rssen && priv->plat->rss_en)
514276067459SJose Abreu 		ndev->features |= NETIF_F_RXHASH;
514376067459SJose Abreu 
514444770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
514544770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
514656bcd591SJose Abreu 	if (priv->plat->has_xgmac)
51477d9e6c5aSJose Abreu 		ndev->max_mtu = XGMAC_JUMBO_LEN;
514856bcd591SJose Abreu 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
514956bcd591SJose Abreu 		ndev->max_mtu = JUMBO_LEN;
515044770e11SJarod Wilson 	else
515144770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
5152a2cd64f3SKweh, Hock Leong 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
5153a2cd64f3SKweh, Hock Leong 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
5154a2cd64f3SKweh, Hock Leong 	 */
5155a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
5156a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
515744770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
5158a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
5159b618ab45SHeiner Kallweit 		dev_warn(priv->device,
5160a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu having invalid value (%d)\n",
5161a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
516244770e11SJarod Wilson 
51637ac6653aSJeff Kirsher 	if (flow_ctrl)
51647ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
51657ac6653aSJeff Kirsher 
51668fce3331SJose Abreu 	/* Setup channels NAPI */
51670366f7e0SOng Boon Leong 	stmmac_napi_add(ndev);
51687ac6653aSJeff Kirsher 
516929555fa3SThierry Reding 	mutex_init(&priv->lock);
51707ac6653aSJeff Kirsher 
5171cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform
5172cd7201f4SGiuseppe CAVALLARO 	 * this means that the CSR Clock Range selection cannot be
5173cd7201f4SGiuseppe CAVALLARO 	 * changed at run-time and it is fixed. Otherwise the driver will try to
5174cd7201f4SGiuseppe CAVALLARO 	 * set the MDC clock dynamically according to the actual csr
5175cd7201f4SGiuseppe CAVALLARO 	 * clock input.
5176cd7201f4SGiuseppe CAVALLARO 	 */
51775e7f7fc5SBiao Huang 	if (priv->plat->clk_csr >= 0)
5178cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
51795e7f7fc5SBiao Huang 	else
51805e7f7fc5SBiao Huang 		stmmac_clk_csr_set(priv);
5181cd7201f4SGiuseppe CAVALLARO 
5182e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
5183e58bb43fSGiuseppe CAVALLARO 
5184a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
51853fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
51864bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
51874bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
51884bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
5189b618ab45SHeiner Kallweit 			dev_err(priv->device,
519038ddc59dSLABBE Corentin 				"%s: MDIO bus (id: %d) registration failed",
51914bfcbd7aSFrancesco Virlinzi 				__func__, priv->plat->bus_id);
51926a81c26fSViresh Kumar 			goto error_mdio_register;
51934bfcbd7aSFrancesco Virlinzi 		}
5194e58bb43fSGiuseppe CAVALLARO 	}
51954bfcbd7aSFrancesco Virlinzi 
519674371272SJose Abreu 	ret = stmmac_phy_setup(priv);
519774371272SJose Abreu 	if (ret) {
519874371272SJose Abreu 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
519974371272SJose Abreu 		goto error_phy_setup;
520074371272SJose Abreu 	}
520174371272SJose Abreu 
520257016590SFlorian Fainelli 	ret = register_netdev(ndev);
5203b2eb09afSFlorian Fainelli 	if (ret) {
5204b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
520557016590SFlorian Fainelli 			__func__, ret);
5206b2eb09afSFlorian Fainelli 		goto error_netdev_register;
5207b2eb09afSFlorian Fainelli 	}
52087ac6653aSJeff Kirsher 
5209b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerup) {
5210b9663b7cSVoon Weifeng 		ret = priv->plat->serdes_powerup(ndev,
5211b9663b7cSVoon Weifeng 						 priv->plat->bsp_priv);
5212b9663b7cSVoon Weifeng 
5213b9663b7cSVoon Weifeng 		if (ret < 0)
5214801eb050SAndy Shevchenko 			goto error_serdes_powerup;
5215b9663b7cSVoon Weifeng 	}
5216b9663b7cSVoon Weifeng 
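	/* Create the per-device debugfs entries when debugfs is enabled */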
52175f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
52188d72ab11SGreg Kroah-Hartman 	stmmac_init_fs(ndev);
52195f2b8b62SThierry Reding #endif
52205f2b8b62SThierry Reding 
522157016590SFlorian Fainelli 	return ret;
52227ac6653aSJeff Kirsher 
5223801eb050SAndy Shevchenko error_serdes_powerup:
5224801eb050SAndy Shevchenko 	unregister_netdev(ndev);
52256a81c26fSViresh Kumar error_netdev_register:
522674371272SJose Abreu 	phylink_destroy(priv->phylink);
522774371272SJose Abreu error_phy_setup:
5228a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
5229b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
5230b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
52317ac6653aSJeff Kirsher error_mdio_register:
52320366f7e0SOng Boon Leong 	stmmac_napi_del(ndev);
523362866e98SChen-Yu Tsai error_hw_init:
523434877a15SJose Abreu 	destroy_workqueue(priv->wq);
52357ac6653aSJeff Kirsher 
523615ffac73SJoachim Eastwood 	return ret;
52377ac6653aSJeff Kirsher }
5238b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
52397ac6653aSJeff Kirsher 
52407ac6653aSJeff Kirsher /**
52417ac6653aSJeff Kirsher  * stmmac_dvr_remove - remove callback
5242f4e7bd81SJoachim Eastwood  * @dev: device pointer
52437ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
5244bfab27a1SGiuseppe CAVALLARO  * changes the link status and releases the DMA descriptor rings.
52457ac6653aSJeff Kirsher  */
5246f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev)
52477ac6653aSJeff Kirsher {
5248f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
52497ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
52507ac6653aSJeff Kirsher 
525138ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
52527ac6653aSJeff Kirsher 
5253ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
5254c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
52557ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
52567ac6653aSJeff Kirsher 	unregister_netdev(ndev);
52579a7b3950SOng Boon Leong 
52589a7b3950SOng Boon Leong 	/* SerDes power down needs to happen after the VLAN filter
52599a7b3950SOng Boon Leong 	 * is deleted, which is triggered by unregister_netdev().
52609a7b3950SOng Boon Leong 	 */
52619a7b3950SOng Boon Leong 	if (priv->plat->serdes_powerdown)
52629a7b3950SOng Boon Leong 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
52639a7b3950SOng Boon Leong 
5264474a31e1SAaro Koskinen #ifdef CONFIG_DEBUG_FS
5265474a31e1SAaro Koskinen 	stmmac_exit_fs(ndev);
5266474a31e1SAaro Koskinen #endif
526774371272SJose Abreu 	phylink_destroy(priv->phylink);
5268f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
5269f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
5270f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->pclk);
5271f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->stmmac_clk);
5272a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
52733fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
5274e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
527534877a15SJose Abreu 	destroy_workqueue(priv->wq);
527629555fa3SThierry Reding 	mutex_destroy(&priv->lock);
52777ac6653aSJeff Kirsher 
52787ac6653aSJeff Kirsher 	return 0;
52797ac6653aSJeff Kirsher }
5280b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
52817ac6653aSJeff Kirsher 
5282732fdf0eSGiuseppe CAVALLARO /**
5283732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
5284f4e7bd81SJoachim Eastwood  * @dev: device pointer
5285732fdf0eSGiuseppe CAVALLARO  * Description: this is the function to suspend the device; it is called
5286732fdf0eSGiuseppe CAVALLARO  * by the platform driver to stop the network queues, program the PMT
5287732fdf0eSGiuseppe CAVALLARO  * register (for WoL) and clean and release the driver resources.
5288732fdf0eSGiuseppe CAVALLARO  */
5289f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
52907ac6653aSJeff Kirsher {
5291f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
52927ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
529314b41a29SNicolin Chen 	u32 chan;
52947ac6653aSJeff Kirsher 
52957ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
52967ac6653aSJeff Kirsher 		return 0;
52977ac6653aSJeff Kirsher 
52983e2bf04fSJose Abreu 	phylink_mac_change(priv->phylink, false);
52997ac6653aSJeff Kirsher 
5300134cc4ceSThierry Reding 	mutex_lock(&priv->lock);
530119e13cb2SJose Abreu 
53027ac6653aSJeff Kirsher 	netif_device_detach(ndev);
53037ac6653aSJeff Kirsher 
5304c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
53057ac6653aSJeff Kirsher 
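	/* Cancel the per-queue TX coalescing timers before suspending */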
530614b41a29SNicolin Chen 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
5307d5a05e69SVincent Whitchurch 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
530814b41a29SNicolin Chen 
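	/* Leave the LPI state and stop the EEE control timer */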
53095f585913SFugang Duan 	if (priv->eee_enabled) {
53105f585913SFugang Duan 		priv->tx_path_in_lpi_mode = false;
53115f585913SFugang Duan 		del_timer_sync(&priv->eee_ctrl_timer);
53125f585913SFugang Duan 	}
53135f585913SFugang Duan 
53147ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
5315ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
5316c24602efSGiuseppe CAVALLARO 
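	/* Power down the SerDes lanes through the optional platform hook */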
5317b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerdown)
5318b9663b7cSVoon Weifeng 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5319b9663b7cSVoon Weifeng 
53207ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
5321e8377e7aSJisheng Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5322c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, priv->wolopts);
532389f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
532489f7f2cfSSrinivas Kandagatla 	} else {
5325134cc4ceSThierry Reding 		mutex_unlock(&priv->lock);
53263e2bf04fSJose Abreu 		rtnl_lock();
532777b28983SJisheng Zhang 		if (device_may_wakeup(priv->device))
532877b28983SJisheng Zhang 			phylink_speed_down(priv->phylink, false);
53293e2bf04fSJose Abreu 		phylink_stop(priv->phylink);
53303e2bf04fSJose Abreu 		rtnl_unlock();
5331134cc4ceSThierry Reding 		mutex_lock(&priv->lock);
53323e2bf04fSJose Abreu 
5333c10d4c82SJose Abreu 		stmmac_mac_set(priv, priv->ioaddr, false);
5334db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
5335ba1377ffSGiuseppe CAVALLARO 		/* Disable the clocks, since PMT-based wake-up is not used */
5336e497c20eSBiao Huang 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
5337e497c20eSBiao Huang 		clk_disable_unprepare(priv->plat->pclk);
5338e497c20eSBiao Huang 		clk_disable_unprepare(priv->plat->stmmac_clk);
5339ba1377ffSGiuseppe CAVALLARO 	}
534029555fa3SThierry Reding 	mutex_unlock(&priv->lock);
53412d871aa0SVince Bridgers 
5342bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
53437ac6653aSJeff Kirsher 	return 0;
53447ac6653aSJeff Kirsher }
5345b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
53467ac6653aSJeff Kirsher 
5347732fdf0eSGiuseppe CAVALLARO /**
534854139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
5349d0ea5cbdSJesse Brandeburg  * @priv: driver private structure
535054139cf3SJoao Pinto  */
535154139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
535254139cf3SJoao Pinto {
535354139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5354ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
535554139cf3SJoao Pinto 	u32 queue;
535654139cf3SJoao Pinto 
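	/* Rewind the ring indices of every RX queue */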
535754139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
535854139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
535954139cf3SJoao Pinto 
536054139cf3SJoao Pinto 		rx_q->cur_rx = 0;
536154139cf3SJoao Pinto 		rx_q->dirty_rx = 0;
536254139cf3SJoao Pinto 	}
536354139cf3SJoao Pinto 
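	/* Rewind the ring indices of every TX queue, clear the cached MSS
	 * value and reset the byte queue limits.
	 */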
5364ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
5365ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5366ce736788SJoao Pinto 
5367ce736788SJoao Pinto 		tx_q->cur_tx = 0;
5368ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
53698d212a9eSNiklas Cassel 		tx_q->mss = 0;
5370c511819dSJoakim Zhang 
5371c511819dSJoakim Zhang 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
5372ce736788SJoao Pinto 	}
537354139cf3SJoao Pinto }
537454139cf3SJoao Pinto 
537554139cf3SJoao Pinto /**
5376732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
5377f4e7bd81SJoachim Eastwood  * @dev: device pointer
5378732fdf0eSGiuseppe CAVALLARO  * Description: on resume, this function is invoked to set up the DMA and CORE
5379732fdf0eSGiuseppe CAVALLARO  * in a usable state.
5380732fdf0eSGiuseppe CAVALLARO  */
5381f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
53827ac6653aSJeff Kirsher {
5383f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
53847ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
5385b9663b7cSVoon Weifeng 	int ret;
53867ac6653aSJeff Kirsher 
53877ac6653aSJeff Kirsher 	if (!netif_running(ndev))
53887ac6653aSJeff Kirsher 		return 0;
53897ac6653aSJeff Kirsher 
53907ac6653aSJeff Kirsher 	/* The Power Down bit, in the PM register, is cleared
53917ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
53927ac6653aSJeff Kirsher 	 * is received. Anyway, it's better to clear this bit manually
53937ac6653aSJeff Kirsher 	 * because it can cause problems while resuming
5394ceb69499SGiuseppe CAVALLARO 	 * from another device (e.g. serial console).
5395ceb69499SGiuseppe CAVALLARO 	 */
5396e8377e7aSJisheng Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
539729555fa3SThierry Reding 		mutex_lock(&priv->lock);
5398c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, 0);
539929555fa3SThierry Reding 		mutex_unlock(&priv->lock);
540089f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
5401623997fbSSrinivas Kandagatla 	} else {
5402db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
54038d45e42bSLABBE Corentin 		/* Enable the clocks that were previously disabled */
5404e497c20eSBiao Huang 		clk_prepare_enable(priv->plat->stmmac_clk);
5405e497c20eSBiao Huang 		clk_prepare_enable(priv->plat->pclk);
5406e497c20eSBiao Huang 		if (priv->plat->clk_ptp_ref)
5407e497c20eSBiao Huang 			clk_prepare_enable(priv->plat->clk_ptp_ref);
5408623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
5409623997fbSSrinivas Kandagatla 		if (priv->mii)
5410623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
5411623997fbSSrinivas Kandagatla 	}
54127ac6653aSJeff Kirsher 
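	/* Power the SerDes lanes back up through the optional platform hook */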
5413b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerup) {
5414b9663b7cSVoon Weifeng 		ret = priv->plat->serdes_powerup(ndev,
5415b9663b7cSVoon Weifeng 						 priv->plat->bsp_priv);
5416b9663b7cSVoon Weifeng 
5417b9663b7cSVoon Weifeng 		if (ret < 0)
5418b9663b7cSVoon Weifeng 			return ret;
5419b9663b7cSVoon Weifeng 	}
5420b9663b7cSVoon Weifeng 
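	/* If PMT-based wake-up was not used, the link was fully stopped on
	 * suspend, so restart phylink and restore the link speed here.
	 */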
542136d18b56SFugang Duan 	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
542236d18b56SFugang Duan 		rtnl_lock();
542336d18b56SFugang Duan 		phylink_start(priv->phylink);
542436d18b56SFugang Duan 		/* We may have called phylink_speed_down before */
542536d18b56SFugang Duan 		phylink_speed_up(priv->phylink);
542636d18b56SFugang Duan 		rtnl_unlock();
542736d18b56SFugang Duan 	}
542836d18b56SFugang Duan 
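	/* Restore the ring state and re-program the hardware under the RTNL
	 * and driver locks.
	 */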
54298e5debedSWong Vee Khee 	rtnl_lock();
543029555fa3SThierry Reding 	mutex_lock(&priv->lock);
5431f55d84b0SVincent Palatin 
543254139cf3SJoao Pinto 	stmmac_reset_queues_param(priv);
54339c63faaaSJoakim Zhang 	stmmac_reinit_rx_buffers(priv);
54344ec236c7SFugang Duan 	stmmac_free_tx_skbufs(priv);
5435ae79a639SGiuseppe CAVALLARO 	stmmac_clear_descriptors(priv);
5436ae79a639SGiuseppe CAVALLARO 
5437fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
5438d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
5439ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
54407ac6653aSJeff Kirsher 
5441ed64639bSWong Vee Khee 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
5442ed64639bSWong Vee Khee 
5443c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
54447ac6653aSJeff Kirsher 
5445134cc4ceSThierry Reding 	mutex_unlock(&priv->lock);
54468e5debedSWong Vee Khee 	rtnl_unlock();
5447134cc4ceSThierry Reding 
54483e2bf04fSJose Abreu 	phylink_mac_change(priv->phylink, true);
5449102463b1SFrancesco Virlinzi 
545031096c3eSLeon Yu 	netif_device_attach(ndev);
545131096c3eSLeon Yu 
54527ac6653aSJeff Kirsher 	return 0;
54537ac6653aSJeff Kirsher }
5454b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
5455ba27ec66SGiuseppe CAVALLARO 
54567ac6653aSJeff Kirsher #ifndef MODULE
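/* Parse the "stmmaceth=" kernel command line options (built-in only).
 * Example, with hypothetical values:
 *   stmmaceth=debug:16,phyaddr:1,watchdog:4000,eee_timer:1000
 */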
54577ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
54587ac6653aSJeff Kirsher {
54597ac6653aSJeff Kirsher 	char *opt;
54607ac6653aSJeff Kirsher 
54617ac6653aSJeff Kirsher 	if (!str || !*str)
54627ac6653aSJeff Kirsher 		return -EINVAL;
54637ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
54647ac6653aSJeff Kirsher 		if (!strncmp(opt, "debug:", 6)) {
5465ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
54667ac6653aSJeff Kirsher 				goto err;
54677ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "phyaddr:", 8)) {
5468ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
54697ac6653aSJeff Kirsher 				goto err;
54707ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "buf_sz:", 7)) {
5471ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
54727ac6653aSJeff Kirsher 				goto err;
54737ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "tc:", 3)) {
5474ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
54757ac6653aSJeff Kirsher 				goto err;
54767ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "watchdog:", 9)) {
5477ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
54787ac6653aSJeff Kirsher 				goto err;
54797ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
5480ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
54817ac6653aSJeff Kirsher 				goto err;
54827ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "pause:", 6)) {
5483ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
54847ac6653aSJeff Kirsher 				goto err;
5485506f669cSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "eee_timer:", 10)) {
5486d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
5487d765955dSGiuseppe CAVALLARO 				goto err;
54884a7d666aSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "chain_mode:", 11)) {
54894a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
54904a7d666aSGiuseppe CAVALLARO 				goto err;
54917ac6653aSJeff Kirsher 		}
54927ac6653aSJeff Kirsher 	}
54937ac6653aSJeff Kirsher 	return 0;
54947ac6653aSJeff Kirsher 
54957ac6653aSJeff Kirsher err:
54967ac6653aSJeff Kirsher 	pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
54977ac6653aSJeff Kirsher 	return -EINVAL;
54987ac6653aSJeff Kirsher }
54997ac6653aSJeff Kirsher 
55007ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
5501ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
55026fc0d0f2SGiuseppe Cavallaro 
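/* Module init/exit: create the shared debugfs root directory and register
 * the netdevice notifier used by the debugfs support; undo both on exit.
 */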
5503466c5ac8SMathieu Olivari static int __init stmmac_init(void)
5504466c5ac8SMathieu Olivari {
5505466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
5506466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
55078d72ab11SGreg Kroah-Hartman 	if (!stmmac_fs_dir)
5508466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
5509474a31e1SAaro Koskinen 	register_netdevice_notifier(&stmmac_notifier);
5510466c5ac8SMathieu Olivari #endif
5511466c5ac8SMathieu Olivari 
5512466c5ac8SMathieu Olivari 	return 0;
5513466c5ac8SMathieu Olivari }
5514466c5ac8SMathieu Olivari 
5515466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
5516466c5ac8SMathieu Olivari {
5517466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
5518474a31e1SAaro Koskinen 	unregister_netdevice_notifier(&stmmac_notifier);
5519466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
5520466c5ac8SMathieu Olivari #endif
5521466c5ac8SMathieu Olivari }
5522466c5ac8SMathieu Olivari 
5523466c5ac8SMathieu Olivari module_init(stmmac_init)
5524466c5ac8SMathieu Olivari module_exit(stmmac_exit)
5525466c5ac8SMathieu Olivari 
55266fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
55276fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
55286fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
5529