// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
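/* Illustrative note: STMMAC_ALIGN() rounds a length up to the cache line size
 * and then to a 16-byte boundary; for example, assuming a 64-byte cache line,
 * STMMAC_ALIGN(1500) == 1536.
 */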
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver uses ring mode to manage the TX and RX descriptors,
 * but the user can force the use of chain mode instead of ring mode.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
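/* Illustrative usage only: the module parameters above can be overridden at
 * load time, e.g. "modprobe stmmac eee_timer=2000 buf_sz=4096", or on the
 * kernel command line as "stmmac.eee_timer=2000" when the driver is built in.
 * The values shown are examples, not recommendations.
 */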

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
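/* Illustrative sketch only (not part of this driver): a platform
 * suspend/resume path would typically bracket register accesses with the
 * helper above, e.g.
 *
 *	ret = stmmac_bus_clks_config(priv, true);
 *	if (ret)
 *		return ret;
 *	... access MAC registers ...
 *	stmmac_bus_clks_config(priv, false);
 */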

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}
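/* For example (illustrative): loading the module with buf_sz=100 or a
 * negative watchdog value is silently corrected here to DEFAULT_BUFSIZE and
 * TX_TIMEO respectively, so malformed parameters never reach the setup code.
 */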

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Vice versa, the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* The platform-provided default clk_csr is assumed valid for all
	 * cases except the ones mentioned below.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider, as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
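	/* Illustrative example: with a 75 MHz csr clock, the 60-100 MHz range
	 * below applies and STMMAC_CSR_60_100M is selected as the CSR clock
	 * range later used to derive the MDC divider.
	 */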
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}
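/* Worked example (illustrative): with dma_tx_size = 512, cur_tx = 500 and
 * dirty_tx = 10, descriptors 10..499 are still owned by the DMA, so
 * stmmac_tx_avail() returns 512 - 500 + 10 - 1 = 21 free slots; one slot is
 * always kept unused to distinguish a full ring from an empty one.
 */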

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}

/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function verifies and enters LPI mode in case of EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables EEE when the LPI state
 * is true. It is called by the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot deal with the PHY registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
	}

	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor and passes it to
 * the stack. It also performs some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet timestamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 sec_inc = 0;
	u32 value = 0;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping.
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id != DWMAC_CORE_5_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				xmac, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
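		/* Illustrative numbers (assuming the fine-update setup used
		 * above, where sec_inc ends up as roughly twice the reference
		 * clock period): with clk_ptp_rate = 50 MHz and sec_inc = 40
		 * ns, temp = 1e9 / 40 = 25e6 and
		 * addend = (25e6 << 32) / 50e6 = 2^31 = 0x80000000, i.e. the
		 * 32-bit accumulator overflows every second input cycle.
		 */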
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				(u32)now.tv_sec, now.tv_nsec);
	}

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

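/* Illustrative userspace sketch (not part of this driver): the handler above
 * is reached through the standard SIOCSHWTSTAMP ioctl; interface name and
 * socket fd below are placeholders, e.g.
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */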
/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex passed to the next function
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			priv->pause, tx_cnt);
}

static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_xgmac) {
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);

	/* If PCS is supported, check which modes it supports. */
	stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
}
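/* Illustrative example: on a platform that sets plat->max_speed = 100, the
 * mask above removes 1000baseT_Full/1000baseX_Full from both the supported
 * and advertising masks, and with more than one TX queue all half-duplex
 * modes are masked out as well.
 */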
935eeef2f6bSJose Abreu 
936d46b7e4fSRussell King static void stmmac_mac_pcs_get_state(struct phylink_config *config,
937eeef2f6bSJose Abreu 				     struct phylink_link_state *state)
938eeef2f6bSJose Abreu {
939f213bbe8SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
940f213bbe8SJose Abreu 
941d46b7e4fSRussell King 	state->link = 0;
942f213bbe8SJose Abreu 	stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
943eeef2f6bSJose Abreu }
944eeef2f6bSJose Abreu 
94574371272SJose Abreu static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
94674371272SJose Abreu 			      const struct phylink_link_state *state)
9479ad372fcSJose Abreu {
948f213bbe8SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
949f213bbe8SJose Abreu 
950f213bbe8SJose Abreu 	stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
9519ad372fcSJose Abreu }
9529ad372fcSJose Abreu 
953eeef2f6bSJose Abreu static void stmmac_mac_an_restart(struct phylink_config *config)
954eeef2f6bSJose Abreu {
955eeef2f6bSJose Abreu 	/* Not Supported */
956eeef2f6bSJose Abreu }
957eeef2f6bSJose Abreu 
95874371272SJose Abreu static void stmmac_mac_link_down(struct phylink_config *config,
95974371272SJose Abreu 				 unsigned int mode, phy_interface_t interface)
9609ad372fcSJose Abreu {
96174371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
9629ad372fcSJose Abreu 
9639ad372fcSJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
96474371272SJose Abreu 	priv->eee_active = false;
965388e201dSVineetha G. Jaya Kumaran 	priv->tx_lpi_enabled = false;
96674371272SJose Abreu 	stmmac_eee_init(priv);
96774371272SJose Abreu 	stmmac_set_eee_pls(priv, priv->hw, false);
9689ad372fcSJose Abreu }
9699ad372fcSJose Abreu 
97074371272SJose Abreu static void stmmac_mac_link_up(struct phylink_config *config,
97191a208f2SRussell King 			       struct phy_device *phy,
97274371272SJose Abreu 			       unsigned int mode, phy_interface_t interface,
97391a208f2SRussell King 			       int speed, int duplex,
97491a208f2SRussell King 			       bool tx_pause, bool rx_pause)
9759ad372fcSJose Abreu {
97674371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
97746f69dedSJose Abreu 	u32 ctrl;
97846f69dedSJose Abreu 
979f213bbe8SJose Abreu 	stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
980f213bbe8SJose Abreu 
98146f69dedSJose Abreu 	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
98246f69dedSJose Abreu 	ctrl &= ~priv->hw->link.speed_mask;
98346f69dedSJose Abreu 
98446f69dedSJose Abreu 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
98546f69dedSJose Abreu 		switch (speed) {
98646f69dedSJose Abreu 		case SPEED_10000:
98746f69dedSJose Abreu 			ctrl |= priv->hw->link.xgmii.speed10000;
98846f69dedSJose Abreu 			break;
98946f69dedSJose Abreu 		case SPEED_5000:
99046f69dedSJose Abreu 			ctrl |= priv->hw->link.xgmii.speed5000;
99146f69dedSJose Abreu 			break;
99246f69dedSJose Abreu 		case SPEED_2500:
99346f69dedSJose Abreu 			ctrl |= priv->hw->link.xgmii.speed2500;
99446f69dedSJose Abreu 			break;
99546f69dedSJose Abreu 		default:
99646f69dedSJose Abreu 			return;
99746f69dedSJose Abreu 		}
9988a880936SJose Abreu 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
9998a880936SJose Abreu 		switch (speed) {
10008a880936SJose Abreu 		case SPEED_100000:
10018a880936SJose Abreu 			ctrl |= priv->hw->link.xlgmii.speed100000;
10028a880936SJose Abreu 			break;
10038a880936SJose Abreu 		case SPEED_50000:
10048a880936SJose Abreu 			ctrl |= priv->hw->link.xlgmii.speed50000;
10058a880936SJose Abreu 			break;
10068a880936SJose Abreu 		case SPEED_40000:
10078a880936SJose Abreu 			ctrl |= priv->hw->link.xlgmii.speed40000;
10088a880936SJose Abreu 			break;
10098a880936SJose Abreu 		case SPEED_25000:
10108a880936SJose Abreu 			ctrl |= priv->hw->link.xlgmii.speed25000;
10118a880936SJose Abreu 			break;
10128a880936SJose Abreu 		case SPEED_10000:
10138a880936SJose Abreu 			ctrl |= priv->hw->link.xgmii.speed10000;
10148a880936SJose Abreu 			break;
10158a880936SJose Abreu 		case SPEED_2500:
10168a880936SJose Abreu 			ctrl |= priv->hw->link.speed2500;
10178a880936SJose Abreu 			break;
10188a880936SJose Abreu 		case SPEED_1000:
10198a880936SJose Abreu 			ctrl |= priv->hw->link.speed1000;
10208a880936SJose Abreu 			break;
10218a880936SJose Abreu 		default:
10228a880936SJose Abreu 			return;
10238a880936SJose Abreu 		}
102446f69dedSJose Abreu 	} else {
102546f69dedSJose Abreu 		switch (speed) {
102646f69dedSJose Abreu 		case SPEED_2500:
102746f69dedSJose Abreu 			ctrl |= priv->hw->link.speed2500;
102846f69dedSJose Abreu 			break;
102946f69dedSJose Abreu 		case SPEED_1000:
103046f69dedSJose Abreu 			ctrl |= priv->hw->link.speed1000;
103146f69dedSJose Abreu 			break;
103246f69dedSJose Abreu 		case SPEED_100:
103346f69dedSJose Abreu 			ctrl |= priv->hw->link.speed100;
103446f69dedSJose Abreu 			break;
103546f69dedSJose Abreu 		case SPEED_10:
103646f69dedSJose Abreu 			ctrl |= priv->hw->link.speed10;
103746f69dedSJose Abreu 			break;
103846f69dedSJose Abreu 		default:
103946f69dedSJose Abreu 			return;
104046f69dedSJose Abreu 		}
104146f69dedSJose Abreu 	}
104246f69dedSJose Abreu 
104346f69dedSJose Abreu 	priv->speed = speed;
104446f69dedSJose Abreu 
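	/* Give the platform glue a chance to adjust anything that depends on
	 * the line speed (typically clock rates) before the updated control
	 * value is written back to the MAC.
	 */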
104546f69dedSJose Abreu 	if (priv->plat->fix_mac_speed)
104646f69dedSJose Abreu 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
104746f69dedSJose Abreu 
104846f69dedSJose Abreu 	if (!duplex)
104946f69dedSJose Abreu 		ctrl &= ~priv->hw->link.duplex;
105046f69dedSJose Abreu 	else
105146f69dedSJose Abreu 		ctrl |= priv->hw->link.duplex;
105246f69dedSJose Abreu 
105346f69dedSJose Abreu 	/* Flow Control operation */
105446f69dedSJose Abreu 	if (tx_pause && rx_pause)
105546f69dedSJose Abreu 		stmmac_mac_flow_ctrl(priv, duplex);
105646f69dedSJose Abreu 
105746f69dedSJose Abreu 	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
10589ad372fcSJose Abreu 
10599ad372fcSJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
10605b111770SJose Abreu 	if (phy && priv->dma_cap.eee) {
106174371272SJose Abreu 		priv->eee_active = phy_init_eee(phy, 1) >= 0;
106274371272SJose Abreu 		priv->eee_enabled = stmmac_eee_init(priv);
1063388e201dSVineetha G. Jaya Kumaran 		priv->tx_lpi_enabled = priv->eee_enabled;
106474371272SJose Abreu 		stmmac_set_eee_pls(priv, priv->hw, true);
106574371272SJose Abreu 	}
10669ad372fcSJose Abreu }
10679ad372fcSJose Abreu 
106874371272SJose Abreu static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1069eeef2f6bSJose Abreu 	.validate = stmmac_validate,
1070d46b7e4fSRussell King 	.mac_pcs_get_state = stmmac_mac_pcs_get_state,
107174371272SJose Abreu 	.mac_config = stmmac_mac_config,
1072eeef2f6bSJose Abreu 	.mac_an_restart = stmmac_mac_an_restart,
107374371272SJose Abreu 	.mac_link_down = stmmac_mac_link_down,
107474371272SJose Abreu 	.mac_link_up = stmmac_mac_link_up,
1075eeef2f6bSJose Abreu };
1076eeef2f6bSJose Abreu 
107729feff39SJoao Pinto /**
1078732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
107932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
108032ceabcaSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the Physical Coding
108132ceabcaSGiuseppe CAVALLARO  * Sublayer (PCS), an interface that can be used when the MAC is
108232ceabcaSGiuseppe CAVALLARO  * configured for the TBI, RTBI, or SGMII PHY interface.
108332ceabcaSGiuseppe CAVALLARO  */
1084e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1085e58bb43fSGiuseppe CAVALLARO {
1086e58bb43fSGiuseppe CAVALLARO 	int interface = priv->plat->interface;
1087e58bb43fSGiuseppe CAVALLARO 
1088e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
10890d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
10900d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
10910d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
10920d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
109338ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
10943fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
10950d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
109638ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
10973fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
1098e58bb43fSGiuseppe CAVALLARO 		}
1099e58bb43fSGiuseppe CAVALLARO 	}
1100e58bb43fSGiuseppe CAVALLARO }
1101e58bb43fSGiuseppe CAVALLARO 
11027ac6653aSJeff Kirsher /**
11037ac6653aSJeff Kirsher  * stmmac_init_phy - PHY initialization
11047ac6653aSJeff Kirsher  * @dev: net device structure
11057ac6653aSJeff Kirsher  * Description: it initializes the driver's PHY state, and attaches the PHY
11067ac6653aSJeff Kirsher  * to the mac driver.
11077ac6653aSJeff Kirsher  *  Return value:
11087ac6653aSJeff Kirsher  *  0 on success
11097ac6653aSJeff Kirsher  */
11107ac6653aSJeff Kirsher static int stmmac_init_phy(struct net_device *dev)
11117ac6653aSJeff Kirsher {
11121d8e5b0fSJisheng Zhang 	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
11137ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
111474371272SJose Abreu 	struct device_node *node;
111574371272SJose Abreu 	int ret;
11167ac6653aSJeff Kirsher 
11174838a540SJose Abreu 	node = priv->plat->phylink_node;
111874371272SJose Abreu 
111942e87024SJose Abreu 	if (node)
112074371272SJose Abreu 		ret = phylink_of_phy_connect(priv->phylink, node, 0);
112142e87024SJose Abreu 
112242e87024SJose Abreu 	/* Some DT bindings do not set up the PHY handle. Let's try to
112342e87024SJose Abreu 	 * parse it manually.
112442e87024SJose Abreu 	 */
112542e87024SJose Abreu 	if (!node || ret) {
112674371272SJose Abreu 		int addr = priv->plat->phy_addr;
112774371272SJose Abreu 		struct phy_device *phydev;
1128f142af2eSSrinivas Kandagatla 
112974371272SJose Abreu 		phydev = mdiobus_get_phy(priv->mii, addr);
113074371272SJose Abreu 		if (!phydev) {
113174371272SJose Abreu 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
11327ac6653aSJeff Kirsher 			return -ENODEV;
11337ac6653aSJeff Kirsher 		}
11348e99fc5fSGiuseppe Cavallaro 
113574371272SJose Abreu 		ret = phylink_connect_phy(priv->phylink, phydev);
113674371272SJose Abreu 	}
1137c51e424dSFlorian Fainelli 
11381d8e5b0fSJisheng Zhang 	phylink_ethtool_get_wol(priv->phylink, &wol);
11391d8e5b0fSJisheng Zhang 	device_set_wakeup_capable(priv->device, !!wol.supported);
11401d8e5b0fSJisheng Zhang 
114174371272SJose Abreu 	return ret;
114274371272SJose Abreu }
114374371272SJose Abreu 
114474371272SJose Abreu static int stmmac_phy_setup(struct stmmac_priv *priv)
114574371272SJose Abreu {
1146c63d1e5cSArnd Bergmann 	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
11470060c878SAlexandru Ardelean 	int mode = priv->plat->phy_interface;
114874371272SJose Abreu 	struct phylink *phylink;
114974371272SJose Abreu 
115074371272SJose Abreu 	priv->phylink_config.dev = &priv->dev->dev;
115174371272SJose Abreu 	priv->phylink_config.type = PHYLINK_NETDEV;
1152f213bbe8SJose Abreu 	priv->phylink_config.pcs_poll = true;
1153e5e5b771SOng Boon Leong 	priv->phylink_config.ovr_an_inband =
1154e5e5b771SOng Boon Leong 		priv->plat->mdio_bus_data->xpcs_an_inband;
115574371272SJose Abreu 
11568dc6051cSJose Abreu 	if (!fwnode)
11578dc6051cSJose Abreu 		fwnode = dev_fwnode(priv->device);
11588dc6051cSJose Abreu 
1159c63d1e5cSArnd Bergmann 	phylink = phylink_create(&priv->phylink_config, fwnode,
116074371272SJose Abreu 				 mode, &stmmac_phylink_mac_ops);
116174371272SJose Abreu 	if (IS_ERR(phylink))
116274371272SJose Abreu 		return PTR_ERR(phylink);
116374371272SJose Abreu 
116474371272SJose Abreu 	priv->phylink = phylink;
11657ac6653aSJeff Kirsher 	return 0;
11667ac6653aSJeff Kirsher }
11677ac6653aSJeff Kirsher 
116871fedb01SJoao Pinto static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1169c24602efSGiuseppe CAVALLARO {
117054139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1171bfaf91caSJoakim Zhang 	unsigned int desc_size;
117271fedb01SJoao Pinto 	void *head_rx;
117354139cf3SJoao Pinto 	u32 queue;
117454139cf3SJoao Pinto 
117554139cf3SJoao Pinto 	/* Display RX rings */
117654139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
117754139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
117854139cf3SJoao Pinto 
117954139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1180d0225e7dSAlexandre TORGUE 
1181bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
118254139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
1183bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
1184bfaf91caSJoakim Zhang 		} else {
118554139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
1186bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
1187bfaf91caSJoakim Zhang 		}
118871fedb01SJoao Pinto 
118971fedb01SJoao Pinto 		/* Display RX ring */
1190bfaf91caSJoakim Zhang 		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
1191bfaf91caSJoakim Zhang 				    rx_q->dma_rx_phy, desc_size);
11925bacd778SLABBE Corentin 	}
119354139cf3SJoao Pinto }
1194d0225e7dSAlexandre TORGUE 
119571fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv)
119671fedb01SJoao Pinto {
1197ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1198bfaf91caSJoakim Zhang 	unsigned int desc_size;
119971fedb01SJoao Pinto 	void *head_tx;
1200ce736788SJoao Pinto 	u32 queue;
1201ce736788SJoao Pinto 
1202ce736788SJoao Pinto 	/* Display TX rings */
1203ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1204ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1205ce736788SJoao Pinto 
1206ce736788SJoao Pinto 		pr_info("\tTX Queue %d rings\n", queue);
120771fedb01SJoao Pinto 
1208bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
1209ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
1210bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
1211bfaf91caSJoakim Zhang 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1212579a25a8SJose Abreu 			head_tx = (void *)tx_q->dma_entx;
1213bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_edesc);
1214bfaf91caSJoakim Zhang 		} else {
1215ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
1216bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
1217bfaf91caSJoakim Zhang 		}
121871fedb01SJoao Pinto 
1219bfaf91caSJoakim Zhang 		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
1220bfaf91caSJoakim Zhang 				    tx_q->dma_tx_phy, desc_size);
1221c24602efSGiuseppe CAVALLARO 	}
1222ce736788SJoao Pinto }
1223c24602efSGiuseppe CAVALLARO 
122471fedb01SJoao Pinto static void stmmac_display_rings(struct stmmac_priv *priv)
122571fedb01SJoao Pinto {
122671fedb01SJoao Pinto 	/* Display RX ring */
122771fedb01SJoao Pinto 	stmmac_display_rx_rings(priv);
122871fedb01SJoao Pinto 
122971fedb01SJoao Pinto 	/* Display TX ring */
123071fedb01SJoao Pinto 	stmmac_display_tx_rings(priv);
123171fedb01SJoao Pinto }
123271fedb01SJoao Pinto 
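/* stmmac_set_bfsize - pick the DMA buffer size for a given MTU.
 * Steps up through the supported sizes (default, 2 KiB, 4 KiB, 8 KiB and
 * 16 KiB) so that the returned buffer can hold an MTU-sized frame; an MTU
 * at or above the 8 KiB threshold gets the 16 KiB buffer.
 */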
1233286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1234286a8372SGiuseppe CAVALLARO {
1235286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1236286a8372SGiuseppe CAVALLARO 
1237b2f3a481SJose Abreu 	if (mtu >= BUF_SIZE_8KiB)
1238b2f3a481SJose Abreu 		ret = BUF_SIZE_16KiB;
1239b2f3a481SJose Abreu 	else if (mtu >= BUF_SIZE_4KiB)
1240286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1241286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1242286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1243d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1244286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1245286a8372SGiuseppe CAVALLARO 	else
1246d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1247286a8372SGiuseppe CAVALLARO 
1248286a8372SGiuseppe CAVALLARO 	return ret;
1249286a8372SGiuseppe CAVALLARO }
1250286a8372SGiuseppe CAVALLARO 
125132ceabcaSGiuseppe CAVALLARO /**
125271fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
125332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
125454139cf3SJoao Pinto  * @queue: RX queue index
125571fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors
125632ceabcaSGiuseppe CAVALLARO  * whether basic or extended descriptors are in use.
125732ceabcaSGiuseppe CAVALLARO  */
125854139cf3SJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1259c24602efSGiuseppe CAVALLARO {
126054139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
12615bacd778SLABBE Corentin 	int i;
1262c24602efSGiuseppe CAVALLARO 
126371fedb01SJoao Pinto 	/* Clear the RX descriptors */
1264aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_rx_size; i++)
12655bacd778SLABBE Corentin 		if (priv->extend_desc)
126642de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
12675bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1268aa042f60SSong, Yoong Siang 					(i == priv->dma_rx_size - 1),
1269583e6361SAaro Koskinen 					priv->dma_buf_sz);
12705bacd778SLABBE Corentin 		else
127142de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
12725bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1273aa042f60SSong, Yoong Siang 					(i == priv->dma_rx_size - 1),
1274583e6361SAaro Koskinen 					priv->dma_buf_sz);
127571fedb01SJoao Pinto }
127671fedb01SJoao Pinto 
127771fedb01SJoao Pinto /**
127871fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
127971fedb01SJoao Pinto  * @priv: driver private structure
1280ce736788SJoao Pinto  * @queue: TX queue index.
128171fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors
128271fedb01SJoao Pinto  * whether basic or extended descriptors are in use.
128371fedb01SJoao Pinto  */
1284ce736788SJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
128571fedb01SJoao Pinto {
1286ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
128771fedb01SJoao Pinto 	int i;
128871fedb01SJoao Pinto 
128971fedb01SJoao Pinto 	/* Clear the TX descriptors */
1290aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_tx_size; i++) {
1291aa042f60SSong, Yoong Siang 		int last = (i == (priv->dma_tx_size - 1));
1292579a25a8SJose Abreu 		struct dma_desc *p;
1293579a25a8SJose Abreu 
12945bacd778SLABBE Corentin 		if (priv->extend_desc)
1295579a25a8SJose Abreu 			p = &tx_q->dma_etx[i].basic;
1296579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1297579a25a8SJose Abreu 			p = &tx_q->dma_entx[i].basic;
12985bacd778SLABBE Corentin 		else
1299579a25a8SJose Abreu 			p = &tx_q->dma_tx[i];
1300579a25a8SJose Abreu 
1301579a25a8SJose Abreu 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1302579a25a8SJose Abreu 	}
1303c24602efSGiuseppe CAVALLARO }
1304c24602efSGiuseppe CAVALLARO 
1305732fdf0eSGiuseppe CAVALLARO /**
130671fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
130771fedb01SJoao Pinto  * @priv: driver private structure
130871fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors
130971fedb01SJoao Pinto  * whether basic or extended descriptors are in use.
131071fedb01SJoao Pinto  */
131171fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv)
131271fedb01SJoao Pinto {
131354139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1314ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
131554139cf3SJoao Pinto 	u32 queue;
131654139cf3SJoao Pinto 
131771fedb01SJoao Pinto 	/* Clear the RX descriptors */
131854139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
131954139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
132071fedb01SJoao Pinto 
132171fedb01SJoao Pinto 	/* Clear the TX descriptors */
1322ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1323ce736788SJoao Pinto 		stmmac_clear_tx_descriptors(priv, queue);
132471fedb01SJoao Pinto }
132571fedb01SJoao Pinto 
132671fedb01SJoao Pinto /**
1327732fdf0eSGiuseppe CAVALLARO  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1328732fdf0eSGiuseppe CAVALLARO  * @priv: driver private structure
1329732fdf0eSGiuseppe CAVALLARO  * @p: descriptor pointer
1330732fdf0eSGiuseppe CAVALLARO  * @i: descriptor index
133154139cf3SJoao Pinto  * @flags: gfp flag
133254139cf3SJoao Pinto  * @queue: RX queue index
1333732fdf0eSGiuseppe CAVALLARO  * Description: this function is called to allocate a receive buffer, perform
1334732fdf0eSGiuseppe CAVALLARO  * the DMA mapping and init the descriptor.
1335732fdf0eSGiuseppe CAVALLARO  */
1336c24602efSGiuseppe CAVALLARO static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
133754139cf3SJoao Pinto 				  int i, gfp_t flags, u32 queue)
1338c24602efSGiuseppe CAVALLARO {
133954139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
13402af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1341c24602efSGiuseppe CAVALLARO 
13422af6106aSJose Abreu 	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
13432af6106aSJose Abreu 	if (!buf->page)
134456329137SBartlomiej Zolnierkiewicz 		return -ENOMEM;
1345c24602efSGiuseppe CAVALLARO 
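	/* With Split Header (SPH) enabled each descriptor carries a second
	 * buffer, so grab an extra page from the same pool for the payload
	 * part of the split.
	 */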
134667afd6d1SJose Abreu 	if (priv->sph) {
134767afd6d1SJose Abreu 		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
134867afd6d1SJose Abreu 		if (!buf->sec_page)
134967afd6d1SJose Abreu 			return -ENOMEM;
135067afd6d1SJose Abreu 
135167afd6d1SJose Abreu 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1352396e13e1SJoakim Zhang 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
135367afd6d1SJose Abreu 	} else {
135467afd6d1SJose Abreu 		buf->sec_page = NULL;
1355396e13e1SJoakim Zhang 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
135667afd6d1SJose Abreu 	}
135767afd6d1SJose Abreu 
13582af6106aSJose Abreu 	buf->addr = page_pool_get_dma_addr(buf->page);
13592af6106aSJose Abreu 	stmmac_set_desc_addr(priv, p, buf->addr);
13602c520b1cSJose Abreu 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
13612c520b1cSJose Abreu 		stmmac_init_desc3(priv, p);
1362c24602efSGiuseppe CAVALLARO 
1363c24602efSGiuseppe CAVALLARO 	return 0;
1364c24602efSGiuseppe CAVALLARO }
1365c24602efSGiuseppe CAVALLARO 
136671fedb01SJoao Pinto /**
136771fedb01SJoao Pinto  * stmmac_free_rx_buffer - free an RX dma buffer
136871fedb01SJoao Pinto  * @priv: private structure
136954139cf3SJoao Pinto  * @queue: RX queue index
137071fedb01SJoao Pinto  * @i: buffer index.
137171fedb01SJoao Pinto  */
137254139cf3SJoao Pinto static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
137356329137SBartlomiej Zolnierkiewicz {
137454139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
13752af6106aSJose Abreu 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
137654139cf3SJoao Pinto 
13772af6106aSJose Abreu 	if (buf->page)
1378458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
13792af6106aSJose Abreu 	buf->page = NULL;
138067afd6d1SJose Abreu 
138167afd6d1SJose Abreu 	if (buf->sec_page)
1382458de8a9SIlias Apalodimas 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
138367afd6d1SJose Abreu 	buf->sec_page = NULL;
138456329137SBartlomiej Zolnierkiewicz }
138556329137SBartlomiej Zolnierkiewicz 
13867ac6653aSJeff Kirsher /**
138771fedb01SJoao Pinto  * stmmac_free_tx_buffer - free a TX dma buffer
138871fedb01SJoao Pinto  * @priv: private structure
1389ce736788SJoao Pinto  * @queue: TX queue index
139071fedb01SJoao Pinto  * @i: buffer index.
139171fedb01SJoao Pinto  */
1392ce736788SJoao Pinto static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
139371fedb01SJoao Pinto {
1394ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1395ce736788SJoao Pinto 
1396ce736788SJoao Pinto 	if (tx_q->tx_skbuff_dma[i].buf) {
1397ce736788SJoao Pinto 		if (tx_q->tx_skbuff_dma[i].map_as_page)
139871fedb01SJoao Pinto 			dma_unmap_page(priv->device,
1399ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].buf,
1400ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].len,
140171fedb01SJoao Pinto 				       DMA_TO_DEVICE);
140271fedb01SJoao Pinto 		else
140371fedb01SJoao Pinto 			dma_unmap_single(priv->device,
1404ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].buf,
1405ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].len,
140671fedb01SJoao Pinto 					 DMA_TO_DEVICE);
140771fedb01SJoao Pinto 	}
140871fedb01SJoao Pinto 
1409ce736788SJoao Pinto 	if (tx_q->tx_skbuff[i]) {
1410ce736788SJoao Pinto 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1411ce736788SJoao Pinto 		tx_q->tx_skbuff[i] = NULL;
1412ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].buf = 0;
1413ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].map_as_page = false;
141471fedb01SJoao Pinto 	}
141571fedb01SJoao Pinto }
141671fedb01SJoao Pinto 
141771fedb01SJoao Pinto /**
14189c63faaaSJoakim Zhang  * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
14199c63faaaSJoakim Zhang  * @priv: driver private structure
14209c63faaaSJoakim Zhang  * Description: this function is called to re-allocate the receive buffers,
14219c63faaaSJoakim Zhang  * perform the DMA mapping and re-init the descriptors.
14229c63faaaSJoakim Zhang  */
14239c63faaaSJoakim Zhang static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
14249c63faaaSJoakim Zhang {
14259c63faaaSJoakim Zhang 	u32 rx_count = priv->plat->rx_queues_to_use;
14269c63faaaSJoakim Zhang 	u32 queue;
14279c63faaaSJoakim Zhang 	int i;
14289c63faaaSJoakim Zhang 
14299c63faaaSJoakim Zhang 	for (queue = 0; queue < rx_count; queue++) {
14309c63faaaSJoakim Zhang 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
14319c63faaaSJoakim Zhang 
14329c63faaaSJoakim Zhang 		for (i = 0; i < priv->dma_rx_size; i++) {
14339c63faaaSJoakim Zhang 			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
14349c63faaaSJoakim Zhang 
14359c63faaaSJoakim Zhang 			if (buf->page) {
14369c63faaaSJoakim Zhang 				page_pool_recycle_direct(rx_q->page_pool, buf->page);
14379c63faaaSJoakim Zhang 				buf->page = NULL;
14389c63faaaSJoakim Zhang 			}
14399c63faaaSJoakim Zhang 
14409c63faaaSJoakim Zhang 			if (priv->sph && buf->sec_page) {
14419c63faaaSJoakim Zhang 				page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
14429c63faaaSJoakim Zhang 				buf->sec_page = NULL;
14439c63faaaSJoakim Zhang 			}
14449c63faaaSJoakim Zhang 		}
14459c63faaaSJoakim Zhang 	}
14469c63faaaSJoakim Zhang 
14479c63faaaSJoakim Zhang 	for (queue = 0; queue < rx_count; queue++) {
14489c63faaaSJoakim Zhang 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
14499c63faaaSJoakim Zhang 
14509c63faaaSJoakim Zhang 		for (i = 0; i < priv->dma_rx_size; i++) {
14519c63faaaSJoakim Zhang 			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
14529c63faaaSJoakim Zhang 			struct dma_desc *p;
14539c63faaaSJoakim Zhang 
14549c63faaaSJoakim Zhang 			if (priv->extend_desc)
14559c63faaaSJoakim Zhang 				p = &((rx_q->dma_erx + i)->basic);
14569c63faaaSJoakim Zhang 			else
14579c63faaaSJoakim Zhang 				p = rx_q->dma_rx + i;
14589c63faaaSJoakim Zhang 
14599c63faaaSJoakim Zhang 			if (!buf->page) {
14609c63faaaSJoakim Zhang 				buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
14619c63faaaSJoakim Zhang 				if (!buf->page)
14629c63faaaSJoakim Zhang 					goto err_reinit_rx_buffers;
14639c63faaaSJoakim Zhang 
14649c63faaaSJoakim Zhang 				buf->addr = page_pool_get_dma_addr(buf->page);
14659c63faaaSJoakim Zhang 			}
14669c63faaaSJoakim Zhang 
14679c63faaaSJoakim Zhang 			if (priv->sph && !buf->sec_page) {
14689c63faaaSJoakim Zhang 				buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
14699c63faaaSJoakim Zhang 				if (!buf->sec_page)
14709c63faaaSJoakim Zhang 					goto err_reinit_rx_buffers;
14719c63faaaSJoakim Zhang 
14729c63faaaSJoakim Zhang 				buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
14739c63faaaSJoakim Zhang 			}
14749c63faaaSJoakim Zhang 
14759c63faaaSJoakim Zhang 			stmmac_set_desc_addr(priv, p, buf->addr);
14769c63faaaSJoakim Zhang 			if (priv->sph)
14779c63faaaSJoakim Zhang 				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
14789c63faaaSJoakim Zhang 			else
14799c63faaaSJoakim Zhang 				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
14809c63faaaSJoakim Zhang 			if (priv->dma_buf_sz == BUF_SIZE_16KiB)
14819c63faaaSJoakim Zhang 				stmmac_init_desc3(priv, p);
14829c63faaaSJoakim Zhang 		}
14839c63faaaSJoakim Zhang 	}
14849c63faaaSJoakim Zhang 
14859c63faaaSJoakim Zhang 	return;
14869c63faaaSJoakim Zhang 
14879c63faaaSJoakim Zhang err_reinit_rx_buffers:
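	/* Roll back on failure: free what has been (re)allocated so far,
	 * finishing the partially filled queue first and then every
	 * lower-numbered queue completely.
	 */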
14889c63faaaSJoakim Zhang 	do {
14899c63faaaSJoakim Zhang 		while (--i >= 0)
14909c63faaaSJoakim Zhang 			stmmac_free_rx_buffer(priv, queue, i);
14919c63faaaSJoakim Zhang 
14929c63faaaSJoakim Zhang 		if (queue == 0)
14939c63faaaSJoakim Zhang 			break;
14949c63faaaSJoakim Zhang 
14959c63faaaSJoakim Zhang 		i = priv->dma_rx_size;
14969c63faaaSJoakim Zhang 	} while (queue-- > 0);
14979c63faaaSJoakim Zhang }
14989c63faaaSJoakim Zhang 
14999c63faaaSJoakim Zhang /**
150071fedb01SJoao Pinto  * init_dma_rx_desc_rings - init the RX descriptor rings
15017ac6653aSJeff Kirsher  * @dev: net device structure
15025bacd778SLABBE Corentin  * @flags: gfp flag.
150371fedb01SJoao Pinto  * Description: this function initializes the DMA RX descriptors
15045bacd778SLABBE Corentin  * and allocates the socket buffers. It supports the chained and ring
1505286a8372SGiuseppe CAVALLARO  * modes.
15067ac6653aSJeff Kirsher  */
150771fedb01SJoao Pinto static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
15087ac6653aSJeff Kirsher {
15097ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
151054139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
15115bacd778SLABBE Corentin 	int ret = -ENOMEM;
15121d3028f4SColin Ian King 	int queue;
151354139cf3SJoao Pinto 	int i;
15147ac6653aSJeff Kirsher 
151554139cf3SJoao Pinto 	/* RX INITIALIZATION */
15165bacd778SLABBE Corentin 	netif_dbg(priv, probe, priv->dev,
15175bacd778SLABBE Corentin 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
15185bacd778SLABBE Corentin 
151954139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
152054139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
152154139cf3SJoao Pinto 
152254139cf3SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
152354139cf3SJoao Pinto 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
152454139cf3SJoao Pinto 			  (u32)rx_q->dma_rx_phy);
152554139cf3SJoao Pinto 
1526cbcf0999SJose Abreu 		stmmac_clear_rx_descriptors(priv, queue);
1527cbcf0999SJose Abreu 
1528aa042f60SSong, Yoong Siang 		for (i = 0; i < priv->dma_rx_size; i++) {
15295bacd778SLABBE Corentin 			struct dma_desc *p;
15305bacd778SLABBE Corentin 
153154139cf3SJoao Pinto 			if (priv->extend_desc)
153254139cf3SJoao Pinto 				p = &((rx_q->dma_erx + i)->basic);
153354139cf3SJoao Pinto 			else
153454139cf3SJoao Pinto 				p = rx_q->dma_rx + i;
153554139cf3SJoao Pinto 
153654139cf3SJoao Pinto 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
153754139cf3SJoao Pinto 						     queue);
15385bacd778SLABBE Corentin 			if (ret)
15395bacd778SLABBE Corentin 				goto err_init_rx_buffers;
15405bacd778SLABBE Corentin 		}
154154139cf3SJoao Pinto 
154254139cf3SJoao Pinto 		rx_q->cur_rx = 0;
1543aa042f60SSong, Yoong Siang 		rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
154454139cf3SJoao Pinto 
1545c24602efSGiuseppe CAVALLARO 		/* Setup the chained descriptor addresses */
1546c24602efSGiuseppe CAVALLARO 		if (priv->mode == STMMAC_CHAIN_MODE) {
154771fedb01SJoao Pinto 			if (priv->extend_desc)
15482c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_erx,
1549aa042f60SSong, Yoong Siang 						 rx_q->dma_rx_phy,
1550aa042f60SSong, Yoong Siang 						 priv->dma_rx_size, 1);
155171fedb01SJoao Pinto 			else
15522c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_rx,
1553aa042f60SSong, Yoong Siang 						 rx_q->dma_rx_phy,
1554aa042f60SSong, Yoong Siang 						 priv->dma_rx_size, 0);
155571fedb01SJoao Pinto 		}
155654139cf3SJoao Pinto 	}
155754139cf3SJoao Pinto 
155871fedb01SJoao Pinto 	return 0;
155954139cf3SJoao Pinto 
156071fedb01SJoao Pinto err_init_rx_buffers:
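	/* Unwind: release the buffers of the queue that failed part-way and
	 * then those of all previously initialised queues.
	 */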
156154139cf3SJoao Pinto 	while (queue >= 0) {
156271fedb01SJoao Pinto 		while (--i >= 0)
156354139cf3SJoao Pinto 			stmmac_free_rx_buffer(priv, queue, i);
156454139cf3SJoao Pinto 
156554139cf3SJoao Pinto 		if (queue == 0)
156654139cf3SJoao Pinto 			break;
156754139cf3SJoao Pinto 
1568aa042f60SSong, Yoong Siang 		i = priv->dma_rx_size;
156954139cf3SJoao Pinto 		queue--;
157054139cf3SJoao Pinto 	}
157154139cf3SJoao Pinto 
157271fedb01SJoao Pinto 	return ret;
157371fedb01SJoao Pinto }
157471fedb01SJoao Pinto 
157571fedb01SJoao Pinto /**
157671fedb01SJoao Pinto  * init_dma_tx_desc_rings - init the TX descriptor rings
157771fedb01SJoao Pinto  * @dev: net device structure.
157871fedb01SJoao Pinto  * Description: this function initializes the DMA TX descriptors
157971fedb01SJoao Pinto  * and resets the per-entry TX bookkeeping. It supports the chained and
158071fedb01SJoao Pinto  * ring modes.
158171fedb01SJoao Pinto  */
158271fedb01SJoao Pinto static int init_dma_tx_desc_rings(struct net_device *dev)
158371fedb01SJoao Pinto {
158471fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
1585ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1586ce736788SJoao Pinto 	u32 queue;
158771fedb01SJoao Pinto 	int i;
158871fedb01SJoao Pinto 
1589ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1590ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1591ce736788SJoao Pinto 
159271fedb01SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
1593ce736788SJoao Pinto 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1594ce736788SJoao Pinto 			 (u32)tx_q->dma_tx_phy);
159571fedb01SJoao Pinto 
159671fedb01SJoao Pinto 		/* Setup the chained descriptor addresses */
159771fedb01SJoao Pinto 		if (priv->mode == STMMAC_CHAIN_MODE) {
159871fedb01SJoao Pinto 			if (priv->extend_desc)
15992c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_etx,
1600aa042f60SSong, Yoong Siang 						 tx_q->dma_tx_phy,
1601aa042f60SSong, Yoong Siang 						 priv->dma_tx_size, 1);
1602579a25a8SJose Abreu 			else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
16032c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_tx,
1604aa042f60SSong, Yoong Siang 						 tx_q->dma_tx_phy,
1605aa042f60SSong, Yoong Siang 						 priv->dma_tx_size, 0);
1606c24602efSGiuseppe CAVALLARO 		}
1607286a8372SGiuseppe CAVALLARO 
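		/* Start from a clean ring: clear every descriptor in whatever
		 * layout this queue uses and reset the per-entry DMA
		 * bookkeeping so nothing is mistaken for an in-flight frame.
		 */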
1608aa042f60SSong, Yoong Siang 		for (i = 0; i < priv->dma_tx_size; i++) {
1609c24602efSGiuseppe CAVALLARO 			struct dma_desc *p;
1610c24602efSGiuseppe CAVALLARO 			if (priv->extend_desc)
1611ce736788SJoao Pinto 				p = &((tx_q->dma_etx + i)->basic);
1612579a25a8SJose Abreu 			else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1613579a25a8SJose Abreu 				p = &((tx_q->dma_entx + i)->basic);
1614c24602efSGiuseppe CAVALLARO 			else
1615ce736788SJoao Pinto 				p = tx_q->dma_tx + i;
1616f748be53SAlexandre TORGUE 
161744c67f85SJose Abreu 			stmmac_clear_desc(priv, p);
1618f748be53SAlexandre TORGUE 
1619ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].buf = 0;
1620ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1621ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].len = 0;
1622ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].last_segment = false;
1623ce736788SJoao Pinto 			tx_q->tx_skbuff[i] = NULL;
16244a7d666aSGiuseppe CAVALLARO 		}
1625c24602efSGiuseppe CAVALLARO 
1626ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
1627ce736788SJoao Pinto 		tx_q->cur_tx = 0;
16288d212a9eSNiklas Cassel 		tx_q->mss = 0;
1629ce736788SJoao Pinto 
1630c22a3f48SJoao Pinto 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1631c22a3f48SJoao Pinto 	}
16327ac6653aSJeff Kirsher 
163371fedb01SJoao Pinto 	return 0;
163471fedb01SJoao Pinto }
163571fedb01SJoao Pinto 
163671fedb01SJoao Pinto /**
163771fedb01SJoao Pinto  * init_dma_desc_rings - init the RX/TX descriptor rings
163871fedb01SJoao Pinto  * @dev: net device structure
163971fedb01SJoao Pinto  * @flags: gfp flag.
164071fedb01SJoao Pinto  * Description: this function initializes the DMA RX/TX descriptors
164171fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
164271fedb01SJoao Pinto  * modes.
164371fedb01SJoao Pinto  */
164471fedb01SJoao Pinto static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
164571fedb01SJoao Pinto {
164671fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
164771fedb01SJoao Pinto 	int ret;
164871fedb01SJoao Pinto 
164971fedb01SJoao Pinto 	ret = init_dma_rx_desc_rings(dev, flags);
165071fedb01SJoao Pinto 	if (ret)
165171fedb01SJoao Pinto 		return ret;
165271fedb01SJoao Pinto 
165371fedb01SJoao Pinto 	ret = init_dma_tx_desc_rings(dev);
165471fedb01SJoao Pinto 
16555bacd778SLABBE Corentin 	stmmac_clear_descriptors(priv);
16567ac6653aSJeff Kirsher 
1657c24602efSGiuseppe CAVALLARO 	if (netif_msg_hw(priv))
1658c24602efSGiuseppe CAVALLARO 		stmmac_display_rings(priv);
165956329137SBartlomiej Zolnierkiewicz 
166056329137SBartlomiej Zolnierkiewicz 	return ret;
16617ac6653aSJeff Kirsher }
16627ac6653aSJeff Kirsher 
166371fedb01SJoao Pinto /**
166471fedb01SJoao Pinto  * dma_free_rx_skbufs - free RX dma buffers
166571fedb01SJoao Pinto  * @priv: private structure
166654139cf3SJoao Pinto  * @queue: RX queue index
166771fedb01SJoao Pinto  */
166854139cf3SJoao Pinto static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
16697ac6653aSJeff Kirsher {
16707ac6653aSJeff Kirsher 	int i;
16717ac6653aSJeff Kirsher 
1672aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_rx_size; i++)
167354139cf3SJoao Pinto 		stmmac_free_rx_buffer(priv, queue, i);
16747ac6653aSJeff Kirsher }
16757ac6653aSJeff Kirsher 
167671fedb01SJoao Pinto /**
167771fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
167871fedb01SJoao Pinto  * @priv: private structure
1679ce736788SJoao Pinto  * @queue: TX queue index
168071fedb01SJoao Pinto  */
1681ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
16827ac6653aSJeff Kirsher {
16837ac6653aSJeff Kirsher 	int i;
16847ac6653aSJeff Kirsher 
1685aa042f60SSong, Yoong Siang 	for (i = 0; i < priv->dma_tx_size; i++)
1686ce736788SJoao Pinto 		stmmac_free_tx_buffer(priv, queue, i);
16877ac6653aSJeff Kirsher }
16887ac6653aSJeff Kirsher 
1689732fdf0eSGiuseppe CAVALLARO /**
16904ec236c7SFugang Duan  * stmmac_free_tx_skbufs - free TX skb buffers
16914ec236c7SFugang Duan  * @priv: private structure
16924ec236c7SFugang Duan  */
16934ec236c7SFugang Duan static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
16944ec236c7SFugang Duan {
16954ec236c7SFugang Duan 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
16964ec236c7SFugang Duan 	u32 queue;
16974ec236c7SFugang Duan 
16984ec236c7SFugang Duan 	for (queue = 0; queue < tx_queue_cnt; queue++)
16994ec236c7SFugang Duan 		dma_free_tx_skbufs(priv, queue);
17004ec236c7SFugang Duan }
17014ec236c7SFugang Duan 
17024ec236c7SFugang Duan /**
170354139cf3SJoao Pinto  * free_dma_rx_desc_resources - free RX dma desc resources
170454139cf3SJoao Pinto  * @priv: private structure
170554139cf3SJoao Pinto  */
170654139cf3SJoao Pinto static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
170754139cf3SJoao Pinto {
170854139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
170954139cf3SJoao Pinto 	u32 queue;
171054139cf3SJoao Pinto 
171154139cf3SJoao Pinto 	/* Free RX queue resources */
171254139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
171354139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
171454139cf3SJoao Pinto 
171554139cf3SJoao Pinto 		/* Release the DMA RX socket buffers */
171654139cf3SJoao Pinto 		dma_free_rx_skbufs(priv, queue);
171754139cf3SJoao Pinto 
171854139cf3SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
171954139cf3SJoao Pinto 		if (!priv->extend_desc)
1720aa042f60SSong, Yoong Siang 			dma_free_coherent(priv->device, priv->dma_rx_size *
1721aa042f60SSong, Yoong Siang 					  sizeof(struct dma_desc),
172254139cf3SJoao Pinto 					  rx_q->dma_rx, rx_q->dma_rx_phy);
172354139cf3SJoao Pinto 		else
1724aa042f60SSong, Yoong Siang 			dma_free_coherent(priv->device, priv->dma_rx_size *
172554139cf3SJoao Pinto 					  sizeof(struct dma_extended_desc),
172654139cf3SJoao Pinto 					  rx_q->dma_erx, rx_q->dma_rx_phy);
172754139cf3SJoao Pinto 
17282af6106aSJose Abreu 		kfree(rx_q->buf_pool);
1729c3f812ceSJonathan Lemon 		if (rx_q->page_pool)
17302af6106aSJose Abreu 			page_pool_destroy(rx_q->page_pool);
17312af6106aSJose Abreu 	}
173254139cf3SJoao Pinto }
173354139cf3SJoao Pinto 
173454139cf3SJoao Pinto /**
1735ce736788SJoao Pinto  * free_dma_tx_desc_resources - free TX dma desc resources
1736ce736788SJoao Pinto  * @priv: private structure
1737ce736788SJoao Pinto  */
1738ce736788SJoao Pinto static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1739ce736788SJoao Pinto {
1740ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
174162242260SChristophe Jaillet 	u32 queue;
1742ce736788SJoao Pinto 
1743ce736788SJoao Pinto 	/* Free TX queue resources */
1744ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1745ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1746579a25a8SJose Abreu 		size_t size;
1747579a25a8SJose Abreu 		void *addr;
1748ce736788SJoao Pinto 
1749ce736788SJoao Pinto 		/* Release the DMA TX socket buffers */
1750ce736788SJoao Pinto 		dma_free_tx_skbufs(priv, queue);
1751ce736788SJoao Pinto 
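		/* The ring was allocated with one of three descriptor
		 * layouts (extended, enhanced/TBS or basic); recompute the
		 * matching element size so the coherent region is freed with
		 * the size it was allocated with.
		 */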
1752579a25a8SJose Abreu 		if (priv->extend_desc) {
1753579a25a8SJose Abreu 			size = sizeof(struct dma_extended_desc);
1754579a25a8SJose Abreu 			addr = tx_q->dma_etx;
1755579a25a8SJose Abreu 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1756579a25a8SJose Abreu 			size = sizeof(struct dma_edesc);
1757579a25a8SJose Abreu 			addr = tx_q->dma_entx;
1758579a25a8SJose Abreu 		} else {
1759579a25a8SJose Abreu 			size = sizeof(struct dma_desc);
1760579a25a8SJose Abreu 			addr = tx_q->dma_tx;
1761579a25a8SJose Abreu 		}
1762579a25a8SJose Abreu 
1763aa042f60SSong, Yoong Siang 		size *= priv->dma_tx_size;
1764579a25a8SJose Abreu 
1765579a25a8SJose Abreu 		dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1766ce736788SJoao Pinto 
1767ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff_dma);
1768ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff);
1769ce736788SJoao Pinto 	}
1770ce736788SJoao Pinto }
1771ce736788SJoao Pinto 
1772ce736788SJoao Pinto /**
177371fedb01SJoao Pinto  * alloc_dma_rx_desc_resources - alloc RX resources.
1774732fdf0eSGiuseppe CAVALLARO  * @priv: private structure
1775732fdf0eSGiuseppe CAVALLARO  * Description: according to which descriptor can be used (extend or basic)
1776732fdf0eSGiuseppe CAVALLARO  * this function allocates the resources for the RX path. For example, it
1777732fdf0eSGiuseppe CAVALLARO  * pre-allocates the RX buffers in order to allow the zero-copy
1778732fdf0eSGiuseppe CAVALLARO  * mechanism.
1779732fdf0eSGiuseppe CAVALLARO  */
178071fedb01SJoao Pinto static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
178109f8d696SSrinivas Kandagatla {
178254139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
17835bacd778SLABBE Corentin 	int ret = -ENOMEM;
178454139cf3SJoao Pinto 	u32 queue;
178509f8d696SSrinivas Kandagatla 
178654139cf3SJoao Pinto 	/* RX queues buffers and DMA */
178754139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
178854139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
17892af6106aSJose Abreu 		struct page_pool_params pp_params = { 0 };
17904f28bd95SThierry Reding 		unsigned int num_pages;
179154139cf3SJoao Pinto 
179254139cf3SJoao Pinto 		rx_q->queue_index = queue;
179354139cf3SJoao Pinto 		rx_q->priv_data = priv;
179454139cf3SJoao Pinto 
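		/* One page pool per RX queue, sized for a full ring of
		 * descriptors; the page order is chosen so a single
		 * allocation can hold one dma_buf_sz sized buffer.
		 */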
17952af6106aSJose Abreu 		pp_params.flags = PP_FLAG_DMA_MAP;
1796aa042f60SSong, Yoong Siang 		pp_params.pool_size = priv->dma_rx_size;
17974f28bd95SThierry Reding 		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
17984f28bd95SThierry Reding 		pp_params.order = ilog2(num_pages);
17992af6106aSJose Abreu 		pp_params.nid = dev_to_node(priv->device);
18002af6106aSJose Abreu 		pp_params.dev = priv->device;
18012af6106aSJose Abreu 		pp_params.dma_dir = DMA_FROM_DEVICE;
18025bacd778SLABBE Corentin 
18032af6106aSJose Abreu 		rx_q->page_pool = page_pool_create(&pp_params);
18042af6106aSJose Abreu 		if (IS_ERR(rx_q->page_pool)) {
18052af6106aSJose Abreu 			ret = PTR_ERR(rx_q->page_pool);
18062af6106aSJose Abreu 			rx_q->page_pool = NULL;
18072af6106aSJose Abreu 			goto err_dma;
18082af6106aSJose Abreu 		}
18092af6106aSJose Abreu 
1810aa042f60SSong, Yoong Siang 		rx_q->buf_pool = kcalloc(priv->dma_rx_size,
1811aa042f60SSong, Yoong Siang 					 sizeof(*rx_q->buf_pool),
18125bacd778SLABBE Corentin 					 GFP_KERNEL);
18132af6106aSJose Abreu 		if (!rx_q->buf_pool)
181454139cf3SJoao Pinto 			goto err_dma;
18155bacd778SLABBE Corentin 
18165bacd778SLABBE Corentin 		if (priv->extend_desc) {
1817750afb08SLuis Chamberlain 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1818aa042f60SSong, Yoong Siang 							   priv->dma_rx_size *
1819aa042f60SSong, Yoong Siang 							   sizeof(struct dma_extended_desc),
182054139cf3SJoao Pinto 							   &rx_q->dma_rx_phy,
18215bacd778SLABBE Corentin 							   GFP_KERNEL);
182254139cf3SJoao Pinto 			if (!rx_q->dma_erx)
18235bacd778SLABBE Corentin 				goto err_dma;
18245bacd778SLABBE Corentin 
182571fedb01SJoao Pinto 		} else {
1826750afb08SLuis Chamberlain 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1827aa042f60SSong, Yoong Siang 							  priv->dma_rx_size *
1828aa042f60SSong, Yoong Siang 							  sizeof(struct dma_desc),
182954139cf3SJoao Pinto 							  &rx_q->dma_rx_phy,
183071fedb01SJoao Pinto 							  GFP_KERNEL);
183154139cf3SJoao Pinto 			if (!rx_q->dma_rx)
183271fedb01SJoao Pinto 				goto err_dma;
183371fedb01SJoao Pinto 		}
183454139cf3SJoao Pinto 	}
183571fedb01SJoao Pinto 
183671fedb01SJoao Pinto 	return 0;
183771fedb01SJoao Pinto 
183871fedb01SJoao Pinto err_dma:
183954139cf3SJoao Pinto 	free_dma_rx_desc_resources(priv);
184054139cf3SJoao Pinto 
184171fedb01SJoao Pinto 	return ret;
184271fedb01SJoao Pinto }
184371fedb01SJoao Pinto 
184471fedb01SJoao Pinto /**
184571fedb01SJoao Pinto  * alloc_dma_tx_desc_resources - alloc TX resources.
184671fedb01SJoao Pinto  * @priv: private structure
184771fedb01SJoao Pinto  * Description: according to which descriptor can be used (extend or basic)
184871fedb01SJoao Pinto  * this function allocates the resources for the TX path: the descriptor
184971fedb01SJoao Pinto  * rings and the per-descriptor skb bookkeeping arrays used on
185071fedb01SJoao Pinto  * transmission.
185171fedb01SJoao Pinto  */
185271fedb01SJoao Pinto static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
185371fedb01SJoao Pinto {
1854ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
185571fedb01SJoao Pinto 	int ret = -ENOMEM;
1856ce736788SJoao Pinto 	u32 queue;
185771fedb01SJoao Pinto 
1858ce736788SJoao Pinto 	/* TX queues buffers and DMA */
1859ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1860ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1861579a25a8SJose Abreu 		size_t size;
1862579a25a8SJose Abreu 		void *addr;
1863ce736788SJoao Pinto 
1864ce736788SJoao Pinto 		tx_q->queue_index = queue;
1865ce736788SJoao Pinto 		tx_q->priv_data = priv;
1866ce736788SJoao Pinto 
1867aa042f60SSong, Yoong Siang 		tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
1868ce736788SJoao Pinto 					      sizeof(*tx_q->tx_skbuff_dma),
186971fedb01SJoao Pinto 					      GFP_KERNEL);
1870ce736788SJoao Pinto 		if (!tx_q->tx_skbuff_dma)
187162242260SChristophe Jaillet 			goto err_dma;
187271fedb01SJoao Pinto 
1873aa042f60SSong, Yoong Siang 		tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
1874ce736788SJoao Pinto 					  sizeof(struct sk_buff *),
187571fedb01SJoao Pinto 					  GFP_KERNEL);
1876ce736788SJoao Pinto 		if (!tx_q->tx_skbuff)
187762242260SChristophe Jaillet 			goto err_dma;
187871fedb01SJoao Pinto 
1879579a25a8SJose Abreu 		if (priv->extend_desc)
1880579a25a8SJose Abreu 			size = sizeof(struct dma_extended_desc);
1881579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1882579a25a8SJose Abreu 			size = sizeof(struct dma_edesc);
1883579a25a8SJose Abreu 		else
1884579a25a8SJose Abreu 			size = sizeof(struct dma_desc);
1885579a25a8SJose Abreu 
1886aa042f60SSong, Yoong Siang 		size *= priv->dma_tx_size;
1887579a25a8SJose Abreu 
1888579a25a8SJose Abreu 		addr = dma_alloc_coherent(priv->device, size,
1889579a25a8SJose Abreu 					  &tx_q->dma_tx_phy, GFP_KERNEL);
1890579a25a8SJose Abreu 		if (!addr)
189162242260SChristophe Jaillet 			goto err_dma;
1892579a25a8SJose Abreu 
1893579a25a8SJose Abreu 		if (priv->extend_desc)
1894579a25a8SJose Abreu 			tx_q->dma_etx = addr;
1895579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1896579a25a8SJose Abreu 			tx_q->dma_entx = addr;
1897579a25a8SJose Abreu 		else
1898579a25a8SJose Abreu 			tx_q->dma_tx = addr;
18995bacd778SLABBE Corentin 	}
19005bacd778SLABBE Corentin 
19015bacd778SLABBE Corentin 	return 0;
19025bacd778SLABBE Corentin 
190362242260SChristophe Jaillet err_dma:
1904ce736788SJoao Pinto 	free_dma_tx_desc_resources(priv);
190509f8d696SSrinivas Kandagatla 	return ret;
19065bacd778SLABBE Corentin }
190709f8d696SSrinivas Kandagatla 
190871fedb01SJoao Pinto /**
190971fedb01SJoao Pinto  * alloc_dma_desc_resources - alloc TX/RX resources.
191071fedb01SJoao Pinto  * @priv: private structure
191171fedb01SJoao Pinto  * Description: according to which descriptor can be used (extend or basic)
191271fedb01SJoao Pinto  * this function allocates the resources for TX and RX paths. In case of
191371fedb01SJoao Pinto  * reception, for example, it pre-allocates the RX buffers in order to
191471fedb01SJoao Pinto  * allow the zero-copy mechanism.
191571fedb01SJoao Pinto  */
191671fedb01SJoao Pinto static int alloc_dma_desc_resources(struct stmmac_priv *priv)
19175bacd778SLABBE Corentin {
191854139cf3SJoao Pinto 	/* RX Allocation */
191971fedb01SJoao Pinto 	int ret = alloc_dma_rx_desc_resources(priv);
192071fedb01SJoao Pinto 
192171fedb01SJoao Pinto 	if (ret)
192271fedb01SJoao Pinto 		return ret;
192371fedb01SJoao Pinto 
192471fedb01SJoao Pinto 	ret = alloc_dma_tx_desc_resources(priv);
192571fedb01SJoao Pinto 
192671fedb01SJoao Pinto 	return ret;
192771fedb01SJoao Pinto }
192871fedb01SJoao Pinto 
192971fedb01SJoao Pinto /**
193071fedb01SJoao Pinto  * free_dma_desc_resources - free dma desc resources
193171fedb01SJoao Pinto  * @priv: private structure
193271fedb01SJoao Pinto  */
193371fedb01SJoao Pinto static void free_dma_desc_resources(struct stmmac_priv *priv)
193471fedb01SJoao Pinto {
193571fedb01SJoao Pinto 	/* Release the DMA RX socket buffers */
193671fedb01SJoao Pinto 	free_dma_rx_desc_resources(priv);
193771fedb01SJoao Pinto 
193871fedb01SJoao Pinto 	/* Release the DMA TX socket buffers */
193971fedb01SJoao Pinto 	free_dma_tx_desc_resources(priv);
194071fedb01SJoao Pinto }
194171fedb01SJoao Pinto 
194271fedb01SJoao Pinto /**
19439eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
19449eb12474Sjpinto  *  @priv: driver private structure
19459eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
19469eb12474Sjpinto  */
19479eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
19489eb12474Sjpinto {
19494f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
19504f6046f5SJoao Pinto 	int queue;
19514f6046f5SJoao Pinto 	u8 mode;
19529eb12474Sjpinto 
19534f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
19544f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1955c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
19564f6046f5SJoao Pinto 	}
19579eb12474Sjpinto }
19589eb12474Sjpinto 
19599eb12474Sjpinto /**
1960ae4f0d46SJoao Pinto  * stmmac_start_rx_dma - start RX DMA channel
1961ae4f0d46SJoao Pinto  * @priv: driver private structure
1962ae4f0d46SJoao Pinto  * @chan: RX channel index
1963ae4f0d46SJoao Pinto  * Description:
1964ae4f0d46SJoao Pinto  * This starts a RX DMA channel
1965ae4f0d46SJoao Pinto  */
1966ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1967ae4f0d46SJoao Pinto {
1968ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1969a4e887faSJose Abreu 	stmmac_start_rx(priv, priv->ioaddr, chan);
1970ae4f0d46SJoao Pinto }
1971ae4f0d46SJoao Pinto 
1972ae4f0d46SJoao Pinto /**
1973ae4f0d46SJoao Pinto  * stmmac_start_tx_dma - start TX DMA channel
1974ae4f0d46SJoao Pinto  * @priv: driver private structure
1975ae4f0d46SJoao Pinto  * @chan: TX channel index
1976ae4f0d46SJoao Pinto  * Description:
1977ae4f0d46SJoao Pinto  * This starts a TX DMA channel
1978ae4f0d46SJoao Pinto  */
1979ae4f0d46SJoao Pinto static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1980ae4f0d46SJoao Pinto {
1981ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1982a4e887faSJose Abreu 	stmmac_start_tx(priv, priv->ioaddr, chan);
1983ae4f0d46SJoao Pinto }
1984ae4f0d46SJoao Pinto 
1985ae4f0d46SJoao Pinto /**
1986ae4f0d46SJoao Pinto  * stmmac_stop_rx_dma - stop RX DMA channel
1987ae4f0d46SJoao Pinto  * @priv: driver private structure
1988ae4f0d46SJoao Pinto  * @chan: RX channel index
1989ae4f0d46SJoao Pinto  * Description:
1990ae4f0d46SJoao Pinto  * This stops a RX DMA channel
1991ae4f0d46SJoao Pinto  */
1992ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1993ae4f0d46SJoao Pinto {
1994ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1995a4e887faSJose Abreu 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1996ae4f0d46SJoao Pinto }
1997ae4f0d46SJoao Pinto 
1998ae4f0d46SJoao Pinto /**
1999ae4f0d46SJoao Pinto  * stmmac_stop_tx_dma - stop TX DMA channel
2000ae4f0d46SJoao Pinto  * @priv: driver private structure
2001ae4f0d46SJoao Pinto  * @chan: TX channel index
2002ae4f0d46SJoao Pinto  * Description:
2003ae4f0d46SJoao Pinto  * This stops a TX DMA channel
2004ae4f0d46SJoao Pinto  */
2005ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2006ae4f0d46SJoao Pinto {
2007ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2008a4e887faSJose Abreu 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2009ae4f0d46SJoao Pinto }
2010ae4f0d46SJoao Pinto 
2011ae4f0d46SJoao Pinto /**
2012ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
2013ae4f0d46SJoao Pinto  * @priv: driver private structure
2014ae4f0d46SJoao Pinto  * Description:
2015ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
2016ae4f0d46SJoao Pinto  */
2017ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
2018ae4f0d46SJoao Pinto {
2019ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2020ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2021ae4f0d46SJoao Pinto 	u32 chan = 0;
2022ae4f0d46SJoao Pinto 
2023ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2024ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
2025ae4f0d46SJoao Pinto 
2026ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2027ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
2028ae4f0d46SJoao Pinto }
2029ae4f0d46SJoao Pinto 
2030ae4f0d46SJoao Pinto /**
2031ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2032ae4f0d46SJoao Pinto  * @priv: driver private structure
2033ae4f0d46SJoao Pinto  * Description:
2034ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
2035ae4f0d46SJoao Pinto  */
2036ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2037ae4f0d46SJoao Pinto {
2038ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2039ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2040ae4f0d46SJoao Pinto 	u32 chan = 0;
2041ae4f0d46SJoao Pinto 
2042ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2043ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
2044ae4f0d46SJoao Pinto 
2045ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2046ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
2047ae4f0d46SJoao Pinto }
2048ae4f0d46SJoao Pinto 
2049ae4f0d46SJoao Pinto /**
20507ac6653aSJeff Kirsher  *  stmmac_dma_operation_mode - HW DMA operation mode
205132ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
2052732fdf0eSGiuseppe CAVALLARO  *  Description: it is used for configuring the DMA operation mode register in
2053732fdf0eSGiuseppe CAVALLARO  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
20547ac6653aSJeff Kirsher  */
20557ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
20567ac6653aSJeff Kirsher {
20576deee222SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
20586deee222SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2059f88203a2SVince Bridgers 	int rxfifosz = priv->plat->rx_fifo_size;
206052a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
20616deee222SJoao Pinto 	u32 txmode = 0;
20626deee222SJoao Pinto 	u32 rxmode = 0;
20636deee222SJoao Pinto 	u32 chan = 0;
2064a0daae13SJose Abreu 	u8 qmode = 0;
2065f88203a2SVince Bridgers 
206611fbf811SThierry Reding 	if (rxfifosz == 0)
206711fbf811SThierry Reding 		rxfifosz = priv->dma_cap.rx_fifo_size;
206852a76235SJose Abreu 	if (txfifosz == 0)
206952a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
207052a76235SJose Abreu 
207152a76235SJose Abreu 	/* Adjust for real per queue fifo size */
207252a76235SJose Abreu 	rxfifosz /= rx_channels_count;
207352a76235SJose Abreu 	txfifosz /= tx_channels_count;
207411fbf811SThierry Reding 
20756deee222SJoao Pinto 	if (priv->plat->force_thresh_dma_mode) {
20766deee222SJoao Pinto 		txmode = tc;
20776deee222SJoao Pinto 		rxmode = tc;
20786deee222SJoao Pinto 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
20797ac6653aSJeff Kirsher 		/*
20807ac6653aSJeff Kirsher 		 * In case of GMAC, SF mode can be enabled
20817ac6653aSJeff Kirsher 		 * to perform the TX COE in HW. This depends on:
20827ac6653aSJeff Kirsher 		 * 1) TX COE if actually supported
20837ac6653aSJeff Kirsher 		 * 2) There is no bugged Jumbo frame support
20847ac6653aSJeff Kirsher 		 *    that needs to not insert csum in the TDES.
20857ac6653aSJeff Kirsher 		 */
20866deee222SJoao Pinto 		txmode = SF_DMA_MODE;
20876deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
2088b2dec116SSonic Zhang 		priv->xstats.threshold = SF_DMA_MODE;
20896deee222SJoao Pinto 	} else {
20906deee222SJoao Pinto 		txmode = tc;
20916deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
20926deee222SJoao Pinto 	}
20936deee222SJoao Pinto 
20946deee222SJoao Pinto 	/* configure all channels */
2095a0daae13SJose Abreu 	for (chan = 0; chan < rx_channels_count; chan++) {
2096a0daae13SJose Abreu 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
20976deee222SJoao Pinto 
2098a4e887faSJose Abreu 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2099a0daae13SJose Abreu 				rxfifosz, qmode);
21004205c88eSJose Abreu 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
21014205c88eSJose Abreu 				chan);
2102a0daae13SJose Abreu 	}
2103a0daae13SJose Abreu 
2104a0daae13SJose Abreu 	for (chan = 0; chan < tx_channels_count; chan++) {
2105a0daae13SJose Abreu 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2106a0daae13SJose Abreu 
2107a4e887faSJose Abreu 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2108a0daae13SJose Abreu 				txfifosz, qmode);
2109a0daae13SJose Abreu 	}
21107ac6653aSJeff Kirsher }
21117ac6653aSJeff Kirsher 
21127ac6653aSJeff Kirsher /**
2113732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_clean - to manage the transmission completion
211432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
2115d0ea5cbdSJesse Brandeburg  * @budget: napi budget limiting this function's packet handling
2116ce736788SJoao Pinto  * @queue: TX queue index
2117732fdf0eSGiuseppe CAVALLARO  * Description: it reclaims the transmit resources after transmission completes.
21187ac6653aSJeff Kirsher  */
21198fce3331SJose Abreu static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
21207ac6653aSJeff Kirsher {
2121ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
212238979574SBeniamino Galvani 	unsigned int bytes_compl = 0, pkts_compl = 0;
21238fce3331SJose Abreu 	unsigned int entry, count = 0;
21247ac6653aSJeff Kirsher 
21258fce3331SJose Abreu 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2126a9097a96SGiuseppe CAVALLARO 
21279125cdd1SGiuseppe CAVALLARO 	priv->xstats.tx_clean++;
21289125cdd1SGiuseppe CAVALLARO 
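	/* Walk the ring from dirty_tx towards cur_tx, releasing completed
	 * descriptors; the NAPI budget bounds how many entries are reclaimed
	 * per call.
	 */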
21298d5f4b07SBernd Edlinger 	entry = tx_q->dirty_tx;
21308fce3331SJose Abreu 	while ((entry != tx_q->cur_tx) && (count < budget)) {
2131ce736788SJoao Pinto 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
2132c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
2133c363b658SFabrice Gasnier 		int status;
2134c24602efSGiuseppe CAVALLARO 
2135c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
2136ce736788SJoao Pinto 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2137579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2138579a25a8SJose Abreu 			p = &tx_q->dma_entx[entry].basic;
2139c24602efSGiuseppe CAVALLARO 		else
2140ce736788SJoao Pinto 			p = tx_q->dma_tx + entry;
21417ac6653aSJeff Kirsher 
214242de047dSJose Abreu 		status = stmmac_tx_status(priv, &priv->dev->stats,
214342de047dSJose Abreu 				&priv->xstats, p, priv->ioaddr);
2144c363b658SFabrice Gasnier 		/* Check if the descriptor is owned by the DMA */
2145c363b658SFabrice Gasnier 		if (unlikely(status & tx_dma_own))
2146c363b658SFabrice Gasnier 			break;
2147c363b658SFabrice Gasnier 
21488fce3331SJose Abreu 		count++;
21498fce3331SJose Abreu 
2150a6b25da5SNiklas Cassel 		/* Make sure descriptor fields are read after reading
2151a6b25da5SNiklas Cassel 		 * the own bit.
2152a6b25da5SNiklas Cassel 		 */
2153a6b25da5SNiklas Cassel 		dma_rmb();
2154a6b25da5SNiklas Cassel 
2155c363b658SFabrice Gasnier 		/* Just consider the last segment and ...*/
2156c363b658SFabrice Gasnier 		if (likely(!(status & tx_not_ls))) {
2157c363b658SFabrice Gasnier 			/* ... verify the status error condition */
2158c363b658SFabrice Gasnier 			if (unlikely(status & tx_err)) {
2159c363b658SFabrice Gasnier 				priv->dev->stats.tx_errors++;
2160c363b658SFabrice Gasnier 			} else {
21617ac6653aSJeff Kirsher 				priv->dev->stats.tx_packets++;
21627ac6653aSJeff Kirsher 				priv->xstats.tx_pkt_n++;
2163c363b658SFabrice Gasnier 			}
2164ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_tx_hwtstamp(priv, p, skb);
21657ac6653aSJeff Kirsher 		}
21667ac6653aSJeff Kirsher 
2167ce736788SJoao Pinto 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
2168ce736788SJoao Pinto 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2169362b37beSGiuseppe CAVALLARO 				dma_unmap_page(priv->device,
2170ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].buf,
2171ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].len,
21727ac6653aSJeff Kirsher 					       DMA_TO_DEVICE);
2173362b37beSGiuseppe CAVALLARO 			else
2174362b37beSGiuseppe CAVALLARO 				dma_unmap_single(priv->device,
2175ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].buf,
2176ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].len,
2177362b37beSGiuseppe CAVALLARO 						 DMA_TO_DEVICE);
2178ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].buf = 0;
2179ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].len = 0;
2180ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2181cf32deecSRayagond Kokatanur 		}
2182f748be53SAlexandre TORGUE 
21832c520b1cSJose Abreu 		stmmac_clean_desc3(priv, tx_q, p);
2184f748be53SAlexandre TORGUE 
2185ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2186ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
21877ac6653aSJeff Kirsher 
21887ac6653aSJeff Kirsher 		if (likely(skb != NULL)) {
218938979574SBeniamino Galvani 			pkts_compl++;
219038979574SBeniamino Galvani 			bytes_compl += skb->len;
21917c565c33SEric W. Biederman 			dev_consume_skb_any(skb);
2192ce736788SJoao Pinto 			tx_q->tx_skbuff[entry] = NULL;
21937ac6653aSJeff Kirsher 		}
21947ac6653aSJeff Kirsher 
219542de047dSJose Abreu 		stmmac_release_tx_desc(priv, p, priv->mode);
21967ac6653aSJeff Kirsher 
2197aa042f60SSong, Yoong Siang 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
21987ac6653aSJeff Kirsher 	}
2199ce736788SJoao Pinto 	tx_q->dirty_tx = entry;
220038979574SBeniamino Galvani 
2201c22a3f48SJoao Pinto 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2202c22a3f48SJoao Pinto 				  pkts_compl, bytes_compl);
220338979574SBeniamino Galvani 
2204c22a3f48SJoao Pinto 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2205c22a3f48SJoao Pinto 								queue))) &&
2206aa042f60SSong, Yoong Siang 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2207c22a3f48SJoao Pinto 
2208b3e51069SLABBE Corentin 		netif_dbg(priv, tx_done, priv->dev,
2209b3e51069SLABBE Corentin 			  "%s: restart transmit\n", __func__);
2210c22a3f48SJoao Pinto 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
22117ac6653aSJeff Kirsher 	}
2212d765955dSGiuseppe CAVALLARO 
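	/* With TX completions processed, try to re-enter LPI mode and re-arm
	 * the EEE timer (only when software timer based EEE is in use).
	 */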
2213be1c7eaeSVineetha G. Jaya Kumaran 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2214be1c7eaeSVineetha G. Jaya Kumaran 	    priv->eee_sw_timer_en) {
2215d765955dSGiuseppe CAVALLARO 		stmmac_enable_eee_mode(priv);
2216388e201dSVineetha G. Jaya Kumaran 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2217d765955dSGiuseppe CAVALLARO 	}
22188fce3331SJose Abreu 
22194ccb4585SJose Abreu 	/* We still have pending packets: re-arm the timer for another pass */
22204ccb4585SJose Abreu 	if (tx_q->dirty_tx != tx_q->cur_tx)
2221d5a05e69SVincent Whitchurch 		hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer),
2222d5a05e69SVincent Whitchurch 			      HRTIMER_MODE_REL);
22234ccb4585SJose Abreu 
22248fce3331SJose Abreu 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
22258fce3331SJose Abreu 
22268fce3331SJose Abreu 	return count;
22277ac6653aSJeff Kirsher }
22287ac6653aSJeff Kirsher 
22297ac6653aSJeff Kirsher /**
2230732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
223132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
22325bacd778SLABBE Corentin  * @chan: channel index
22337ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
2234732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
22357ac6653aSJeff Kirsher  */
22365bacd778SLABBE Corentin static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
22377ac6653aSJeff Kirsher {
2238ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2239ce736788SJoao Pinto 
2240c22a3f48SJoao Pinto 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
22417ac6653aSJeff Kirsher 
2242ae4f0d46SJoao Pinto 	stmmac_stop_tx_dma(priv, chan);
2243ce736788SJoao Pinto 	dma_free_tx_skbufs(priv, chan);
2244579a25a8SJose Abreu 	stmmac_clear_tx_descriptors(priv, chan);
2245ce736788SJoao Pinto 	tx_q->dirty_tx = 0;
2246ce736788SJoao Pinto 	tx_q->cur_tx = 0;
22478d212a9eSNiklas Cassel 	tx_q->mss = 0;
2248c22a3f48SJoao Pinto 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2249f421031eSJongsung Kim 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2250f421031eSJongsung Kim 			    tx_q->dma_tx_phy, chan);
2251ae4f0d46SJoao Pinto 	stmmac_start_tx_dma(priv, chan);
22527ac6653aSJeff Kirsher 
22537ac6653aSJeff Kirsher 	priv->dev->stats.tx_errors++;
2254c22a3f48SJoao Pinto 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
22557ac6653aSJeff Kirsher }
22567ac6653aSJeff Kirsher 
225732ceabcaSGiuseppe CAVALLARO /**
22586deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
22596deee222SJoao Pinto  *  @priv: driver private structure
22606deee222SJoao Pinto  *  @txmode: TX operating mode
22616deee222SJoao Pinto  *  @rxmode: RX operating mode
22626deee222SJoao Pinto  *  @chan: channel index
22636deee222SJoao Pinto  *  Description: it is used for configuring the DMA operation mode at
22646deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
22656deee222SJoao Pinto  *  mode.
22666deee222SJoao Pinto  */
22676deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
22686deee222SJoao Pinto 					  u32 rxmode, u32 chan)
22696deee222SJoao Pinto {
2270a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2271a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
227252a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
227352a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
22746deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
227552a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
22766deee222SJoao Pinto 
22776deee222SJoao Pinto 	if (rxfifosz == 0)
22786deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
227952a76235SJose Abreu 	if (txfifosz == 0)
228052a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
228152a76235SJose Abreu 
228252a76235SJose Abreu 	/* Adjust for real per queue fifo size */
228352a76235SJose Abreu 	rxfifosz /= rx_channels_count;
228452a76235SJose Abreu 	txfifosz /= tx_channels_count;
22856deee222SJoao Pinto 
2286ab0204e3SJose Abreu 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2287ab0204e3SJose Abreu 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
22886deee222SJoao Pinto }
22896deee222SJoao Pinto 
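/* Check the safety-feature interrupt status: any reported error other than
 * -EINVAL (safety features not applicable) escalates to stmmac_global_err()
 * and makes this function return true.
 */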
22908bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
22918bf993a5SJose Abreu {
229263a550fcSJose Abreu 	int ret;
22938bf993a5SJose Abreu 
2294c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
22958bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2296c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
22978bf993a5SJose Abreu 		stmmac_global_err(priv);
2298c10d4c82SJose Abreu 		return true;
2299c10d4c82SJose Abreu 	}
2300c10d4c82SJose Abreu 
2301c10d4c82SJose Abreu 	return false;
23028bf993a5SJose Abreu }
23038bf993a5SJose Abreu 
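/* Read the per-channel DMA interrupt status and, when RX and/or TX work is
 * pending, mask the corresponding DMA interrupt and schedule the matching
 * NAPI instance.
 */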
23048fce3331SJose Abreu static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
23058fce3331SJose Abreu {
23068fce3331SJose Abreu 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
23078fce3331SJose Abreu 						 &priv->xstats, chan);
23088fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[chan];
2309021bd5e3SJose Abreu 	unsigned long flags;
23108fce3331SJose Abreu 
23114ccb4585SJose Abreu 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
23123ba07debSJose Abreu 		if (napi_schedule_prep(&ch->rx_napi)) {
2313021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2314021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2315021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
23161f02efd1SSeb Laveze 			__napi_schedule(&ch->rx_napi);
23173ba07debSJose Abreu 		}
23184ccb4585SJose Abreu 	}
23194ccb4585SJose Abreu 
2320021bd5e3SJose Abreu 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2321021bd5e3SJose Abreu 		if (napi_schedule_prep(&ch->tx_napi)) {
2322021bd5e3SJose Abreu 			spin_lock_irqsave(&ch->lock, flags);
2323021bd5e3SJose Abreu 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2324021bd5e3SJose Abreu 			spin_unlock_irqrestore(&ch->lock, flags);
23251f02efd1SSeb Laveze 			__napi_schedule(&ch->tx_napi);
2326021bd5e3SJose Abreu 		}
2327021bd5e3SJose Abreu 	}
23288fce3331SJose Abreu 
23298fce3331SJose Abreu 	return status;
23308fce3331SJose Abreu }
23318fce3331SJose Abreu 
23326deee222SJoao Pinto /**
2333732fdf0eSGiuseppe CAVALLARO  * stmmac_dma_interrupt - DMA ISR
233432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
233532ceabcaSGiuseppe CAVALLARO  * Description: this is the DMA ISR. It is called by the main ISR.
2336732fdf0eSGiuseppe CAVALLARO  * It calls the dwmac dma routine and schedules the poll method when there
2337732fdf0eSGiuseppe CAVALLARO  * is work to be done.
233832ceabcaSGiuseppe CAVALLARO  */
23397ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv)
23407ac6653aSJeff Kirsher {
2341d62a107aSJoao Pinto 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
23425a6a0445SNiklas Cassel 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
23435a6a0445SNiklas Cassel 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
23445a6a0445SNiklas Cassel 				tx_channel_count : rx_channel_count;
2345d62a107aSJoao Pinto 	u32 chan;
23468ac60ffbSKees Cook 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
23478ac60ffbSKees Cook 
23488ac60ffbSKees Cook 	/* Make sure we never check beyond our status buffer. */
23498ac60ffbSKees Cook 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
23508ac60ffbSKees Cook 		channels_to_check = ARRAY_SIZE(status);
235168e5cfafSJoao Pinto 
23525a6a0445SNiklas Cassel 	for (chan = 0; chan < channels_to_check; chan++)
23538fce3331SJose Abreu 		status[chan] = stmmac_napi_check(priv, chan);
2354d62a107aSJoao Pinto 
23555a6a0445SNiklas Cassel 	for (chan = 0; chan < tx_channel_count; chan++) {
23565a6a0445SNiklas Cassel 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
23577ac6653aSJeff Kirsher 			/* Try to bump up the dma threshold on this failure */
2358b2dec116SSonic Zhang 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2359b2dec116SSonic Zhang 			    (tc <= 256)) {
23607ac6653aSJeff Kirsher 				tc += 64;
2361c405abe2SSonic Zhang 				if (priv->plat->force_thresh_dma_mode)
2362d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2363d62a107aSJoao Pinto 								      tc,
2364d62a107aSJoao Pinto 								      tc,
2365d62a107aSJoao Pinto 								      chan);
2366c405abe2SSonic Zhang 				else
2367d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2368d62a107aSJoao Pinto 								    tc,
2369d62a107aSJoao Pinto 								    SF_DMA_MODE,
2370d62a107aSJoao Pinto 								    chan);
23717ac6653aSJeff Kirsher 				priv->xstats.threshold = tc;
23727ac6653aSJeff Kirsher 			}
23735a6a0445SNiklas Cassel 		} else if (unlikely(status[chan] == tx_hard_error)) {
23744e593262SJoao Pinto 			stmmac_tx_err(priv, chan);
23757ac6653aSJeff Kirsher 		}
2376d62a107aSJoao Pinto 	}
2377d62a107aSJoao Pinto }
23787ac6653aSJeff Kirsher 
237932ceabcaSGiuseppe CAVALLARO /**
238032ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
238132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
238232ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq since the counters are managed in SW.
238332ceabcaSGiuseppe CAVALLARO  */
23841c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
23851c901a46SGiuseppe CAVALLARO {
23861c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
23871c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
23881c901a46SGiuseppe CAVALLARO 
23893b1dd2c5SJose Abreu 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
23904f795b25SGiuseppe CAVALLARO 
23914f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
23923b1dd2c5SJose Abreu 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
23931c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
23944f795b25SGiuseppe CAVALLARO 	} else
239538ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
23961c901a46SGiuseppe CAVALLARO }
23971c901a46SGiuseppe CAVALLARO 
2398732fdf0eSGiuseppe CAVALLARO /**
2399732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
240032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
240119e30c14SGiuseppe CAVALLARO  * Description:
240219e30c14SGiuseppe CAVALLARO  *  new GMAC chip generations have a new register to indicate the
2403e7434821SGiuseppe CAVALLARO  *  presence of the optional feature/functions.
240419e30c14SGiuseppe CAVALLARO  *  This can also be used to override the values passed through the
240519e30c14SGiuseppe CAVALLARO  *  platform code, which is necessary for old MAC10/100 and GMAC chips.
2406e7434821SGiuseppe CAVALLARO  */
2407e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2408e7434821SGiuseppe CAVALLARO {
2409a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2410e7434821SGiuseppe CAVALLARO }
2411e7434821SGiuseppe CAVALLARO 
241232ceabcaSGiuseppe CAVALLARO /**
2413732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
241432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
241532ceabcaSGiuseppe CAVALLARO  * Description:
241632ceabcaSGiuseppe CAVALLARO  * it verifies that the MAC address is valid; if it is not, it
241732ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
241832ceabcaSGiuseppe CAVALLARO  */
2419bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2420bfab27a1SGiuseppe CAVALLARO {
2421bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2422c10d4c82SJose Abreu 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2423bfab27a1SGiuseppe CAVALLARO 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2424f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
2425af649352SJisheng Zhang 		dev_info(priv->device, "device MAC address %pM\n",
2426bfab27a1SGiuseppe CAVALLARO 			 priv->dev->dev_addr);
2427bfab27a1SGiuseppe CAVALLARO 	}
2428c88460b7SHans de Goede }
2429bfab27a1SGiuseppe CAVALLARO 
243032ceabcaSGiuseppe CAVALLARO /**
2431732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
243232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
243332ceabcaSGiuseppe CAVALLARO  * Description:
243432ceabcaSGiuseppe CAVALLARO  * It inits the DMA by invoking the specific MAC/GMAC callback.
243532ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
243632ceabcaSGiuseppe CAVALLARO  * in case these are not passed, a default is kept for the MAC or GMAC.
243732ceabcaSGiuseppe CAVALLARO  */
24380f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv)
24390f1f88a8SGiuseppe CAVALLARO {
244047f2a9ceSJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
244147f2a9ceSJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
244224aaed0cSJose Abreu 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
244354139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q;
2444ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
244547f2a9ceSJoao Pinto 	u32 chan = 0;
2446c24602efSGiuseppe CAVALLARO 	int atds = 0;
2447495db273SGiuseppe Cavallaro 	int ret = 0;
24480f1f88a8SGiuseppe CAVALLARO 
2449a332e2faSNiklas Cassel 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2450a332e2faSNiklas Cassel 		dev_err(priv->device, "Invalid DMA configuration\n");
245189ab75bfSNiklas Cassel 		return -EINVAL;
24520f1f88a8SGiuseppe CAVALLARO 	}
24530f1f88a8SGiuseppe CAVALLARO 
2454c24602efSGiuseppe CAVALLARO 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2455c24602efSGiuseppe CAVALLARO 		atds = 1;
2456c24602efSGiuseppe CAVALLARO 
2457a4e887faSJose Abreu 	ret = stmmac_reset(priv, priv->ioaddr);
2458495db273SGiuseppe Cavallaro 	if (ret) {
2459495db273SGiuseppe Cavallaro 		dev_err(priv->device, "Failed to reset the dma\n");
2460495db273SGiuseppe Cavallaro 		return ret;
2461495db273SGiuseppe Cavallaro 	}
2462495db273SGiuseppe Cavallaro 
24637d9e6c5aSJose Abreu 	/* DMA Configuration */
24647d9e6c5aSJose Abreu 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
24657d9e6c5aSJose Abreu 
24667d9e6c5aSJose Abreu 	if (priv->plat->axi)
24677d9e6c5aSJose Abreu 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
24687d9e6c5aSJose Abreu 
2469af8f3fb7SWeifeng Voon 	/* DMA CSR Channel configuration */
2470af8f3fb7SWeifeng Voon 	for (chan = 0; chan < dma_csr_ch; chan++)
2471af8f3fb7SWeifeng Voon 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2472af8f3fb7SWeifeng Voon 
247347f2a9ceSJoao Pinto 	/* DMA RX Channel Configuration */
247447f2a9ceSJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++) {
247554139cf3SJoao Pinto 		rx_q = &priv->rx_queue[chan];
247654139cf3SJoao Pinto 
247724aaed0cSJose Abreu 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
247824aaed0cSJose Abreu 				    rx_q->dma_rx_phy, chan);
247947f2a9ceSJoao Pinto 
248054139cf3SJoao Pinto 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2481aa042f60SSong, Yoong Siang 				     (priv->dma_rx_size *
2482aa042f60SSong, Yoong Siang 				      sizeof(struct dma_desc));
2483a4e887faSJose Abreu 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2484a4e887faSJose Abreu 				       rx_q->rx_tail_addr, chan);
248547f2a9ceSJoao Pinto 	}
248647f2a9ceSJoao Pinto 
248747f2a9ceSJoao Pinto 	/* DMA TX Channel Configuration */
248847f2a9ceSJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++) {
2489ce736788SJoao Pinto 		tx_q = &priv->tx_queue[chan];
2490ce736788SJoao Pinto 
249124aaed0cSJose Abreu 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
249224aaed0cSJose Abreu 				    tx_q->dma_tx_phy, chan);
2493f748be53SAlexandre TORGUE 
24940431100bSJose Abreu 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2495a4e887faSJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2496a4e887faSJose Abreu 				       tx_q->tx_tail_addr, chan);
249747f2a9ceSJoao Pinto 	}
249824aaed0cSJose Abreu 
2499495db273SGiuseppe Cavallaro 	return ret;
25000f1f88a8SGiuseppe CAVALLARO }
25010f1f88a8SGiuseppe CAVALLARO 
25028fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
25038fce3331SJose Abreu {
25048fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
25058fce3331SJose Abreu 
2506d5a05e69SVincent Whitchurch 	hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer),
2507d5a05e69SVincent Whitchurch 		      HRTIMER_MODE_REL);
25088fce3331SJose Abreu }
25098fce3331SJose Abreu 
2510bfab27a1SGiuseppe CAVALLARO /**
2511732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
2512d0ea5cbdSJesse Brandeburg  * @t: pointer to the hrtimer embedded in the TX queue
25139125cdd1SGiuseppe CAVALLARO  * Description:
25149125cdd1SGiuseppe CAVALLARO  * This is the timer handler that schedules the TX NAPI poll to run stmmac_tx_clean.
25159125cdd1SGiuseppe CAVALLARO  */
2516d5a05e69SVincent Whitchurch static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
25179125cdd1SGiuseppe CAVALLARO {
2518d5a05e69SVincent Whitchurch 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
25198fce3331SJose Abreu 	struct stmmac_priv *priv = tx_q->priv_data;
25208fce3331SJose Abreu 	struct stmmac_channel *ch;
25219125cdd1SGiuseppe CAVALLARO 
25228fce3331SJose Abreu 	ch = &priv->channel[tx_q->queue_index];
25238fce3331SJose Abreu 
2524021bd5e3SJose Abreu 	if (likely(napi_schedule_prep(&ch->tx_napi))) {
2525021bd5e3SJose Abreu 		unsigned long flags;
2526021bd5e3SJose Abreu 
2527021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
2528021bd5e3SJose Abreu 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2529021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
25304ccb4585SJose Abreu 		__napi_schedule(&ch->tx_napi);
2531021bd5e3SJose Abreu 	}
2532d5a05e69SVincent Whitchurch 
2533d5a05e69SVincent Whitchurch 	return HRTIMER_NORESTART;
25349125cdd1SGiuseppe CAVALLARO }
25359125cdd1SGiuseppe CAVALLARO 
25369125cdd1SGiuseppe CAVALLARO /**
2537d429b66eSJose Abreu  * stmmac_init_coalesce - init mitigation options.
253832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
25399125cdd1SGiuseppe CAVALLARO  * Description:
2540d429b66eSJose Abreu  * This inits the coalesce parameters: i.e. timer rate,
25419125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
25429125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
25439125cdd1SGiuseppe CAVALLARO  */
2544d429b66eSJose Abreu static void stmmac_init_coalesce(struct stmmac_priv *priv)
25459125cdd1SGiuseppe CAVALLARO {
25468fce3331SJose Abreu 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
25478fce3331SJose Abreu 	u32 chan;
25488fce3331SJose Abreu 
25499125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
25509125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2551d429b66eSJose Abreu 	priv->rx_coal_frames = STMMAC_RX_FRAMES;
25528fce3331SJose Abreu 
25538fce3331SJose Abreu 	for (chan = 0; chan < tx_channel_count; chan++) {
25548fce3331SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
25558fce3331SJose Abreu 
2556d5a05e69SVincent Whitchurch 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2557d5a05e69SVincent Whitchurch 		tx_q->txtimer.function = stmmac_tx_timer;
25588fce3331SJose Abreu 	}
25599125cdd1SGiuseppe CAVALLARO }
25609125cdd1SGiuseppe CAVALLARO 
25614854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
25624854ab99SJoao Pinto {
25634854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
25644854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
25654854ab99SJoao Pinto 	u32 chan;
25664854ab99SJoao Pinto 
25674854ab99SJoao Pinto 	/* set TX ring length */
25684854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2569a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2570aa042f60SSong, Yoong Siang 				       (priv->dma_tx_size - 1), chan);
25714854ab99SJoao Pinto 
25724854ab99SJoao Pinto 	/* set RX ring length */
25734854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2574a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2575aa042f60SSong, Yoong Siang 				       (priv->dma_rx_size - 1), chan);
25764854ab99SJoao Pinto }
25774854ab99SJoao Pinto 
25789125cdd1SGiuseppe CAVALLARO /**
25796a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
25806a3a7193SJoao Pinto  *  @priv: driver private structure
25816a3a7193SJoao Pinto  *  Description: It is used for setting the TX queue weights
25826a3a7193SJoao Pinto  */
25836a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
25846a3a7193SJoao Pinto {
25856a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
25866a3a7193SJoao Pinto 	u32 weight;
25876a3a7193SJoao Pinto 	u32 queue;
25886a3a7193SJoao Pinto 
25896a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
25906a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
2591c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
25926a3a7193SJoao Pinto 	}
25936a3a7193SJoao Pinto }
25946a3a7193SJoao Pinto 
25956a3a7193SJoao Pinto /**
259619d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
259719d91873SJoao Pinto  *  @priv: driver private structure
259819d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
259919d91873SJoao Pinto  */
260019d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
260119d91873SJoao Pinto {
260219d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
260319d91873SJoao Pinto 	u32 mode_to_use;
260419d91873SJoao Pinto 	u32 queue;
260519d91873SJoao Pinto 
260644781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
260744781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
260819d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
260919d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
261019d91873SJoao Pinto 			continue;
261119d91873SJoao Pinto 
2612c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
261319d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
261419d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
261519d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
261619d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
261719d91873SJoao Pinto 				queue);
261819d91873SJoao Pinto 	}
261919d91873SJoao Pinto }
262019d91873SJoao Pinto 
262119d91873SJoao Pinto /**
2622d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2623d43042f4SJoao Pinto  *  @priv: driver private structure
2624d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
2625d43042f4SJoao Pinto  */
2626d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2627d43042f4SJoao Pinto {
2628d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2629d43042f4SJoao Pinto 	u32 queue;
2630d43042f4SJoao Pinto 	u32 chan;
2631d43042f4SJoao Pinto 
2632d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2633d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
2634c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2635d43042f4SJoao Pinto 	}
2636d43042f4SJoao Pinto }
2637d43042f4SJoao Pinto 
2638d43042f4SJoao Pinto /**
2639a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2640a8f5102aSJoao Pinto  *  @priv: driver private structure
2641a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
2642a8f5102aSJoao Pinto  */
2643a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2644a8f5102aSJoao Pinto {
2645a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2646a8f5102aSJoao Pinto 	u32 queue;
2647a8f5102aSJoao Pinto 	u32 prio;
2648a8f5102aSJoao Pinto 
2649a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2650a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2651a8f5102aSJoao Pinto 			continue;
2652a8f5102aSJoao Pinto 
2653a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
2654c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2655a8f5102aSJoao Pinto 	}
2656a8f5102aSJoao Pinto }
2657a8f5102aSJoao Pinto 
2658a8f5102aSJoao Pinto /**
2659a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2660a8f5102aSJoao Pinto  *  @priv: driver private structure
2661a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
2662a8f5102aSJoao Pinto  */
2663a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2664a8f5102aSJoao Pinto {
2665a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2666a8f5102aSJoao Pinto 	u32 queue;
2667a8f5102aSJoao Pinto 	u32 prio;
2668a8f5102aSJoao Pinto 
2669a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
2670a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2671a8f5102aSJoao Pinto 			continue;
2672a8f5102aSJoao Pinto 
2673a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
2674c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2675a8f5102aSJoao Pinto 	}
2676a8f5102aSJoao Pinto }
2677a8f5102aSJoao Pinto 
2678a8f5102aSJoao Pinto /**
2679abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2680abe80fdcSJoao Pinto  *  @priv: driver private structure
2681abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
2682abe80fdcSJoao Pinto  */
2683abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2684abe80fdcSJoao Pinto {
2685abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2686abe80fdcSJoao Pinto 	u32 queue;
2687abe80fdcSJoao Pinto 	u8 packet;
2688abe80fdcSJoao Pinto 
2689abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2690abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
2691abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2692abe80fdcSJoao Pinto 			continue;
2693abe80fdcSJoao Pinto 
2694abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2695c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2696abe80fdcSJoao Pinto 	}
2697abe80fdcSJoao Pinto }
2698abe80fdcSJoao Pinto 
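/* Enable RSS only if the HW supports it, the platform allows it and
 * NETIF_F_RXHASH is set; when supported, program the RSS configuration.
 */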
269976067459SJose Abreu static void stmmac_mac_config_rss(struct stmmac_priv *priv)
270076067459SJose Abreu {
270176067459SJose Abreu 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
270276067459SJose Abreu 		priv->rss.enable = false;
270376067459SJose Abreu 		return;
270476067459SJose Abreu 	}
270576067459SJose Abreu 
270676067459SJose Abreu 	if (priv->dev->features & NETIF_F_RXHASH)
270776067459SJose Abreu 		priv->rss.enable = true;
270876067459SJose Abreu 	else
270976067459SJose Abreu 		priv->rss.enable = false;
271076067459SJose Abreu 
271176067459SJose Abreu 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
271276067459SJose Abreu 			     priv->plat->rx_queues_to_use);
271376067459SJose Abreu }
271476067459SJose Abreu 
2715abe80fdcSJoao Pinto /**
2716d0a9c9f9SJoao Pinto  *  stmmac_mtl_configuration - Configure MTL
2717d0a9c9f9SJoao Pinto  *  @priv: driver private structure
2718d0a9c9f9SJoao Pinto  *  Description: It is used for configuring MTL
2719d0a9c9f9SJoao Pinto  */
2720d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2721d0a9c9f9SJoao Pinto {
2722d0a9c9f9SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2723d0a9c9f9SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2724d0a9c9f9SJoao Pinto 
2725c10d4c82SJose Abreu 	if (tx_queues_count > 1)
27266a3a7193SJoao Pinto 		stmmac_set_tx_queue_weight(priv);
27276a3a7193SJoao Pinto 
2728d0a9c9f9SJoao Pinto 	/* Configure MTL RX algorithms */
2729c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2730c10d4c82SJose Abreu 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2731d0a9c9f9SJoao Pinto 				priv->plat->rx_sched_algorithm);
2732d0a9c9f9SJoao Pinto 
2733d0a9c9f9SJoao Pinto 	/* Configure MTL TX algorithms */
2734c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2735c10d4c82SJose Abreu 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2736d0a9c9f9SJoao Pinto 				priv->plat->tx_sched_algorithm);
2737d0a9c9f9SJoao Pinto 
273819d91873SJoao Pinto 	/* Configure CBS in AVB TX queues */
2739c10d4c82SJose Abreu 	if (tx_queues_count > 1)
274019d91873SJoao Pinto 		stmmac_configure_cbs(priv);
274119d91873SJoao Pinto 
2742d43042f4SJoao Pinto 	/* Map RX MTL to DMA channels */
2743d43042f4SJoao Pinto 	stmmac_rx_queue_dma_chan_map(priv);
2744d43042f4SJoao Pinto 
2745d0a9c9f9SJoao Pinto 	/* Enable MAC RX Queues */
2746d0a9c9f9SJoao Pinto 	stmmac_mac_enable_rx_queues(priv);
27476deee222SJoao Pinto 
2748a8f5102aSJoao Pinto 	/* Set RX priorities */
2749c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2750a8f5102aSJoao Pinto 		stmmac_mac_config_rx_queues_prio(priv);
2751a8f5102aSJoao Pinto 
2752a8f5102aSJoao Pinto 	/* Set TX priorities */
2753c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2754a8f5102aSJoao Pinto 		stmmac_mac_config_tx_queues_prio(priv);
2755abe80fdcSJoao Pinto 
2756abe80fdcSJoao Pinto 	/* Set RX routing */
2757c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2758abe80fdcSJoao Pinto 		stmmac_mac_config_rx_queues_routing(priv);
275976067459SJose Abreu 
276076067459SJose Abreu 	/* Receive Side Scaling */
276176067459SJose Abreu 	if (rx_queues_count > 1)
276276067459SJose Abreu 		stmmac_mac_config_rss(priv);
2763d0a9c9f9SJoao Pinto }
2764d0a9c9f9SJoao Pinto 
27658bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
27668bf993a5SJose Abreu {
2767c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
27688bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
2769c10d4c82SJose Abreu 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
27708bf993a5SJose Abreu 	} else {
27718bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
27728bf993a5SJose Abreu 	}
27738bf993a5SJose Abreu }
27748bf993a5SJose Abreu 
2775d0a9c9f9SJoao Pinto /**
2776732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_setup - setup mac in a usable state.
2777523f11b5SSrinivas Kandagatla  *  @dev : pointer to the device structure.
2778d0ea5cbdSJesse Brandeburg  *  @init_ptp: initialize PTP if set
2779523f11b5SSrinivas Kandagatla  *  Description:
2780732fdf0eSGiuseppe CAVALLARO  *  this is the main function to setup the HW in a usable state: the
2781732fdf0eSGiuseppe CAVALLARO  *  dma engine is reset, the core registers are configured (e.g. AXI,
2782732fdf0eSGiuseppe CAVALLARO  *  Checksum features, timers) and the DMA is ready to start receiving
2783732fdf0eSGiuseppe CAVALLARO  *  and transmitting.
2784523f11b5SSrinivas Kandagatla  *  Return value:
2785523f11b5SSrinivas Kandagatla  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2786523f11b5SSrinivas Kandagatla  *  file on failure.
2787523f11b5SSrinivas Kandagatla  */
2788fe131929SHuacai Chen static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2789523f11b5SSrinivas Kandagatla {
2790523f11b5SSrinivas Kandagatla 	struct stmmac_priv *priv = netdev_priv(dev);
27913c55d4d0SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2792146617b8SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2793146617b8SJoao Pinto 	u32 chan;
2794523f11b5SSrinivas Kandagatla 	int ret;
2795523f11b5SSrinivas Kandagatla 
2796523f11b5SSrinivas Kandagatla 	/* DMA initialization and SW reset */
2797523f11b5SSrinivas Kandagatla 	ret = stmmac_init_dma_engine(priv);
2798523f11b5SSrinivas Kandagatla 	if (ret < 0) {
279938ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
280038ddc59dSLABBE Corentin 			   __func__);
2801523f11b5SSrinivas Kandagatla 		return ret;
2802523f11b5SSrinivas Kandagatla 	}
2803523f11b5SSrinivas Kandagatla 
2804523f11b5SSrinivas Kandagatla 	/* Copy the MAC addr into the HW  */
2805c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2806523f11b5SSrinivas Kandagatla 
280702e57b9dSGiuseppe CAVALLARO 	/* PS and related bits will be programmed according to the speed */
280802e57b9dSGiuseppe CAVALLARO 	if (priv->hw->pcs) {
280902e57b9dSGiuseppe CAVALLARO 		int speed = priv->plat->mac_port_sel_speed;
281002e57b9dSGiuseppe CAVALLARO 
281102e57b9dSGiuseppe CAVALLARO 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
281202e57b9dSGiuseppe CAVALLARO 		    (speed == SPEED_1000)) {
281302e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = speed;
281402e57b9dSGiuseppe CAVALLARO 		} else {
281502e57b9dSGiuseppe CAVALLARO 			dev_warn(priv->device, "invalid port speed\n");
281602e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = 0;
281702e57b9dSGiuseppe CAVALLARO 		}
281802e57b9dSGiuseppe CAVALLARO 	}
281902e57b9dSGiuseppe CAVALLARO 
2820523f11b5SSrinivas Kandagatla 	/* Initialize the MAC Core */
2821c10d4c82SJose Abreu 	stmmac_core_init(priv, priv->hw, dev);
2822523f11b5SSrinivas Kandagatla 
2823d0a9c9f9SJoao Pinto 	/* Initialize MTL*/
2824d0a9c9f9SJoao Pinto 	stmmac_mtl_configuration(priv);
28259eb12474Sjpinto 
28268bf993a5SJose Abreu 	/* Initialize Safety Features */
28278bf993a5SJose Abreu 	stmmac_safety_feat_configuration(priv);
28288bf993a5SJose Abreu 
2829c10d4c82SJose Abreu 	ret = stmmac_rx_ipc(priv, priv->hw);
2830978aded4SGiuseppe CAVALLARO 	if (!ret) {
283138ddc59dSLABBE Corentin 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2832978aded4SGiuseppe CAVALLARO 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2833d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
2834978aded4SGiuseppe CAVALLARO 	}
2835978aded4SGiuseppe CAVALLARO 
2836523f11b5SSrinivas Kandagatla 	/* Enable the MAC Rx/Tx */
2837c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
2838523f11b5SSrinivas Kandagatla 
2839b4f0a661SJoao Pinto 	/* Set the HW DMA mode and the COE */
2840b4f0a661SJoao Pinto 	stmmac_dma_operation_mode(priv);
2841b4f0a661SJoao Pinto 
2842523f11b5SSrinivas Kandagatla 	stmmac_mmc_setup(priv);
2843523f11b5SSrinivas Kandagatla 
2844fe131929SHuacai Chen 	if (init_ptp) {
28450ad2be79SThierry Reding 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
28460ad2be79SThierry Reding 		if (ret < 0)
28470ad2be79SThierry Reding 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
28480ad2be79SThierry Reding 
2849523f11b5SSrinivas Kandagatla 		ret = stmmac_init_ptp(priv);
2850722eef28SHeiner Kallweit 		if (ret == -EOPNOTSUPP)
2851722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2852722eef28SHeiner Kallweit 		else if (ret)
2853722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP init failed\n");
2854fe131929SHuacai Chen 	}
2855523f11b5SSrinivas Kandagatla 
2856388e201dSVineetha G. Jaya Kumaran 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
2857388e201dSVineetha G. Jaya Kumaran 
2858388e201dSVineetha G. Jaya Kumaran 	/* Convert the timer from msec to usec */
2859388e201dSVineetha G. Jaya Kumaran 	if (!priv->tx_lpi_timer)
2860388e201dSVineetha G. Jaya Kumaran 		priv->tx_lpi_timer = eee_timer * 1000;
2861523f11b5SSrinivas Kandagatla 
2862a4e887faSJose Abreu 	if (priv->use_riwt) {
28634e4337ccSJose Abreu 		if (!priv->rx_riwt)
28644e4337ccSJose Abreu 			priv->rx_riwt = DEF_DMA_RIWT;
28654e4337ccSJose Abreu 
28664e4337ccSJose Abreu 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
2867523f11b5SSrinivas Kandagatla 	}
2868523f11b5SSrinivas Kandagatla 
2869c10d4c82SJose Abreu 	if (priv->hw->pcs)
2870c9ad4c10SBen Dooks (Codethink) 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
2871523f11b5SSrinivas Kandagatla 
28724854ab99SJoao Pinto 	/* set TX and RX rings length */
28734854ab99SJoao Pinto 	stmmac_set_rings_length(priv);
28744854ab99SJoao Pinto 
2875f748be53SAlexandre TORGUE 	/* Enable TSO */
2876146617b8SJoao Pinto 	if (priv->tso) {
2877146617b8SJoao Pinto 		for (chan = 0; chan < tx_cnt; chan++)
2878a4e887faSJose Abreu 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2879146617b8SJoao Pinto 	}
2880f748be53SAlexandre TORGUE 
288167afd6d1SJose Abreu 	/* Enable Split Header */
288267afd6d1SJose Abreu 	if (priv->sph && priv->hw->rx_csum) {
288367afd6d1SJose Abreu 		for (chan = 0; chan < rx_cnt; chan++)
288467afd6d1SJose Abreu 			stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
288567afd6d1SJose Abreu 	}
288667afd6d1SJose Abreu 
288730d93227SJose Abreu 	/* VLAN Tag Insertion */
288830d93227SJose Abreu 	if (priv->dma_cap.vlins)
288930d93227SJose Abreu 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
289030d93227SJose Abreu 
2891579a25a8SJose Abreu 	/* TBS */
2892579a25a8SJose Abreu 	for (chan = 0; chan < tx_cnt; chan++) {
2893579a25a8SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2894579a25a8SJose Abreu 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2895579a25a8SJose Abreu 
2896579a25a8SJose Abreu 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2897579a25a8SJose Abreu 	}
2898579a25a8SJose Abreu 
2899686cff3dSAashish Verma 	/* Configure real RX and TX queues */
2900686cff3dSAashish Verma 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
2901686cff3dSAashish Verma 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
2902686cff3dSAashish Verma 
29037d9e6c5aSJose Abreu 	/* Start the ball rolling... */
29047d9e6c5aSJose Abreu 	stmmac_start_all_dma(priv);
29057d9e6c5aSJose Abreu 
2906523f11b5SSrinivas Kandagatla 	return 0;
2907523f11b5SSrinivas Kandagatla }
2908523f11b5SSrinivas Kandagatla 
2909c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev)
2910c66f6c37SThierry Reding {
2911c66f6c37SThierry Reding 	struct stmmac_priv *priv = netdev_priv(dev);
2912c66f6c37SThierry Reding 
2913c66f6c37SThierry Reding 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2914c66f6c37SThierry Reding }
2915c66f6c37SThierry Reding 
2916523f11b5SSrinivas Kandagatla /**
29177ac6653aSJeff Kirsher  *  stmmac_open - open entry point of the driver
29187ac6653aSJeff Kirsher  *  @dev : pointer to the device structure.
29197ac6653aSJeff Kirsher  *  Description:
29207ac6653aSJeff Kirsher  *  This function is the open entry point of the driver.
29217ac6653aSJeff Kirsher  *  Return value:
29227ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
29237ac6653aSJeff Kirsher  *  file on failure.
29247ac6653aSJeff Kirsher  */
29257ac6653aSJeff Kirsher static int stmmac_open(struct net_device *dev)
29267ac6653aSJeff Kirsher {
29277ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
29285d626c87SJose Abreu 	int bfsize = 0;
29298fce3331SJose Abreu 	u32 chan;
29307ac6653aSJeff Kirsher 	int ret;
29317ac6653aSJeff Kirsher 
29325ec55823SJoakim Zhang 	ret = pm_runtime_get_sync(priv->device);
29335ec55823SJoakim Zhang 	if (ret < 0) {
29345ec55823SJoakim Zhang 		pm_runtime_put_noidle(priv->device);
29355ec55823SJoakim Zhang 		return ret;
29365ec55823SJoakim Zhang 	}
29375ec55823SJoakim Zhang 
2938a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
2939f213bbe8SJose Abreu 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
2940c62808e8SOng Boon Leong 	    priv->hw->xpcs_args.an_mode != DW_AN_C73) {
29417ac6653aSJeff Kirsher 		ret = stmmac_init_phy(dev);
2942e58bb43fSGiuseppe CAVALLARO 		if (ret) {
294338ddc59dSLABBE Corentin 			netdev_err(priv->dev,
294438ddc59dSLABBE Corentin 				   "%s: Cannot attach to PHY (error: %d)\n",
2945e58bb43fSGiuseppe CAVALLARO 				   __func__, ret);
29465ec55823SJoakim Zhang 			goto init_phy_error;
29477ac6653aSJeff Kirsher 		}
2948e58bb43fSGiuseppe CAVALLARO 	}
29497ac6653aSJeff Kirsher 
2950523f11b5SSrinivas Kandagatla 	/* Extra statistics */
2951523f11b5SSrinivas Kandagatla 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2952523f11b5SSrinivas Kandagatla 	priv->xstats.threshold = tc;
2953523f11b5SSrinivas Kandagatla 
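	/* Select the RX buffer size: the chip may require 16KiB buffers for
	 * the current MTU, otherwise derive the size from the MTU itself.
	 */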
29545d626c87SJose Abreu 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
29555d626c87SJose Abreu 	if (bfsize < 0)
29565d626c87SJose Abreu 		bfsize = 0;
29575d626c87SJose Abreu 
29585d626c87SJose Abreu 	if (bfsize < BUF_SIZE_16KiB)
29595d626c87SJose Abreu 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
29605d626c87SJose Abreu 
29615d626c87SJose Abreu 	priv->dma_buf_sz = bfsize;
29625d626c87SJose Abreu 	buf_sz = bfsize;
29635d626c87SJose Abreu 
296422ad3838SGiuseppe Cavallaro 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
296556329137SBartlomiej Zolnierkiewicz 
2966aa042f60SSong, Yoong Siang 	if (!priv->dma_tx_size)
2967aa042f60SSong, Yoong Siang 		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
2968aa042f60SSong, Yoong Siang 	if (!priv->dma_rx_size)
2969aa042f60SSong, Yoong Siang 		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
2970aa042f60SSong, Yoong Siang 
2971579a25a8SJose Abreu 	/* Earlier check for TBS */
2972579a25a8SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
2973579a25a8SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2974579a25a8SJose Abreu 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
2975579a25a8SJose Abreu 
2976579a25a8SJose Abreu 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
2977579a25a8SJose Abreu 		if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
2978579a25a8SJose Abreu 			tx_q->tbs &= ~STMMAC_TBS_AVAIL;
2979579a25a8SJose Abreu 	}
2980579a25a8SJose Abreu 
29815bacd778SLABBE Corentin 	ret = alloc_dma_desc_resources(priv);
29825bacd778SLABBE Corentin 	if (ret < 0) {
29835bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
29845bacd778SLABBE Corentin 			   __func__);
29855bacd778SLABBE Corentin 		goto dma_desc_error;
29865bacd778SLABBE Corentin 	}
29875bacd778SLABBE Corentin 
29885bacd778SLABBE Corentin 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
29895bacd778SLABBE Corentin 	if (ret < 0) {
29905bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
29915bacd778SLABBE Corentin 			   __func__);
29925bacd778SLABBE Corentin 		goto init_error;
29935bacd778SLABBE Corentin 	}
29945bacd778SLABBE Corentin 
2995fe131929SHuacai Chen 	ret = stmmac_hw_setup(dev, true);
299656329137SBartlomiej Zolnierkiewicz 	if (ret < 0) {
299738ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2998c9324d18SGiuseppe CAVALLARO 		goto init_error;
29997ac6653aSJeff Kirsher 	}
30007ac6653aSJeff Kirsher 
3001d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
3002777da230SGiuseppe CAVALLARO 
300374371272SJose Abreu 	phylink_start(priv->phylink);
300477b28983SJisheng Zhang 	/* We may have called phylink_speed_down before */
300577b28983SJisheng Zhang 	phylink_speed_up(priv->phylink);
30067ac6653aSJeff Kirsher 
30077ac6653aSJeff Kirsher 	/* Request the IRQ lines */
30087ac6653aSJeff Kirsher 	ret = request_irq(dev->irq, stmmac_interrupt,
30097ac6653aSJeff Kirsher 			  IRQF_SHARED, dev->name, dev);
30107ac6653aSJeff Kirsher 	if (unlikely(ret < 0)) {
301138ddc59dSLABBE Corentin 		netdev_err(priv->dev,
301238ddc59dSLABBE Corentin 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
30137ac6653aSJeff Kirsher 			   __func__, dev->irq, ret);
30146c1e5abeSThierry Reding 		goto irq_error;
30157ac6653aSJeff Kirsher 	}
30167ac6653aSJeff Kirsher 
30177a13f8f5SFrancesco Virlinzi 	/* Request the Wake IRQ in case another line is used for WoL */
30187a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq) {
30197a13f8f5SFrancesco Virlinzi 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
30207a13f8f5SFrancesco Virlinzi 				  IRQF_SHARED, dev->name, dev);
30217a13f8f5SFrancesco Virlinzi 		if (unlikely(ret < 0)) {
302238ddc59dSLABBE Corentin 			netdev_err(priv->dev,
302338ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3024ceb69499SGiuseppe CAVALLARO 				   __func__, priv->wol_irq, ret);
3025c9324d18SGiuseppe CAVALLARO 			goto wolirq_error;
30267a13f8f5SFrancesco Virlinzi 		}
30277a13f8f5SFrancesco Virlinzi 	}
30287a13f8f5SFrancesco Virlinzi 
3029d765955dSGiuseppe CAVALLARO 	/* Request the LPI IRQ in case a dedicated line is used for it */
3030d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0) {
3031d765955dSGiuseppe CAVALLARO 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
3032d765955dSGiuseppe CAVALLARO 				  dev->name, dev);
3033d765955dSGiuseppe CAVALLARO 		if (unlikely(ret < 0)) {
303438ddc59dSLABBE Corentin 			netdev_err(priv->dev,
303538ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3036d765955dSGiuseppe CAVALLARO 				   __func__, priv->lpi_irq, ret);
3037c9324d18SGiuseppe CAVALLARO 			goto lpiirq_error;
3038d765955dSGiuseppe CAVALLARO 		}
3039d765955dSGiuseppe CAVALLARO 	}
3040d765955dSGiuseppe CAVALLARO 
3041c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
30429f19306dSOng Boon Leong 	netif_tx_start_all_queues(priv->dev);
30437ac6653aSJeff Kirsher 
30447ac6653aSJeff Kirsher 	return 0;
30457ac6653aSJeff Kirsher 
3046c9324d18SGiuseppe CAVALLARO lpiirq_error:
3047d765955dSGiuseppe CAVALLARO 	if (priv->wol_irq != dev->irq)
3048d765955dSGiuseppe CAVALLARO 		free_irq(priv->wol_irq, dev);
3049c9324d18SGiuseppe CAVALLARO wolirq_error:
30507a13f8f5SFrancesco Virlinzi 	free_irq(dev->irq, dev);
30516c1e5abeSThierry Reding irq_error:
305274371272SJose Abreu 	phylink_stop(priv->phylink);
30537a13f8f5SFrancesco Virlinzi 
30548fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3055d5a05e69SVincent Whitchurch 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
30568fce3331SJose Abreu 
3057c66f6c37SThierry Reding 	stmmac_hw_teardown(dev);
3058c9324d18SGiuseppe CAVALLARO init_error:
3059c9324d18SGiuseppe CAVALLARO 	free_dma_desc_resources(priv);
30605bacd778SLABBE Corentin dma_desc_error:
306174371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
30625ec55823SJoakim Zhang init_phy_error:
30635ec55823SJoakim Zhang 	pm_runtime_put(priv->device);
30647ac6653aSJeff Kirsher 	return ret;
30657ac6653aSJeff Kirsher }
30667ac6653aSJeff Kirsher 
30677ac6653aSJeff Kirsher /**
30687ac6653aSJeff Kirsher  *  stmmac_release - close entry point of the driver
30697ac6653aSJeff Kirsher  *  @dev : device pointer.
30707ac6653aSJeff Kirsher  *  Description:
30717ac6653aSJeff Kirsher  *  This is the stop entry point of the driver.
30727ac6653aSJeff Kirsher  */
30737ac6653aSJeff Kirsher static int stmmac_release(struct net_device *dev)
30747ac6653aSJeff Kirsher {
30757ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
30768fce3331SJose Abreu 	u32 chan;
30777ac6653aSJeff Kirsher 
307877b28983SJisheng Zhang 	if (device_may_wakeup(priv->device))
307977b28983SJisheng Zhang 		phylink_speed_down(priv->phylink, false);
30807ac6653aSJeff Kirsher 	/* Stop and disconnect the PHY */
308174371272SJose Abreu 	phylink_stop(priv->phylink);
308274371272SJose Abreu 	phylink_disconnect_phy(priv->phylink);
30837ac6653aSJeff Kirsher 
3084c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
30857ac6653aSJeff Kirsher 
30868fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3087d5a05e69SVincent Whitchurch 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
30889125cdd1SGiuseppe CAVALLARO 
30897ac6653aSJeff Kirsher 	/* Free the IRQ lines */
30907ac6653aSJeff Kirsher 	free_irq(dev->irq, dev);
30917a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq)
30927a13f8f5SFrancesco Virlinzi 		free_irq(priv->wol_irq, dev);
3093d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0)
3094d765955dSGiuseppe CAVALLARO 		free_irq(priv->lpi_irq, dev);
30957ac6653aSJeff Kirsher 
30965f585913SFugang Duan 	if (priv->eee_enabled) {
30975f585913SFugang Duan 		priv->tx_path_in_lpi_mode = false;
30985f585913SFugang Duan 		del_timer_sync(&priv->eee_ctrl_timer);
30995f585913SFugang Duan 	}
31005f585913SFugang Duan 
31017ac6653aSJeff Kirsher 	/* Stop TX/RX DMA and clear the descriptors */
3102ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
31037ac6653aSJeff Kirsher 
31047ac6653aSJeff Kirsher 	/* Release and free the Rx/Tx resources */
31057ac6653aSJeff Kirsher 	free_dma_desc_resources(priv);
31067ac6653aSJeff Kirsher 
31077ac6653aSJeff Kirsher 	/* Disable the MAC Rx/Tx */
3108c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
31097ac6653aSJeff Kirsher 
31107ac6653aSJeff Kirsher 	netif_carrier_off(dev);
31117ac6653aSJeff Kirsher 
311292ba6888SRayagond Kokatanur 	stmmac_release_ptp(priv);
311392ba6888SRayagond Kokatanur 
31145ec55823SJoakim Zhang 	pm_runtime_put(priv->device);
31155ec55823SJoakim Zhang 
31167ac6653aSJeff Kirsher 	return 0;
31177ac6653aSJeff Kirsher }
31187ac6653aSJeff Kirsher 
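/* Build a descriptor carrying the VLAN tag taken from the skb when HW VLAN
 * insertion is available; returns true if a TX descriptor was consumed.
 */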
311930d93227SJose Abreu static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
312030d93227SJose Abreu 			       struct stmmac_tx_queue *tx_q)
312130d93227SJose Abreu {
312230d93227SJose Abreu 	u16 tag = 0x0, inner_tag = 0x0;
312330d93227SJose Abreu 	u32 inner_type = 0x0;
312430d93227SJose Abreu 	struct dma_desc *p;
312530d93227SJose Abreu 
312630d93227SJose Abreu 	if (!priv->dma_cap.vlins)
312730d93227SJose Abreu 		return false;
312830d93227SJose Abreu 	if (!skb_vlan_tag_present(skb))
312930d93227SJose Abreu 		return false;
313030d93227SJose Abreu 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
313130d93227SJose Abreu 		inner_tag = skb_vlan_tag_get(skb);
313230d93227SJose Abreu 		inner_type = STMMAC_VLAN_INSERT;
313330d93227SJose Abreu 	}
313430d93227SJose Abreu 
313530d93227SJose Abreu 	tag = skb_vlan_tag_get(skb);
313630d93227SJose Abreu 
3137579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3138579a25a8SJose Abreu 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3139579a25a8SJose Abreu 	else
3140579a25a8SJose Abreu 		p = &tx_q->dma_tx[tx_q->cur_tx];
3141579a25a8SJose Abreu 
314230d93227SJose Abreu 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
314330d93227SJose Abreu 		return false;
314430d93227SJose Abreu 
314530d93227SJose Abreu 	stmmac_set_tx_owner(priv, p);
3146aa042f60SSong, Yoong Siang 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
314730d93227SJose Abreu 	return true;
314830d93227SJose Abreu }
314930d93227SJose Abreu 
31507ac6653aSJeff Kirsher /**
3151f748be53SAlexandre TORGUE  *  stmmac_tso_allocator - allocate and fill TSO descriptors for the payload
3152f748be53SAlexandre TORGUE  *  @priv: driver private structure
3153f748be53SAlexandre TORGUE  *  @des: buffer start address
3154f748be53SAlexandre TORGUE  *  @total_len: total length to fill in descriptors
3155d0ea5cbdSJesse Brandeburg  *  @last_segment: condition for the last descriptor
3156ce736788SJoao Pinto  *  @queue: TX queue index
3157f748be53SAlexandre TORGUE  *  Description:
3158f748be53SAlexandre TORGUE  *  This function fills the descriptors and requests new descriptors according
3159f748be53SAlexandre TORGUE  *  to the remaining buffer length to fill
3160f748be53SAlexandre TORGUE  */
3161a993db88SJose Abreu static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3162ce736788SJoao Pinto 				 int total_len, bool last_segment, u32 queue)
3163f748be53SAlexandre TORGUE {
3164ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3165f748be53SAlexandre TORGUE 	struct dma_desc *desc;
31665bacd778SLABBE Corentin 	u32 buff_size;
3167ce736788SJoao Pinto 	int tmp_len;
3168f748be53SAlexandre TORGUE 
3169f748be53SAlexandre TORGUE 	tmp_len = total_len;
3170f748be53SAlexandre TORGUE 
3171f748be53SAlexandre TORGUE 	while (tmp_len > 0) {
3172a993db88SJose Abreu 		dma_addr_t curr_addr;
3173a993db88SJose Abreu 
3174aa042f60SSong, Yoong Siang 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3175aa042f60SSong, Yoong Siang 						priv->dma_tx_size);
3176b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3177579a25a8SJose Abreu 
3178579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3179579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3180579a25a8SJose Abreu 		else
3181579a25a8SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
3182f748be53SAlexandre TORGUE 
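		/* With 32-bit addressing the buffer address is written
		 * directly to des0; otherwise the stmmac_set_desc_addr()
		 * helper programs the (possibly 64-bit) address.
		 */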
3183a993db88SJose Abreu 		curr_addr = des + (total_len - tmp_len);
3184a993db88SJose Abreu 		if (priv->dma_cap.addr64 <= 32)
3185a993db88SJose Abreu 			desc->des0 = cpu_to_le32(curr_addr);
3186a993db88SJose Abreu 		else
3187a993db88SJose Abreu 			stmmac_set_desc_addr(priv, desc, curr_addr);
3188a993db88SJose Abreu 
3189f748be53SAlexandre TORGUE 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3190f748be53SAlexandre TORGUE 			    TSO_MAX_BUFF_SIZE : tmp_len;
3191f748be53SAlexandre TORGUE 
319242de047dSJose Abreu 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3193f748be53SAlexandre TORGUE 				0, 1,
3194426849e6SNiklas Cassel 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3195f748be53SAlexandre TORGUE 				0, 0);
3196f748be53SAlexandre TORGUE 
3197f748be53SAlexandre TORGUE 		tmp_len -= TSO_MAX_BUFF_SIZE;
3198f748be53SAlexandre TORGUE 	}
3199f748be53SAlexandre TORGUE }
3200f748be53SAlexandre TORGUE 
3201f748be53SAlexandre TORGUE /**
3202f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3203f748be53SAlexandre TORGUE  *  @skb : the socket buffer
3204f748be53SAlexandre TORGUE  *  @dev : device pointer
3205f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
3206f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
3207f748be53SAlexandre TORGUE  *  The diagram below shows the ring programming in the case of TSO frames:
3208f748be53SAlexandre TORGUE  *
3209f748be53SAlexandre TORGUE  *  First Descriptor
3210f748be53SAlexandre TORGUE  *   --------
3211f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
3212f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
3213f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
3214f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3215f748be53SAlexandre TORGUE  *   --------
3216f748be53SAlexandre TORGUE  *	|
3217f748be53SAlexandre TORGUE  *     ...
3218f748be53SAlexandre TORGUE  *	|
3219f748be53SAlexandre TORGUE  *   --------
3220f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3221f748be53SAlexandre TORGUE  *   | DES1 | --|
3222f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
3223f748be53SAlexandre TORGUE  *   | DES3 |
3224f748be53SAlexandre TORGUE  *   --------
3225f748be53SAlexandre TORGUE  *
3226f748be53SAlexandre TORGUE  * The MSS is fixed while TSO is enabled, so the TDES3 context field does not need reprogramming per frame.
3227f748be53SAlexandre TORGUE  */
3228f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3229f748be53SAlexandre TORGUE {
3230ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
3231f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
3232579a25a8SJose Abreu 	int desc_size, tmp_pay_len = 0, first_tx;
3233f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
3234ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
3235c2837423SJose Abreu 	unsigned int first_entry, tx_packets;
3236ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
3237c2837423SJose Abreu 	bool has_vlan, set_ic;
3238579a25a8SJose Abreu 	u8 proto_hdr_len, hdr;
3239ce736788SJoao Pinto 	u32 pay_len, mss;
3240a993db88SJose Abreu 	dma_addr_t des;
3241f748be53SAlexandre TORGUE 	int i;
3242f748be53SAlexandre TORGUE 
3243ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
3244c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
3245ce736788SJoao Pinto 
3246f748be53SAlexandre TORGUE 	/* Compute header lengths */
3247b7766206SJose Abreu 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3248b7766206SJose Abreu 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3249b7766206SJose Abreu 		hdr = sizeof(struct udphdr);
3250b7766206SJose Abreu 	} else {
3251f748be53SAlexandre TORGUE 		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3252b7766206SJose Abreu 		hdr = tcp_hdrlen(skb);
3253b7766206SJose Abreu 	}
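	/* proto_hdr_len covers the full L2/L3/L4 header copied into the
	 * first descriptor; hdr is only the transport (TCP/UDP) header
	 * length and is later programmed into the TSO descriptor.
	 */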
3254f748be53SAlexandre TORGUE 
3255f748be53SAlexandre TORGUE 	/* Descriptor availability based on the threshold should be safe enough */
3256ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
3257f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3258c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3259c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3260c22a3f48SJoao Pinto 								queue));
3261f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
326238ddc59dSLABBE Corentin 			netdev_err(priv->dev,
326338ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
326438ddc59dSLABBE Corentin 				   __func__);
3265f748be53SAlexandre TORGUE 		}
3266f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
3267f748be53SAlexandre TORGUE 	}
3268f748be53SAlexandre TORGUE 
3269f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3270f748be53SAlexandre TORGUE 
3271f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
3272f748be53SAlexandre TORGUE 
3273f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
32748d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
3275579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3276579a25a8SJose Abreu 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3277579a25a8SJose Abreu 		else
3278579a25a8SJose Abreu 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3279579a25a8SJose Abreu 
328042de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
32818d212a9eSNiklas Cassel 		tx_q->mss = mss;
3282aa042f60SSong, Yoong Siang 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3283aa042f60SSong, Yoong Siang 						priv->dma_tx_size);
3284b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3285f748be53SAlexandre TORGUE 	}
3286f748be53SAlexandre TORGUE 
3287f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
3288b7766206SJose Abreu 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3289b7766206SJose Abreu 			__func__, hdr, proto_hdr_len, pay_len, mss);
3290f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3291f748be53SAlexandre TORGUE 			skb->data_len);
3292f748be53SAlexandre TORGUE 	}
3293f748be53SAlexandre TORGUE 
329430d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
329530d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
329630d93227SJose Abreu 
3297ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
3298b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3299f748be53SAlexandre TORGUE 
3300579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3301579a25a8SJose Abreu 		desc = &tx_q->dma_entx[first_entry].basic;
3302579a25a8SJose Abreu 	else
3303579a25a8SJose Abreu 		desc = &tx_q->dma_tx[first_entry];
3304f748be53SAlexandre TORGUE 	first = desc;
3305f748be53SAlexandre TORGUE 
330630d93227SJose Abreu 	if (has_vlan)
330730d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
330830d93227SJose Abreu 
3309f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
3310f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
3311f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
3312f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
3313f748be53SAlexandre TORGUE 		goto dma_map_err;
3314f748be53SAlexandre TORGUE 
3315ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
3316ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
3317f748be53SAlexandre TORGUE 
3318a993db88SJose Abreu 	if (priv->dma_cap.addr64 <= 32) {
3319f8be0d78SMichael Weiser 		first->des0 = cpu_to_le32(des);
3320f748be53SAlexandre TORGUE 
3321f748be53SAlexandre TORGUE 		/* Fill start of payload in buff2 of first descriptor */
3322f748be53SAlexandre TORGUE 		if (pay_len)
3323f8be0d78SMichael Weiser 			first->des1 = cpu_to_le32(des + proto_hdr_len);
3324f748be53SAlexandre TORGUE 
3325f748be53SAlexandre TORGUE 		/* If needed take extra descriptors to fill the remaining payload */
3326f748be53SAlexandre TORGUE 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3327a993db88SJose Abreu 	} else {
3328a993db88SJose Abreu 		stmmac_set_desc_addr(priv, first, des);
3329a993db88SJose Abreu 		tmp_pay_len = pay_len;
333034c15202Syuqi jin 		des += proto_hdr_len;
3331b2f07199SJose Abreu 		pay_len = 0;
3332a993db88SJose Abreu 	}
3333f748be53SAlexandre TORGUE 
3334ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
3335f748be53SAlexandre TORGUE 
3336f748be53SAlexandre TORGUE 	/* Prepare fragments */
3337f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
3338f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3339f748be53SAlexandre TORGUE 
3340f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
3341f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
3342f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
3343937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
3344937071c1SThierry Reding 			goto dma_map_err;
3345f748be53SAlexandre TORGUE 
3346f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3347ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
3348f748be53SAlexandre TORGUE 
3349ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3350ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3351ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3352f748be53SAlexandre TORGUE 	}
3353f748be53SAlexandre TORGUE 
3354ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3355f748be53SAlexandre TORGUE 
335605cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
335705cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
335805cf0d1bSNiklas Cassel 
33597df4a3a7SJose Abreu 	/* Manage tx mitigation */
3360c2837423SJose Abreu 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
3361c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
3362c2837423SJose Abreu 
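	/* Decide whether this frame must request a completion interrupt:
	 * always when a HW TX timestamp is wanted, never when the
	 * tx_coal_frames threshold is zero, otherwise whenever that
	 * threshold is crossed; the TX coalescing timer covers the rest.
	 */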
3363c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3364c2837423SJose Abreu 		set_ic = true;
3365c2837423SJose Abreu 	else if (!priv->tx_coal_frames)
3366c2837423SJose Abreu 		set_ic = false;
3367c2837423SJose Abreu 	else if (tx_packets > priv->tx_coal_frames)
3368c2837423SJose Abreu 		set_ic = true;
3369c2837423SJose Abreu 	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3370c2837423SJose Abreu 		set_ic = true;
3371c2837423SJose Abreu 	else
3372c2837423SJose Abreu 		set_ic = false;
3373c2837423SJose Abreu 
3374c2837423SJose Abreu 	if (set_ic) {
3375579a25a8SJose Abreu 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3376579a25a8SJose Abreu 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3377579a25a8SJose Abreu 		else
33787df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[tx_q->cur_tx];
3379579a25a8SJose Abreu 
33807df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
33817df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
33827df4a3a7SJose Abreu 		priv->xstats.tx_set_ic_bit++;
33837df4a3a7SJose Abreu 	}
33847df4a3a7SJose Abreu 
338505cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
338605cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
338705cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
338805cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
338905cf0d1bSNiklas Cassel 	 */
3390aa042f60SSong, Yoong Siang 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3391f748be53SAlexandre TORGUE 
3392ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3393b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
339438ddc59dSLABBE Corentin 			  __func__);
3395c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3396f748be53SAlexandre TORGUE 	}
3397f748be53SAlexandre TORGUE 
3398f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
3399f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
3400f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
3401f748be53SAlexandre TORGUE 
34028000ddc0SJose Abreu 	if (priv->sarc_type)
34038000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
34048000ddc0SJose Abreu 
3405f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
3406f748be53SAlexandre TORGUE 
3407f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3408f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
3409f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
3410f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
341142de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
3412f748be53SAlexandre TORGUE 	}
3413f748be53SAlexandre TORGUE 
3414f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
341542de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
3416f748be53SAlexandre TORGUE 			proto_hdr_len,
3417f748be53SAlexandre TORGUE 			pay_len,
3418ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3419b7766206SJose Abreu 			hdr / 4, (skb->len - proto_hdr_len));
3420f748be53SAlexandre TORGUE 
3421f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
342215d2ee42SNiklas Cassel 	if (mss_desc) {
342315d2ee42SNiklas Cassel 		/* Make sure that first descriptor has been completely
342415d2ee42SNiklas Cassel 		 * written, including its own bit. This is because MSS is
342515d2ee42SNiklas Cassel 		 * actually before first descriptor, so we need to make
342615d2ee42SNiklas Cassel 		 * sure that MSS's own bit is the last thing written.
342715d2ee42SNiklas Cassel 		 */
342815d2ee42SNiklas Cassel 		dma_wmb();
342942de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
343015d2ee42SNiklas Cassel 	}
3431f748be53SAlexandre TORGUE 
3432f748be53SAlexandre TORGUE 	/* The own bit must be the latest setting done when preparing the
3433f748be53SAlexandre TORGUE 	 * descriptor, and then a barrier is needed to make sure that
3434f748be53SAlexandre TORGUE 	 * all is coherent before granting the DMA engine.
3435f748be53SAlexandre TORGUE 	 */
343695eb930aSNiklas Cassel 	wmb();
3437f748be53SAlexandre TORGUE 
3438f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
3439f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3440ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3441ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
3442f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
3443f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
3444f748be53SAlexandre TORGUE 	}
3445f748be53SAlexandre TORGUE 
3446c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3447f748be53SAlexandre TORGUE 
3448579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3449579a25a8SJose Abreu 		desc_size = sizeof(struct dma_edesc);
3450579a25a8SJose Abreu 	else
3451579a25a8SJose Abreu 		desc_size = sizeof(struct dma_desc);
3452579a25a8SJose Abreu 
3453579a25a8SJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3454a4e887faSJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
34554772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
3456f748be53SAlexandre TORGUE 
3457f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3458f748be53SAlexandre TORGUE 
3459f748be53SAlexandre TORGUE dma_map_err:
3460f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
3461f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
3462f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
3463f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3464f748be53SAlexandre TORGUE }
3465f748be53SAlexandre TORGUE 
3466f748be53SAlexandre TORGUE /**
3467732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
34687ac6653aSJeff Kirsher  *  @skb : the socket buffer
34697ac6653aSJeff Kirsher  *  @dev : device pointer
347032ceabcaSGiuseppe CAVALLARO  *  Description : this is the tx entry point of the driver.
347132ceabcaSGiuseppe CAVALLARO  *  It programs the chain or the ring and supports oversized frames
347232ceabcaSGiuseppe CAVALLARO  *  and the SG feature.
34737ac6653aSJeff Kirsher  */
34747ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
34757ac6653aSJeff Kirsher {
3476c2837423SJose Abreu 	unsigned int first_entry, tx_packets, enh_desc;
34777ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
34780e80bdc9SGiuseppe Cavallaro 	unsigned int nopaged_len = skb_headlen(skb);
34794a7d666aSGiuseppe CAVALLARO 	int i, csum_insertion = 0, is_jumbo = 0;
3480ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
34817ac6653aSJeff Kirsher 	int nfrags = skb_shinfo(skb)->nr_frags;
3482b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
3483579a25a8SJose Abreu 	struct dma_edesc *tbs_desc = NULL;
3484579a25a8SJose Abreu 	int entry, desc_size, first_tx;
34857ac6653aSJeff Kirsher 	struct dma_desc *desc, *first;
3486ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
3487c2837423SJose Abreu 	bool has_vlan, set_ic;
3488a993db88SJose Abreu 	dma_addr_t des;
3489f748be53SAlexandre TORGUE 
3490ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
3491c2837423SJose Abreu 	first_tx = tx_q->cur_tx;
3492ce736788SJoao Pinto 
3493be1c7eaeSVineetha G. Jaya Kumaran 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
3494e2cd682dSJose Abreu 		stmmac_disable_eee_mode(priv);
3495e2cd682dSJose Abreu 
3496f748be53SAlexandre TORGUE 	/* Manage oversized TCP frames for GMAC4 device */
3497f748be53SAlexandre TORGUE 	if (skb_is_gso(skb) && priv->tso) {
3498b7766206SJose Abreu 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3499b7766206SJose Abreu 			return stmmac_tso_xmit(skb, dev);
3500b7766206SJose Abreu 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
3501f748be53SAlexandre TORGUE 			return stmmac_tso_xmit(skb, dev);
3502f748be53SAlexandre TORGUE 	}
35037ac6653aSJeff Kirsher 
3504ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3505c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3506c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3507c22a3f48SJoao Pinto 								queue));
35087ac6653aSJeff Kirsher 			/* This is a hard error, log it. */
350938ddc59dSLABBE Corentin 			netdev_err(priv->dev,
351038ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
351138ddc59dSLABBE Corentin 				   __func__);
35127ac6653aSJeff Kirsher 		}
35137ac6653aSJeff Kirsher 		return NETDEV_TX_BUSY;
35147ac6653aSJeff Kirsher 	}
35157ac6653aSJeff Kirsher 
351630d93227SJose Abreu 	/* Check if VLAN can be inserted by HW */
351730d93227SJose Abreu 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
351830d93227SJose Abreu 
3519ce736788SJoao Pinto 	entry = tx_q->cur_tx;
35200e80bdc9SGiuseppe Cavallaro 	first_entry = entry;
3521b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
35227ac6653aSJeff Kirsher 
35237ac6653aSJeff Kirsher 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
35247ac6653aSJeff Kirsher 
35250e80bdc9SGiuseppe Cavallaro 	if (likely(priv->extend_desc))
3526ce736788SJoao Pinto 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3527579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3528579a25a8SJose Abreu 		desc = &tx_q->dma_entx[entry].basic;
3529c24602efSGiuseppe CAVALLARO 	else
3530ce736788SJoao Pinto 		desc = tx_q->dma_tx + entry;
3531c24602efSGiuseppe CAVALLARO 
35327ac6653aSJeff Kirsher 	first = desc;
35337ac6653aSJeff Kirsher 
353430d93227SJose Abreu 	if (has_vlan)
353530d93227SJose Abreu 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
353630d93227SJose Abreu 
35370e80bdc9SGiuseppe Cavallaro 	enh_desc = priv->plat->enh_desc;
35384a7d666aSGiuseppe CAVALLARO 	/* To program the descriptors according to the size of the frame */
353929896a67SGiuseppe CAVALLARO 	if (enh_desc)
35402c520b1cSJose Abreu 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
354129896a67SGiuseppe CAVALLARO 
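	/* Frames that do not fit a single buffer are handed to the
	 * ring/chain specific jumbo helper, which fills the extra
	 * descriptors and returns the last entry it used.
	 */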
354263a550fcSJose Abreu 	if (unlikely(is_jumbo)) {
35432c520b1cSJose Abreu 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
354463a550fcSJose Abreu 		if (unlikely(entry < 0) && (entry != -EINVAL))
3545362b37beSGiuseppe CAVALLARO 			goto dma_map_err;
354629896a67SGiuseppe CAVALLARO 	}
35477ac6653aSJeff Kirsher 
35487ac6653aSJeff Kirsher 	for (i = 0; i < nfrags; i++) {
35499e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
35509e903e08SEric Dumazet 		int len = skb_frag_size(frag);
3551be434d50SGiuseppe Cavallaro 		bool last_segment = (i == (nfrags - 1));
35527ac6653aSJeff Kirsher 
3553aa042f60SSong, Yoong Siang 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3554b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[entry]);
3555e3ad57c9SGiuseppe Cavallaro 
35560e80bdc9SGiuseppe Cavallaro 		if (likely(priv->extend_desc))
3557ce736788SJoao Pinto 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3558579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3559579a25a8SJose Abreu 			desc = &tx_q->dma_entx[entry].basic;
3560c24602efSGiuseppe CAVALLARO 		else
3561ce736788SJoao Pinto 			desc = tx_q->dma_tx + entry;
35627ac6653aSJeff Kirsher 
3563f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3564f722380dSIan Campbell 				       DMA_TO_DEVICE);
3565f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
3566362b37beSGiuseppe CAVALLARO 			goto dma_map_err; /* should reuse desc w/o issues */
3567362b37beSGiuseppe CAVALLARO 
3568ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].buf = des;
35696844171dSJose Abreu 
35706844171dSJose Abreu 		stmmac_set_desc_addr(priv, desc, des);
3571f748be53SAlexandre TORGUE 
3572ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3573ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].len = len;
3574ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
35750e80bdc9SGiuseppe Cavallaro 
35760e80bdc9SGiuseppe Cavallaro 		/* Prepare the descriptor and set the own bit too */
357742de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
357842de047dSJose Abreu 				priv->mode, 1, last_segment, skb->len);
35797ac6653aSJeff Kirsher 	}
35807ac6653aSJeff Kirsher 
358105cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
358205cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[entry] = skb;
3583e3ad57c9SGiuseppe Cavallaro 
35847df4a3a7SJose Abreu 	/* According to the coalesce parameter the IC bit for the latest
35857df4a3a7SJose Abreu 	 * segment is reset and the timer re-started to clean the tx status.
35867df4a3a7SJose Abreu 	 * This approach takes care of the fragments: desc is the first
35877df4a3a7SJose Abreu 	 * element in case of no SG.
35887df4a3a7SJose Abreu 	 */
3589c2837423SJose Abreu 	tx_packets = (entry + 1) - first_tx;
3590c2837423SJose Abreu 	tx_q->tx_count_frames += tx_packets;
3591c2837423SJose Abreu 
3592c2837423SJose Abreu 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3593c2837423SJose Abreu 		set_ic = true;
3594c2837423SJose Abreu 	else if (!priv->tx_coal_frames)
3595c2837423SJose Abreu 		set_ic = false;
3596c2837423SJose Abreu 	else if (tx_packets > priv->tx_coal_frames)
3597c2837423SJose Abreu 		set_ic = true;
3598c2837423SJose Abreu 	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3599c2837423SJose Abreu 		set_ic = true;
3600c2837423SJose Abreu 	else
3601c2837423SJose Abreu 		set_ic = false;
3602c2837423SJose Abreu 
3603c2837423SJose Abreu 	if (set_ic) {
36047df4a3a7SJose Abreu 		if (likely(priv->extend_desc))
36057df4a3a7SJose Abreu 			desc = &tx_q->dma_etx[entry].basic;
3606579a25a8SJose Abreu 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3607579a25a8SJose Abreu 			desc = &tx_q->dma_entx[entry].basic;
36087df4a3a7SJose Abreu 		else
36097df4a3a7SJose Abreu 			desc = &tx_q->dma_tx[entry];
36107df4a3a7SJose Abreu 
36117df4a3a7SJose Abreu 		tx_q->tx_count_frames = 0;
36127df4a3a7SJose Abreu 		stmmac_set_tx_ic(priv, desc);
36137df4a3a7SJose Abreu 		priv->xstats.tx_set_ic_bit++;
36147df4a3a7SJose Abreu 	}
36157df4a3a7SJose Abreu 
361605cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
361705cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
361805cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
361905cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
362005cf0d1bSNiklas Cassel 	 */
3621aa042f60SSong, Yoong Siang 	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3622ce736788SJoao Pinto 	tx_q->cur_tx = entry;
36237ac6653aSJeff Kirsher 
36247ac6653aSJeff Kirsher 	if (netif_msg_pktdata(priv)) {
362538ddc59dSLABBE Corentin 		netdev_dbg(priv->dev,
362638ddc59dSLABBE Corentin 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3627ce736788SJoao Pinto 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
36280e80bdc9SGiuseppe Cavallaro 			   entry, first, nfrags);
362983d7af64SGiuseppe CAVALLARO 
363038ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
36317ac6653aSJeff Kirsher 		print_pkt(skb->data, skb->len);
36327ac6653aSJeff Kirsher 	}
36330e80bdc9SGiuseppe Cavallaro 
3634ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3635b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3636b3e51069SLABBE Corentin 			  __func__);
3637c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
36387ac6653aSJeff Kirsher 	}
36397ac6653aSJeff Kirsher 
36407ac6653aSJeff Kirsher 	dev->stats.tx_bytes += skb->len;
36417ac6653aSJeff Kirsher 
36428000ddc0SJose Abreu 	if (priv->sarc_type)
36438000ddc0SJose Abreu 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
36448000ddc0SJose Abreu 
36450e80bdc9SGiuseppe Cavallaro 	skb_tx_timestamp(skb);
36460e80bdc9SGiuseppe Cavallaro 
36470e80bdc9SGiuseppe Cavallaro 	/* Ready to fill the first descriptor and set the OWN bit w/o any
36480e80bdc9SGiuseppe Cavallaro 	 * problems because all the descriptors are actually ready to be
36490e80bdc9SGiuseppe Cavallaro 	 * passed to the DMA engine.
36500e80bdc9SGiuseppe Cavallaro 	 */
36510e80bdc9SGiuseppe Cavallaro 	if (likely(!is_jumbo)) {
36520e80bdc9SGiuseppe Cavallaro 		bool last_segment = (nfrags == 0);
36530e80bdc9SGiuseppe Cavallaro 
3654f748be53SAlexandre TORGUE 		des = dma_map_single(priv->device, skb->data,
36550e80bdc9SGiuseppe Cavallaro 				     nopaged_len, DMA_TO_DEVICE);
3656f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
36570e80bdc9SGiuseppe Cavallaro 			goto dma_map_err;
36580e80bdc9SGiuseppe Cavallaro 
3659ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].buf = des;
36606844171dSJose Abreu 
36616844171dSJose Abreu 		stmmac_set_desc_addr(priv, first, des);
3662f748be53SAlexandre TORGUE 
3663ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3664ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
36650e80bdc9SGiuseppe Cavallaro 
3666891434b1SRayagond Kokatanur 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3667891434b1SRayagond Kokatanur 			     priv->hwts_tx_en)) {
3668891434b1SRayagond Kokatanur 			/* declare that device is doing timestamping */
3669891434b1SRayagond Kokatanur 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
367042de047dSJose Abreu 			stmmac_enable_tx_timestamp(priv, first);
3671891434b1SRayagond Kokatanur 		}
3672891434b1SRayagond Kokatanur 
36730e80bdc9SGiuseppe Cavallaro 		/* Prepare the first descriptor setting the OWN bit too */
367442de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3675579a25a8SJose Abreu 				csum_insertion, priv->mode, 0, last_segment,
367642de047dSJose Abreu 				skb->len);
367780acbed9SAaro Koskinen 	}
36780e80bdc9SGiuseppe Cavallaro 
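	/* When Time Based Scheduling is enabled, program the launch time
	 * taken from skb->tstamp into the enhanced descriptor.
	 */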
3679579a25a8SJose Abreu 	if (tx_q->tbs & STMMAC_TBS_EN) {
3680579a25a8SJose Abreu 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
3681579a25a8SJose Abreu 
3682579a25a8SJose Abreu 		tbs_desc = &tx_q->dma_entx[first_entry];
3683579a25a8SJose Abreu 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
3684579a25a8SJose Abreu 	}
3685579a25a8SJose Abreu 
3686579a25a8SJose Abreu 	stmmac_set_tx_owner(priv, first);
3687579a25a8SJose Abreu 
36880e80bdc9SGiuseppe Cavallaro 	/* The own bit must be the latest setting done when preparing the
36890e80bdc9SGiuseppe Cavallaro 	 * descriptor, and then a barrier is needed to make sure that
36900e80bdc9SGiuseppe Cavallaro 	 * all is coherent before granting the DMA engine.
36910e80bdc9SGiuseppe Cavallaro 	 */
369295eb930aSNiklas Cassel 	wmb();
36937ac6653aSJeff Kirsher 
3694c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3695f748be53SAlexandre TORGUE 
3696a4e887faSJose Abreu 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
36978fce3331SJose Abreu 
3698579a25a8SJose Abreu 	if (likely(priv->extend_desc))
3699579a25a8SJose Abreu 		desc_size = sizeof(struct dma_extended_desc);
3700579a25a8SJose Abreu 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3701579a25a8SJose Abreu 		desc_size = sizeof(struct dma_edesc);
3702579a25a8SJose Abreu 	else
3703579a25a8SJose Abreu 		desc_size = sizeof(struct dma_desc);
3704579a25a8SJose Abreu 
3705579a25a8SJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3706f1565c60SJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
37074772f26dSJose Abreu 	stmmac_tx_timer_arm(priv, queue);
37087ac6653aSJeff Kirsher 
3709362b37beSGiuseppe CAVALLARO 	return NETDEV_TX_OK;
3710a9097a96SGiuseppe CAVALLARO 
3711362b37beSGiuseppe CAVALLARO dma_map_err:
371238ddc59dSLABBE Corentin 	netdev_err(priv->dev, "Tx DMA map failed\n");
3713362b37beSGiuseppe CAVALLARO 	dev_kfree_skb(skb);
3714362b37beSGiuseppe CAVALLARO 	priv->dev->stats.tx_dropped++;
37157ac6653aSJeff Kirsher 	return NETDEV_TX_OK;
37167ac6653aSJeff Kirsher }
37177ac6653aSJeff Kirsher 
3718b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3719b9381985SVince Bridgers {
3720ab188e8fSElad Nachman 	struct vlan_ethhdr *veth;
3721ab188e8fSElad Nachman 	__be16 vlan_proto;
3722b9381985SVince Bridgers 	u16 vlanid;
3723b9381985SVince Bridgers 
3724ab188e8fSElad Nachman 	veth = (struct vlan_ethhdr *)skb->data;
3725ab188e8fSElad Nachman 	vlan_proto = veth->h_vlan_proto;
3726ab188e8fSElad Nachman 
3727ab188e8fSElad Nachman 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3728ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3729ab188e8fSElad Nachman 	    (vlan_proto == htons(ETH_P_8021AD) &&
3730ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3731b9381985SVince Bridgers 		/* pop the vlan tag */
3732ab188e8fSElad Nachman 		vlanid = ntohs(veth->h_vlan_TCI);
3733ab188e8fSElad Nachman 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3734b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
3735ab188e8fSElad Nachman 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3736b9381985SVince Bridgers 	}
3737b9381985SVince Bridgers }
3738b9381985SVince Bridgers 
373932ceabcaSGiuseppe CAVALLARO /**
3740732fdf0eSGiuseppe CAVALLARO  * stmmac_rx_refill - refill the used RX buffers
374132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
374254139cf3SJoao Pinto  * @queue: RX queue index
374332ceabcaSGiuseppe CAVALLARO  * Description : this reallocates the RX buffers (page pool pages) used by
374432ceabcaSGiuseppe CAVALLARO  * the zero-copy reception process.
374532ceabcaSGiuseppe CAVALLARO  */
374654139cf3SJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
37477ac6653aSJeff Kirsher {
374854139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
37493caa61c2SJose Abreu 	int len, dirty = stmmac_rx_dirty(priv, queue);
375054139cf3SJoao Pinto 	unsigned int entry = rx_q->dirty_rx;
375154139cf3SJoao Pinto 
37523caa61c2SJose Abreu 	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
37533caa61c2SJose Abreu 
3754e3ad57c9SGiuseppe Cavallaro 	while (dirty-- > 0) {
37552af6106aSJose Abreu 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3756c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
3757d429b66eSJose Abreu 		bool use_rx_wd;
3758c24602efSGiuseppe CAVALLARO 
3759c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
376054139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3761c24602efSGiuseppe CAVALLARO 		else
376254139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3763c24602efSGiuseppe CAVALLARO 
37642af6106aSJose Abreu 		if (!buf->page) {
37652af6106aSJose Abreu 			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
37662af6106aSJose Abreu 			if (!buf->page)
37677ac6653aSJeff Kirsher 				break;
3768120e87f9SGiuseppe Cavallaro 		}
37697ac6653aSJeff Kirsher 
377067afd6d1SJose Abreu 		if (priv->sph && !buf->sec_page) {
377167afd6d1SJose Abreu 			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
377267afd6d1SJose Abreu 			if (!buf->sec_page)
377367afd6d1SJose Abreu 				break;
377467afd6d1SJose Abreu 
377567afd6d1SJose Abreu 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
377667afd6d1SJose Abreu 
377767afd6d1SJose Abreu 			dma_sync_single_for_device(priv->device, buf->sec_addr,
377867afd6d1SJose Abreu 						   len, DMA_FROM_DEVICE);
377967afd6d1SJose Abreu 		}
378067afd6d1SJose Abreu 
37812af6106aSJose Abreu 		buf->addr = page_pool_get_dma_addr(buf->page);
37823caa61c2SJose Abreu 
37833caa61c2SJose Abreu 		/* Sync whole allocation to device. This will invalidate old
37843caa61c2SJose Abreu 		 * data.
37853caa61c2SJose Abreu 		 */
37863caa61c2SJose Abreu 		dma_sync_single_for_device(priv->device, buf->addr, len,
37873caa61c2SJose Abreu 					   DMA_FROM_DEVICE);
37883caa61c2SJose Abreu 
37892af6106aSJose Abreu 		stmmac_set_desc_addr(priv, p, buf->addr);
3790396e13e1SJoakim Zhang 		if (priv->sph)
3791396e13e1SJoakim Zhang 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
3792396e13e1SJoakim Zhang 		else
3793396e13e1SJoakim Zhang 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
37942c520b1cSJose Abreu 		stmmac_refill_desc3(priv, rx_q, p);
3795286a8372SGiuseppe CAVALLARO 
3796d429b66eSJose Abreu 		rx_q->rx_count_frames++;
37976fa9d691SJose Abreu 		rx_q->rx_count_frames += priv->rx_coal_frames;
37986fa9d691SJose Abreu 		if (rx_q->rx_count_frames > priv->rx_coal_frames)
37996fa9d691SJose Abreu 			rx_q->rx_count_frames = 0;
380009146abeSJose Abreu 
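		/* Decide whether this descriptor relies on the RX interrupt
		 * watchdog (RIWT) for completion signalling: never when RIWT
		 * is not in use, otherwise based on the RX frame coalescing
		 * counter.
		 */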
380109146abeSJose Abreu 		use_rx_wd = !priv->rx_coal_frames;
380209146abeSJose Abreu 		use_rx_wd |= rx_q->rx_count_frames > 0;
380309146abeSJose Abreu 		if (!priv->use_riwt)
380409146abeSJose Abreu 			use_rx_wd = false;
3805d429b66eSJose Abreu 
3806ad688cdbSPavel Machek 		dma_wmb();
38072af6106aSJose Abreu 		stmmac_set_rx_owner(priv, p, use_rx_wd);
3808e3ad57c9SGiuseppe Cavallaro 
3809aa042f60SSong, Yoong Siang 		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
38107ac6653aSJeff Kirsher 	}
381154139cf3SJoao Pinto 	rx_q->dirty_rx = entry;
3812858a31ffSJose Abreu 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3813858a31ffSJose Abreu 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
38144523a561SBiao Huang 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
38157ac6653aSJeff Kirsher }
38167ac6653aSJeff Kirsher 
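/* stmmac_rx_buf1_len - compute the amount of packet data held in buffer 1
 * of descriptor @p, taking Split Header (SPH) mode and the first/last
 * descriptor status into account.
 */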
381788ebe2cfSJose Abreu static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
381888ebe2cfSJose Abreu 				       struct dma_desc *p,
381988ebe2cfSJose Abreu 				       int status, unsigned int len)
382088ebe2cfSJose Abreu {
382188ebe2cfSJose Abreu 	unsigned int plen = 0, hlen = 0;
382231f2760eSLuo Jiaxing 	int coe = priv->hw->rx_csum;
382388ebe2cfSJose Abreu 
382488ebe2cfSJose Abreu 	/* Not first descriptor, buffer is always zero */
382588ebe2cfSJose Abreu 	if (priv->sph && len)
382688ebe2cfSJose Abreu 		return 0;
382788ebe2cfSJose Abreu 
382888ebe2cfSJose Abreu 	/* First descriptor, get split header length */
382931f2760eSLuo Jiaxing 	stmmac_get_rx_header_len(priv, p, &hlen);
383088ebe2cfSJose Abreu 	if (priv->sph && hlen) {
383188ebe2cfSJose Abreu 		priv->xstats.rx_split_hdr_pkt_n++;
383288ebe2cfSJose Abreu 		return hlen;
383388ebe2cfSJose Abreu 	}
383488ebe2cfSJose Abreu 
383588ebe2cfSJose Abreu 	/* First descriptor, not last descriptor and not split header */
383688ebe2cfSJose Abreu 	if (status & rx_not_ls)
383788ebe2cfSJose Abreu 		return priv->dma_buf_sz;
383888ebe2cfSJose Abreu 
383988ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
384088ebe2cfSJose Abreu 
384188ebe2cfSJose Abreu 	/* First descriptor and last descriptor and not split header */
384288ebe2cfSJose Abreu 	return min_t(unsigned int, priv->dma_buf_sz, plen);
384388ebe2cfSJose Abreu }
384488ebe2cfSJose Abreu 
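/* stmmac_rx_buf2_len - compute the amount of packet data held in buffer 2
 * of descriptor @p; non-zero only when Split Header (SPH) mode is in use.
 */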
384588ebe2cfSJose Abreu static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
384688ebe2cfSJose Abreu 				       struct dma_desc *p,
384788ebe2cfSJose Abreu 				       int status, unsigned int len)
384888ebe2cfSJose Abreu {
384988ebe2cfSJose Abreu 	int coe = priv->hw->rx_csum;
385088ebe2cfSJose Abreu 	unsigned int plen = 0;
385188ebe2cfSJose Abreu 
385288ebe2cfSJose Abreu 	/* Not split header, buffer is not available */
385388ebe2cfSJose Abreu 	if (!priv->sph)
385488ebe2cfSJose Abreu 		return 0;
385588ebe2cfSJose Abreu 
385688ebe2cfSJose Abreu 	/* Not last descriptor */
385788ebe2cfSJose Abreu 	if (status & rx_not_ls)
385888ebe2cfSJose Abreu 		return priv->dma_buf_sz;
385988ebe2cfSJose Abreu 
386088ebe2cfSJose Abreu 	plen = stmmac_get_rx_frame_len(priv, p, coe);
386188ebe2cfSJose Abreu 
386288ebe2cfSJose Abreu 	/* Last descriptor */
386388ebe2cfSJose Abreu 	return plen - len;
386488ebe2cfSJose Abreu }
386588ebe2cfSJose Abreu 
386632ceabcaSGiuseppe CAVALLARO /**
3867732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
386832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
386954139cf3SJoao Pinto  * @limit: NAPI budget
387054139cf3SJoao Pinto  * @queue: RX queue index.
387132ceabcaSGiuseppe CAVALLARO  * Description : this is the function called by the NAPI poll method.
387232ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
387332ceabcaSGiuseppe CAVALLARO  */
387454139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
38757ac6653aSJeff Kirsher {
387654139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
38778fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[queue];
3878ec222003SJose Abreu 	unsigned int count = 0, error = 0, len = 0;
3879ec222003SJose Abreu 	int status = 0, coe = priv->hw->rx_csum;
388007b39753SAaro Koskinen 	unsigned int next_entry = rx_q->cur_rx;
3881bfaf91caSJoakim Zhang 	unsigned int desc_size;
3882ec222003SJose Abreu 	struct sk_buff *skb = NULL;
38837ac6653aSJeff Kirsher 
388483d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
3885d0225e7dSAlexandre TORGUE 		void *rx_head;
3886d0225e7dSAlexandre TORGUE 
388738ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3888bfaf91caSJoakim Zhang 		if (priv->extend_desc) {
388954139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
3890bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_extended_desc);
3891bfaf91caSJoakim Zhang 		} else {
389254139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
3893bfaf91caSJoakim Zhang 			desc_size = sizeof(struct dma_desc);
3894bfaf91caSJoakim Zhang 		}
3895d0225e7dSAlexandre TORGUE 
3896bfaf91caSJoakim Zhang 		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
3897bfaf91caSJoakim Zhang 				    rx_q->dma_rx_phy, desc_size);
38987ac6653aSJeff Kirsher 	}
3899c24602efSGiuseppe CAVALLARO 	while (count < limit) {
390088ebe2cfSJose Abreu 		unsigned int buf1_len = 0, buf2_len = 0;
3901ec222003SJose Abreu 		enum pkt_hash_types hash_type;
39022af6106aSJose Abreu 		struct stmmac_rx_buffer *buf;
39032af6106aSJose Abreu 		struct dma_desc *np, *p;
3904ec222003SJose Abreu 		int entry;
3905ec222003SJose Abreu 		u32 hash;
39067ac6653aSJeff Kirsher 
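		/* A frame can span several descriptors and even several NAPI
		 * polls: restore any partially built SKB saved by the
		 * previous run before handling this descriptor.
		 */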
3907ec222003SJose Abreu 		if (!count && rx_q->state_saved) {
3908ec222003SJose Abreu 			skb = rx_q->state.skb;
3909ec222003SJose Abreu 			error = rx_q->state.error;
3910ec222003SJose Abreu 			len = rx_q->state.len;
3911ec222003SJose Abreu 		} else {
3912ec222003SJose Abreu 			rx_q->state_saved = false;
3913ec222003SJose Abreu 			skb = NULL;
3914ec222003SJose Abreu 			error = 0;
3915ec222003SJose Abreu 			len = 0;
3916ec222003SJose Abreu 		}
3917ec222003SJose Abreu 
3918ec222003SJose Abreu 		if (count >= limit)
3919ec222003SJose Abreu 			break;
3920ec222003SJose Abreu 
3921ec222003SJose Abreu read_again:
392288ebe2cfSJose Abreu 		buf1_len = 0;
392388ebe2cfSJose Abreu 		buf2_len = 0;
392407b39753SAaro Koskinen 		entry = next_entry;
39252af6106aSJose Abreu 		buf = &rx_q->buf_pool[entry];
392607b39753SAaro Koskinen 
3927c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
392854139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3929c24602efSGiuseppe CAVALLARO 		else
393054139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3931c24602efSGiuseppe CAVALLARO 
3932c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
393342de047dSJose Abreu 		status = stmmac_rx_status(priv, &priv->dev->stats,
3934c1fa3212SFabrice Gasnier 				&priv->xstats, p);
3935c1fa3212SFabrice Gasnier 		/* check if managed by the DMA otherwise go ahead */
3936c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
39377ac6653aSJeff Kirsher 			break;
39387ac6653aSJeff Kirsher 
3939aa042f60SSong, Yoong Siang 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
3940aa042f60SSong, Yoong Siang 						priv->dma_rx_size);
394154139cf3SJoao Pinto 		next_entry = rx_q->cur_rx;
3942e3ad57c9SGiuseppe Cavallaro 
3943c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
394454139cf3SJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3945c24602efSGiuseppe CAVALLARO 		else
394654139cf3SJoao Pinto 			np = rx_q->dma_rx + next_entry;
3947ba1ffd74SGiuseppe CAVALLARO 
3948ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
39497ac6653aSJeff Kirsher 
395042de047dSJose Abreu 		if (priv->extend_desc)
395142de047dSJose Abreu 			stmmac_rx_extended_status(priv, &priv->dev->stats,
395242de047dSJose Abreu 					&priv->xstats, rx_q->dma_erx + entry);
3953891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
39542af6106aSJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
39552af6106aSJose Abreu 			buf->page = NULL;
3956ec222003SJose Abreu 			error = 1;
39570b273ca4SJose Abreu 			if (!priv->hwts_rx_en)
39580b273ca4SJose Abreu 				priv->dev->stats.rx_errors++;
3959ec222003SJose Abreu 		}
3960f748be53SAlexandre TORGUE 
3961ec222003SJose Abreu 		if (unlikely(error && (status & rx_not_ls)))
3962ec222003SJose Abreu 			goto read_again;
3963ec222003SJose Abreu 		if (unlikely(error)) {
3964ec222003SJose Abreu 			dev_kfree_skb(skb);
396588ebe2cfSJose Abreu 			skb = NULL;
3966cda4985aSJose Abreu 			count++;
396707b39753SAaro Koskinen 			continue;
3968e527c4a7SGiuseppe CAVALLARO 		}
3969e527c4a7SGiuseppe CAVALLARO 
3970ec222003SJose Abreu 		/* Buffer is good. Go on. */
3971ec222003SJose Abreu 
397288ebe2cfSJose Abreu 		prefetch(page_address(buf->page));
397388ebe2cfSJose Abreu 		if (buf->sec_page)
397488ebe2cfSJose Abreu 			prefetch(page_address(buf->sec_page));
397588ebe2cfSJose Abreu 
397688ebe2cfSJose Abreu 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
397788ebe2cfSJose Abreu 		len += buf1_len;
397888ebe2cfSJose Abreu 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
397988ebe2cfSJose Abreu 		len += buf2_len;
3980ec222003SJose Abreu 
39817ac6653aSJeff Kirsher 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3982ceb69499SGiuseppe CAVALLARO 		 * Type frames (LLC/LLC-SNAP)
3983565020aaSJose Abreu 		 *
3984565020aaSJose Abreu 		 * llc_snap is never checked in GMAC >= 4, so this ACS
3985565020aaSJose Abreu 		 * feature is always disabled and packets need to be
3986565020aaSJose Abreu 		 * stripped manually.
3987ceb69499SGiuseppe CAVALLARO 		 */
398893b5dce4SJose Abreu 		if (likely(!(status & rx_not_ls)) &&
398993b5dce4SJose Abreu 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
399093b5dce4SJose Abreu 		     unlikely(status != llc_snap))) {
399188ebe2cfSJose Abreu 			if (buf2_len)
399288ebe2cfSJose Abreu 				buf2_len -= ETH_FCS_LEN;
399388ebe2cfSJose Abreu 			else
399488ebe2cfSJose Abreu 				buf1_len -= ETH_FCS_LEN;
399588ebe2cfSJose Abreu 
3996ec222003SJose Abreu 			len -= ETH_FCS_LEN;
399783d7af64SGiuseppe CAVALLARO 		}
399822ad3838SGiuseppe Cavallaro 
3999ec222003SJose Abreu 		if (!skb) {
400088ebe2cfSJose Abreu 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
4001ec222003SJose Abreu 			if (!skb) {
400222ad3838SGiuseppe Cavallaro 				priv->dev->stats.rx_dropped++;
4003cda4985aSJose Abreu 				count++;
400488ebe2cfSJose Abreu 				goto drain_data;
400522ad3838SGiuseppe Cavallaro 			}
400622ad3838SGiuseppe Cavallaro 
400788ebe2cfSJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
400888ebe2cfSJose Abreu 						buf1_len, DMA_FROM_DEVICE);
40092af6106aSJose Abreu 			skb_copy_to_linear_data(skb, page_address(buf->page),
401088ebe2cfSJose Abreu 						buf1_len);
401188ebe2cfSJose Abreu 			skb_put(skb, buf1_len);
401222ad3838SGiuseppe Cavallaro 
4013ec222003SJose Abreu 			/* Data payload copied into SKB, page ready for recycle */
4014ec222003SJose Abreu 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
4015ec222003SJose Abreu 			buf->page = NULL;
401688ebe2cfSJose Abreu 		} else if (buf1_len) {
4017ec222003SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->addr,
401888ebe2cfSJose Abreu 						buf1_len, DMA_FROM_DEVICE);
4019ec222003SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
402088ebe2cfSJose Abreu 					buf->page, 0, buf1_len,
4021ec222003SJose Abreu 					priv->dma_buf_sz);
4022ec222003SJose Abreu 
4023ec222003SJose Abreu 			/* Data payload appended into SKB */
4024ec222003SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->page);
4025ec222003SJose Abreu 			buf->page = NULL;
40267ac6653aSJeff Kirsher 		}
402783d7af64SGiuseppe CAVALLARO 
402888ebe2cfSJose Abreu 		if (buf2_len) {
402967afd6d1SJose Abreu 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
403088ebe2cfSJose Abreu 						buf2_len, DMA_FROM_DEVICE);
403167afd6d1SJose Abreu 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
403288ebe2cfSJose Abreu 					buf->sec_page, 0, buf2_len,
403367afd6d1SJose Abreu 					priv->dma_buf_sz);
403467afd6d1SJose Abreu 
403567afd6d1SJose Abreu 			/* Data payload appended into SKB */
403667afd6d1SJose Abreu 			page_pool_release_page(rx_q->page_pool, buf->sec_page);
403767afd6d1SJose Abreu 			buf->sec_page = NULL;
403867afd6d1SJose Abreu 		}
403967afd6d1SJose Abreu 
404088ebe2cfSJose Abreu drain_data:
4041ec222003SJose Abreu 		if (likely(status & rx_not_ls))
4042ec222003SJose Abreu 			goto read_again;
404388ebe2cfSJose Abreu 		if (!skb)
404488ebe2cfSJose Abreu 			continue;
4045ec222003SJose Abreu 
4046ec222003SJose Abreu 		/* Got entire packet into SKB. Finish it. */
4047ec222003SJose Abreu 
4048ba1ffd74SGiuseppe CAVALLARO 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
4049b9381985SVince Bridgers 		stmmac_rx_vlan(priv->dev, skb);
40507ac6653aSJeff Kirsher 		skb->protocol = eth_type_trans(skb, priv->dev);
40517ac6653aSJeff Kirsher 
4052ceb69499SGiuseppe CAVALLARO 		if (unlikely(!coe))
40537ac6653aSJeff Kirsher 			skb_checksum_none_assert(skb);
405462a2ab93SGiuseppe CAVALLARO 		else
40557ac6653aSJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
405662a2ab93SGiuseppe CAVALLARO 
405776067459SJose Abreu 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
405876067459SJose Abreu 			skb_set_hash(skb, hash, hash_type);
405976067459SJose Abreu 
406076067459SJose Abreu 		skb_record_rx_queue(skb, queue);
40614ccb4585SJose Abreu 		napi_gro_receive(&ch->rx_napi, skb);
406288ebe2cfSJose Abreu 		skb = NULL;
40637ac6653aSJeff Kirsher 
40647ac6653aSJeff Kirsher 		priv->dev->stats.rx_packets++;
4065ec222003SJose Abreu 		priv->dev->stats.rx_bytes += len;
4066cda4985aSJose Abreu 		count++;
40677ac6653aSJeff Kirsher 	}
4068ec222003SJose Abreu 
406988ebe2cfSJose Abreu 	if (status & rx_not_ls || skb) {
4070ec222003SJose Abreu 		rx_q->state_saved = true;
4071ec222003SJose Abreu 		rx_q->state.skb = skb;
4072ec222003SJose Abreu 		rx_q->state.error = error;
4073ec222003SJose Abreu 		rx_q->state.len = len;
40747ac6653aSJeff Kirsher 	}
40757ac6653aSJeff Kirsher 
407654139cf3SJoao Pinto 	stmmac_rx_refill(priv, queue);
40777ac6653aSJeff Kirsher 
40787ac6653aSJeff Kirsher 	priv->xstats.rx_pkt_n += count;
40797ac6653aSJeff Kirsher 
40807ac6653aSJeff Kirsher 	return count;
40817ac6653aSJeff Kirsher }
40827ac6653aSJeff Kirsher 
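/* NAPI poll handlers: each DMA channel has a dedicated RX and a dedicated TX
 * NAPI instance. When the budget is not exhausted, napi_complete_done() is
 * called and the corresponding DMA interrupt is re-enabled under ch->lock.
 */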
40834ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
40847ac6653aSJeff Kirsher {
40858fce3331SJose Abreu 	struct stmmac_channel *ch =
40864ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, rx_napi);
40878fce3331SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
40888fce3331SJose Abreu 	u32 chan = ch->index;
40894ccb4585SJose Abreu 	int work_done;
40907ac6653aSJeff Kirsher 
40919125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
4092ce736788SJoao Pinto 
40934ccb4585SJose Abreu 	work_done = stmmac_rx(priv, budget, chan);
4094021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
4095021bd5e3SJose Abreu 		unsigned long flags;
4096021bd5e3SJose Abreu 
4097021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
4098021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
4099021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
4100021bd5e3SJose Abreu 	}
4101021bd5e3SJose Abreu 
41024ccb4585SJose Abreu 	return work_done;
41034ccb4585SJose Abreu }
4104ce736788SJoao Pinto 
41054ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
41064ccb4585SJose Abreu {
41074ccb4585SJose Abreu 	struct stmmac_channel *ch =
41084ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, tx_napi);
41094ccb4585SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
41104ccb4585SJose Abreu 	u32 chan = ch->index;
41114ccb4585SJose Abreu 	int work_done;
41124ccb4585SJose Abreu 
41134ccb4585SJose Abreu 	priv->xstats.napi_poll++;
41144ccb4585SJose Abreu 
4115aa042f60SSong, Yoong Siang 	work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
4116fa0be0a4SJose Abreu 	work_done = min(work_done, budget);
41178fce3331SJose Abreu 
4118021bd5e3SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done)) {
4119021bd5e3SJose Abreu 		unsigned long flags;
41204ccb4585SJose Abreu 
4121021bd5e3SJose Abreu 		spin_lock_irqsave(&ch->lock, flags);
4122021bd5e3SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
4123021bd5e3SJose Abreu 		spin_unlock_irqrestore(&ch->lock, flags);
4124fa0be0a4SJose Abreu 	}
41258fce3331SJose Abreu 
41267ac6653aSJeff Kirsher 	return work_done;
41277ac6653aSJeff Kirsher }
41287ac6653aSJeff Kirsher 
41297ac6653aSJeff Kirsher /**
41307ac6653aSJeff Kirsher  *  stmmac_tx_timeout
41317ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
4132d0ea5cbdSJesse Brandeburg  *  @txqueue: the index of the hanging transmit queue
41337ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
41347284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
41357ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
41367ac6653aSJeff Kirsher  *   in order to transmit a new packet.
41377ac6653aSJeff Kirsher  */
41380290bd29SMichael S. Tsirkin static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
41397ac6653aSJeff Kirsher {
41407ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
41417ac6653aSJeff Kirsher 
414234877a15SJose Abreu 	stmmac_global_err(priv);
41437ac6653aSJeff Kirsher }
41447ac6653aSJeff Kirsher 
41457ac6653aSJeff Kirsher /**
414601789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
41477ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
41487ac6653aSJeff Kirsher  *  Description:
41497ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
41507ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
41517ac6653aSJeff Kirsher  *  Return value:
41527ac6653aSJeff Kirsher  *  void.
41537ac6653aSJeff Kirsher  */
415401789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
41557ac6653aSJeff Kirsher {
41567ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
41577ac6653aSJeff Kirsher 
4158c10d4c82SJose Abreu 	stmmac_set_filter(priv, priv->hw, dev);
41597ac6653aSJeff Kirsher }
41607ac6653aSJeff Kirsher 
41617ac6653aSJeff Kirsher /**
41627ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
41637ac6653aSJeff Kirsher  *  @dev : device pointer.
41647ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
41657ac6653aSJeff Kirsher  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
41667ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
41677ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
41687ac6653aSJeff Kirsher  *  Return value:
41697ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
41707ac6653aSJeff Kirsher  *  file on failure.
41717ac6653aSJeff Kirsher  */
41727ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
41737ac6653aSJeff Kirsher {
417438ddc59dSLABBE Corentin 	struct stmmac_priv *priv = netdev_priv(dev);
4175eaf4fac4SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
41765b55299eSDavid Wu 	const int mtu = new_mtu;
4177eaf4fac4SJose Abreu 
4178eaf4fac4SJose Abreu 	if (txfifosz == 0)
4179eaf4fac4SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
4180eaf4fac4SJose Abreu 
4181eaf4fac4SJose Abreu 	txfifosz /= priv->plat->tx_queues_to_use;
418238ddc59dSLABBE Corentin 
41837ac6653aSJeff Kirsher 	if (netif_running(dev)) {
418438ddc59dSLABBE Corentin 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
41857ac6653aSJeff Kirsher 		return -EBUSY;
41867ac6653aSJeff Kirsher 	}
41877ac6653aSJeff Kirsher 
4188eaf4fac4SJose Abreu 	new_mtu = STMMAC_ALIGN(new_mtu);
4189eaf4fac4SJose Abreu 
4190eaf4fac4SJose Abreu 	/* If condition true, FIFO is too small or MTU too large */
4191eaf4fac4SJose Abreu 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
4192eaf4fac4SJose Abreu 		return -EINVAL;
4193eaf4fac4SJose Abreu 
41945b55299eSDavid Wu 	dev->mtu = mtu;
4195f748be53SAlexandre TORGUE 
41967ac6653aSJeff Kirsher 	netdev_update_features(dev);
41977ac6653aSJeff Kirsher 
41987ac6653aSJeff Kirsher 	return 0;
41997ac6653aSJeff Kirsher }
42007ac6653aSJeff Kirsher 
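/**
 *  stmmac_fix_features - constrain the requested features to the HW support
 *  @dev : device pointer.
 *  @features : features requested by the networking stack.
 *  Description: drop RX checksum offload when the core has no RX COE, drop
 *  the TX checksum offloads when TX COE is missing or when the bugged Jumbo
 *  frame support requires it, and record whether TSO was requested.
 */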
4201c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
4202c8f44affSMichał Mirosław 					     netdev_features_t features)
42037ac6653aSJeff Kirsher {
42047ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
42057ac6653aSJeff Kirsher 
420638912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
42077ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
4208d2afb5bdSGiuseppe CAVALLARO 
42097ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
4210a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
42117ac6653aSJeff Kirsher 
42127ac6653aSJeff Kirsher 	/* Some GMAC devices have bugged Jumbo frame support that
42137ac6653aSJeff Kirsher 	 * needs to have the Tx COE disabled for oversized frames
42147ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
4215ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and do not use SF.
4216ceb69499SGiuseppe CAVALLARO 	 */
42177ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
4218a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
42197ac6653aSJeff Kirsher 
4220f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
4221f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4222f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
4223f748be53SAlexandre TORGUE 			priv->tso = true;
4224f748be53SAlexandre TORGUE 		else
4225f748be53SAlexandre TORGUE 			priv->tso = false;
4226f748be53SAlexandre TORGUE 	}
4227f748be53SAlexandre TORGUE 
42287ac6653aSJeff Kirsher 	return features;
42297ac6653aSJeff Kirsher }
42307ac6653aSJeff Kirsher 
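/**
 *  stmmac_set_features - apply the accepted feature set to the HW
 *  @netdev : device pointer.
 *  @features : the features accepted by the networking stack.
 *  Description: program the RX checksum engine according to NETIF_F_RXCSUM
 *  and re-evaluate Split Header (SPH) on every RX channel, since SPH is only
 *  enabled while RX checksum offload is active.
 */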
4231d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
4232d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
4233d2afb5bdSGiuseppe CAVALLARO {
4234d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
423567afd6d1SJose Abreu 	bool sph_en;
423667afd6d1SJose Abreu 	u32 chan;
4237d2afb5bdSGiuseppe CAVALLARO 
4238d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type in case of csum is supporting */
4239d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
4240d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
4241d2afb5bdSGiuseppe CAVALLARO 	else
4242d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
4243d2afb5bdSGiuseppe CAVALLARO 	/* No check needed because rx_coe has already been set and will be
4244d2afb5bdSGiuseppe CAVALLARO 	 * fixed up in case of an issue.
4245d2afb5bdSGiuseppe CAVALLARO 	 */
4246c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
4247d2afb5bdSGiuseppe CAVALLARO 
424867afd6d1SJose Abreu 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
424967afd6d1SJose Abreu 	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
425067afd6d1SJose Abreu 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
425167afd6d1SJose Abreu 
4252d2afb5bdSGiuseppe CAVALLARO 	return 0;
4253d2afb5bdSGiuseppe CAVALLARO }
4254d2afb5bdSGiuseppe CAVALLARO 
425532ceabcaSGiuseppe CAVALLARO /**
425632ceabcaSGiuseppe CAVALLARO  *  stmmac_interrupt - main ISR
425732ceabcaSGiuseppe CAVALLARO  *  @irq: interrupt number.
4258f42234ffSMaxim Petrov  *  @dev_id: to pass the net device pointer (must be valid).
425932ceabcaSGiuseppe CAVALLARO  *  Description: this is the main driver interrupt service routine.
4260732fdf0eSGiuseppe CAVALLARO  *  It can call:
4261732fdf0eSGiuseppe CAVALLARO  *  o DMA service routine (to manage incoming frame reception and transmission
4262732fdf0eSGiuseppe CAVALLARO  *    status)
4263732fdf0eSGiuseppe CAVALLARO  *  o Core interrupts to manage: remote wake-up, management counter, LPI
426432ceabcaSGiuseppe CAVALLARO  *    interrupts.
426532ceabcaSGiuseppe CAVALLARO  */
42667ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
42677ac6653aSJeff Kirsher {
42687ac6653aSJeff Kirsher 	struct net_device *dev = (struct net_device *)dev_id;
42697ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
42707bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
42717bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
42727bac4e1eSJoao Pinto 	u32 queues_count;
42737bac4e1eSJoao Pinto 	u32 queue;
42747d9e6c5aSJose Abreu 	bool xmac;
42757bac4e1eSJoao Pinto 
42767d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
42777bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
42787ac6653aSJeff Kirsher 
427989f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
428089f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
428189f7f2cfSSrinivas Kandagatla 
428234877a15SJose Abreu 	/* Check if adapter is up */
428334877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
428434877a15SJose Abreu 		return IRQ_HANDLED;
42858bf993a5SJose Abreu 	/* Check if a fatal error happened */
42868bf993a5SJose Abreu 	if (stmmac_safety_feat_interrupt(priv))
42878bf993a5SJose Abreu 		return IRQ_HANDLED;
428834877a15SJose Abreu 
42897ac6653aSJeff Kirsher 	/* To handle GMAC own interrupts */
42907d9e6c5aSJose Abreu 	if ((priv->plat->has_gmac) || xmac) {
4291c10d4c82SJose Abreu 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
429261fac60aSJose Abreu 		int mtl_status;
42938f71a88dSJoao Pinto 
4294d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
4295d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
42960982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
4297d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
42980982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
4299d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
43007bac4e1eSJoao Pinto 		}
43017bac4e1eSJoao Pinto 
43027bac4e1eSJoao Pinto 		for (queue = 0; queue < queues_count; queue++) {
430361fac60aSJose Abreu 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
430454139cf3SJoao Pinto 
430561fac60aSJose Abreu 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
430661fac60aSJose Abreu 								queue);
430761fac60aSJose Abreu 			if (mtl_status != -EINVAL)
430861fac60aSJose Abreu 				status |= mtl_status;
43097bac4e1eSJoao Pinto 
4310a4e887faSJose Abreu 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
431161fac60aSJose Abreu 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
431254139cf3SJoao Pinto 						       rx_q->rx_tail_addr,
43137bac4e1eSJoao Pinto 						       queue);
43147bac4e1eSJoao Pinto 		}
431570523e63SGiuseppe CAVALLARO 
431670523e63SGiuseppe CAVALLARO 		/* PCS link status */
43173fe5cadbSGiuseppe CAVALLARO 		if (priv->hw->pcs) {
431870523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
431970523e63SGiuseppe CAVALLARO 				netif_carrier_on(dev);
432070523e63SGiuseppe CAVALLARO 			else
432170523e63SGiuseppe CAVALLARO 				netif_carrier_off(dev);
432270523e63SGiuseppe CAVALLARO 		}
4323d765955dSGiuseppe CAVALLARO 	}
4324d765955dSGiuseppe CAVALLARO 
4325d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
43267ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
43277ac6653aSJeff Kirsher 
43287ac6653aSJeff Kirsher 	return IRQ_HANDLED;
43297ac6653aSJeff Kirsher }
43307ac6653aSJeff Kirsher 
43317ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
43327ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools
4333ceb69499SGiuseppe CAVALLARO  * to allow network I/O with interrupts disabled.
4334ceb69499SGiuseppe CAVALLARO  */
43357ac6653aSJeff Kirsher static void stmmac_poll_controller(struct net_device *dev)
43367ac6653aSJeff Kirsher {
43377ac6653aSJeff Kirsher 	disable_irq(dev->irq);
43387ac6653aSJeff Kirsher 	stmmac_interrupt(dev->irq, dev);
43397ac6653aSJeff Kirsher 	enable_irq(dev->irq);
43407ac6653aSJeff Kirsher }
43417ac6653aSJeff Kirsher #endif
43427ac6653aSJeff Kirsher 
43437ac6653aSJeff Kirsher /**
43447ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
43457ac6653aSJeff Kirsher  *  @dev: Device pointer.
43467ac6653aSJeff Kirsher  *  @rq: An IOCTL specific structure, that can contain a pointer to
43477ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
43487ac6653aSJeff Kirsher  *  @cmd: IOCTL command
43497ac6653aSJeff Kirsher  *  Description:
435032ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
43517ac6653aSJeff Kirsher  */
43527ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
43537ac6653aSJeff Kirsher {
435474371272SJose Abreu 	struct stmmac_priv *priv = netdev_priv (dev);
4355891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
43567ac6653aSJeff Kirsher 
43577ac6653aSJeff Kirsher 	if (!netif_running(dev))
43587ac6653aSJeff Kirsher 		return -EINVAL;
43597ac6653aSJeff Kirsher 
4360891434b1SRayagond Kokatanur 	switch (cmd) {
4361891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
4362891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
4363891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
436474371272SJose Abreu 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
4365891434b1SRayagond Kokatanur 		break;
4366891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
4367d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_set(dev, rq);
4368d6228b7cSArtem Panfilov 		break;
4369d6228b7cSArtem Panfilov 	case SIOCGHWTSTAMP:
4370d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_get(dev, rq);
4371891434b1SRayagond Kokatanur 		break;
4372891434b1SRayagond Kokatanur 	default:
4373891434b1SRayagond Kokatanur 		break;
4374891434b1SRayagond Kokatanur 	}
43757ac6653aSJeff Kirsher 
43767ac6653aSJeff Kirsher 	return ret;
43777ac6653aSJeff Kirsher }
43787ac6653aSJeff Kirsher 
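/* TC block callback: handles classifier offload requests (cls_u32 and
 * cls_flower on chain 0 only). All queues are stopped while the HW tables
 * are updated and re-enabled afterwards.
 */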
43794dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
43804dbbe8ddSJose Abreu 				    void *cb_priv)
43814dbbe8ddSJose Abreu {
43824dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
43834dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
43844dbbe8ddSJose Abreu 
4385425eabddSJose Abreu 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4386425eabddSJose Abreu 		return ret;
4387425eabddSJose Abreu 
43884dbbe8ddSJose Abreu 	stmmac_disable_all_queues(priv);
43894dbbe8ddSJose Abreu 
43904dbbe8ddSJose Abreu 	switch (type) {
43914dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
43924dbbe8ddSJose Abreu 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
43934dbbe8ddSJose Abreu 		break;
4394425eabddSJose Abreu 	case TC_SETUP_CLSFLOWER:
4395425eabddSJose Abreu 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
4396425eabddSJose Abreu 		break;
43974dbbe8ddSJose Abreu 	default:
43984dbbe8ddSJose Abreu 		break;
43994dbbe8ddSJose Abreu 	}
44004dbbe8ddSJose Abreu 
44014dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
44024dbbe8ddSJose Abreu 	return ret;
44034dbbe8ddSJose Abreu }
44044dbbe8ddSJose Abreu 
4405955bcb6eSPablo Neira Ayuso static LIST_HEAD(stmmac_block_cb_list);
4406955bcb6eSPablo Neira Ayuso 
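/* ndo_setup_tc entry point: dispatch the supported TC offloads (classifier
 * blocks and the CBS, TAPRIO and ETF qdiscs) to the stmmac TC helpers.
 */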
44074dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
44084dbbe8ddSJose Abreu 			   void *type_data)
44094dbbe8ddSJose Abreu {
44104dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
44114dbbe8ddSJose Abreu 
44124dbbe8ddSJose Abreu 	switch (type) {
44134dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
4414955bcb6eSPablo Neira Ayuso 		return flow_block_cb_setup_simple(type_data,
4415955bcb6eSPablo Neira Ayuso 						  &stmmac_block_cb_list,
44164e95bc26SPablo Neira Ayuso 						  stmmac_setup_tc_block_cb,
44174e95bc26SPablo Neira Ayuso 						  priv, priv, true);
44181f705bc6SJose Abreu 	case TC_SETUP_QDISC_CBS:
44191f705bc6SJose Abreu 		return stmmac_tc_setup_cbs(priv, priv, type_data);
4420b60189e0SJose Abreu 	case TC_SETUP_QDISC_TAPRIO:
4421b60189e0SJose Abreu 		return stmmac_tc_setup_taprio(priv, priv, type_data);
4422430b383cSJose Abreu 	case TC_SETUP_QDISC_ETF:
4423430b383cSJose Abreu 		return stmmac_tc_setup_etf(priv, priv, type_data);
44244dbbe8ddSJose Abreu 	default:
44254dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
44264dbbe8ddSJose Abreu 	}
44274dbbe8ddSJose Abreu }
44284dbbe8ddSJose Abreu 
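/* Select the TX queue for a given skb: TSO/USO (GSO) packets always go to
 * queue 0 (see the comment below), everything else uses the default
 * netdev_pick_tx() choice folded onto the real number of TX queues.
 */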
44294993e5b3SJose Abreu static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
44304993e5b3SJose Abreu 			       struct net_device *sb_dev)
44314993e5b3SJose Abreu {
4432b7766206SJose Abreu 	int gso = skb_shinfo(skb)->gso_type;
4433b7766206SJose Abreu 
4434b7766206SJose Abreu 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
44354993e5b3SJose Abreu 		/*
4436b7766206SJose Abreu 		 * There is no way to determine the number of TSO/USO
44374993e5b3SJose Abreu 		 * capable Queues. Always use Queue 0, because if
4438b7766206SJose Abreu 		 * TSO/USO is supported then at least this one will
44394993e5b3SJose Abreu 		 * be capable.
44404993e5b3SJose Abreu 		 */
44414993e5b3SJose Abreu 		return 0;
44424993e5b3SJose Abreu 	}
44434993e5b3SJose Abreu 
44444993e5b3SJose Abreu 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
44454993e5b3SJose Abreu }
44464993e5b3SJose Abreu 
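/* ndo_set_mac_address: validate and store the new address with eth_mac_addr()
 * and program it into the first unicast address register of the MAC.
 */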
4447a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4448a830405eSBhadram Varka {
4449a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
4450a830405eSBhadram Varka 	int ret = 0;
4451a830405eSBhadram Varka 
4452a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
4453a830405eSBhadram Varka 	if (ret)
4454a830405eSBhadram Varka 		return ret;
4455a830405eSBhadram Varka 
4456c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4457a830405eSBhadram Varka 
4458a830405eSBhadram Varka 	return ret;
4459a830405eSBhadram Varka }
4460a830405eSBhadram Varka 
446150fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
44627ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
44637ac29055SGiuseppe CAVALLARO 
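/* Dump a descriptor ring to a debugfs seq_file: one line per descriptor with
 * its DMA address and the four descriptor words, for both normal and
 * extended descriptor layouts.
 */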
4464c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
4465bfaf91caSJoakim Zhang 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
44667ac29055SGiuseppe CAVALLARO {
44677ac29055SGiuseppe CAVALLARO 	int i;
4468c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4469c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
4470bfaf91caSJoakim Zhang 	dma_addr_t dma_addr;
44717ac29055SGiuseppe CAVALLARO 
4472c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
4473c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
4474bfaf91caSJoakim Zhang 			dma_addr = dma_phy_addr + i * sizeof(*ep);
4475bfaf91caSJoakim Zhang 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4476bfaf91caSJoakim Zhang 				   i, &dma_addr,
4477f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
4478f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
4479f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
4480f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
4481c24602efSGiuseppe CAVALLARO 			ep++;
4482c24602efSGiuseppe CAVALLARO 		} else {
4483bfaf91caSJoakim Zhang 			dma_addr = dma_phy_addr + i * sizeof(*p);
4484bfaf91caSJoakim Zhang 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4485bfaf91caSJoakim Zhang 				   i, &dma_addr,
4486f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4487f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4488c24602efSGiuseppe CAVALLARO 			p++;
4489c24602efSGiuseppe CAVALLARO 		}
44907ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
44917ac29055SGiuseppe CAVALLARO 	}
4492c24602efSGiuseppe CAVALLARO }
44937ac29055SGiuseppe CAVALLARO 
4494fb0d9c63SYangtao Li static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4495c24602efSGiuseppe CAVALLARO {
4496c24602efSGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
4497c24602efSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
449854139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
4499ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
450054139cf3SJoao Pinto 	u32 queue;
450154139cf3SJoao Pinto 
45025f2b8b62SThierry Reding 	if ((dev->flags & IFF_UP) == 0)
45035f2b8b62SThierry Reding 		return 0;
45045f2b8b62SThierry Reding 
450554139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
450654139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
450754139cf3SJoao Pinto 
450854139cf3SJoao Pinto 		seq_printf(seq, "RX Queue %d:\n", queue);
45097ac29055SGiuseppe CAVALLARO 
4510c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc) {
451154139cf3SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
451254139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_erx,
4513bfaf91caSJoakim Zhang 					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
451454139cf3SJoao Pinto 		} else {
451554139cf3SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
451654139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_rx,
4517bfaf91caSJoakim Zhang 					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
451854139cf3SJoao Pinto 		}
451954139cf3SJoao Pinto 	}
452054139cf3SJoao Pinto 
4521ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
4522ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4523ce736788SJoao Pinto 
4524ce736788SJoao Pinto 		seq_printf(seq, "TX Queue %d:\n", queue);
4525ce736788SJoao Pinto 
452654139cf3SJoao Pinto 		if (priv->extend_desc) {
4527ce736788SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
4528ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_etx,
4529bfaf91caSJoakim Zhang 					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
4530579a25a8SJose Abreu 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
4531ce736788SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
4532ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_tx,
4533bfaf91caSJoakim Zhang 					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
4534ce736788SJoao Pinto 		}
45357ac29055SGiuseppe CAVALLARO 	}
45367ac29055SGiuseppe CAVALLARO 
45377ac29055SGiuseppe CAVALLARO 	return 0;
45387ac29055SGiuseppe CAVALLARO }
4539fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
45407ac29055SGiuseppe CAVALLARO 
4541fb0d9c63SYangtao Li static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4542e7434821SGiuseppe CAVALLARO {
4543e7434821SGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
4544e7434821SGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
4545e7434821SGiuseppe CAVALLARO 
454619e30c14SGiuseppe CAVALLARO 	if (!priv->hw_cap_support) {
4547e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "DMA HW features not supported\n");
4548e7434821SGiuseppe CAVALLARO 		return 0;
4549e7434821SGiuseppe CAVALLARO 	}
4550e7434821SGiuseppe CAVALLARO 
4551e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
4552e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tDMA HW features\n");
4553e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
4554e7434821SGiuseppe CAVALLARO 
455522d3efe5SPavel Machek 	seq_printf(seq, "\t10/100 Mbps: %s\n",
4556e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
455722d3efe5SPavel Machek 	seq_printf(seq, "\t1000 Mbps: %s\n",
4558e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
455922d3efe5SPavel Machek 	seq_printf(seq, "\tHalf duplex: %s\n",
4560e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
4561e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tHash Filter: %s\n",
4562e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
4563e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4564e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
45658d45e42bSLABBE Corentin 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4566e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pcs) ? "Y" : "N");
4567e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4568e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
4569e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
4570e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4571e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
4572e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4573e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRMON module: %s\n",
4574e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rmon) ? "Y" : "N");
4575e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4576e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
4577e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4578e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
457922d3efe5SPavel Machek 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4580e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.eee) ? "Y" : "N");
4581e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4582e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4583e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
4584f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4585f748be53SAlexandre TORGUE 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4586f748be53SAlexandre TORGUE 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
4587f748be53SAlexandre TORGUE 	} else {
4588e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4589e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4590e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4591e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4592f748be53SAlexandre TORGUE 	}
4593e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4594e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4595e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4596e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_rx_channel);
4597e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4598e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_tx_channel);
45997d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
46007d0b447aSJose Abreu 		   priv->dma_cap.number_rx_queues);
46017d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
46027d0b447aSJose Abreu 		   priv->dma_cap.number_tx_queues);
4603e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4604e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
46057d0b447aSJose Abreu 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
46067d0b447aSJose Abreu 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
46077d0b447aSJose Abreu 	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
46087d0b447aSJose Abreu 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
46097d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
46107d0b447aSJose Abreu 		   priv->dma_cap.pps_out_num);
46117d0b447aSJose Abreu 	seq_printf(seq, "\tSafety Features: %s\n",
46127d0b447aSJose Abreu 		   priv->dma_cap.asp ? "Y" : "N");
46137d0b447aSJose Abreu 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
46147d0b447aSJose Abreu 		   priv->dma_cap.frpsel ? "Y" : "N");
46157d0b447aSJose Abreu 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
46167d0b447aSJose Abreu 		   priv->dma_cap.addr64);
46177d0b447aSJose Abreu 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
46187d0b447aSJose Abreu 		   priv->dma_cap.rssen ? "Y" : "N");
46197d0b447aSJose Abreu 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
46207d0b447aSJose Abreu 		   priv->dma_cap.vlhash ? "Y" : "N");
46217d0b447aSJose Abreu 	seq_printf(seq, "\tSplit Header: %s\n",
46227d0b447aSJose Abreu 		   priv->dma_cap.sphen ? "Y" : "N");
46237d0b447aSJose Abreu 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
46247d0b447aSJose Abreu 		   priv->dma_cap.vlins ? "Y" : "N");
46257d0b447aSJose Abreu 	seq_printf(seq, "\tDouble VLAN: %s\n",
46267d0b447aSJose Abreu 		   priv->dma_cap.dvlan ? "Y" : "N");
46277d0b447aSJose Abreu 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
46287d0b447aSJose Abreu 		   priv->dma_cap.l3l4fnum);
46297d0b447aSJose Abreu 	seq_printf(seq, "\tARP Offloading: %s\n",
46307d0b447aSJose Abreu 		   priv->dma_cap.arpoffsel ? "Y" : "N");
463144e65475SJose Abreu 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
463244e65475SJose Abreu 		   priv->dma_cap.estsel ? "Y" : "N");
463344e65475SJose Abreu 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
463444e65475SJose Abreu 		   priv->dma_cap.fpesel ? "Y" : "N");
463544e65475SJose Abreu 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
463644e65475SJose Abreu 		   priv->dma_cap.tbssel ? "Y" : "N");
4637e7434821SGiuseppe CAVALLARO 	return 0;
4638e7434821SGiuseppe CAVALLARO }
4639fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4640e7434821SGiuseppe CAVALLARO 
4641481a7d15SJiping Ma /* Use network device events to rename debugfs file entries.
4642481a7d15SJiping Ma  */
4643481a7d15SJiping Ma static int stmmac_device_event(struct notifier_block *unused,
4644481a7d15SJiping Ma 			       unsigned long event, void *ptr)
4645481a7d15SJiping Ma {
4646481a7d15SJiping Ma 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4647481a7d15SJiping Ma 	struct stmmac_priv *priv = netdev_priv(dev);
4648481a7d15SJiping Ma 
4649481a7d15SJiping Ma 	if (dev->netdev_ops != &stmmac_netdev_ops)
4650481a7d15SJiping Ma 		goto done;
4651481a7d15SJiping Ma 
4652481a7d15SJiping Ma 	switch (event) {
4653481a7d15SJiping Ma 	case NETDEV_CHANGENAME:
4654481a7d15SJiping Ma 		if (priv->dbgfs_dir)
4655481a7d15SJiping Ma 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
4656481a7d15SJiping Ma 							 priv->dbgfs_dir,
4657481a7d15SJiping Ma 							 stmmac_fs_dir,
4658481a7d15SJiping Ma 							 dev->name);
4659481a7d15SJiping Ma 		break;
4660481a7d15SJiping Ma 	}
4661481a7d15SJiping Ma done:
4662481a7d15SJiping Ma 	return NOTIFY_DONE;
4663481a7d15SJiping Ma }
4664481a7d15SJiping Ma 
4665481a7d15SJiping Ma static struct notifier_block stmmac_notifier = {
4666481a7d15SJiping Ma 	.notifier_call = stmmac_device_event,
4667481a7d15SJiping Ma };
4668481a7d15SJiping Ma 
46698d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev)
46707ac29055SGiuseppe CAVALLARO {
4671466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
46727ac29055SGiuseppe CAVALLARO 
4673474a31e1SAaro Koskinen 	rtnl_lock();
4674474a31e1SAaro Koskinen 
4675466c5ac8SMathieu Olivari 	/* Create per netdev entries */
4676466c5ac8SMathieu Olivari 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4677466c5ac8SMathieu Olivari 
46787ac29055SGiuseppe CAVALLARO 	/* Entry to report DMA RX/TX rings */
46798d72ab11SGreg Kroah-Hartman 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
46807ac29055SGiuseppe CAVALLARO 			    &stmmac_rings_status_fops);
46817ac29055SGiuseppe CAVALLARO 
4682e7434821SGiuseppe CAVALLARO 	/* Entry to report the DMA HW features */
46838d72ab11SGreg Kroah-Hartman 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
46848d72ab11SGreg Kroah-Hartman 			    &stmmac_dma_cap_fops);
4685481a7d15SJiping Ma 
4686474a31e1SAaro Koskinen 	rtnl_unlock();
46877ac29055SGiuseppe CAVALLARO }
46887ac29055SGiuseppe CAVALLARO 
4689466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev)
46907ac29055SGiuseppe CAVALLARO {
4691466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
4692466c5ac8SMathieu Olivari 
4693466c5ac8SMathieu Olivari 	debugfs_remove_recursive(priv->dbgfs_dir);
46947ac29055SGiuseppe CAVALLARO }
469550fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
46967ac29055SGiuseppe CAVALLARO 
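/* Compute the CRC-32 (reflected polynomial 0xedb88320) of a VLAN ID,
 * covering only the bits selected by VLAN_VID_MASK. The caller uses the top
 * bits of the bit-reversed, inverted result as the VLAN hash filter index.
 */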
46973cd1cfcbSJose Abreu static u32 stmmac_vid_crc32_le(__le16 vid_le)
46983cd1cfcbSJose Abreu {
46993cd1cfcbSJose Abreu 	unsigned char *data = (unsigned char *)&vid_le;
47003cd1cfcbSJose Abreu 	unsigned char data_byte = 0;
47013cd1cfcbSJose Abreu 	u32 crc = ~0x0;
47023cd1cfcbSJose Abreu 	u32 temp = 0;
47033cd1cfcbSJose Abreu 	int i, bits;
47043cd1cfcbSJose Abreu 
47053cd1cfcbSJose Abreu 	bits = get_bitmask_order(VLAN_VID_MASK);
47063cd1cfcbSJose Abreu 	for (i = 0; i < bits; i++) {
47073cd1cfcbSJose Abreu 		if ((i % 8) == 0)
47083cd1cfcbSJose Abreu 			data_byte = data[i / 8];
47093cd1cfcbSJose Abreu 
47103cd1cfcbSJose Abreu 		temp = ((crc & 1) ^ data_byte) & 1;
47113cd1cfcbSJose Abreu 		crc >>= 1;
47123cd1cfcbSJose Abreu 		data_byte >>= 1;
47133cd1cfcbSJose Abreu 
47143cd1cfcbSJose Abreu 		if (temp)
47153cd1cfcbSJose Abreu 			crc ^= 0xedb88320;
47163cd1cfcbSJose Abreu 	}
47173cd1cfcbSJose Abreu 
47183cd1cfcbSJose Abreu 	return crc;
47193cd1cfcbSJose Abreu }
47203cd1cfcbSJose Abreu 
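/* Rebuild the VLAN filter from the active_vlans bitmap: build the hash
 * filter from the CRC of every active VID or, when the core has no VLAN
 * hash support, fall back to a single perfect-match entry (at most one VID
 * besides VID 0, which always passes the filter).
 */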
47213cd1cfcbSJose Abreu static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
47223cd1cfcbSJose Abreu {
47233cd1cfcbSJose Abreu 	u32 crc, hash = 0;
4724a24cae70SJose Abreu 	__le16 pmatch = 0;
4725c7ab0b80SJose Abreu 	int count = 0;
4726c7ab0b80SJose Abreu 	u16 vid = 0;
47273cd1cfcbSJose Abreu 
47283cd1cfcbSJose Abreu 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
47293cd1cfcbSJose Abreu 		__le16 vid_le = cpu_to_le16(vid);
47303cd1cfcbSJose Abreu 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
47313cd1cfcbSJose Abreu 		hash |= (1 << crc);
4732c7ab0b80SJose Abreu 		count++;
47333cd1cfcbSJose Abreu 	}
47343cd1cfcbSJose Abreu 
4735c7ab0b80SJose Abreu 	if (!priv->dma_cap.vlhash) {
4736c7ab0b80SJose Abreu 		if (count > 2) /* VID = 0 always passes filter */
4737c7ab0b80SJose Abreu 			return -EOPNOTSUPP;
4738c7ab0b80SJose Abreu 
4739a24cae70SJose Abreu 		pmatch = cpu_to_le16(vid);
4740c7ab0b80SJose Abreu 		hash = 0;
4741c7ab0b80SJose Abreu 	}
4742c7ab0b80SJose Abreu 
4743a24cae70SJose Abreu 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
47443cd1cfcbSJose Abreu }
47453cd1cfcbSJose Abreu 
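/* ndo_vlan_rx_add_vid: runtime-resume the device, mark the VID as active and
 * update the VLAN hash filter and, when available, the HW VLAN RX filter
 * entries; the active_vlans bit is rolled back on failure.
 */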
47463cd1cfcbSJose Abreu static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
47473cd1cfcbSJose Abreu {
47483cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
47493cd1cfcbSJose Abreu 	bool is_double = false;
47503cd1cfcbSJose Abreu 	int ret;
47513cd1cfcbSJose Abreu 
47525ec55823SJoakim Zhang 	ret = pm_runtime_get_sync(priv->device);
47535ec55823SJoakim Zhang 	if (ret < 0) {
47545ec55823SJoakim Zhang 		pm_runtime_put_noidle(priv->device);
47555ec55823SJoakim Zhang 		return ret;
47565ec55823SJoakim Zhang 	}
47575ec55823SJoakim Zhang 
47583cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
47593cd1cfcbSJose Abreu 		is_double = true;
47603cd1cfcbSJose Abreu 
47613cd1cfcbSJose Abreu 	set_bit(vid, priv->active_vlans);
47623cd1cfcbSJose Abreu 	ret = stmmac_vlan_update(priv, is_double);
47633cd1cfcbSJose Abreu 	if (ret) {
47643cd1cfcbSJose Abreu 		clear_bit(vid, priv->active_vlans);
47653cd1cfcbSJose Abreu 		return ret;
47663cd1cfcbSJose Abreu 	}
47673cd1cfcbSJose Abreu 
4768dd6a4998SJose Abreu 	if (priv->hw->num_vlan) {
4769ed64639bSWong Vee Khee 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4770dd6a4998SJose Abreu 		if (ret)
47713cd1cfcbSJose Abreu 			return ret;
47723cd1cfcbSJose Abreu 	}
47733cd1cfcbSJose Abreu 
4774dd6a4998SJose Abreu 	return 0;
4775dd6a4998SJose Abreu }
4776dd6a4998SJose Abreu 
47773cd1cfcbSJose Abreu static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
47783cd1cfcbSJose Abreu {
47793cd1cfcbSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
47803cd1cfcbSJose Abreu 	bool is_double = false;
4781ed64639bSWong Vee Khee 	int ret;
47823cd1cfcbSJose Abreu 
47833cd1cfcbSJose Abreu 	if (be16_to_cpu(proto) == ETH_P_8021AD)
47843cd1cfcbSJose Abreu 		is_double = true;
47853cd1cfcbSJose Abreu 
47863cd1cfcbSJose Abreu 	clear_bit(vid, priv->active_vlans);
4787dd6a4998SJose Abreu 
4788dd6a4998SJose Abreu 	if (priv->hw->num_vlan) {
4789ed64639bSWong Vee Khee 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4790ed64639bSWong Vee Khee 		if (ret)
47915ec55823SJoakim Zhang 			goto del_vlan_error;
4792dd6a4998SJose Abreu 	}
4793ed64639bSWong Vee Khee 
47945ec55823SJoakim Zhang 	ret = stmmac_vlan_update(priv, is_double);
47955ec55823SJoakim Zhang 
47965ec55823SJoakim Zhang del_vlan_error:
47975ec55823SJoakim Zhang 	pm_runtime_put(priv->device);
47985ec55823SJoakim Zhang 
47995ec55823SJoakim Zhang 	return ret;
48003cd1cfcbSJose Abreu }
48013cd1cfcbSJose Abreu 
48027ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = {
48037ac6653aSJeff Kirsher 	.ndo_open = stmmac_open,
48047ac6653aSJeff Kirsher 	.ndo_start_xmit = stmmac_xmit,
48057ac6653aSJeff Kirsher 	.ndo_stop = stmmac_release,
48067ac6653aSJeff Kirsher 	.ndo_change_mtu = stmmac_change_mtu,
48077ac6653aSJeff Kirsher 	.ndo_fix_features = stmmac_fix_features,
4808d2afb5bdSGiuseppe CAVALLARO 	.ndo_set_features = stmmac_set_features,
480901789349SJiri Pirko 	.ndo_set_rx_mode = stmmac_set_rx_mode,
48107ac6653aSJeff Kirsher 	.ndo_tx_timeout = stmmac_tx_timeout,
48117ac6653aSJeff Kirsher 	.ndo_do_ioctl = stmmac_ioctl,
48124dbbe8ddSJose Abreu 	.ndo_setup_tc = stmmac_setup_tc,
48134993e5b3SJose Abreu 	.ndo_select_queue = stmmac_select_queue,
48147ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
48157ac6653aSJeff Kirsher 	.ndo_poll_controller = stmmac_poll_controller,
48167ac6653aSJeff Kirsher #endif
4817a830405eSBhadram Varka 	.ndo_set_mac_address = stmmac_set_mac_address,
48183cd1cfcbSJose Abreu 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
48193cd1cfcbSJose Abreu 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
48207ac6653aSJeff Kirsher };
48217ac6653aSJeff Kirsher 
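/* Reset subtask run from the service workqueue: if a reset was requested
 * (e.g. after a TX timeout) and the interface is not already down, close
 * and re-open the device under the RTNL lock to restore a sane state.
 */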
482234877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv)
482334877a15SJose Abreu {
482434877a15SJose Abreu 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
482534877a15SJose Abreu 		return;
482634877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
482734877a15SJose Abreu 		return;
482834877a15SJose Abreu 
482934877a15SJose Abreu 	netdev_err(priv->dev, "Reset adapter.\n");
483034877a15SJose Abreu 
483134877a15SJose Abreu 	rtnl_lock();
483234877a15SJose Abreu 	netif_trans_update(priv->dev);
483334877a15SJose Abreu 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
483434877a15SJose Abreu 		usleep_range(1000, 2000);
483534877a15SJose Abreu 
483634877a15SJose Abreu 	set_bit(STMMAC_DOWN, &priv->state);
483734877a15SJose Abreu 	dev_close(priv->dev);
483800f54e68SPetr Machata 	dev_open(priv->dev, NULL);
483934877a15SJose Abreu 	clear_bit(STMMAC_DOWN, &priv->state);
484034877a15SJose Abreu 	clear_bit(STMMAC_RESETING, &priv->state);
484134877a15SJose Abreu 	rtnl_unlock();
484234877a15SJose Abreu }
484334877a15SJose Abreu 
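/* Service workqueue handler: currently it only runs the reset subtask and
 * then clears the "service scheduled" flag so new events can be queued.
 */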
484434877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
484534877a15SJose Abreu {
484634877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
484734877a15SJose Abreu 			service_task);
484834877a15SJose Abreu 
484934877a15SJose Abreu 	stmmac_reset_subtask(priv);
485034877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
485134877a15SJose Abreu }
485234877a15SJose Abreu 
48537ac6653aSJeff Kirsher /**
4854cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
485532ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
4856732fdf0eSGiuseppe CAVALLARO  *  Description: this function is to configure the MAC device according to
4857732fdf0eSGiuseppe CAVALLARO  *  some platform parameters or the HW capability register. It prepares the
4858732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain modes and to set up either enhanced or
4859732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
4860cf3f047bSGiuseppe CAVALLARO  */
4861cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
4862cf3f047bSGiuseppe CAVALLARO {
48635f0456b4SJose Abreu 	int ret;
4864cf3f047bSGiuseppe CAVALLARO 
48659f93ac8dSLABBE Corentin 	/* dwmac-sun8i only work in chain mode */
48669f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i)
48679f93ac8dSLABBE Corentin 		chain_mode = 1;
48685f0456b4SJose Abreu 	priv->chain_mode = chain_mode;
48699f93ac8dSLABBE Corentin 
48705f0456b4SJose Abreu 	/* Initialize HW Interface */
48715f0456b4SJose Abreu 	ret = stmmac_hwif_init(priv);
48725f0456b4SJose Abreu 	if (ret)
48735f0456b4SJose Abreu 		return ret;
48744a7d666aSGiuseppe CAVALLARO 
4875cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capability (new GMAC newer than 3.50a) */
4876cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4877cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
487838ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
4879cf3f047bSGiuseppe CAVALLARO 
4880cf3f047bSGiuseppe CAVALLARO 		/* We can override some gmac/dma configuration fields that
4881cf3f047bSGiuseppe CAVALLARO 		 * are passed through the platform (e.g. enh_desc, tx_coe)
4882cf3f047bSGiuseppe CAVALLARO 		 * with the values from the HW capability register
4883cf3f047bSGiuseppe CAVALLARO 		 * (if supported).
4884cf3f047bSGiuseppe CAVALLARO 		 */
4885cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4886cf3f047bSGiuseppe CAVALLARO 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
48873fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
4888b8ef7020SBiao Huang 		if (priv->dma_cap.hash_tb_sz) {
4889b8ef7020SBiao Huang 			priv->hw->multicast_filter_bins =
4890b8ef7020SBiao Huang 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
4891b8ef7020SBiao Huang 			priv->hw->mcast_bits_log2 =
4892b8ef7020SBiao Huang 					ilog2(priv->hw->multicast_filter_bins);
4893b8ef7020SBiao Huang 		}
489438912bdbSDeepak SIKRI 
4895a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
4896a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
4897a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
4898a8df35d4SEzequiel Garcia 		else
489938912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4900a8df35d4SEzequiel Garcia 
4901f748be53SAlexandre TORGUE 		/* In case of GMAC4 rx_coe is from HW cap register. */
4902f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
490338912bdbSDeepak SIKRI 
490438912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
490538912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
490638912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
490738912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
490838912bdbSDeepak SIKRI 
490938ddc59dSLABBE Corentin 	} else {
491038ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
491138ddc59dSLABBE Corentin 	}
4912cf3f047bSGiuseppe CAVALLARO 
4913d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
4914d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
491538ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4916f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
491738ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4918d2afb5bdSGiuseppe CAVALLARO 	}
4919cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
492038ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
4921cf3f047bSGiuseppe CAVALLARO 
4922cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
492338ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4924cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
4925cf3f047bSGiuseppe CAVALLARO 	}
4926cf3f047bSGiuseppe CAVALLARO 
4927f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
492838ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
4929f748be53SAlexandre TORGUE 
4930e0f9956aSChuah, Kim Tatt 	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
4931e0f9956aSChuah, Kim Tatt 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
4932e0f9956aSChuah, Kim Tatt 
49337cfde0afSJose Abreu 	/* Run HW quirks, if any */
49347cfde0afSJose Abreu 	if (priv->hwif_quirks) {
49357cfde0afSJose Abreu 		ret = priv->hwif_quirks(priv);
49367cfde0afSJose Abreu 		if (ret)
49377cfde0afSJose Abreu 			return ret;
49387cfde0afSJose Abreu 	}
49397cfde0afSJose Abreu 
49403b509466SJose Abreu 	/* Rx Watchdog is available in cores newer than 3.40.
49413b509466SJose Abreu 	 * In some cases, for example on bugged HW, this feature
49423b509466SJose Abreu 	 * has to be disabled; this can be done by passing the
49433b509466SJose Abreu 	 * riwt_off field from the platform.
49443b509466SJose Abreu 	 */
49453b509466SJose Abreu 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
49463b509466SJose Abreu 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
49473b509466SJose Abreu 		priv->use_riwt = 1;
49483b509466SJose Abreu 		dev_info(priv->device,
49493b509466SJose Abreu 			 "Enable RX Mitigation via HW Watchdog Timer\n");
49503b509466SJose Abreu 	}
49513b509466SJose Abreu 
4952c24602efSGiuseppe CAVALLARO 	return 0;
4953cf3f047bSGiuseppe CAVALLARO }
4954cf3f047bSGiuseppe CAVALLARO 
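/* Register one NAPI context per DMA channel: an RX NAPI instance for every
 * RX queue and a TX NAPI instance for every TX queue, both hanging off the
 * same stmmac_channel when a channel has both directions.
 */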
49550366f7e0SOng Boon Leong static void stmmac_napi_add(struct net_device *dev)
49560366f7e0SOng Boon Leong {
49570366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
49580366f7e0SOng Boon Leong 	u32 queue, maxq;
49590366f7e0SOng Boon Leong 
49600366f7e0SOng Boon Leong 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
49610366f7e0SOng Boon Leong 
49620366f7e0SOng Boon Leong 	for (queue = 0; queue < maxq; queue++) {
49630366f7e0SOng Boon Leong 		struct stmmac_channel *ch = &priv->channel[queue];
49640366f7e0SOng Boon Leong 
49650366f7e0SOng Boon Leong 		ch->priv_data = priv;
49660366f7e0SOng Boon Leong 		ch->index = queue;
49672b94f526SMarek Szyprowski 		spin_lock_init(&ch->lock);
49680366f7e0SOng Boon Leong 
49690366f7e0SOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use) {
49700366f7e0SOng Boon Leong 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
49710366f7e0SOng Boon Leong 				       NAPI_POLL_WEIGHT);
49720366f7e0SOng Boon Leong 		}
49730366f7e0SOng Boon Leong 		if (queue < priv->plat->tx_queues_to_use) {
49740366f7e0SOng Boon Leong 			netif_tx_napi_add(dev, &ch->tx_napi,
49750366f7e0SOng Boon Leong 					  stmmac_napi_poll_tx,
49760366f7e0SOng Boon Leong 					  NAPI_POLL_WEIGHT);
49770366f7e0SOng Boon Leong 		}
49780366f7e0SOng Boon Leong 	}
49790366f7e0SOng Boon Leong }
49800366f7e0SOng Boon Leong 
49810366f7e0SOng Boon Leong static void stmmac_napi_del(struct net_device *dev)
49820366f7e0SOng Boon Leong {
49830366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
49840366f7e0SOng Boon Leong 	u32 queue, maxq;
49850366f7e0SOng Boon Leong 
49860366f7e0SOng Boon Leong 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
49870366f7e0SOng Boon Leong 
49880366f7e0SOng Boon Leong 	for (queue = 0; queue < maxq; queue++) {
49890366f7e0SOng Boon Leong 		struct stmmac_channel *ch = &priv->channel[queue];
49900366f7e0SOng Boon Leong 
49910366f7e0SOng Boon Leong 		if (queue < priv->plat->rx_queues_to_use)
49920366f7e0SOng Boon Leong 			netif_napi_del(&ch->rx_napi);
49930366f7e0SOng Boon Leong 		if (queue < priv->plat->tx_queues_to_use)
49940366f7e0SOng Boon Leong 			netif_napi_del(&ch->tx_napi);
49950366f7e0SOng Boon Leong 	}
49960366f7e0SOng Boon Leong }
49970366f7e0SOng Boon Leong 
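/* Change the number of RX/TX queues in use at runtime (e.g. from the ethtool
 * channels interface): stop the interface if it is running, re-create the
 * NAPI contexts for the new counts and bring the interface back up.
 */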
49980366f7e0SOng Boon Leong int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
49990366f7e0SOng Boon Leong {
50000366f7e0SOng Boon Leong 	struct stmmac_priv *priv = netdev_priv(dev);
50010366f7e0SOng Boon Leong 	int ret = 0;
50020366f7e0SOng Boon Leong 
50030366f7e0SOng Boon Leong 	if (netif_running(dev))
50040366f7e0SOng Boon Leong 		stmmac_release(dev);
50050366f7e0SOng Boon Leong 
50060366f7e0SOng Boon Leong 	stmmac_napi_del(dev);
50070366f7e0SOng Boon Leong 
50080366f7e0SOng Boon Leong 	priv->plat->rx_queues_to_use = rx_cnt;
50090366f7e0SOng Boon Leong 	priv->plat->tx_queues_to_use = tx_cnt;
50100366f7e0SOng Boon Leong 
50110366f7e0SOng Boon Leong 	stmmac_napi_add(dev);
50120366f7e0SOng Boon Leong 
50130366f7e0SOng Boon Leong 	if (netif_running(dev))
50140366f7e0SOng Boon Leong 		ret = stmmac_open(dev);
50150366f7e0SOng Boon Leong 
50160366f7e0SOng Boon Leong 	return ret;
50170366f7e0SOng Boon Leong }
50180366f7e0SOng Boon Leong 
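/* Change the RX/TX descriptor ring sizes at runtime (e.g. from the ethtool
 * ring parameters): the interface is restarted so that the rings are
 * re-allocated with the new sizes.
 */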
5019aa042f60SSong, Yoong Siang int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
5020aa042f60SSong, Yoong Siang {
5021aa042f60SSong, Yoong Siang 	struct stmmac_priv *priv = netdev_priv(dev);
5022aa042f60SSong, Yoong Siang 	int ret = 0;
5023aa042f60SSong, Yoong Siang 
5024aa042f60SSong, Yoong Siang 	if (netif_running(dev))
5025aa042f60SSong, Yoong Siang 		stmmac_release(dev);
5026aa042f60SSong, Yoong Siang 
5027aa042f60SSong, Yoong Siang 	priv->dma_rx_size = rx_size;
5028aa042f60SSong, Yoong Siang 	priv->dma_tx_size = tx_size;
5029aa042f60SSong, Yoong Siang 
5030aa042f60SSong, Yoong Siang 	if (netif_running(dev))
5031aa042f60SSong, Yoong Siang 		ret = stmmac_open(dev);
5032aa042f60SSong, Yoong Siang 
5033aa042f60SSong, Yoong Siang 	return ret;
5034aa042f60SSong, Yoong Siang }
5035aa042f60SSong, Yoong Siang 
5036cf3f047bSGiuseppe CAVALLARO /**
5037bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
5038bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
5039ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
5040e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
5041bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function used to
5042bfab27a1SGiuseppe CAVALLARO  * call alloc_etherdev and allocate the priv structure.
50439afec6efSAndy Shevchenko  * Return:
504415ffac73SJoachim Eastwood  * returns 0 on success, otherwise errno.
50457ac6653aSJeff Kirsher  */
504615ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
5047cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
5048e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
50497ac6653aSJeff Kirsher {
5050bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
5051bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
50520366f7e0SOng Boon Leong 	u32 rxq;
505376067459SJose Abreu 	int i, ret = 0;
50547ac6653aSJeff Kirsher 
50559737070cSJisheng Zhang 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
50569737070cSJisheng Zhang 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
505741de8d4cSJoe Perches 	if (!ndev)
505815ffac73SJoachim Eastwood 		return -ENOMEM;
50597ac6653aSJeff Kirsher 
5060bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
50617ac6653aSJeff Kirsher 
5062bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
5063bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
5064bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
5065bfab27a1SGiuseppe CAVALLARO 
5066bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
5067cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
5068cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
5069e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
5070e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
5071e56788cfSJoachim Eastwood 
5072e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
5073e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
5074e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
5075e56788cfSJoachim Eastwood 
5076a51645f7SPetr Štetiar 	if (!IS_ERR_OR_NULL(res->mac))
5077e56788cfSJoachim Eastwood 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
5078bfab27a1SGiuseppe CAVALLARO 
5079a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
5080803f8fc4SJoachim Eastwood 
5081cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
5082cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
5083cf3f047bSGiuseppe CAVALLARO 
508434877a15SJose Abreu 	/* Allocate workqueue */
508534877a15SJose Abreu 	priv->wq = create_singlethread_workqueue("stmmac_wq");
508634877a15SJose Abreu 	if (!priv->wq) {
508734877a15SJose Abreu 		dev_err(priv->device, "failed to create workqueue\n");
50889737070cSJisheng Zhang 		return -ENOMEM;
508934877a15SJose Abreu 	}
509034877a15SJose Abreu 
509134877a15SJose Abreu 	INIT_WORK(&priv->service_task, stmmac_service_task);
509234877a15SJose Abreu 
5093cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied XXX CRS XXX
5094ceb69499SGiuseppe CAVALLARO 	 * this needs to have multiple instances
5095ceb69499SGiuseppe CAVALLARO 	 */
5096cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
5097cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
5098cf3f047bSGiuseppe CAVALLARO 
509990f522a2SEugeniy Paltsev 	if (priv->plat->stmmac_rst) {
510090f522a2SEugeniy Paltsev 		ret = reset_control_assert(priv->plat->stmmac_rst);
5101f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
510290f522a2SEugeniy Paltsev 		/* Some reset controllers have only a reset callback instead of
510390f522a2SEugeniy Paltsev 		 * the assert + deassert callbacks pair.
510490f522a2SEugeniy Paltsev 		 */
510590f522a2SEugeniy Paltsev 		if (ret == -ENOTSUPP)
510690f522a2SEugeniy Paltsev 			reset_control_reset(priv->plat->stmmac_rst);
510790f522a2SEugeniy Paltsev 	}
5108c5e4ddbdSChen-Yu Tsai 
5109cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
5110c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
5111c24602efSGiuseppe CAVALLARO 	if (ret)
511262866e98SChen-Yu Tsai 		goto error_hw_init;
5113cf3f047bSGiuseppe CAVALLARO 
5114b561af36SVinod Koul 	stmmac_check_ether_addr(priv);
5115b561af36SVinod Koul 
5116cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
5117cf3f047bSGiuseppe CAVALLARO 
5118cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5119cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
5120f748be53SAlexandre TORGUE 
51214dbbe8ddSJose Abreu 	ret = stmmac_tc_init(priv, priv);
51224dbbe8ddSJose Abreu 	if (!ret) {
51234dbbe8ddSJose Abreu 		ndev->hw_features |= NETIF_F_HW_TC;
51244dbbe8ddSJose Abreu 	}
51254dbbe8ddSJose Abreu 
5126f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
51279edfa7daSNiklas Cassel 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
5128b7766206SJose Abreu 		if (priv->plat->has_gmac4)
5129b7766206SJose Abreu 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
5130f748be53SAlexandre TORGUE 		priv->tso = true;
513138ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
5132f748be53SAlexandre TORGUE 	}
5133a993db88SJose Abreu 
513467afd6d1SJose Abreu 	if (priv->dma_cap.sphen) {
513567afd6d1SJose Abreu 		ndev->hw_features |= NETIF_F_GRO;
513667afd6d1SJose Abreu 		priv->sph = true;
513767afd6d1SJose Abreu 		dev_info(priv->device, "SPH feature enabled\n");
513867afd6d1SJose Abreu 	}
513967afd6d1SJose Abreu 
5140f119cc98SFugang Duan 	/* The current IP register MAC_HW_Feature1[ADDR64] only defines
5141f119cc98SFugang Duan 	 * 32/40/64 bit widths, but some SoCs support other widths: e.g. i.MX8MP
5142f119cc98SFugang Duan 	 * supports 34 bits, which maps to a 40 bit width in MAC_HW_Feature1[ADDR64].
5143f119cc98SFugang Duan 	 * So overwrite dma_cap.addr64 according to the real HW design.
5144f119cc98SFugang Duan 	 */
5145f119cc98SFugang Duan 	if (priv->plat->addr64)
5146f119cc98SFugang Duan 		priv->dma_cap.addr64 = priv->plat->addr64;
5147f119cc98SFugang Duan 
5148a993db88SJose Abreu 	if (priv->dma_cap.addr64) {
5149a993db88SJose Abreu 		ret = dma_set_mask_and_coherent(device,
5150a993db88SJose Abreu 				DMA_BIT_MASK(priv->dma_cap.addr64));
5151a993db88SJose Abreu 		if (!ret) {
5152a993db88SJose Abreu 			dev_info(priv->device, "Using %d bits DMA width\n",
5153a993db88SJose Abreu 				 priv->dma_cap.addr64);
5154968a2978SThierry Reding 
5155968a2978SThierry Reding 			/*
5156968a2978SThierry Reding 			 * If more than 32 bits can be addressed, make sure to
5157968a2978SThierry Reding 			 * enable enhanced addressing mode.
5158968a2978SThierry Reding 			 */
5159968a2978SThierry Reding 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
5160968a2978SThierry Reding 				priv->plat->dma_cfg->eame = true;
5161a993db88SJose Abreu 		} else {
5162a993db88SJose Abreu 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
5163a993db88SJose Abreu 			if (ret) {
5164a993db88SJose Abreu 				dev_err(priv->device, "Failed to set DMA Mask\n");
5165a993db88SJose Abreu 				goto error_hw_init;
5166a993db88SJose Abreu 			}
5167a993db88SJose Abreu 
5168a993db88SJose Abreu 			priv->dma_cap.addr64 = 32;
5169a993db88SJose Abreu 		}
5170a993db88SJose Abreu 	}
5171a993db88SJose Abreu 
5172bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
5173bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
51747ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
51757ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
5176ab188e8fSElad Nachman 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
51773cd1cfcbSJose Abreu 	if (priv->dma_cap.vlhash) {
51783cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
51793cd1cfcbSJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
51803cd1cfcbSJose Abreu 	}
518130d93227SJose Abreu 	if (priv->dma_cap.vlins) {
518230d93227SJose Abreu 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
518330d93227SJose Abreu 		if (priv->dma_cap.dvlan)
518430d93227SJose Abreu 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
518530d93227SJose Abreu 	}
51867ac6653aSJeff Kirsher #endif
51877ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
51887ac6653aSJeff Kirsher 
518976067459SJose Abreu 	/* Initialize RSS */
519076067459SJose Abreu 	rxq = priv->plat->rx_queues_to_use;
519176067459SJose Abreu 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
519276067459SJose Abreu 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
519376067459SJose Abreu 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
519476067459SJose Abreu 
519576067459SJose Abreu 	if (priv->dma_cap.rssen && priv->plat->rss_en)
519676067459SJose Abreu 		ndev->features |= NETIF_F_RXHASH;
519776067459SJose Abreu 
519844770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
519944770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
520056bcd591SJose Abreu 	if (priv->plat->has_xgmac)
52017d9e6c5aSJose Abreu 		ndev->max_mtu = XGMAC_JUMBO_LEN;
520256bcd591SJose Abreu 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
520356bcd591SJose Abreu 		ndev->max_mtu = JUMBO_LEN;
520444770e11SJarod Wilson 	else
520544770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
5206a2cd64f3SKweh, Hock Leong 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
5207a2cd64f3SKweh, Hock Leong 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
5208a2cd64f3SKweh, Hock Leong 	 */
5209a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
5210a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
521144770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
5212a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
5213b618ab45SHeiner Kallweit 		dev_warn(priv->device,
5214a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu having invalid value (%d)\n",
5215a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
521644770e11SJarod Wilson 
52177ac6653aSJeff Kirsher 	if (flow_ctrl)
52187ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
52197ac6653aSJeff Kirsher 
52208fce3331SJose Abreu 	/* Set up the per-channel NAPI instances */
52210366f7e0SOng Boon Leong 	stmmac_napi_add(ndev);
52227ac6653aSJeff Kirsher 
522329555fa3SThierry Reding 	mutex_init(&priv->lock);
52247ac6653aSJeff Kirsher 
5225cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform, the
5226cd7201f4SGiuseppe CAVALLARO 	 * CSR Clock Range selection is fixed and cannot be changed at
5227cd7201f4SGiuseppe CAVALLARO 	 * run-time. Otherwise, the driver tries to set the MDC clock
5228cd7201f4SGiuseppe CAVALLARO 	 * dynamically according to the actual CSR clock input.
5229cd7201f4SGiuseppe CAVALLARO 	 */
52315e7f7fc5SBiao Huang 	if (priv->plat->clk_csr >= 0)
5232cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
52335e7f7fc5SBiao Huang 	else
52345e7f7fc5SBiao Huang 		stmmac_clk_csr_set(priv);
5235cd7201f4SGiuseppe CAVALLARO 
5236e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
5237e58bb43fSGiuseppe CAVALLARO 
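	/* Hold a runtime PM usage count and mark the device active so the
	 * bus clocks stay enabled until the pm_runtime_put() at the end of
	 * probe releases them.
	 */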
52385ec55823SJoakim Zhang 	pm_runtime_get_noresume(device);
52395ec55823SJoakim Zhang 	pm_runtime_set_active(device);
52405ec55823SJoakim Zhang 	pm_runtime_enable(device);
52415ec55823SJoakim Zhang 
5242a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
52433fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
52444bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
52454bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
52464bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
5247b618ab45SHeiner Kallweit 			dev_err(priv->device,
524838ddc59dSLABBE Corentin 				"%s: MDIO bus (id: %d) registration failed\n",
52494bfcbd7aSFrancesco Virlinzi 				__func__, priv->plat->bus_id);
52506a81c26fSViresh Kumar 			goto error_mdio_register;
52514bfcbd7aSFrancesco Virlinzi 		}
5252e58bb43fSGiuseppe CAVALLARO 	}
52534bfcbd7aSFrancesco Virlinzi 
525474371272SJose Abreu 	ret = stmmac_phy_setup(priv);
525574371272SJose Abreu 	if (ret) {
525674371272SJose Abreu 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
525774371272SJose Abreu 		goto error_phy_setup;
525874371272SJose Abreu 	}
525974371272SJose Abreu 
526057016590SFlorian Fainelli 	ret = register_netdev(ndev);
5261b2eb09afSFlorian Fainelli 	if (ret) {
5262b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
526357016590SFlorian Fainelli 			__func__, ret);
5264b2eb09afSFlorian Fainelli 		goto error_netdev_register;
5265b2eb09afSFlorian Fainelli 	}
52667ac6653aSJeff Kirsher 
5267b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerup) {
5268b9663b7cSVoon Weifeng 		ret = priv->plat->serdes_powerup(ndev,
5269b9663b7cSVoon Weifeng 						 priv->plat->bsp_priv);
5270b9663b7cSVoon Weifeng 
5271b9663b7cSVoon Weifeng 		if (ret < 0)
5272801eb050SAndy Shevchenko 			goto error_serdes_powerup;
5273b9663b7cSVoon Weifeng 	}
5274b9663b7cSVoon Weifeng 
52755f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
52768d72ab11SGreg Kroah-Hartman 	stmmac_init_fs(ndev);
52775f2b8b62SThierry Reding #endif
52785f2b8b62SThierry Reding 
52795ec55823SJoakim Zhang 	/* Let pm_runtime_put() disable the clocks.
52805ec55823SJoakim Zhang 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
52815ec55823SJoakim Zhang 	 */
52825ec55823SJoakim Zhang 	pm_runtime_put(device);
52835ec55823SJoakim Zhang 
528457016590SFlorian Fainelli 	return ret;
52857ac6653aSJeff Kirsher 
5286801eb050SAndy Shevchenko error_serdes_powerup:
5287801eb050SAndy Shevchenko 	unregister_netdev(ndev);
52886a81c26fSViresh Kumar error_netdev_register:
528974371272SJose Abreu 	phylink_destroy(priv->phylink);
529074371272SJose Abreu error_phy_setup:
5291a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
5292b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
5293b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
52947ac6653aSJeff Kirsher error_mdio_register:
52950366f7e0SOng Boon Leong 	stmmac_napi_del(ndev);
529662866e98SChen-Yu Tsai error_hw_init:
529734877a15SJose Abreu 	destroy_workqueue(priv->wq);
52985ec55823SJoakim Zhang 	stmmac_bus_clks_config(priv, false);
52997ac6653aSJeff Kirsher 
530015ffac73SJoachim Eastwood 	return ret;
53017ac6653aSJeff Kirsher }
5302b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
53037ac6653aSJeff Kirsher 
53047ac6653aSJeff Kirsher /**
53057ac6653aSJeff Kirsher  * stmmac_dvr_remove
5306f4e7bd81SJoachim Eastwood  * @dev: device pointer
53077ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
5308bfab27a1SGiuseppe CAVALLARO  * changes the link status, and releases the DMA descriptor rings.
53097ac6653aSJeff Kirsher  */
5310f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev)
53117ac6653aSJeff Kirsher {
5312f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
53137ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
53147ac6653aSJeff Kirsher 
531538ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
53167ac6653aSJeff Kirsher 
5317ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
5318c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
53197ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
53207ac6653aSJeff Kirsher 	unregister_netdev(ndev);
53219a7b3950SOng Boon Leong 
53229a7b3950SOng Boon Leong 	/* Serdes power down needs to happen after the VLAN filter
53239a7b3950SOng Boon Leong 	 * deletion that is triggered by unregister_netdev().
53249a7b3950SOng Boon Leong 	 */
53259a7b3950SOng Boon Leong 	if (priv->plat->serdes_powerdown)
53269a7b3950SOng Boon Leong 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
53279a7b3950SOng Boon Leong 
5328474a31e1SAaro Koskinen #ifdef CONFIG_DEBUG_FS
5329474a31e1SAaro Koskinen 	stmmac_exit_fs(ndev);
5330474a31e1SAaro Koskinen #endif
533174371272SJose Abreu 	phylink_destroy(priv->phylink);
5332f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
5333f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
53345ec55823SJoakim Zhang 	pm_runtime_put(dev);
53355ec55823SJoakim Zhang 	pm_runtime_disable(dev);
5336a47b9e15SDejin Zheng 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
53373fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
5338e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
533934877a15SJose Abreu 	destroy_workqueue(priv->wq);
534029555fa3SThierry Reding 	mutex_destroy(&priv->lock);
53417ac6653aSJeff Kirsher 
53427ac6653aSJeff Kirsher 	return 0;
53437ac6653aSJeff Kirsher }
5344b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
53457ac6653aSJeff Kirsher 
5346732fdf0eSGiuseppe CAVALLARO /**
5347732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
5348f4e7bd81SJoachim Eastwood  * @dev: device pointer
5349732fdf0eSGiuseppe CAVALLARO  * Description: this function suspends the device; it is called by the
5350732fdf0eSGiuseppe CAVALLARO  * platform driver to stop the network queue, program the PMT register
5351732fdf0eSGiuseppe CAVALLARO  * (for WoL), and clean up and release the driver resources.
5352732fdf0eSGiuseppe CAVALLARO  */
5353f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
53547ac6653aSJeff Kirsher {
5355f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
53567ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
535714b41a29SNicolin Chen 	u32 chan;
53585ec55823SJoakim Zhang 	int ret;
53597ac6653aSJeff Kirsher 
53607ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
53617ac6653aSJeff Kirsher 		return 0;
53627ac6653aSJeff Kirsher 
53633e2bf04fSJose Abreu 	phylink_mac_change(priv->phylink, false);
53647ac6653aSJeff Kirsher 
5365134cc4ceSThierry Reding 	mutex_lock(&priv->lock);
536619e13cb2SJose Abreu 
53677ac6653aSJeff Kirsher 	netif_device_detach(ndev);
53687ac6653aSJeff Kirsher 
5369c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
53707ac6653aSJeff Kirsher 
537114b41a29SNicolin Chen 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
5372d5a05e69SVincent Whitchurch 		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
537314b41a29SNicolin Chen 
53745f585913SFugang Duan 	if (priv->eee_enabled) {
53755f585913SFugang Duan 		priv->tx_path_in_lpi_mode = false;
53765f585913SFugang Duan 		del_timer_sync(&priv->eee_ctrl_timer);
53775f585913SFugang Duan 	}
53785f585913SFugang Duan 
53797ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
5380ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
5381c24602efSGiuseppe CAVALLARO 
5382b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerdown)
5383b9663b7cSVoon Weifeng 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5384b9663b7cSVoon Weifeng 
53857ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
5386e8377e7aSJisheng Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5387c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, priv->wolopts);
538889f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
538989f7f2cfSSrinivas Kandagatla 	} else {
5390134cc4ceSThierry Reding 		mutex_unlock(&priv->lock);
53913e2bf04fSJose Abreu 		rtnl_lock();
539277b28983SJisheng Zhang 		if (device_may_wakeup(priv->device))
539377b28983SJisheng Zhang 			phylink_speed_down(priv->phylink, false);
53943e2bf04fSJose Abreu 		phylink_stop(priv->phylink);
53953e2bf04fSJose Abreu 		rtnl_unlock();
5396134cc4ceSThierry Reding 		mutex_lock(&priv->lock);
53973e2bf04fSJose Abreu 
5398c10d4c82SJose Abreu 		stmmac_mac_set(priv, priv->ioaddr, false);
5399db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
5400ba1377ffSGiuseppe CAVALLARO 		/* Disable the clocks since PMT-based wake-up is off */
5401e497c20eSBiao Huang 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
54025ec55823SJoakim Zhang 		ret = pm_runtime_force_suspend(dev);
54035ec55823SJoakim Zhang 		if (ret)
54045ec55823SJoakim Zhang 			return ret;
5405ba1377ffSGiuseppe CAVALLARO 	}
540629555fa3SThierry Reding 	mutex_unlock(&priv->lock);
54072d871aa0SVince Bridgers 
5408bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
54097ac6653aSJeff Kirsher 	return 0;
54107ac6653aSJeff Kirsher }
5411b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
54127ac6653aSJeff Kirsher 
5413732fdf0eSGiuseppe CAVALLARO /**
541454139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
5415d0ea5cbdSJesse Brandeburg  * @priv: driver private structure pointer
541654139cf3SJoao Pinto  */
541754139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
541854139cf3SJoao Pinto {
541954139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5420ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
542154139cf3SJoao Pinto 	u32 queue;
542254139cf3SJoao Pinto 
542354139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
542454139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
542554139cf3SJoao Pinto 
542654139cf3SJoao Pinto 		rx_q->cur_rx = 0;
542754139cf3SJoao Pinto 		rx_q->dirty_rx = 0;
542854139cf3SJoao Pinto 	}
542954139cf3SJoao Pinto 
5430ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
5431ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5432ce736788SJoao Pinto 
5433ce736788SJoao Pinto 		tx_q->cur_tx = 0;
5434ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
54358d212a9eSNiklas Cassel 		tx_q->mss = 0;
5436c511819dSJoakim Zhang 
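		/* Also reset the byte queue limits (BQL) state for this TX queue */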
5437c511819dSJoakim Zhang 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
5438ce736788SJoao Pinto 	}
543954139cf3SJoao Pinto }
544054139cf3SJoao Pinto 
544154139cf3SJoao Pinto /**
5442732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
5443f4e7bd81SJoachim Eastwood  * @dev: device pointer
5444732fdf0eSGiuseppe CAVALLARO  * Description: on resume this function is invoked to set up the DMA and CORE
5445732fdf0eSGiuseppe CAVALLARO  * in a usable state.
5446732fdf0eSGiuseppe CAVALLARO  */
5447f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
54487ac6653aSJeff Kirsher {
5449f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
54507ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
5451b9663b7cSVoon Weifeng 	int ret;
54527ac6653aSJeff Kirsher 
54537ac6653aSJeff Kirsher 	if (!netif_running(ndev))
54547ac6653aSJeff Kirsher 		return 0;
54557ac6653aSJeff Kirsher 
54567ac6653aSJeff Kirsher 	/* The Power Down bit in the PMT register is cleared
54577ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
54587ac6653aSJeff Kirsher 	 * is received. Even so, it's better to clear this bit manually
54597ac6653aSJeff Kirsher 	 * because it can cause problems when resuming from other
5460ceb69499SGiuseppe CAVALLARO 	 * devices (e.g. a serial console).
5461ceb69499SGiuseppe CAVALLARO 	 */
5462e8377e7aSJisheng Zhang 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
546329555fa3SThierry Reding 		mutex_lock(&priv->lock);
5464c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, 0);
546529555fa3SThierry Reding 		mutex_unlock(&priv->lock);
546689f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
5467623997fbSSrinivas Kandagatla 	} else {
5468db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
54698d45e42bSLABBE Corentin 		/* Enable the clocks that were previously disabled */
54705ec55823SJoakim Zhang 		ret = pm_runtime_force_resume(dev);
54715ec55823SJoakim Zhang 		if (ret)
54725ec55823SJoakim Zhang 			return ret;
5473e497c20eSBiao Huang 		if (priv->plat->clk_ptp_ref)
5474e497c20eSBiao Huang 			clk_prepare_enable(priv->plat->clk_ptp_ref);
5475623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
5476623997fbSSrinivas Kandagatla 		if (priv->mii)
5477623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
5478623997fbSSrinivas Kandagatla 	}
54797ac6653aSJeff Kirsher 
5480b9663b7cSVoon Weifeng 	if (priv->plat->serdes_powerup) {
5481b9663b7cSVoon Weifeng 		ret = priv->plat->serdes_powerup(ndev,
5482b9663b7cSVoon Weifeng 						 priv->plat->bsp_priv);
5483b9663b7cSVoon Weifeng 
5484b9663b7cSVoon Weifeng 		if (ret < 0)
5485b9663b7cSVoon Weifeng 			return ret;
5486b9663b7cSVoon Weifeng 	}
5487b9663b7cSVoon Weifeng 
548836d18b56SFugang Duan 	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
548936d18b56SFugang Duan 		rtnl_lock();
549036d18b56SFugang Duan 		phylink_start(priv->phylink);
549136d18b56SFugang Duan 		/* We may have called phylink_speed_down before */
549236d18b56SFugang Duan 		phylink_speed_up(priv->phylink);
549336d18b56SFugang Duan 		rtnl_unlock();
549436d18b56SFugang Duan 	}
549536d18b56SFugang Duan 
54968e5debedSWong Vee Khee 	rtnl_lock();
549729555fa3SThierry Reding 	mutex_lock(&priv->lock);
5498f55d84b0SVincent Palatin 
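	/* Rebuild the software state of the DMA rings (queue indexes,
	 * RX buffers, pending TX skbs, descriptors) before re-programming
	 * the hardware.
	 */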
549954139cf3SJoao Pinto 	stmmac_reset_queues_param(priv);
55009c63faaaSJoakim Zhang 	stmmac_reinit_rx_buffers(priv);
55014ec236c7SFugang Duan 	stmmac_free_tx_skbufs(priv);
5502ae79a639SGiuseppe CAVALLARO 	stmmac_clear_descriptors(priv);
5503ae79a639SGiuseppe CAVALLARO 
5504fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
5505d429b66eSJose Abreu 	stmmac_init_coalesce(priv);
5506ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
55077ac6653aSJeff Kirsher 
5508ed64639bSWong Vee Khee 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
5509ed64639bSWong Vee Khee 
5510c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
55117ac6653aSJeff Kirsher 
5512134cc4ceSThierry Reding 	mutex_unlock(&priv->lock);
55138e5debedSWong Vee Khee 	rtnl_unlock();
5514134cc4ceSThierry Reding 
55153e2bf04fSJose Abreu 	phylink_mac_change(priv->phylink, true);
5516102463b1SFrancesco Virlinzi 
551731096c3eSLeon Yu 	netif_device_attach(ndev);
551831096c3eSLeon Yu 
55197ac6653aSJeff Kirsher 	return 0;
55207ac6653aSJeff Kirsher }
5521b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
5522ba27ec66SGiuseppe CAVALLARO 
55237ac6653aSJeff Kirsher #ifndef MODULE
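/* Parse the "stmmaceth=" kernel command line. Options are comma-separated
 * "name:value" pairs matching the module parameters handled below, e.g.
 * (illustrative values):
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,eee_timer:2000
 */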
55247ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
55257ac6653aSJeff Kirsher {
55267ac6653aSJeff Kirsher 	char *opt;
55277ac6653aSJeff Kirsher 
55287ac6653aSJeff Kirsher 	if (!str || !*str)
55297ac6653aSJeff Kirsher 		return -EINVAL;
55307ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
55317ac6653aSJeff Kirsher 		if (!strncmp(opt, "debug:", 6)) {
5532ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
55337ac6653aSJeff Kirsher 				goto err;
55347ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "phyaddr:", 8)) {
5535ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
55367ac6653aSJeff Kirsher 				goto err;
55377ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "buf_sz:", 7)) {
5538ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
55397ac6653aSJeff Kirsher 				goto err;
55407ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "tc:", 3)) {
5541ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
55427ac6653aSJeff Kirsher 				goto err;
55437ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "watchdog:", 9)) {
5544ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
55457ac6653aSJeff Kirsher 				goto err;
55467ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
5547ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
55487ac6653aSJeff Kirsher 				goto err;
55497ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "pause:", 6)) {
5550ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
55517ac6653aSJeff Kirsher 				goto err;
5552506f669cSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "eee_timer:", 10)) {
5553d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
5554d765955dSGiuseppe CAVALLARO 				goto err;
55554a7d666aSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "chain_mode:", 11)) {
55564a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
55574a7d666aSGiuseppe CAVALLARO 				goto err;
55587ac6653aSJeff Kirsher 		}
55597ac6653aSJeff Kirsher 	}
55607ac6653aSJeff Kirsher 	return 0;
55617ac6653aSJeff Kirsher 
55627ac6653aSJeff Kirsher err:
55637ac6653aSJeff Kirsher 	pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
55647ac6653aSJeff Kirsher 	return -EINVAL;
55657ac6653aSJeff Kirsher }
55667ac6653aSJeff Kirsher 
55677ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
5568ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
55696fc0d0f2SGiuseppe Cavallaro 
5570466c5ac8SMathieu Olivari static int __init stmmac_init(void)
5571466c5ac8SMathieu Olivari {
5572466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
5573466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
55748d72ab11SGreg Kroah-Hartman 	if (!stmmac_fs_dir)
5575466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
5576474a31e1SAaro Koskinen 	register_netdevice_notifier(&stmmac_notifier);
5577466c5ac8SMathieu Olivari #endif
5578466c5ac8SMathieu Olivari 
5579466c5ac8SMathieu Olivari 	return 0;
5580466c5ac8SMathieu Olivari }
5581466c5ac8SMathieu Olivari 
5582466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
5583466c5ac8SMathieu Olivari {
5584466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
5585474a31e1SAaro Koskinen 	unregister_netdevice_notifier(&stmmac_notifier);
5586466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
5587466c5ac8SMathieu Olivari #endif
5588466c5ac8SMathieu Olivari }
5589466c5ac8SMathieu Olivari 
5590466c5ac8SMathieu Olivari module_init(stmmac_init)
5591466c5ac8SMathieu Olivari module_exit(stmmac_exit)
5592466c5ac8SMathieu Olivari 
55936fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
55946fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
55956fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
5596