17ac6653aSJeff Kirsher /*******************************************************************************
27ac6653aSJeff Kirsher   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
37ac6653aSJeff Kirsher   ST Ethernet IPs are built around a Synopsys IP Core.
47ac6653aSJeff Kirsher 
5286a8372SGiuseppe CAVALLARO 	Copyright(C) 2007-2011 STMicroelectronics Ltd
67ac6653aSJeff Kirsher 
77ac6653aSJeff Kirsher   This program is free software; you can redistribute it and/or modify it
87ac6653aSJeff Kirsher   under the terms and conditions of the GNU General Public License,
97ac6653aSJeff Kirsher   version 2, as published by the Free Software Foundation.
107ac6653aSJeff Kirsher 
117ac6653aSJeff Kirsher   This program is distributed in the hope it will be useful, but WITHOUT
127ac6653aSJeff Kirsher   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
137ac6653aSJeff Kirsher   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
147ac6653aSJeff Kirsher   more details.
157ac6653aSJeff Kirsher 
167ac6653aSJeff Kirsher   The full GNU General Public License is included in this distribution in
177ac6653aSJeff Kirsher   the file called "COPYING".
187ac6653aSJeff Kirsher 
197ac6653aSJeff Kirsher   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
207ac6653aSJeff Kirsher 
217ac6653aSJeff Kirsher   Documentation available at:
227ac6653aSJeff Kirsher 	http://www.stlinux.com
237ac6653aSJeff Kirsher   Support available at:
247ac6653aSJeff Kirsher 	https://bugzilla.stlinux.com/
257ac6653aSJeff Kirsher *******************************************************************************/
267ac6653aSJeff Kirsher 
276a81c26fSViresh Kumar #include <linux/clk.h>
287ac6653aSJeff Kirsher #include <linux/kernel.h>
297ac6653aSJeff Kirsher #include <linux/interrupt.h>
307ac6653aSJeff Kirsher #include <linux/ip.h>
317ac6653aSJeff Kirsher #include <linux/tcp.h>
327ac6653aSJeff Kirsher #include <linux/skbuff.h>
337ac6653aSJeff Kirsher #include <linux/ethtool.h>
347ac6653aSJeff Kirsher #include <linux/if_ether.h>
357ac6653aSJeff Kirsher #include <linux/crc32.h>
367ac6653aSJeff Kirsher #include <linux/mii.h>
3701789349SJiri Pirko #include <linux/if.h>
387ac6653aSJeff Kirsher #include <linux/if_vlan.h>
397ac6653aSJeff Kirsher #include <linux/dma-mapping.h>
407ac6653aSJeff Kirsher #include <linux/slab.h>
417ac6653aSJeff Kirsher #include <linux/prefetch.h>
42db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h>
4350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
447ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h>
457ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h>
4650fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
47891434b1SRayagond Kokatanur #include <linux/net_tstamp.h>
484dbbe8ddSJose Abreu #include <net/pkt_cls.h>
49891434b1SRayagond Kokatanur #include "stmmac_ptp.h"
50286a8372SGiuseppe CAVALLARO #include "stmmac.h"
51c5e4ddbdSChen-Yu Tsai #include <linux/reset.h>
525790cf3cSMathieu Olivari #include <linux/of_mdio.h>
5319d857c9SPhil Reid #include "dwmac1000.h"
547d9e6c5aSJose Abreu #include "dwxgmac2.h"
5542de047dSJose Abreu #include "hwif.h"
567ac6653aSJeff Kirsher 
579939a46dSEugeniy Paltsev #define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
58f748be53SAlexandre TORGUE #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
597ac6653aSJeff Kirsher 
607ac6653aSJeff Kirsher /* Module parameters */
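/* These can be overridden at load time; purely as an illustration (assuming
 * the driver is built as the 'stmmac' module):
 *   modprobe stmmac debug=16 buf_sz=8192
 * or, when built in, via "stmmac.debug=16" on the kernel command line.
 */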
6132ceabcaSGiuseppe CAVALLARO #define TX_TIMEO	5000
627ac6653aSJeff Kirsher static int watchdog = TX_TIMEO;
63d3757ba4SJoe Perches module_param(watchdog, int, 0644);
6432ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
657ac6653aSJeff Kirsher 
6632ceabcaSGiuseppe CAVALLARO static int debug = -1;
67d3757ba4SJoe Perches module_param(debug, int, 0644);
6832ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
697ac6653aSJeff Kirsher 
7047d1f71fSstephen hemminger static int phyaddr = -1;
71d3757ba4SJoe Perches module_param(phyaddr, int, 0444);
727ac6653aSJeff Kirsher MODULE_PARM_DESC(phyaddr, "Physical device address");
737ac6653aSJeff Kirsher 
74e3ad57c9SGiuseppe Cavallaro #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
75120e87f9SGiuseppe Cavallaro #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
767ac6653aSJeff Kirsher 
77e9989339SJose Abreu static int flow_ctrl = FLOW_AUTO;
78d3757ba4SJoe Perches module_param(flow_ctrl, int, 0644);
797ac6653aSJeff Kirsher MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
807ac6653aSJeff Kirsher 
817ac6653aSJeff Kirsher static int pause = PAUSE_TIME;
82d3757ba4SJoe Perches module_param(pause, int, 0644);
837ac6653aSJeff Kirsher MODULE_PARM_DESC(pause, "Flow Control Pause Time");
847ac6653aSJeff Kirsher 
857ac6653aSJeff Kirsher #define TC_DEFAULT 64
867ac6653aSJeff Kirsher static int tc = TC_DEFAULT;
87d3757ba4SJoe Perches module_param(tc, int, 0644);
887ac6653aSJeff Kirsher MODULE_PARM_DESC(tc, "DMA threshold control value");
897ac6653aSJeff Kirsher 
90d916701cSGiuseppe CAVALLARO #define	DEFAULT_BUFSIZE	1536
91d916701cSGiuseppe CAVALLARO static int buf_sz = DEFAULT_BUFSIZE;
92d3757ba4SJoe Perches module_param(buf_sz, int, 0644);
937ac6653aSJeff Kirsher MODULE_PARM_DESC(buf_sz, "DMA buffer size");
947ac6653aSJeff Kirsher 
9522ad3838SGiuseppe Cavallaro #define	STMMAC_RX_COPYBREAK	256
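/* Copybreak threshold in bytes: as is usual for copybreak thresholds, frames
 * shorter than this are typically copied into a small, freshly allocated skb
 * in the RX path so that the original DMA buffer can be recycled.
 */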
9622ad3838SGiuseppe Cavallaro 
977ac6653aSJeff Kirsher static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
987ac6653aSJeff Kirsher 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
997ac6653aSJeff Kirsher 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
1007ac6653aSJeff Kirsher 
101d765955dSGiuseppe CAVALLARO #define STMMAC_DEFAULT_LPI_TIMER	1000
102d765955dSGiuseppe CAVALLARO static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103d3757ba4SJoe Perches module_param(eee_timer, int, 0644);
104d765955dSGiuseppe CAVALLARO MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
105f5351ef7SGiuseppe CAVALLARO #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
106d765955dSGiuseppe CAVALLARO 
10722d3efe5SPavel Machek /* By default the driver will use the ring mode to manage tx and rx descriptors,
10822d3efe5SPavel Machek  * but the user can force the use of the chain mode instead of the ring.
1094a7d666aSGiuseppe CAVALLARO  */
1104a7d666aSGiuseppe CAVALLARO static unsigned int chain_mode;
111d3757ba4SJoe Perches module_param(chain_mode, int, 0444);
1124a7d666aSGiuseppe CAVALLARO MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
1134a7d666aSGiuseppe CAVALLARO 
1147ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
1157ac6653aSJeff Kirsher 
11650fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
117bfab27a1SGiuseppe CAVALLARO static int stmmac_init_fs(struct net_device *dev);
118466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev);
119bfab27a1SGiuseppe CAVALLARO #endif
120bfab27a1SGiuseppe CAVALLARO 
1219125cdd1SGiuseppe CAVALLARO #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
1229125cdd1SGiuseppe CAVALLARO 
1237ac6653aSJeff Kirsher /**
1247ac6653aSJeff Kirsher  * stmmac_verify_args - verify the driver parameters.
125732fdf0eSGiuseppe CAVALLARO  * Description: it checks the driver parameters and sets a default in case of
126732fdf0eSGiuseppe CAVALLARO  * errors.
1277ac6653aSJeff Kirsher  */
1287ac6653aSJeff Kirsher static void stmmac_verify_args(void)
1297ac6653aSJeff Kirsher {
1307ac6653aSJeff Kirsher 	if (unlikely(watchdog < 0))
1317ac6653aSJeff Kirsher 		watchdog = TX_TIMEO;
132d916701cSGiuseppe CAVALLARO 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
133d916701cSGiuseppe CAVALLARO 		buf_sz = DEFAULT_BUFSIZE;
1347ac6653aSJeff Kirsher 	if (unlikely(flow_ctrl > 1))
1357ac6653aSJeff Kirsher 		flow_ctrl = FLOW_AUTO;
1367ac6653aSJeff Kirsher 	else if (likely(flow_ctrl < 0))
1377ac6653aSJeff Kirsher 		flow_ctrl = FLOW_OFF;
1387ac6653aSJeff Kirsher 	if (unlikely((pause < 0) || (pause > 0xffff)))
1397ac6653aSJeff Kirsher 		pause = PAUSE_TIME;
140d765955dSGiuseppe CAVALLARO 	if (eee_timer < 0)
141d765955dSGiuseppe CAVALLARO 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
1427ac6653aSJeff Kirsher }
1437ac6653aSJeff Kirsher 
14432ceabcaSGiuseppe CAVALLARO /**
145c22a3f48SJoao Pinto  * stmmac_disable_all_queues - Disable all queues
146c22a3f48SJoao Pinto  * @priv: driver private structure
147c22a3f48SJoao Pinto  */
148c22a3f48SJoao Pinto static void stmmac_disable_all_queues(struct stmmac_priv *priv)
149c22a3f48SJoao Pinto {
150c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
1518fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
1528fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
153c22a3f48SJoao Pinto 	u32 queue;
154c22a3f48SJoao Pinto 
1558fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
1568fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
157c22a3f48SJoao Pinto 
1584ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
1594ccb4585SJose Abreu 			napi_disable(&ch->rx_napi);
1604ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
1614ccb4585SJose Abreu 			napi_disable(&ch->tx_napi);
162c22a3f48SJoao Pinto 	}
163c22a3f48SJoao Pinto }
164c22a3f48SJoao Pinto 
165c22a3f48SJoao Pinto /**
166c22a3f48SJoao Pinto  * stmmac_enable_all_queues - Enable all queues
167c22a3f48SJoao Pinto  * @priv: driver private structure
168c22a3f48SJoao Pinto  */
169c22a3f48SJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv)
170c22a3f48SJoao Pinto {
171c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
1728fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
1738fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
174c22a3f48SJoao Pinto 	u32 queue;
175c22a3f48SJoao Pinto 
1768fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
1778fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
178c22a3f48SJoao Pinto 
1794ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
1804ccb4585SJose Abreu 			napi_enable(&ch->rx_napi);
1814ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
1824ccb4585SJose Abreu 			napi_enable(&ch->tx_napi);
183c22a3f48SJoao Pinto 	}
184c22a3f48SJoao Pinto }
185c22a3f48SJoao Pinto 
186c22a3f48SJoao Pinto /**
187c22a3f48SJoao Pinto  * stmmac_stop_all_queues - Stop all queues
188c22a3f48SJoao Pinto  * @priv: driver private structure
189c22a3f48SJoao Pinto  */
190c22a3f48SJoao Pinto static void stmmac_stop_all_queues(struct stmmac_priv *priv)
191c22a3f48SJoao Pinto {
192c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193c22a3f48SJoao Pinto 	u32 queue;
194c22a3f48SJoao Pinto 
195c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
196c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
197c22a3f48SJoao Pinto }
198c22a3f48SJoao Pinto 
199c22a3f48SJoao Pinto /**
200c22a3f48SJoao Pinto  * stmmac_start_all_queues - Start all queues
201c22a3f48SJoao Pinto  * @priv: driver private structure
202c22a3f48SJoao Pinto  */
203c22a3f48SJoao Pinto static void stmmac_start_all_queues(struct stmmac_priv *priv)
204c22a3f48SJoao Pinto {
205c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
206c22a3f48SJoao Pinto 	u32 queue;
207c22a3f48SJoao Pinto 
208c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
209c22a3f48SJoao Pinto 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
210c22a3f48SJoao Pinto }
211c22a3f48SJoao Pinto 
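/* Schedule the deferred service task, unless the interface is going down or
 * the task has already been scheduled.
 */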
21234877a15SJose Abreu static void stmmac_service_event_schedule(struct stmmac_priv *priv)
21334877a15SJose Abreu {
21434877a15SJose Abreu 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
21534877a15SJose Abreu 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
21634877a15SJose Abreu 		queue_work(priv->wq, &priv->service_task);
21734877a15SJose Abreu }
21834877a15SJose Abreu 
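/* Report a fatal error: take the carrier down and ask the service task to
 * perform a full reset of the device.
 */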
21934877a15SJose Abreu static void stmmac_global_err(struct stmmac_priv *priv)
22034877a15SJose Abreu {
22134877a15SJose Abreu 	netif_carrier_off(priv->dev);
22234877a15SJose Abreu 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
22334877a15SJose Abreu 	stmmac_service_event_schedule(priv);
22434877a15SJose Abreu }
22534877a15SJose Abreu 
226c22a3f48SJoao Pinto /**
22732ceabcaSGiuseppe CAVALLARO  * stmmac_clk_csr_set - dynamically set the MDC clock
22832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
22932ceabcaSGiuseppe CAVALLARO  * Description: this is to dynamically set the MDC clock according to the csr
23032ceabcaSGiuseppe CAVALLARO  * clock input.
23132ceabcaSGiuseppe CAVALLARO  * Note:
23232ceabcaSGiuseppe CAVALLARO  *	If a specific clk_csr value is passed from the platform
23332ceabcaSGiuseppe CAVALLARO  *	this means that the CSR Clock Range selection cannot be
23432ceabcaSGiuseppe CAVALLARO  *	changed at run-time and it is fixed (as reported in the driver
23532ceabcaSGiuseppe CAVALLARO  *	documentation). Otherwise the driver will try to set the MDC
23632ceabcaSGiuseppe CAVALLARO  *	clock dynamically according to the actual clock input.
23732ceabcaSGiuseppe CAVALLARO  */
238cd7201f4SGiuseppe CAVALLARO static void stmmac_clk_csr_set(struct stmmac_priv *priv)
239cd7201f4SGiuseppe CAVALLARO {
240cd7201f4SGiuseppe CAVALLARO 	u32 clk_rate;
241cd7201f4SGiuseppe CAVALLARO 
242f573c0b9Sjpinto 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
243cd7201f4SGiuseppe CAVALLARO 
244cd7201f4SGiuseppe CAVALLARO 	/* The platform-provided default clk_csr is assumed valid for all
245ceb69499SGiuseppe CAVALLARO 	 * cases except for the ones mentioned below.
246ceb69499SGiuseppe CAVALLARO 	 * For values higher than the IEEE 802.3 specified frequency
247ceb69499SGiuseppe CAVALLARO 	 * we cannot estimate the proper divider as the frequency of
248ceb69499SGiuseppe CAVALLARO 	 * clk_csr_i is not known. So we do not change the default
249ceb69499SGiuseppe CAVALLARO 	 * divider.
250ceb69499SGiuseppe CAVALLARO 	 */
251cd7201f4SGiuseppe CAVALLARO 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
252cd7201f4SGiuseppe CAVALLARO 		if (clk_rate < CSR_F_35M)
253cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_20_35M;
254cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
255cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_35_60M;
256cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
257cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_60_100M;
258cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
259cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_100_150M;
260cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
261cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_150_250M;
26219d857c9SPhil Reid 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
263cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_250_300M;
264ceb69499SGiuseppe CAVALLARO 	}
2659f93ac8dSLABBE Corentin 
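	/* The sun8i and xgmac variants use their own clk_csr encodings,
	 * selected here directly from the csr clock rate.
	 */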
2669f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i) {
2679f93ac8dSLABBE Corentin 		if (clk_rate > 160000000)
2689f93ac8dSLABBE Corentin 			priv->clk_csr = 0x03;
2699f93ac8dSLABBE Corentin 		else if (clk_rate > 80000000)
2709f93ac8dSLABBE Corentin 			priv->clk_csr = 0x02;
2719f93ac8dSLABBE Corentin 		else if (clk_rate > 40000000)
2729f93ac8dSLABBE Corentin 			priv->clk_csr = 0x01;
2739f93ac8dSLABBE Corentin 		else
2749f93ac8dSLABBE Corentin 			priv->clk_csr = 0;
2759f93ac8dSLABBE Corentin 	}
2767d9e6c5aSJose Abreu 
2777d9e6c5aSJose Abreu 	if (priv->plat->has_xgmac) {
2787d9e6c5aSJose Abreu 		if (clk_rate > 400000000)
2797d9e6c5aSJose Abreu 			priv->clk_csr = 0x5;
2807d9e6c5aSJose Abreu 		else if (clk_rate > 350000000)
2817d9e6c5aSJose Abreu 			priv->clk_csr = 0x4;
2827d9e6c5aSJose Abreu 		else if (clk_rate > 300000000)
2837d9e6c5aSJose Abreu 			priv->clk_csr = 0x3;
2847d9e6c5aSJose Abreu 		else if (clk_rate > 250000000)
2857d9e6c5aSJose Abreu 			priv->clk_csr = 0x2;
2867d9e6c5aSJose Abreu 		else if (clk_rate > 150000000)
2877d9e6c5aSJose Abreu 			priv->clk_csr = 0x1;
2887d9e6c5aSJose Abreu 		else
2897d9e6c5aSJose Abreu 			priv->clk_csr = 0x0;
2907d9e6c5aSJose Abreu 	}
291cd7201f4SGiuseppe CAVALLARO }
292cd7201f4SGiuseppe CAVALLARO 
2937ac6653aSJeff Kirsher static void print_pkt(unsigned char *buf, int len)
2947ac6653aSJeff Kirsher {
295424c4f78SAndy Shevchenko 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
296424c4f78SAndy Shevchenko 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
2977ac6653aSJeff Kirsher }
2987ac6653aSJeff Kirsher 
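/**
 * stmmac_tx_avail - Get TX queue available descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 */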
299ce736788SJoao Pinto static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
3007ac6653aSJeff Kirsher {
301ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
302a6a3e026SLABBE Corentin 	u32 avail;
303e3ad57c9SGiuseppe Cavallaro 
304ce736788SJoao Pinto 	if (tx_q->dirty_tx > tx_q->cur_tx)
305ce736788SJoao Pinto 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
306e3ad57c9SGiuseppe Cavallaro 	else
307ce736788SJoao Pinto 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
308e3ad57c9SGiuseppe Cavallaro 
309e3ad57c9SGiuseppe Cavallaro 	return avail;
310e3ad57c9SGiuseppe Cavallaro }
311e3ad57c9SGiuseppe Cavallaro 
31254139cf3SJoao Pinto /**
31354139cf3SJoao Pinto  * stmmac_rx_dirty - Get RX queue dirty
31454139cf3SJoao Pinto  * @priv: driver private structure
31554139cf3SJoao Pinto  * @queue: RX queue index
31654139cf3SJoao Pinto  */
31754139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
318e3ad57c9SGiuseppe Cavallaro {
31954139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
320a6a3e026SLABBE Corentin 	u32 dirty;
321e3ad57c9SGiuseppe Cavallaro 
32254139cf3SJoao Pinto 	if (rx_q->dirty_rx <= rx_q->cur_rx)
32354139cf3SJoao Pinto 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
324e3ad57c9SGiuseppe Cavallaro 	else
32554139cf3SJoao Pinto 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
326e3ad57c9SGiuseppe Cavallaro 
327e3ad57c9SGiuseppe Cavallaro 	return dirty;
3287ac6653aSJeff Kirsher }
3297ac6653aSJeff Kirsher 
33032ceabcaSGiuseppe CAVALLARO /**
331732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_fix_mac_speed - callback for speed selection
33232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
3338d45e42bSLABBE Corentin  * Description: on some platforms (e.g. ST), some HW system configuration
33432ceabcaSGiuseppe CAVALLARO  * registers have to be set according to the link speed negotiated.
3357ac6653aSJeff Kirsher  */
3367ac6653aSJeff Kirsher static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
3377ac6653aSJeff Kirsher {
338d6d50c7eSPhilippe Reynes 	struct net_device *ndev = priv->dev;
339d6d50c7eSPhilippe Reynes 	struct phy_device *phydev = ndev->phydev;
3407ac6653aSJeff Kirsher 
3417ac6653aSJeff Kirsher 	if (likely(priv->plat->fix_mac_speed))
342ceb69499SGiuseppe CAVALLARO 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
3437ac6653aSJeff Kirsher }
3447ac6653aSJeff Kirsher 
34532ceabcaSGiuseppe CAVALLARO /**
346732fdf0eSGiuseppe CAVALLARO  * stmmac_enable_eee_mode - check and enter LPI mode
34732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
348732fdf0eSGiuseppe CAVALLARO  * Description: this function checks that all TX queues are idle and, if so,
349732fdf0eSGiuseppe CAVALLARO  * enters LPI mode in case of EEE.
35032ceabcaSGiuseppe CAVALLARO  */
351d765955dSGiuseppe CAVALLARO static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
352d765955dSGiuseppe CAVALLARO {
353ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
354ce736788SJoao Pinto 	u32 queue;
355ce736788SJoao Pinto 
356ce736788SJoao Pinto 	/* check if all TX queues have finished their work */
357ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
358ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
359ce736788SJoao Pinto 
360ce736788SJoao Pinto 		if (tx_q->dirty_tx != tx_q->cur_tx)
361ce736788SJoao Pinto 			return; /* still unfinished work */
362ce736788SJoao Pinto 	}
363ce736788SJoao Pinto 
364d765955dSGiuseppe CAVALLARO 	/* Check and enter in LPI mode */
365ce736788SJoao Pinto 	if (!priv->tx_path_in_lpi_mode)
366c10d4c82SJose Abreu 		stmmac_set_eee_mode(priv, priv->hw,
367b4b7b772Sjpinto 				priv->plat->en_tx_lpi_clockgating);
368d765955dSGiuseppe CAVALLARO }
369d765955dSGiuseppe CAVALLARO 
37032ceabcaSGiuseppe CAVALLARO /**
371732fdf0eSGiuseppe CAVALLARO  * stmmac_disable_eee_mode - disable and exit from LPI mode
37232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
37332ceabcaSGiuseppe CAVALLARO  * Description: this function is to exit and disable EEE in case the
37432ceabcaSGiuseppe CAVALLARO  * LPI state is true. It is called by the xmit path.
37532ceabcaSGiuseppe CAVALLARO  */
376d765955dSGiuseppe CAVALLARO void stmmac_disable_eee_mode(struct stmmac_priv *priv)
377d765955dSGiuseppe CAVALLARO {
378c10d4c82SJose Abreu 	stmmac_reset_eee_mode(priv, priv->hw);
379d765955dSGiuseppe CAVALLARO 	del_timer_sync(&priv->eee_ctrl_timer);
380d765955dSGiuseppe CAVALLARO 	priv->tx_path_in_lpi_mode = false;
381d765955dSGiuseppe CAVALLARO }
382d765955dSGiuseppe CAVALLARO 
383d765955dSGiuseppe CAVALLARO /**
384732fdf0eSGiuseppe CAVALLARO  * stmmac_eee_ctrl_timer - EEE TX SW timer.
385d765955dSGiuseppe CAVALLARO  * @t: timer_list entry embedded in the driver private structure
386d765955dSGiuseppe CAVALLARO  * Description:
38732ceabcaSGiuseppe CAVALLARO  *  if there is no data transfer and if we are not in LPI state,
388d765955dSGiuseppe CAVALLARO  *  then MAC Transmitter can be moved to LPI state.
389d765955dSGiuseppe CAVALLARO  */
390e99e88a9SKees Cook static void stmmac_eee_ctrl_timer(struct timer_list *t)
391d765955dSGiuseppe CAVALLARO {
392e99e88a9SKees Cook 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
393d765955dSGiuseppe CAVALLARO 
394d765955dSGiuseppe CAVALLARO 	stmmac_enable_eee_mode(priv);
395f5351ef7SGiuseppe CAVALLARO 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
396d765955dSGiuseppe CAVALLARO }
397d765955dSGiuseppe CAVALLARO 
398d765955dSGiuseppe CAVALLARO /**
399732fdf0eSGiuseppe CAVALLARO  * stmmac_eee_init - init EEE
40032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
401d765955dSGiuseppe CAVALLARO  * Description:
402732fdf0eSGiuseppe CAVALLARO  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
403732fdf0eSGiuseppe CAVALLARO  *  can also manage EEE, this function enables the LPI state and starts
404732fdf0eSGiuseppe CAVALLARO  *  the related timer.
405d765955dSGiuseppe CAVALLARO  */
406d765955dSGiuseppe CAVALLARO bool stmmac_eee_init(struct stmmac_priv *priv)
407d765955dSGiuseppe CAVALLARO {
408d6d50c7eSPhilippe Reynes 	struct net_device *ndev = priv->dev;
409879626e3SJerome Brunet 	int interface = priv->plat->interface;
410d765955dSGiuseppe CAVALLARO 	bool ret = false;
411d765955dSGiuseppe CAVALLARO 
412879626e3SJerome Brunet 	if ((interface != PHY_INTERFACE_MODE_MII) &&
413879626e3SJerome Brunet 	    (interface != PHY_INTERFACE_MODE_GMII) &&
414879626e3SJerome Brunet 	    !phy_interface_mode_is_rgmii(interface))
415879626e3SJerome Brunet 		goto out;
416879626e3SJerome Brunet 
417f5351ef7SGiuseppe CAVALLARO 	/* Using PCS we cannot deal with the phy registers at this stage
418f5351ef7SGiuseppe CAVALLARO 	 * so we do not support extra features like EEE.
419f5351ef7SGiuseppe CAVALLARO 	 */
4203fe5cadbSGiuseppe CAVALLARO 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
4213fe5cadbSGiuseppe CAVALLARO 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
4223fe5cadbSGiuseppe CAVALLARO 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
423f5351ef7SGiuseppe CAVALLARO 		goto out;
424f5351ef7SGiuseppe CAVALLARO 
425d765955dSGiuseppe CAVALLARO 	/* MAC core supports the EEE feature. */
426d765955dSGiuseppe CAVALLARO 	if (priv->dma_cap.eee) {
42783bf79b6SGiuseppe CAVALLARO 		int tx_lpi_timer = priv->tx_lpi_timer;
428d765955dSGiuseppe CAVALLARO 
42983bf79b6SGiuseppe CAVALLARO 		/* Check if the PHY supports EEE */
430d6d50c7eSPhilippe Reynes 		if (phy_init_eee(ndev->phydev, 1)) {
43183bf79b6SGiuseppe CAVALLARO 			/* To manage at run-time if the EEE cannot be supported
43283bf79b6SGiuseppe CAVALLARO 			 * anymore (for example because the lp caps have been
43383bf79b6SGiuseppe CAVALLARO 			 * changed).
43483bf79b6SGiuseppe CAVALLARO 			 * In that case the driver disables its own timers.
43583bf79b6SGiuseppe CAVALLARO 			 */
43629555fa3SThierry Reding 			mutex_lock(&priv->lock);
43783bf79b6SGiuseppe CAVALLARO 			if (priv->eee_active) {
43838ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "disable EEE\n");
43983bf79b6SGiuseppe CAVALLARO 				del_timer_sync(&priv->eee_ctrl_timer);
440c10d4c82SJose Abreu 				stmmac_set_eee_timer(priv, priv->hw, 0,
44183bf79b6SGiuseppe CAVALLARO 						tx_lpi_timer);
44283bf79b6SGiuseppe CAVALLARO 			}
44383bf79b6SGiuseppe CAVALLARO 			priv->eee_active = 0;
44429555fa3SThierry Reding 			mutex_unlock(&priv->lock);
44583bf79b6SGiuseppe CAVALLARO 			goto out;
44683bf79b6SGiuseppe CAVALLARO 		}
44783bf79b6SGiuseppe CAVALLARO 		/* Activate the EEE and start timers */
44829555fa3SThierry Reding 		mutex_lock(&priv->lock);
449f5351ef7SGiuseppe CAVALLARO 		if (!priv->eee_active) {
450d765955dSGiuseppe CAVALLARO 			priv->eee_active = 1;
451e99e88a9SKees Cook 			timer_setup(&priv->eee_ctrl_timer,
452e99e88a9SKees Cook 				    stmmac_eee_ctrl_timer, 0);
453ccb36da1SVaishali Thakkar 			mod_timer(&priv->eee_ctrl_timer,
454ccb36da1SVaishali Thakkar 				  STMMAC_LPI_T(eee_timer));
455d765955dSGiuseppe CAVALLARO 
456c10d4c82SJose Abreu 			stmmac_set_eee_timer(priv, priv->hw,
457c10d4c82SJose Abreu 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
45871965352SGiuseppe CAVALLARO 		}
459f5351ef7SGiuseppe CAVALLARO 		/* Set HW EEE according to the speed */
460c10d4c82SJose Abreu 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
461d765955dSGiuseppe CAVALLARO 
462d765955dSGiuseppe CAVALLARO 		ret = true;
46329555fa3SThierry Reding 		mutex_unlock(&priv->lock);
4644741cf9cSGiuseppe CAVALLARO 
46538ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
466d765955dSGiuseppe CAVALLARO 	}
467d765955dSGiuseppe CAVALLARO out:
468d765955dSGiuseppe CAVALLARO 	return ret;
469d765955dSGiuseppe CAVALLARO }
470d765955dSGiuseppe CAVALLARO 
471732fdf0eSGiuseppe CAVALLARO /* stmmac_get_tx_hwtstamp - get HW TX timestamps
47232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
473ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
474891434b1SRayagond Kokatanur  * @skb : the socket buffer
475891434b1SRayagond Kokatanur  * Description :
476891434b1SRayagond Kokatanur  * This function will read the timestamp from the descriptor and pass it to
477891434b1SRayagond Kokatanur  * the stack. It also performs some sanity checks.
478891434b1SRayagond Kokatanur  */
479891434b1SRayagond Kokatanur static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
480ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *p, struct sk_buff *skb)
481891434b1SRayagond Kokatanur {
482891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps shhwtstamp;
483df103170SNathan Chancellor 	u64 ns = 0;
484891434b1SRayagond Kokatanur 
485891434b1SRayagond Kokatanur 	if (!priv->hwts_tx_en)
486891434b1SRayagond Kokatanur 		return;
487891434b1SRayagond Kokatanur 
488ceb69499SGiuseppe CAVALLARO 	/* exit if skb doesn't support hw tstamp */
48975e4364fSdamuzi000 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
490891434b1SRayagond Kokatanur 		return;
491891434b1SRayagond Kokatanur 
492891434b1SRayagond Kokatanur 	/* check tx tstamp status */
49342de047dSJose Abreu 	if (stmmac_get_tx_timestamp_status(priv, p)) {
494891434b1SRayagond Kokatanur 		/* get the valid tstamp */
49542de047dSJose Abreu 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
496891434b1SRayagond Kokatanur 
497891434b1SRayagond Kokatanur 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
498891434b1SRayagond Kokatanur 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
499ba1ffd74SGiuseppe CAVALLARO 
50033d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
501891434b1SRayagond Kokatanur 		/* pass tstamp to stack */
502891434b1SRayagond Kokatanur 		skb_tstamp_tx(skb, &shhwtstamp);
503ba1ffd74SGiuseppe CAVALLARO 	}
504891434b1SRayagond Kokatanur 
505891434b1SRayagond Kokatanur 	return;
506891434b1SRayagond Kokatanur }
507891434b1SRayagond Kokatanur 
508732fdf0eSGiuseppe CAVALLARO /* stmmac_get_rx_hwtstamp - get HW RX timestamps
50932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
510ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
511ba1ffd74SGiuseppe CAVALLARO  * @np : next descriptor pointer
512891434b1SRayagond Kokatanur  * @skb : the socket buffer
513891434b1SRayagond Kokatanur  * Description :
514891434b1SRayagond Kokatanur  * This function will read received packet's timestamp from the descriptor
515891434b1SRayagond Kokatanur  * and pass it to the stack. It also performs some sanity checks.
516891434b1SRayagond Kokatanur  */
517ba1ffd74SGiuseppe CAVALLARO static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
518ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *np, struct sk_buff *skb)
519891434b1SRayagond Kokatanur {
520891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
52198870943SJose Abreu 	struct dma_desc *desc = p;
522df103170SNathan Chancellor 	u64 ns = 0;
523891434b1SRayagond Kokatanur 
524891434b1SRayagond Kokatanur 	if (!priv->hwts_rx_en)
525891434b1SRayagond Kokatanur 		return;
526ba1ffd74SGiuseppe CAVALLARO 	/* For GMAC4, the valid timestamp is from CTX next desc. */
5277d9e6c5aSJose Abreu 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
52898870943SJose Abreu 		desc = np;
529891434b1SRayagond Kokatanur 
53098870943SJose Abreu 	/* Check if timestamp is available */
53142de047dSJose Abreu 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
53242de047dSJose Abreu 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
53333d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
534891434b1SRayagond Kokatanur 		shhwtstamp = skb_hwtstamps(skb);
535891434b1SRayagond Kokatanur 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
536891434b1SRayagond Kokatanur 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
537ba1ffd74SGiuseppe CAVALLARO 	} else  {
53833d4c482SMario Molitor 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
539ba1ffd74SGiuseppe CAVALLARO 	}
540891434b1SRayagond Kokatanur }
541891434b1SRayagond Kokatanur 
542891434b1SRayagond Kokatanur /**
543d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_set - control hardware timestamping.
544891434b1SRayagond Kokatanur  *  @dev: device pointer.
5458d45e42bSLABBE Corentin  *  @ifr: An IOCTL specific structure, that can contain a pointer to
546891434b1SRayagond Kokatanur  *  a proprietary structure used to pass information to the driver.
547891434b1SRayagond Kokatanur  *  Description:
548891434b1SRayagond Kokatanur  *  This function configures the MAC to enable/disable both outgoing(TX)
549891434b1SRayagond Kokatanur  *  and incoming(RX) packets time stamping based on user input.
550891434b1SRayagond Kokatanur  *  Return Value:
551891434b1SRayagond Kokatanur  *  0 on success and an appropriate -ve integer on failure.
552891434b1SRayagond Kokatanur  */
553d6228b7cSArtem Panfilov static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
554891434b1SRayagond Kokatanur {
555891434b1SRayagond Kokatanur 	struct stmmac_priv *priv = netdev_priv(dev);
556891434b1SRayagond Kokatanur 	struct hwtstamp_config config;
5570a624155SArnd Bergmann 	struct timespec64 now;
558891434b1SRayagond Kokatanur 	u64 temp = 0;
559891434b1SRayagond Kokatanur 	u32 ptp_v2 = 0;
560891434b1SRayagond Kokatanur 	u32 tstamp_all = 0;
561891434b1SRayagond Kokatanur 	u32 ptp_over_ipv4_udp = 0;
562891434b1SRayagond Kokatanur 	u32 ptp_over_ipv6_udp = 0;
563891434b1SRayagond Kokatanur 	u32 ptp_over_ethernet = 0;
564891434b1SRayagond Kokatanur 	u32 snap_type_sel = 0;
565891434b1SRayagond Kokatanur 	u32 ts_master_en = 0;
566891434b1SRayagond Kokatanur 	u32 ts_event_en = 0;
567df103170SNathan Chancellor 	u32 sec_inc = 0;
568891434b1SRayagond Kokatanur 	u32 value = 0;
5697d9e6c5aSJose Abreu 	bool xmac;
5707d9e6c5aSJose Abreu 
5717d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
572891434b1SRayagond Kokatanur 
573891434b1SRayagond Kokatanur 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
574891434b1SRayagond Kokatanur 		netdev_alert(priv->dev, "No support for HW time stamping\n");
575891434b1SRayagond Kokatanur 		priv->hwts_tx_en = 0;
576891434b1SRayagond Kokatanur 		priv->hwts_rx_en = 0;
577891434b1SRayagond Kokatanur 
578891434b1SRayagond Kokatanur 		return -EOPNOTSUPP;
579891434b1SRayagond Kokatanur 	}
580891434b1SRayagond Kokatanur 
581891434b1SRayagond Kokatanur 	if (copy_from_user(&config, ifr->ifr_data,
582d6228b7cSArtem Panfilov 			   sizeof(config)))
583891434b1SRayagond Kokatanur 		return -EFAULT;
584891434b1SRayagond Kokatanur 
58538ddc59dSLABBE Corentin 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
586891434b1SRayagond Kokatanur 		   __func__, config.flags, config.tx_type, config.rx_filter);
587891434b1SRayagond Kokatanur 
588891434b1SRayagond Kokatanur 	/* reserved for future extensions */
589891434b1SRayagond Kokatanur 	if (config.flags)
590891434b1SRayagond Kokatanur 		return -EINVAL;
591891434b1SRayagond Kokatanur 
5925f3da328SBen Hutchings 	if (config.tx_type != HWTSTAMP_TX_OFF &&
5935f3da328SBen Hutchings 	    config.tx_type != HWTSTAMP_TX_ON)
594891434b1SRayagond Kokatanur 		return -ERANGE;
595891434b1SRayagond Kokatanur 
596891434b1SRayagond Kokatanur 	if (priv->adv_ts) {
597891434b1SRayagond Kokatanur 		switch (config.rx_filter) {
598891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_NONE:
599ceb69499SGiuseppe CAVALLARO 			/* time stamp no incoming packet at all */
600891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_NONE;
601891434b1SRayagond Kokatanur 			break;
602891434b1SRayagond Kokatanur 
603891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
604ceb69499SGiuseppe CAVALLARO 			/* PTP v1, UDP, any kind of event packet */
605891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
6067d8e249fSIlias Apalodimas 			/* 'xmac' hardware can support Sync, Pdelay_Req and
6077d8e249fSIlias Apalodimas 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
6087d8e249fSIlias Apalodimas 			 * This leaves Delay_Req timestamps out.
6097d8e249fSIlias Apalodimas 			 * Enable all events *and* general purpose message
6107d8e249fSIlias Apalodimas 			 * timestamping
6117d8e249fSIlias Apalodimas 			 */
612891434b1SRayagond Kokatanur 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
613891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
614891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
615891434b1SRayagond Kokatanur 			break;
616891434b1SRayagond Kokatanur 
617891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
618ceb69499SGiuseppe CAVALLARO 			/* PTP v1, UDP, Sync packet */
619891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
620891434b1SRayagond Kokatanur 			/* take time stamp for SYNC messages only */
621891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
622891434b1SRayagond Kokatanur 
623891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
624891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
625891434b1SRayagond Kokatanur 			break;
626891434b1SRayagond Kokatanur 
627891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
628ceb69499SGiuseppe CAVALLARO 			/* PTP v1, UDP, Delay_req packet */
629891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
630891434b1SRayagond Kokatanur 			/* take time stamp for Delay_Req messages only */
631891434b1SRayagond Kokatanur 			ts_master_en = PTP_TCR_TSMSTRENA;
632891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
633891434b1SRayagond Kokatanur 
634891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
635891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
636891434b1SRayagond Kokatanur 			break;
637891434b1SRayagond Kokatanur 
638891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
639ceb69499SGiuseppe CAVALLARO 			/* PTP v2, UDP, any kind of event packet */
640891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
641891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
642891434b1SRayagond Kokatanur 			/* take time stamp for all event messages */
643891434b1SRayagond Kokatanur 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
644891434b1SRayagond Kokatanur 
645891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
646891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
647891434b1SRayagond Kokatanur 			break;
648891434b1SRayagond Kokatanur 
649891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
650ceb69499SGiuseppe CAVALLARO 			/* PTP v2, UDP, Sync packet */
651891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
652891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
653891434b1SRayagond Kokatanur 			/* take time stamp for SYNC messages only */
654891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
655891434b1SRayagond Kokatanur 
656891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
657891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
658891434b1SRayagond Kokatanur 			break;
659891434b1SRayagond Kokatanur 
660891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
661ceb69499SGiuseppe CAVALLARO 			/* PTP v2, UDP, Delay_req packet */
662891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
663891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
664891434b1SRayagond Kokatanur 			/* take time stamp for Delay_Req messages only */
665891434b1SRayagond Kokatanur 			ts_master_en = PTP_TCR_TSMSTRENA;
666891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
667891434b1SRayagond Kokatanur 
668891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
669891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
670891434b1SRayagond Kokatanur 			break;
671891434b1SRayagond Kokatanur 
672891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
673ceb69499SGiuseppe CAVALLARO 			/* PTP v2/802.AS1 any layer, any kind of event packet */
674891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
675891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
676891434b1SRayagond Kokatanur 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
677891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679891434b1SRayagond Kokatanur 			ptp_over_ethernet = PTP_TCR_TSIPENA;
680891434b1SRayagond Kokatanur 			break;
681891434b1SRayagond Kokatanur 
682891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
683ceb69499SGiuseppe CAVALLARO 			/* PTP v2/802.AS1, any layer, Sync packet */
684891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
685891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
686891434b1SRayagond Kokatanur 			/* take time stamp for SYNC messages only */
687891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
688891434b1SRayagond Kokatanur 
689891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
690891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
691891434b1SRayagond Kokatanur 			ptp_over_ethernet = PTP_TCR_TSIPENA;
692891434b1SRayagond Kokatanur 			break;
693891434b1SRayagond Kokatanur 
694891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
695ceb69499SGiuseppe CAVALLARO 			/* PTP v2/802.AS1, any layer, Delay_req packet */
696891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
697891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
698891434b1SRayagond Kokatanur 			/* take time stamp for Delay_Req messages only */
699891434b1SRayagond Kokatanur 			ts_master_en = PTP_TCR_TSMSTRENA;
700891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
701891434b1SRayagond Kokatanur 
702891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
703891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
704891434b1SRayagond Kokatanur 			ptp_over_ethernet = PTP_TCR_TSIPENA;
705891434b1SRayagond Kokatanur 			break;
706891434b1SRayagond Kokatanur 
707e3412575SMiroslav Lichvar 		case HWTSTAMP_FILTER_NTP_ALL:
708891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_ALL:
709ceb69499SGiuseppe CAVALLARO 			/* time stamp any incoming packet */
710891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_ALL;
711891434b1SRayagond Kokatanur 			tstamp_all = PTP_TCR_TSENALL;
712891434b1SRayagond Kokatanur 			break;
713891434b1SRayagond Kokatanur 
714891434b1SRayagond Kokatanur 		default:
715891434b1SRayagond Kokatanur 			return -ERANGE;
716891434b1SRayagond Kokatanur 		}
717891434b1SRayagond Kokatanur 	} else {
718891434b1SRayagond Kokatanur 		switch (config.rx_filter) {
719891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_NONE:
720891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_NONE;
721891434b1SRayagond Kokatanur 			break;
722891434b1SRayagond Kokatanur 		default:
723891434b1SRayagond Kokatanur 			/* PTP v1, UDP, any kind of event packet */
724891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
725891434b1SRayagond Kokatanur 			break;
726891434b1SRayagond Kokatanur 		}
727891434b1SRayagond Kokatanur 	}
728891434b1SRayagond Kokatanur 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
7295f3da328SBen Hutchings 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
730891434b1SRayagond Kokatanur 
731891434b1SRayagond Kokatanur 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
732cc4c9001SJose Abreu 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
733891434b1SRayagond Kokatanur 	else {
734891434b1SRayagond Kokatanur 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
735891434b1SRayagond Kokatanur 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
736891434b1SRayagond Kokatanur 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
737891434b1SRayagond Kokatanur 			 ts_master_en | snap_type_sel);
738cc4c9001SJose Abreu 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
739891434b1SRayagond Kokatanur 
740891434b1SRayagond Kokatanur 		/* program Sub Second Increment reg */
741cc4c9001SJose Abreu 		stmmac_config_sub_second_increment(priv,
742f573c0b9Sjpinto 				priv->ptpaddr, priv->plat->clk_ptp_rate,
7437d9e6c5aSJose Abreu 				xmac, &sec_inc);
74419d857c9SPhil Reid 		temp = div_u64(1000000000ULL, sec_inc);
745891434b1SRayagond Kokatanur 
7469a8a02c9SJose Abreu 		/* Store sub second increment and flags for later use */
7479a8a02c9SJose Abreu 		priv->sub_second_inc = sec_inc;
7489a8a02c9SJose Abreu 		priv->systime_flags = value;
7499a8a02c9SJose Abreu 
750891434b1SRayagond Kokatanur 		/* calculate the default addend value:
751891434b1SRayagond Kokatanur 		 * formula is :
752891434b1SRayagond Kokatanur 		 * addend = (2^32)/freq_div_ratio;
75319d857c9SPhil Reid 		 * where, freq_div_ratio = 1e9ns/sec_inc
754891434b1SRayagond Kokatanur 		 */
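		/* Purely illustrative (hypothetical numbers): with sec_inc of
		 * 10ns and clk_ptp_rate of 125MHz, freq_div_ratio is 1e8 and
		 * addend = 2^32 * 1e8 / 125e6 ~= 0xCCCCCCCC, i.e. the
		 * accumulator overflows (and the sub-second counter advances
		 * by sec_inc) on average every 1.25 clk_ptp cycles = 10ns.
		 */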
75519d857c9SPhil Reid 		temp = (u64)(temp << 32);
756f573c0b9Sjpinto 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
757cc4c9001SJose Abreu 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
758891434b1SRayagond Kokatanur 
759891434b1SRayagond Kokatanur 		/* initialize system time */
7600a624155SArnd Bergmann 		ktime_get_real_ts64(&now);
7610a624155SArnd Bergmann 
7620a624155SArnd Bergmann 		/* lower 32 bits of tv_sec are safe until y2106 */
763cc4c9001SJose Abreu 		stmmac_init_systime(priv, priv->ptpaddr,
764cc4c9001SJose Abreu 				(u32)now.tv_sec, now.tv_nsec);
765891434b1SRayagond Kokatanur 	}
766891434b1SRayagond Kokatanur 
767d6228b7cSArtem Panfilov 	memcpy(&priv->tstamp_config, &config, sizeof(config));
768d6228b7cSArtem Panfilov 
769891434b1SRayagond Kokatanur 	return copy_to_user(ifr->ifr_data, &config,
770d6228b7cSArtem Panfilov 			    sizeof(config)) ? -EFAULT : 0;
771d6228b7cSArtem Panfilov }
772d6228b7cSArtem Panfilov 
773d6228b7cSArtem Panfilov /**
774d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_get - read hardware timestamping.
775d6228b7cSArtem Panfilov  *  @dev: device pointer.
776d6228b7cSArtem Panfilov  *  @ifr: An IOCTL specific structure, that can contain a pointer to
777d6228b7cSArtem Panfilov  *  a proprietary structure used to pass information to the driver.
778d6228b7cSArtem Panfilov  *  Description:
779d6228b7cSArtem Panfilov  *  This function obtains the current hardware timestamping settings
780d6228b7cSArtem Panfilov  *  as requested.
781d6228b7cSArtem Panfilov  */
782d6228b7cSArtem Panfilov static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
783d6228b7cSArtem Panfilov {
784d6228b7cSArtem Panfilov 	struct stmmac_priv *priv = netdev_priv(dev);
785d6228b7cSArtem Panfilov 	struct hwtstamp_config *config = &priv->tstamp_config;
786d6228b7cSArtem Panfilov 
787d6228b7cSArtem Panfilov 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
788d6228b7cSArtem Panfilov 		return -EOPNOTSUPP;
789d6228b7cSArtem Panfilov 
790d6228b7cSArtem Panfilov 	return copy_to_user(ifr->ifr_data, config,
791d6228b7cSArtem Panfilov 			    sizeof(*config)) ? -EFAULT : 0;
792891434b1SRayagond Kokatanur }
793891434b1SRayagond Kokatanur 
79432ceabcaSGiuseppe CAVALLARO /**
795732fdf0eSGiuseppe CAVALLARO  * stmmac_init_ptp - init PTP
79632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
797732fdf0eSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
79832ceabcaSGiuseppe CAVALLARO  * This is done by looking at the HW cap. register.
799732fdf0eSGiuseppe CAVALLARO  * This function also registers the ptp driver.
80032ceabcaSGiuseppe CAVALLARO  */
80192ba6888SRayagond Kokatanur static int stmmac_init_ptp(struct stmmac_priv *priv)
802891434b1SRayagond Kokatanur {
8037d9e6c5aSJose Abreu 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
8047d9e6c5aSJose Abreu 
80592ba6888SRayagond Kokatanur 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
80692ba6888SRayagond Kokatanur 		return -EOPNOTSUPP;
80792ba6888SRayagond Kokatanur 
808891434b1SRayagond Kokatanur 	priv->adv_ts = 0;
8097d9e6c5aSJose Abreu 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
8107d9e6c5aSJose Abreu 	if (xmac && priv->dma_cap.atime_stamp)
811be9b3174SGiuseppe CAVALLARO 		priv->adv_ts = 1;
812be9b3174SGiuseppe CAVALLARO 	/* Dwmac 3.x core with extend_desc can support adv_ts */
813be9b3174SGiuseppe CAVALLARO 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
814891434b1SRayagond Kokatanur 		priv->adv_ts = 1;
8157cd01399SVince Bridgers 
816be9b3174SGiuseppe CAVALLARO 	if (priv->dma_cap.time_stamp)
817be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
8187cd01399SVince Bridgers 
819be9b3174SGiuseppe CAVALLARO 	if (priv->adv_ts)
820be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev,
821be9b3174SGiuseppe CAVALLARO 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
822891434b1SRayagond Kokatanur 
823891434b1SRayagond Kokatanur 	priv->hwts_tx_en = 0;
824891434b1SRayagond Kokatanur 	priv->hwts_rx_en = 0;
82592ba6888SRayagond Kokatanur 
826c30a70d3SGiuseppe CAVALLARO 	stmmac_ptp_register(priv);
827c30a70d3SGiuseppe CAVALLARO 
828c30a70d3SGiuseppe CAVALLARO 	return 0;
82992ba6888SRayagond Kokatanur }
83092ba6888SRayagond Kokatanur 
83192ba6888SRayagond Kokatanur static void stmmac_release_ptp(struct stmmac_priv *priv)
83292ba6888SRayagond Kokatanur {
833f573c0b9Sjpinto 	if (priv->plat->clk_ptp_ref)
834f573c0b9Sjpinto 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
83592ba6888SRayagond Kokatanur 	stmmac_ptp_unregister(priv);
836891434b1SRayagond Kokatanur }
837891434b1SRayagond Kokatanur 
8387ac6653aSJeff Kirsher /**
83929feff39SJoao Pinto  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
84029feff39SJoao Pinto  *  @priv: driver private structure
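 * @duplex: duplex mode negotiated by the PHY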
84129feff39SJoao Pinto  *  Description: It is used for configuring the flow control in all queues
84229feff39SJoao Pinto  */
84329feff39SJoao Pinto static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
84429feff39SJoao Pinto {
84529feff39SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
84629feff39SJoao Pinto 
847c10d4c82SJose Abreu 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
84829feff39SJoao Pinto 			priv->pause, tx_cnt);
84929feff39SJoao Pinto }
85029feff39SJoao Pinto 
85129feff39SJoao Pinto /**
852732fdf0eSGiuseppe CAVALLARO  * stmmac_adjust_link - adjusts the link parameters
8537ac6653aSJeff Kirsher  * @dev: net device structure
854732fdf0eSGiuseppe CAVALLARO  * Description: this is the helper called by the physical abstraction layer
855732fdf0eSGiuseppe CAVALLARO  * drivers to communicate the phy link status. According to the speed and
856732fdf0eSGiuseppe CAVALLARO  * duplex this driver can invoke registered glue-logic as well.
857732fdf0eSGiuseppe CAVALLARO  * It also invokes the eee initialization because it could happen when
858732fdf0eSGiuseppe CAVALLARO  * switching between different networks (that are eee capable).
8597ac6653aSJeff Kirsher  */
8607ac6653aSJeff Kirsher static void stmmac_adjust_link(struct net_device *dev)
8617ac6653aSJeff Kirsher {
8627ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
863d6d50c7eSPhilippe Reynes 	struct phy_device *phydev = dev->phydev;
86499a4cca2SLABBE Corentin 	bool new_state = false;
8657ac6653aSJeff Kirsher 
866662ec2b7SLABBE Corentin 	if (!phydev)
8677ac6653aSJeff Kirsher 		return;
8687ac6653aSJeff Kirsher 
86929555fa3SThierry Reding 	mutex_lock(&priv->lock);
870d765955dSGiuseppe CAVALLARO 
8717ac6653aSJeff Kirsher 	if (phydev->link) {
8727ac6653aSJeff Kirsher 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
8737ac6653aSJeff Kirsher 
8747ac6653aSJeff Kirsher 		/* Now we make sure that we can be in full duplex mode.
8757ac6653aSJeff Kirsher 		 * If not, we operate in half-duplex mode. */
8767ac6653aSJeff Kirsher 		if (phydev->duplex != priv->oldduplex) {
87799a4cca2SLABBE Corentin 			new_state = true;
87850cb16d4SLABBE Corentin 			if (!phydev->duplex)
8797ac6653aSJeff Kirsher 				ctrl &= ~priv->hw->link.duplex;
8807ac6653aSJeff Kirsher 			else
8817ac6653aSJeff Kirsher 				ctrl |= priv->hw->link.duplex;
8827ac6653aSJeff Kirsher 			priv->oldduplex = phydev->duplex;
8837ac6653aSJeff Kirsher 		}
8847ac6653aSJeff Kirsher 		/* Flow Control operation */
8857ac6653aSJeff Kirsher 		if (phydev->pause)
88629feff39SJoao Pinto 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
8877ac6653aSJeff Kirsher 
8887ac6653aSJeff Kirsher 		if (phydev->speed != priv->speed) {
88999a4cca2SLABBE Corentin 			new_state = true;
890ca84dfb9SLABBE Corentin 			ctrl &= ~priv->hw->link.speed_mask;
8917ac6653aSJeff Kirsher 			switch (phydev->speed) {
892afbe17a3SLABBE Corentin 			case SPEED_1000:
893ca84dfb9SLABBE Corentin 				ctrl |= priv->hw->link.speed1000;
8947ac6653aSJeff Kirsher 				break;
895afbe17a3SLABBE Corentin 			case SPEED_100:
896ca84dfb9SLABBE Corentin 				ctrl |= priv->hw->link.speed100;
8979beae261SLABBE Corentin 				break;
898afbe17a3SLABBE Corentin 			case SPEED_10:
899ca84dfb9SLABBE Corentin 				ctrl |= priv->hw->link.speed10;
9007ac6653aSJeff Kirsher 				break;
9017ac6653aSJeff Kirsher 			default:
902b3e51069SLABBE Corentin 				netif_warn(priv, link, priv->dev,
903cba920afSLABBE Corentin 					   "broken speed: %d\n", phydev->speed);
904688495b1SLABBE Corentin 				phydev->speed = SPEED_UNKNOWN;
9057ac6653aSJeff Kirsher 				break;
9067ac6653aSJeff Kirsher 			}
9075db13556SLABBE Corentin 			if (phydev->speed != SPEED_UNKNOWN)
9085db13556SLABBE Corentin 				stmmac_hw_fix_mac_speed(priv);
9097ac6653aSJeff Kirsher 			priv->speed = phydev->speed;
9107ac6653aSJeff Kirsher 		}
9117ac6653aSJeff Kirsher 
9127ac6653aSJeff Kirsher 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
9137ac6653aSJeff Kirsher 
9147ac6653aSJeff Kirsher 		if (!priv->oldlink) {
91599a4cca2SLABBE Corentin 			new_state = true;
9164d869b03SLABBE Corentin 			priv->oldlink = true;
9177ac6653aSJeff Kirsher 		}
9187ac6653aSJeff Kirsher 	} else if (priv->oldlink) {
91999a4cca2SLABBE Corentin 		new_state = true;
9204d869b03SLABBE Corentin 		priv->oldlink = false;
921bd00632cSLABBE Corentin 		priv->speed = SPEED_UNKNOWN;
922bd00632cSLABBE Corentin 		priv->oldduplex = DUPLEX_UNKNOWN;
9237ac6653aSJeff Kirsher 	}
9247ac6653aSJeff Kirsher 
9257ac6653aSJeff Kirsher 	if (new_state && netif_msg_link(priv))
9267ac6653aSJeff Kirsher 		phy_print_status(phydev);
9277ac6653aSJeff Kirsher 
92829555fa3SThierry Reding 	mutex_unlock(&priv->lock);
9294741cf9cSGiuseppe CAVALLARO 
93052f95bbfSGiuseppe CAVALLARO 	if (phydev->is_pseudo_fixed_link)
93152f95bbfSGiuseppe CAVALLARO 		/* Stop the PHY layer from calling the hook to adjust the link
93252f95bbfSGiuseppe CAVALLARO 		 * in case a switch is attached to the stmmac driver.
93352f95bbfSGiuseppe CAVALLARO 		 */
93452f95bbfSGiuseppe CAVALLARO 		phydev->irq = PHY_IGNORE_INTERRUPT;
93552f95bbfSGiuseppe CAVALLARO 	else
93652f95bbfSGiuseppe CAVALLARO 		/* At this stage, init the EEE if supported.
93752f95bbfSGiuseppe CAVALLARO 		 * Never called in case of fixed_link.
938f5351ef7SGiuseppe CAVALLARO 		 */
939f5351ef7SGiuseppe CAVALLARO 		priv->eee_enabled = stmmac_eee_init(priv);
9407ac6653aSJeff Kirsher }
9417ac6653aSJeff Kirsher 
94232ceabcaSGiuseppe CAVALLARO /**
943732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
94432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
94532ceabcaSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports the PCS.
94632ceabcaSGiuseppe CAVALLARO  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
94732ceabcaSGiuseppe CAVALLARO  * configured for the TBI, RTBI, or SGMII PHY interface.
94832ceabcaSGiuseppe CAVALLARO  */
949e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
950e58bb43fSGiuseppe CAVALLARO {
951e58bb43fSGiuseppe CAVALLARO 	int interface = priv->plat->interface;
952e58bb43fSGiuseppe CAVALLARO 
953e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
9540d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
9550d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
9560d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
9570d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
95838ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
9593fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
9600d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
96138ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
9623fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
963e58bb43fSGiuseppe CAVALLARO 		}
964e58bb43fSGiuseppe CAVALLARO 	}
965e58bb43fSGiuseppe CAVALLARO }
966e58bb43fSGiuseppe CAVALLARO 
9677ac6653aSJeff Kirsher /**
9687ac6653aSJeff Kirsher  * stmmac_init_phy - PHY initialization
9697ac6653aSJeff Kirsher  * @dev: net device structure
9707ac6653aSJeff Kirsher  * Description: it initializes the driver's PHY state, and attaches the PHY
9717ac6653aSJeff Kirsher  * to the mac driver.
9727ac6653aSJeff Kirsher  *  Return value:
9737ac6653aSJeff Kirsher  *  0 on success
9747ac6653aSJeff Kirsher  */
9757ac6653aSJeff Kirsher static int stmmac_init_phy(struct net_device *dev)
9767ac6653aSJeff Kirsher {
9777ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
978b6cfffa7SBhadram Varka 	u32 tx_cnt = priv->plat->tx_queues_to_use;
9797ac6653aSJeff Kirsher 	struct phy_device *phydev;
980d765955dSGiuseppe CAVALLARO 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
9817ac6653aSJeff Kirsher 	char bus_id[MII_BUS_ID_SIZE];
98279ee1dc3SSrinivas Kandagatla 	int interface = priv->plat->interface;
9839cbadf09SSrinivas Kandagatla 	int max_speed = priv->plat->max_speed;
9844d869b03SLABBE Corentin 	priv->oldlink = false;
985bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
986bd00632cSLABBE Corentin 	priv->oldduplex = DUPLEX_UNKNOWN;
9877ac6653aSJeff Kirsher 
9885790cf3cSMathieu Olivari 	if (priv->plat->phy_node) {
9895790cf3cSMathieu Olivari 		phydev = of_phy_connect(dev, priv->plat->phy_node,
9905790cf3cSMathieu Olivari 					&stmmac_adjust_link, 0, interface);
9915790cf3cSMathieu Olivari 	} else {
992f142af2eSSrinivas Kandagatla 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
993f142af2eSSrinivas Kandagatla 			 priv->plat->bus_id);
994f142af2eSSrinivas Kandagatla 
995d765955dSGiuseppe CAVALLARO 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
9967ac6653aSJeff Kirsher 			 priv->plat->phy_addr);
997de9a2165SLABBE Corentin 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
9985790cf3cSMathieu Olivari 			   phy_id_fmt);
9997ac6653aSJeff Kirsher 
10005790cf3cSMathieu Olivari 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
10015790cf3cSMathieu Olivari 				     interface);
10025790cf3cSMathieu Olivari 	}
10037ac6653aSJeff Kirsher 
1004dfc50fcaSAlexey Brodkin 	if (IS_ERR_OR_NULL(phydev)) {
100538ddc59dSLABBE Corentin 		netdev_err(priv->dev, "Could not attach to PHY\n");
1006dfc50fcaSAlexey Brodkin 		if (!phydev)
1007dfc50fcaSAlexey Brodkin 			return -ENODEV;
1008dfc50fcaSAlexey Brodkin 
10097ac6653aSJeff Kirsher 		return PTR_ERR(phydev);
10107ac6653aSJeff Kirsher 	}
10117ac6653aSJeff Kirsher 
101279ee1dc3SSrinivas Kandagatla 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
1013c5b9b4e4SSrinivas Kandagatla 	if ((interface == PHY_INTERFACE_MODE_MII) ||
10149cbadf09SSrinivas Kandagatla 	    (interface == PHY_INTERFACE_MODE_RMII) ||
10159cbadf09SSrinivas Kandagatla 		(max_speed < 1000 && max_speed > 0))
101658056c1eSAndrew Lunn 		phy_set_max_speed(phydev, SPEED_100);
101779ee1dc3SSrinivas Kandagatla 
10187ac6653aSJeff Kirsher 	/*
1019b6cfffa7SBhadram Varka 	 * Half-duplex mode is not supported with multiqueue;
1020b6cfffa7SBhadram Varka 	 * half-duplex can only work with a single queue.
1021b6cfffa7SBhadram Varka 	 */
102241124fa6SAndrew Lunn 	if (tx_cnt > 1) {
102341124fa6SAndrew Lunn 		phy_remove_link_mode(phydev,
102441124fa6SAndrew Lunn 				     ETHTOOL_LINK_MODE_10baseT_Half_BIT);
102541124fa6SAndrew Lunn 		phy_remove_link_mode(phydev,
102641124fa6SAndrew Lunn 				     ETHTOOL_LINK_MODE_100baseT_Half_BIT);
102741124fa6SAndrew Lunn 		phy_remove_link_mode(phydev,
102841124fa6SAndrew Lunn 				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
102941124fa6SAndrew Lunn 	}
1030b6cfffa7SBhadram Varka 
1031b6cfffa7SBhadram Varka 	/*
10327ac6653aSJeff Kirsher 	 * Broken HW is sometimes missing the pull-up resistor on the
10337ac6653aSJeff Kirsher 	 * MDIO line, which results in reads to non-existent devices returning
10347ac6653aSJeff Kirsher 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
10357ac6653aSJeff Kirsher 	 * device as well.
10367ac6653aSJeff Kirsher 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
10377ac6653aSJeff Kirsher 	 */
103827732381SMathieu Olivari 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
10397ac6653aSJeff Kirsher 		phy_disconnect(phydev);
10407ac6653aSJeff Kirsher 		return -ENODEV;
10417ac6653aSJeff Kirsher 	}
10428e99fc5fSGiuseppe Cavallaro 
1043c51e424dSFlorian Fainelli 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
1044c51e424dSFlorian Fainelli 	 * subsequent PHY polling; make sure we force a link transition if
1045c51e424dSFlorian Fainelli 	 * we have an UP/DOWN/UP transition
1046c51e424dSFlorian Fainelli 	 */
1047c51e424dSFlorian Fainelli 	if (phydev->is_pseudo_fixed_link)
1048c51e424dSFlorian Fainelli 		phydev->irq = PHY_POLL;
1049c51e424dSFlorian Fainelli 
1050b05c76a1SLABBE Corentin 	phy_attached_info(phydev);
10517ac6653aSJeff Kirsher 	return 0;
10527ac6653aSJeff Kirsher }
10537ac6653aSJeff Kirsher 
105471fedb01SJoao Pinto static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1055c24602efSGiuseppe CAVALLARO {
105654139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
105771fedb01SJoao Pinto 	void *head_rx;
105854139cf3SJoao Pinto 	u32 queue;
105954139cf3SJoao Pinto 
106054139cf3SJoao Pinto 	/* Display RX rings */
106154139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
106254139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
106354139cf3SJoao Pinto 
106454139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1065d0225e7dSAlexandre TORGUE 
106671fedb01SJoao Pinto 		if (priv->extend_desc)
106754139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
106871fedb01SJoao Pinto 		else
106954139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
107071fedb01SJoao Pinto 
107171fedb01SJoao Pinto 		/* Display RX ring */
107242de047dSJose Abreu 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
10735bacd778SLABBE Corentin 	}
107454139cf3SJoao Pinto }
1075d0225e7dSAlexandre TORGUE 
107671fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv)
107771fedb01SJoao Pinto {
1078ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
107971fedb01SJoao Pinto 	void *head_tx;
1080ce736788SJoao Pinto 	u32 queue;
1081ce736788SJoao Pinto 
1082ce736788SJoao Pinto 	/* Display TX rings */
1083ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1084ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1085ce736788SJoao Pinto 
1086ce736788SJoao Pinto 		pr_info("\tTX Queue %u rings\n", queue);
108771fedb01SJoao Pinto 
108871fedb01SJoao Pinto 		if (priv->extend_desc)
1089ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
109071fedb01SJoao Pinto 		else
1091ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
109271fedb01SJoao Pinto 
109342de047dSJose Abreu 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1094c24602efSGiuseppe CAVALLARO 	}
1095ce736788SJoao Pinto }
1096c24602efSGiuseppe CAVALLARO 
109771fedb01SJoao Pinto static void stmmac_display_rings(struct stmmac_priv *priv)
109871fedb01SJoao Pinto {
109971fedb01SJoao Pinto 	/* Display RX ring */
110071fedb01SJoao Pinto 	stmmac_display_rx_rings(priv);
110171fedb01SJoao Pinto 
110271fedb01SJoao Pinto 	/* Display TX ring */
110371fedb01SJoao Pinto 	stmmac_display_tx_rings(priv);
110471fedb01SJoao Pinto }
110571fedb01SJoao Pinto 
1106286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1107286a8372SGiuseppe CAVALLARO {
1108286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1109286a8372SGiuseppe CAVALLARO 
1110286a8372SGiuseppe CAVALLARO 	if (mtu >= BUF_SIZE_4KiB)
1111286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1112286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1113286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1114d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1115286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1116286a8372SGiuseppe CAVALLARO 	else
1117d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1118286a8372SGiuseppe CAVALLARO 
1119286a8372SGiuseppe CAVALLARO 	return ret;
1120286a8372SGiuseppe CAVALLARO }
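
/* Illustrative mapping produced by stmmac_set_bfsize() above, assuming the
 * usual DEFAULT_BUFSIZE (1536 bytes) and BUF_SIZE_* definitions used by
 * this driver:
 *   mtu = 1500 -> DEFAULT_BUFSIZE
 *   mtu = 3000 -> BUF_SIZE_4KiB
 *   mtu = 5000 -> BUF_SIZE_8KiB
 */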
1121286a8372SGiuseppe CAVALLARO 
112232ceabcaSGiuseppe CAVALLARO /**
112371fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
112432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
112554139cf3SJoao Pinto  * @queue: RX queue index
112671fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors
112732ceabcaSGiuseppe CAVALLARO  * whether basic or extended descriptors are in use.
112832ceabcaSGiuseppe CAVALLARO  */
112954139cf3SJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1130c24602efSGiuseppe CAVALLARO {
113154139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
11325bacd778SLABBE Corentin 	int i;
1133c24602efSGiuseppe CAVALLARO 
113471fedb01SJoao Pinto 	/* Clear the RX descriptors */
11355bacd778SLABBE Corentin 	for (i = 0; i < DMA_RX_SIZE; i++)
11365bacd778SLABBE Corentin 		if (priv->extend_desc)
113742de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
11385bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1139583e6361SAaro Koskinen 					(i == DMA_RX_SIZE - 1),
1140583e6361SAaro Koskinen 					priv->dma_buf_sz);
11415bacd778SLABBE Corentin 		else
114242de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
11435bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1144583e6361SAaro Koskinen 					(i == DMA_RX_SIZE - 1),
1145583e6361SAaro Koskinen 					priv->dma_buf_sz);
114671fedb01SJoao Pinto }
114771fedb01SJoao Pinto 
114871fedb01SJoao Pinto /**
114971fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
115071fedb01SJoao Pinto  * @priv: driver private structure
1151ce736788SJoao Pinto  * @queue: TX queue index.
115271fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors
115371fedb01SJoao Pinto  * whether basic or extended descriptors are in use.
115471fedb01SJoao Pinto  */
1155ce736788SJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
115671fedb01SJoao Pinto {
1157ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
115871fedb01SJoao Pinto 	int i;
115971fedb01SJoao Pinto 
116071fedb01SJoao Pinto 	/* Clear the TX descriptors */
11615bacd778SLABBE Corentin 	for (i = 0; i < DMA_TX_SIZE; i++)
11625bacd778SLABBE Corentin 		if (priv->extend_desc)
116342de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
116442de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
11655bacd778SLABBE Corentin 		else
116642de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
116742de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1168c24602efSGiuseppe CAVALLARO }
1169c24602efSGiuseppe CAVALLARO 
1170732fdf0eSGiuseppe CAVALLARO /**
117171fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
117271fedb01SJoao Pinto  * @priv: driver private structure
117371fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors
117471fedb01SJoao Pinto  * whether basic or extended descriptors are in use.
117571fedb01SJoao Pinto  */
117671fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv)
117771fedb01SJoao Pinto {
117854139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1179ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
118054139cf3SJoao Pinto 	u32 queue;
118154139cf3SJoao Pinto 
118271fedb01SJoao Pinto 	/* Clear the RX descriptors */
118354139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
118454139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
118571fedb01SJoao Pinto 
118671fedb01SJoao Pinto 	/* Clear the TX descriptors */
1187ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1188ce736788SJoao Pinto 		stmmac_clear_tx_descriptors(priv, queue);
118971fedb01SJoao Pinto }
119071fedb01SJoao Pinto 
119171fedb01SJoao Pinto /**
1192732fdf0eSGiuseppe CAVALLARO  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1193732fdf0eSGiuseppe CAVALLARO  * @priv: driver private structure
1194732fdf0eSGiuseppe CAVALLARO  * @p: descriptor pointer
1195732fdf0eSGiuseppe CAVALLARO  * @i: descriptor index
119654139cf3SJoao Pinto  * @flags: gfp flag
119754139cf3SJoao Pinto  * @queue: RX queue index
1198732fdf0eSGiuseppe CAVALLARO  * Description: this function is called to allocate a receive buffer, perform
1199732fdf0eSGiuseppe CAVALLARO  * the DMA mapping and init the descriptor.
1200732fdf0eSGiuseppe CAVALLARO  */
1201c24602efSGiuseppe CAVALLARO static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
120254139cf3SJoao Pinto 				  int i, gfp_t flags, u32 queue)
1203c24602efSGiuseppe CAVALLARO {
120454139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1205c24602efSGiuseppe CAVALLARO 	struct sk_buff *skb;
1206c24602efSGiuseppe CAVALLARO 
12074ec49a37SVineet Gupta 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
120856329137SBartlomiej Zolnierkiewicz 	if (!skb) {
120938ddc59dSLABBE Corentin 		netdev_err(priv->dev,
121038ddc59dSLABBE Corentin 			   "%s: Rx init fails; skb is NULL\n", __func__);
121156329137SBartlomiej Zolnierkiewicz 		return -ENOMEM;
1212c24602efSGiuseppe CAVALLARO 	}
121354139cf3SJoao Pinto 	rx_q->rx_skbuff[i] = skb;
121454139cf3SJoao Pinto 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1215c24602efSGiuseppe CAVALLARO 						priv->dma_buf_sz,
1216c24602efSGiuseppe CAVALLARO 						DMA_FROM_DEVICE);
121754139cf3SJoao Pinto 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
121838ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
121956329137SBartlomiej Zolnierkiewicz 		dev_kfree_skb_any(skb);
122056329137SBartlomiej Zolnierkiewicz 		return -EINVAL;
122156329137SBartlomiej Zolnierkiewicz 	}
1222c24602efSGiuseppe CAVALLARO 
12236844171dSJose Abreu 	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1224c24602efSGiuseppe CAVALLARO 
12252c520b1cSJose Abreu 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
12262c520b1cSJose Abreu 		stmmac_init_desc3(priv, p);
1227c24602efSGiuseppe CAVALLARO 
1228c24602efSGiuseppe CAVALLARO 	return 0;
1229c24602efSGiuseppe CAVALLARO }
1230c24602efSGiuseppe CAVALLARO 
123171fedb01SJoao Pinto /**
123271fedb01SJoao Pinto  * stmmac_free_rx_buffer - free an RX dma buffer
123371fedb01SJoao Pinto  * @priv: private structure
123454139cf3SJoao Pinto  * @queue: RX queue index
123571fedb01SJoao Pinto  * @i: buffer index.
123671fedb01SJoao Pinto  */
123754139cf3SJoao Pinto static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
123856329137SBartlomiej Zolnierkiewicz {
123954139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
124054139cf3SJoao Pinto 
124154139cf3SJoao Pinto 	if (rx_q->rx_skbuff[i]) {
124254139cf3SJoao Pinto 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
124356329137SBartlomiej Zolnierkiewicz 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
124454139cf3SJoao Pinto 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
124556329137SBartlomiej Zolnierkiewicz 	}
124654139cf3SJoao Pinto 	rx_q->rx_skbuff[i] = NULL;
124756329137SBartlomiej Zolnierkiewicz }
124856329137SBartlomiej Zolnierkiewicz 
12497ac6653aSJeff Kirsher /**
125071fedb01SJoao Pinto  * stmmac_free_tx_buffer - free a TX dma buffer
125171fedb01SJoao Pinto  * @priv: private structure
1252ce736788SJoao Pinto  * @queue: TX queue index
125371fedb01SJoao Pinto  * @i: buffer index.
125471fedb01SJoao Pinto  */
1255ce736788SJoao Pinto static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
125671fedb01SJoao Pinto {
1257ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1258ce736788SJoao Pinto 
1259ce736788SJoao Pinto 	if (tx_q->tx_skbuff_dma[i].buf) {
1260ce736788SJoao Pinto 		if (tx_q->tx_skbuff_dma[i].map_as_page)
126171fedb01SJoao Pinto 			dma_unmap_page(priv->device,
1262ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].buf,
1263ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].len,
126471fedb01SJoao Pinto 				       DMA_TO_DEVICE);
126571fedb01SJoao Pinto 		else
126671fedb01SJoao Pinto 			dma_unmap_single(priv->device,
1267ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].buf,
1268ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].len,
126971fedb01SJoao Pinto 					 DMA_TO_DEVICE);
127071fedb01SJoao Pinto 	}
127171fedb01SJoao Pinto 
1272ce736788SJoao Pinto 	if (tx_q->tx_skbuff[i]) {
1273ce736788SJoao Pinto 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1274ce736788SJoao Pinto 		tx_q->tx_skbuff[i] = NULL;
1275ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].buf = 0;
1276ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].map_as_page = false;
127771fedb01SJoao Pinto 	}
127871fedb01SJoao Pinto }
127971fedb01SJoao Pinto 
128071fedb01SJoao Pinto /**
128171fedb01SJoao Pinto  * init_dma_rx_desc_rings - init the RX descriptor rings
12827ac6653aSJeff Kirsher  * @dev: net device structure
12835bacd778SLABBE Corentin  * @flags: gfp flag.
128471fedb01SJoao Pinto  * Description: this function initializes the DMA RX descriptors
12855bacd778SLABBE Corentin  * and allocates the socket buffers. It supports the chained and ring
1286286a8372SGiuseppe CAVALLARO  * modes.
12877ac6653aSJeff Kirsher  */
128871fedb01SJoao Pinto static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
12897ac6653aSJeff Kirsher {
12907ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
129154139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
12925bacd778SLABBE Corentin 	int ret = -ENOMEM;
12932c520b1cSJose Abreu 	int bfsize = 0;
12941d3028f4SColin Ian King 	int queue;
129554139cf3SJoao Pinto 	int i;
12967ac6653aSJeff Kirsher 
12972c520b1cSJose Abreu 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
12982c520b1cSJose Abreu 	if (bfsize < 0)
12992c520b1cSJose Abreu 		bfsize = 0;
13005bacd778SLABBE Corentin 
13015bacd778SLABBE Corentin 	if (bfsize < BUF_SIZE_16KiB)
13025bacd778SLABBE Corentin 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
13035bacd778SLABBE Corentin 
13045bacd778SLABBE Corentin 	priv->dma_buf_sz = bfsize;
13052618abb7SVince Bridgers 
130654139cf3SJoao Pinto 	/* RX INITIALIZATION */
13075bacd778SLABBE Corentin 	netif_dbg(priv, probe, priv->dev,
13085bacd778SLABBE Corentin 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
13095bacd778SLABBE Corentin 
131054139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
131154139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
131254139cf3SJoao Pinto 
131354139cf3SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
131454139cf3SJoao Pinto 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
131554139cf3SJoao Pinto 			  (u32)rx_q->dma_rx_phy);
131654139cf3SJoao Pinto 
13175bacd778SLABBE Corentin 		for (i = 0; i < DMA_RX_SIZE; i++) {
13185bacd778SLABBE Corentin 			struct dma_desc *p;
13195bacd778SLABBE Corentin 
132054139cf3SJoao Pinto 			if (priv->extend_desc)
132154139cf3SJoao Pinto 				p = &((rx_q->dma_erx + i)->basic);
132254139cf3SJoao Pinto 			else
132354139cf3SJoao Pinto 				p = rx_q->dma_rx + i;
132454139cf3SJoao Pinto 
132554139cf3SJoao Pinto 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
132654139cf3SJoao Pinto 						     queue);
13275bacd778SLABBE Corentin 			if (ret)
13285bacd778SLABBE Corentin 				goto err_init_rx_buffers;
13295bacd778SLABBE Corentin 
13305bacd778SLABBE Corentin 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
133154139cf3SJoao Pinto 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
133254139cf3SJoao Pinto 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
13335bacd778SLABBE Corentin 		}
133454139cf3SJoao Pinto 
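		/* Ring bookkeeping starts from a clean state: cur_rx is 0 and,
		 * since i equals DMA_RX_SIZE at this point, the subtraction
		 * below also yields a dirty_rx of 0.
		 */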
133554139cf3SJoao Pinto 		rx_q->cur_rx = 0;
133654139cf3SJoao Pinto 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
133754139cf3SJoao Pinto 
133854139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
13397ac6653aSJeff Kirsher 
1340c24602efSGiuseppe CAVALLARO 		/* Setup the chained descriptor addresses */
1341c24602efSGiuseppe CAVALLARO 		if (priv->mode == STMMAC_CHAIN_MODE) {
134271fedb01SJoao Pinto 			if (priv->extend_desc)
13432c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_erx,
13442c520b1cSJose Abreu 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
134571fedb01SJoao Pinto 			else
13462c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_rx,
13472c520b1cSJose Abreu 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
134871fedb01SJoao Pinto 		}
134954139cf3SJoao Pinto 	}
135054139cf3SJoao Pinto 
135154139cf3SJoao Pinto 	buf_sz = bfsize;
135271fedb01SJoao Pinto 
135371fedb01SJoao Pinto 	return 0;
135454139cf3SJoao Pinto 
135571fedb01SJoao Pinto err_init_rx_buffers:
135654139cf3SJoao Pinto 	while (queue >= 0) {
135771fedb01SJoao Pinto 		while (--i >= 0)
135854139cf3SJoao Pinto 			stmmac_free_rx_buffer(priv, queue, i);
135954139cf3SJoao Pinto 
136054139cf3SJoao Pinto 		if (queue == 0)
136154139cf3SJoao Pinto 			break;
136254139cf3SJoao Pinto 
136354139cf3SJoao Pinto 		i = DMA_RX_SIZE;
136454139cf3SJoao Pinto 		queue--;
136554139cf3SJoao Pinto 	}
136654139cf3SJoao Pinto 
136771fedb01SJoao Pinto 	return ret;
136871fedb01SJoao Pinto }
136971fedb01SJoao Pinto 
137071fedb01SJoao Pinto /**
137171fedb01SJoao Pinto  * init_dma_tx_desc_rings - init the TX descriptor rings
137271fedb01SJoao Pinto  * @dev: net device structure.
137371fedb01SJoao Pinto  * Description: this function initializes the DMA TX descriptors and the
137471fedb01SJoao Pinto  * TX queue bookkeeping. It supports the chained and ring
137571fedb01SJoao Pinto  * modes.
137671fedb01SJoao Pinto  */
137771fedb01SJoao Pinto static int init_dma_tx_desc_rings(struct net_device *dev)
137871fedb01SJoao Pinto {
137971fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
1380ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1381ce736788SJoao Pinto 	u32 queue;
138271fedb01SJoao Pinto 	int i;
138371fedb01SJoao Pinto 
1384ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1385ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1386ce736788SJoao Pinto 
138771fedb01SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
1388ce736788SJoao Pinto 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1389ce736788SJoao Pinto 			 (u32)tx_q->dma_tx_phy);
139071fedb01SJoao Pinto 
139171fedb01SJoao Pinto 		/* Setup the chained descriptor addresses */
139271fedb01SJoao Pinto 		if (priv->mode == STMMAC_CHAIN_MODE) {
139371fedb01SJoao Pinto 			if (priv->extend_desc)
13942c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_etx,
13952c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
139671fedb01SJoao Pinto 			else
13972c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_tx,
13982c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1399c24602efSGiuseppe CAVALLARO 		}
1400286a8372SGiuseppe CAVALLARO 
1401e3ad57c9SGiuseppe Cavallaro 		for (i = 0; i < DMA_TX_SIZE; i++) {
1402c24602efSGiuseppe CAVALLARO 			struct dma_desc *p;
1403c24602efSGiuseppe CAVALLARO 			if (priv->extend_desc)
1404ce736788SJoao Pinto 				p = &((tx_q->dma_etx + i)->basic);
1405c24602efSGiuseppe CAVALLARO 			else
1406ce736788SJoao Pinto 				p = tx_q->dma_tx + i;
1407f748be53SAlexandre TORGUE 
140844c67f85SJose Abreu 			stmmac_clear_desc(priv, p);
1409f748be53SAlexandre TORGUE 
1410ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].buf = 0;
1411ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1412ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].len = 0;
1413ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].last_segment = false;
1414ce736788SJoao Pinto 			tx_q->tx_skbuff[i] = NULL;
14154a7d666aSGiuseppe CAVALLARO 		}
1416c24602efSGiuseppe CAVALLARO 
1417ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
1418ce736788SJoao Pinto 		tx_q->cur_tx = 0;
14198d212a9eSNiklas Cassel 		tx_q->mss = 0;
1420ce736788SJoao Pinto 
1421c22a3f48SJoao Pinto 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1422c22a3f48SJoao Pinto 	}
14237ac6653aSJeff Kirsher 
142471fedb01SJoao Pinto 	return 0;
142571fedb01SJoao Pinto }
142671fedb01SJoao Pinto 
142771fedb01SJoao Pinto /**
142871fedb01SJoao Pinto  * init_dma_desc_rings - init the RX/TX descriptor rings
142971fedb01SJoao Pinto  * @dev: net device structure
143071fedb01SJoao Pinto  * @flags: gfp flag.
143171fedb01SJoao Pinto  * Description: this function initializes the DMA RX/TX descriptors
143271fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
143371fedb01SJoao Pinto  * modes.
143471fedb01SJoao Pinto  */
143571fedb01SJoao Pinto static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
143671fedb01SJoao Pinto {
143771fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
143871fedb01SJoao Pinto 	int ret;
143971fedb01SJoao Pinto 
144071fedb01SJoao Pinto 	ret = init_dma_rx_desc_rings(dev, flags);
144171fedb01SJoao Pinto 	if (ret)
144271fedb01SJoao Pinto 		return ret;
144371fedb01SJoao Pinto 
144471fedb01SJoao Pinto 	ret = init_dma_tx_desc_rings(dev);
144571fedb01SJoao Pinto 
14465bacd778SLABBE Corentin 	stmmac_clear_descriptors(priv);
14477ac6653aSJeff Kirsher 
1448c24602efSGiuseppe CAVALLARO 	if (netif_msg_hw(priv))
1449c24602efSGiuseppe CAVALLARO 		stmmac_display_rings(priv);
145056329137SBartlomiej Zolnierkiewicz 
145156329137SBartlomiej Zolnierkiewicz 	return ret;
14527ac6653aSJeff Kirsher }
14537ac6653aSJeff Kirsher 
145471fedb01SJoao Pinto /**
145571fedb01SJoao Pinto  * dma_free_rx_skbufs - free RX dma buffers
145671fedb01SJoao Pinto  * @priv: private structure
145754139cf3SJoao Pinto  * @queue: RX queue index
145871fedb01SJoao Pinto  */
145954139cf3SJoao Pinto static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
14607ac6653aSJeff Kirsher {
14617ac6653aSJeff Kirsher 	int i;
14627ac6653aSJeff Kirsher 
1463e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_RX_SIZE; i++)
146454139cf3SJoao Pinto 		stmmac_free_rx_buffer(priv, queue, i);
14657ac6653aSJeff Kirsher }
14667ac6653aSJeff Kirsher 
146771fedb01SJoao Pinto /**
146871fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
146971fedb01SJoao Pinto  * @priv: private structure
1470ce736788SJoao Pinto  * @queue: TX queue index
147171fedb01SJoao Pinto  */
1472ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
14737ac6653aSJeff Kirsher {
14747ac6653aSJeff Kirsher 	int i;
14757ac6653aSJeff Kirsher 
147671fedb01SJoao Pinto 	for (i = 0; i < DMA_TX_SIZE; i++)
1477ce736788SJoao Pinto 		stmmac_free_tx_buffer(priv, queue, i);
14787ac6653aSJeff Kirsher }
14797ac6653aSJeff Kirsher 
1480732fdf0eSGiuseppe CAVALLARO /**
148154139cf3SJoao Pinto  * free_dma_rx_desc_resources - free RX dma desc resources
148254139cf3SJoao Pinto  * @priv: private structure
148354139cf3SJoao Pinto  */
148454139cf3SJoao Pinto static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
148554139cf3SJoao Pinto {
148654139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
148754139cf3SJoao Pinto 	u32 queue;
148854139cf3SJoao Pinto 
148954139cf3SJoao Pinto 	/* Free RX queue resources */
149054139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
149154139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
149254139cf3SJoao Pinto 
149354139cf3SJoao Pinto 		/* Release the DMA RX socket buffers */
149454139cf3SJoao Pinto 		dma_free_rx_skbufs(priv, queue);
149554139cf3SJoao Pinto 
149654139cf3SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
149754139cf3SJoao Pinto 		if (!priv->extend_desc)
149854139cf3SJoao Pinto 			dma_free_coherent(priv->device,
149954139cf3SJoao Pinto 					  DMA_RX_SIZE * sizeof(struct dma_desc),
150054139cf3SJoao Pinto 					  rx_q->dma_rx, rx_q->dma_rx_phy);
150154139cf3SJoao Pinto 		else
150254139cf3SJoao Pinto 			dma_free_coherent(priv->device, DMA_RX_SIZE *
150354139cf3SJoao Pinto 					  sizeof(struct dma_extended_desc),
150454139cf3SJoao Pinto 					  rx_q->dma_erx, rx_q->dma_rx_phy);
150554139cf3SJoao Pinto 
150654139cf3SJoao Pinto 		kfree(rx_q->rx_skbuff_dma);
150754139cf3SJoao Pinto 		kfree(rx_q->rx_skbuff);
150854139cf3SJoao Pinto 	}
150954139cf3SJoao Pinto }
151054139cf3SJoao Pinto 
151154139cf3SJoao Pinto /**
1512ce736788SJoao Pinto  * free_dma_tx_desc_resources - free TX dma desc resources
1513ce736788SJoao Pinto  * @priv: private structure
1514ce736788SJoao Pinto  */
1515ce736788SJoao Pinto static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1516ce736788SJoao Pinto {
1517ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
151862242260SChristophe Jaillet 	u32 queue;
1519ce736788SJoao Pinto 
1520ce736788SJoao Pinto 	/* Free TX queue resources */
1521ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1522ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1523ce736788SJoao Pinto 
1524ce736788SJoao Pinto 		/* Release the DMA TX socket buffers */
1525ce736788SJoao Pinto 		dma_free_tx_skbufs(priv, queue);
1526ce736788SJoao Pinto 
1527ce736788SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
1528ce736788SJoao Pinto 		if (!priv->extend_desc)
1529ce736788SJoao Pinto 			dma_free_coherent(priv->device,
1530ce736788SJoao Pinto 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1531ce736788SJoao Pinto 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1532ce736788SJoao Pinto 		else
1533ce736788SJoao Pinto 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1534ce736788SJoao Pinto 					  sizeof(struct dma_extended_desc),
1535ce736788SJoao Pinto 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1536ce736788SJoao Pinto 
1537ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff_dma);
1538ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff);
1539ce736788SJoao Pinto 	}
1540ce736788SJoao Pinto }
1541ce736788SJoao Pinto 
1542ce736788SJoao Pinto /**
154371fedb01SJoao Pinto  * alloc_dma_rx_desc_resources - alloc RX resources.
1544732fdf0eSGiuseppe CAVALLARO  * @priv: private structure
1545732fdf0eSGiuseppe CAVALLARO  * Description: according to which descriptor can be used (extend or basic)
1546732fdf0eSGiuseppe CAVALLARO  * this function allocates the resources for the RX path: the RX descriptor
1547732fdf0eSGiuseppe CAVALLARO  * rings and the bookkeeping arrays for the RX socket buffers that enable
1548732fdf0eSGiuseppe CAVALLARO  * the zero-copy mechanism.
1549732fdf0eSGiuseppe CAVALLARO  */
155071fedb01SJoao Pinto static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
155109f8d696SSrinivas Kandagatla {
155254139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
15535bacd778SLABBE Corentin 	int ret = -ENOMEM;
155454139cf3SJoao Pinto 	u32 queue;
155509f8d696SSrinivas Kandagatla 
155654139cf3SJoao Pinto 	/* RX queues buffers and DMA */
155754139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
155854139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
155954139cf3SJoao Pinto 
156054139cf3SJoao Pinto 		rx_q->queue_index = queue;
156154139cf3SJoao Pinto 		rx_q->priv_data = priv;
156254139cf3SJoao Pinto 
156354139cf3SJoao Pinto 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
156454139cf3SJoao Pinto 						    sizeof(dma_addr_t),
15655bacd778SLABBE Corentin 						    GFP_KERNEL);
156654139cf3SJoao Pinto 		if (!rx_q->rx_skbuff_dma)
156763c3aa6bSChristophe Jaillet 			goto err_dma;
15685bacd778SLABBE Corentin 
156954139cf3SJoao Pinto 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
157054139cf3SJoao Pinto 						sizeof(struct sk_buff *),
15715bacd778SLABBE Corentin 						GFP_KERNEL);
157254139cf3SJoao Pinto 		if (!rx_q->rx_skbuff)
157354139cf3SJoao Pinto 			goto err_dma;
15745bacd778SLABBE Corentin 
15755bacd778SLABBE Corentin 		if (priv->extend_desc) {
1576750afb08SLuis Chamberlain 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1577750afb08SLuis Chamberlain 							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
157854139cf3SJoao Pinto 							   &rx_q->dma_rx_phy,
15795bacd778SLABBE Corentin 							   GFP_KERNEL);
158054139cf3SJoao Pinto 			if (!rx_q->dma_erx)
15815bacd778SLABBE Corentin 				goto err_dma;
15825bacd778SLABBE Corentin 
158371fedb01SJoao Pinto 		} else {
1584750afb08SLuis Chamberlain 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1585750afb08SLuis Chamberlain 							  DMA_RX_SIZE * sizeof(struct dma_desc),
158654139cf3SJoao Pinto 							  &rx_q->dma_rx_phy,
158771fedb01SJoao Pinto 							  GFP_KERNEL);
158854139cf3SJoao Pinto 			if (!rx_q->dma_rx)
158971fedb01SJoao Pinto 				goto err_dma;
159071fedb01SJoao Pinto 		}
159154139cf3SJoao Pinto 	}
159271fedb01SJoao Pinto 
159371fedb01SJoao Pinto 	return 0;
159471fedb01SJoao Pinto 
159571fedb01SJoao Pinto err_dma:
159654139cf3SJoao Pinto 	free_dma_rx_desc_resources(priv);
159754139cf3SJoao Pinto 
159871fedb01SJoao Pinto 	return ret;
159971fedb01SJoao Pinto }
160071fedb01SJoao Pinto 
160171fedb01SJoao Pinto /**
160271fedb01SJoao Pinto  * alloc_dma_tx_desc_resources - alloc TX resources.
160371fedb01SJoao Pinto  * @priv: private structure
160471fedb01SJoao Pinto  * Description: according to which descriptor can be used (extend or basic)
160571fedb01SJoao Pinto  * this function allocates the resources for the TX path: the TX
160671fedb01SJoao Pinto  * descriptor rings and the per-descriptor buffer bookkeeping
160771fedb01SJoao Pinto  * arrays.
160871fedb01SJoao Pinto  */
160971fedb01SJoao Pinto static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
161071fedb01SJoao Pinto {
1611ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
161271fedb01SJoao Pinto 	int ret = -ENOMEM;
1613ce736788SJoao Pinto 	u32 queue;
161471fedb01SJoao Pinto 
1615ce736788SJoao Pinto 	/* TX queues buffers and DMA */
1616ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1617ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1618ce736788SJoao Pinto 
1619ce736788SJoao Pinto 		tx_q->queue_index = queue;
1620ce736788SJoao Pinto 		tx_q->priv_data = priv;
1621ce736788SJoao Pinto 
1622ce736788SJoao Pinto 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1623ce736788SJoao Pinto 						    sizeof(*tx_q->tx_skbuff_dma),
162471fedb01SJoao Pinto 						    GFP_KERNEL);
1625ce736788SJoao Pinto 		if (!tx_q->tx_skbuff_dma)
162662242260SChristophe Jaillet 			goto err_dma;
162771fedb01SJoao Pinto 
1628ce736788SJoao Pinto 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1629ce736788SJoao Pinto 						sizeof(struct sk_buff *),
163071fedb01SJoao Pinto 						GFP_KERNEL);
1631ce736788SJoao Pinto 		if (!tx_q->tx_skbuff)
163262242260SChristophe Jaillet 			goto err_dma;
163371fedb01SJoao Pinto 
163471fedb01SJoao Pinto 		if (priv->extend_desc) {
1635750afb08SLuis Chamberlain 			tx_q->dma_etx = dma_alloc_coherent(priv->device,
1636750afb08SLuis Chamberlain 							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1637ce736788SJoao Pinto 							   &tx_q->dma_tx_phy,
16385bacd778SLABBE Corentin 							   GFP_KERNEL);
1639ce736788SJoao Pinto 			if (!tx_q->dma_etx)
164062242260SChristophe Jaillet 				goto err_dma;
16415bacd778SLABBE Corentin 		} else {
1642750afb08SLuis Chamberlain 			tx_q->dma_tx = dma_alloc_coherent(priv->device,
1643750afb08SLuis Chamberlain 							  DMA_TX_SIZE * sizeof(struct dma_desc),
1644ce736788SJoao Pinto 							  &tx_q->dma_tx_phy,
16455bacd778SLABBE Corentin 							  GFP_KERNEL);
1646ce736788SJoao Pinto 			if (!tx_q->dma_tx)
164762242260SChristophe Jaillet 				goto err_dma;
1648ce736788SJoao Pinto 		}
16495bacd778SLABBE Corentin 	}
16505bacd778SLABBE Corentin 
16515bacd778SLABBE Corentin 	return 0;
16525bacd778SLABBE Corentin 
165362242260SChristophe Jaillet err_dma:
1654ce736788SJoao Pinto 	free_dma_tx_desc_resources(priv);
1655ce736788SJoao Pinto 
165609f8d696SSrinivas Kandagatla 	return ret;
16575bacd778SLABBE Corentin }
165809f8d696SSrinivas Kandagatla 
165971fedb01SJoao Pinto /**
166071fedb01SJoao Pinto  * alloc_dma_desc_resources - alloc TX/RX resources.
166171fedb01SJoao Pinto  * @priv: private structure
166271fedb01SJoao Pinto  * Description: according to which descriptor can be used (extend or basic)
166371fedb01SJoao Pinto  * this function allocates the resources for TX and RX paths. In case of
166471fedb01SJoao Pinto  * reception, for example, it pre-allocates the RX socket buffers in order to
166571fedb01SJoao Pinto  * allow the zero-copy mechanism.
166671fedb01SJoao Pinto  */
166771fedb01SJoao Pinto static int alloc_dma_desc_resources(struct stmmac_priv *priv)
16685bacd778SLABBE Corentin {
166954139cf3SJoao Pinto 	/* RX Allocation */
167071fedb01SJoao Pinto 	int ret = alloc_dma_rx_desc_resources(priv);
167171fedb01SJoao Pinto 
167271fedb01SJoao Pinto 	if (ret)
167371fedb01SJoao Pinto 		return ret;
167471fedb01SJoao Pinto 
167571fedb01SJoao Pinto 	ret = alloc_dma_tx_desc_resources(priv);
167671fedb01SJoao Pinto 
167771fedb01SJoao Pinto 	return ret;
167871fedb01SJoao Pinto }
167971fedb01SJoao Pinto 
168071fedb01SJoao Pinto /**
168171fedb01SJoao Pinto  * free_dma_desc_resources - free dma desc resources
168271fedb01SJoao Pinto  * @priv: private structure
168371fedb01SJoao Pinto  */
168471fedb01SJoao Pinto static void free_dma_desc_resources(struct stmmac_priv *priv)
168571fedb01SJoao Pinto {
168671fedb01SJoao Pinto 	/* Release the DMA RX socket buffers */
168771fedb01SJoao Pinto 	free_dma_rx_desc_resources(priv);
168871fedb01SJoao Pinto 
168971fedb01SJoao Pinto 	/* Release the DMA TX socket buffers */
169071fedb01SJoao Pinto 	free_dma_tx_desc_resources(priv);
169171fedb01SJoao Pinto }
169271fedb01SJoao Pinto 
169371fedb01SJoao Pinto /**
16949eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
16959eb12474Sjpinto  *  @priv: driver private structure
16969eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
16979eb12474Sjpinto  */
16989eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
16999eb12474Sjpinto {
17004f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
17014f6046f5SJoao Pinto 	int queue;
17024f6046f5SJoao Pinto 	u8 mode;
17039eb12474Sjpinto 
17044f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
17054f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1706c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
17074f6046f5SJoao Pinto 	}
17089eb12474Sjpinto }
17099eb12474Sjpinto 
17109eb12474Sjpinto /**
1711ae4f0d46SJoao Pinto  * stmmac_start_rx_dma - start RX DMA channel
1712ae4f0d46SJoao Pinto  * @priv: driver private structure
1713ae4f0d46SJoao Pinto  * @chan: RX channel index
1714ae4f0d46SJoao Pinto  * Description:
1715ae4f0d46SJoao Pinto  * This starts a RX DMA channel
1716ae4f0d46SJoao Pinto  */
1717ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1718ae4f0d46SJoao Pinto {
1719ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1720a4e887faSJose Abreu 	stmmac_start_rx(priv, priv->ioaddr, chan);
1721ae4f0d46SJoao Pinto }
1722ae4f0d46SJoao Pinto 
1723ae4f0d46SJoao Pinto /**
1724ae4f0d46SJoao Pinto  * stmmac_start_tx_dma - start TX DMA channel
1725ae4f0d46SJoao Pinto  * @priv: driver private structure
1726ae4f0d46SJoao Pinto  * @chan: TX channel index
1727ae4f0d46SJoao Pinto  * Description:
1728ae4f0d46SJoao Pinto  * This starts a TX DMA channel
1729ae4f0d46SJoao Pinto  */
1730ae4f0d46SJoao Pinto static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1731ae4f0d46SJoao Pinto {
1732ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1733a4e887faSJose Abreu 	stmmac_start_tx(priv, priv->ioaddr, chan);
1734ae4f0d46SJoao Pinto }
1735ae4f0d46SJoao Pinto 
1736ae4f0d46SJoao Pinto /**
1737ae4f0d46SJoao Pinto  * stmmac_stop_rx_dma - stop RX DMA channel
1738ae4f0d46SJoao Pinto  * @priv: driver private structure
1739ae4f0d46SJoao Pinto  * @chan: RX channel index
1740ae4f0d46SJoao Pinto  * Description:
1741ae4f0d46SJoao Pinto  * This stops a RX DMA channel
1742ae4f0d46SJoao Pinto  */
1743ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1744ae4f0d46SJoao Pinto {
1745ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1746a4e887faSJose Abreu 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1747ae4f0d46SJoao Pinto }
1748ae4f0d46SJoao Pinto 
1749ae4f0d46SJoao Pinto /**
1750ae4f0d46SJoao Pinto  * stmmac_stop_tx_dma - stop TX DMA channel
1751ae4f0d46SJoao Pinto  * @priv: driver private structure
1752ae4f0d46SJoao Pinto  * @chan: TX channel index
1753ae4f0d46SJoao Pinto  * Description:
1754ae4f0d46SJoao Pinto  * This stops a TX DMA channel
1755ae4f0d46SJoao Pinto  */
1756ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1757ae4f0d46SJoao Pinto {
1758ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1759a4e887faSJose Abreu 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1760ae4f0d46SJoao Pinto }
1761ae4f0d46SJoao Pinto 
1762ae4f0d46SJoao Pinto /**
1763ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
1764ae4f0d46SJoao Pinto  * @priv: driver private structure
1765ae4f0d46SJoao Pinto  * Description:
1766ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
1767ae4f0d46SJoao Pinto  */
1768ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
1769ae4f0d46SJoao Pinto {
1770ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1771ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1772ae4f0d46SJoao Pinto 	u32 chan = 0;
1773ae4f0d46SJoao Pinto 
1774ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1775ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
1776ae4f0d46SJoao Pinto 
1777ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1778ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
1779ae4f0d46SJoao Pinto }
1780ae4f0d46SJoao Pinto 
1781ae4f0d46SJoao Pinto /**
1782ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1783ae4f0d46SJoao Pinto  * @priv: driver private structure
1784ae4f0d46SJoao Pinto  * Description:
1785ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
1786ae4f0d46SJoao Pinto  */
1787ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1788ae4f0d46SJoao Pinto {
1789ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1790ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1791ae4f0d46SJoao Pinto 	u32 chan = 0;
1792ae4f0d46SJoao Pinto 
1793ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1794ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
1795ae4f0d46SJoao Pinto 
1796ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1797ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
1798ae4f0d46SJoao Pinto }
1799ae4f0d46SJoao Pinto 
1800ae4f0d46SJoao Pinto /**
18017ac6653aSJeff Kirsher  *  stmmac_dma_operation_mode - HW DMA operation mode
180232ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
1803732fdf0eSGiuseppe CAVALLARO  *  Description: it is used for configuring the DMA operation mode register in
1804732fdf0eSGiuseppe CAVALLARO  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
18057ac6653aSJeff Kirsher  */
18067ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
18077ac6653aSJeff Kirsher {
18086deee222SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
18096deee222SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1810f88203a2SVince Bridgers 	int rxfifosz = priv->plat->rx_fifo_size;
181152a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
18126deee222SJoao Pinto 	u32 txmode = 0;
18136deee222SJoao Pinto 	u32 rxmode = 0;
18146deee222SJoao Pinto 	u32 chan = 0;
1815a0daae13SJose Abreu 	u8 qmode = 0;
1816f88203a2SVince Bridgers 
181711fbf811SThierry Reding 	if (rxfifosz == 0)
181811fbf811SThierry Reding 		rxfifosz = priv->dma_cap.rx_fifo_size;
181952a76235SJose Abreu 	if (txfifosz == 0)
182052a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
182152a76235SJose Abreu 
182252a76235SJose Abreu 	/* Adjust for real per queue fifo size */
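	/* e.g. an 8192-byte RX FIFO shared by four RX queues leaves 2048 bytes
	 * per queue (illustrative figures, not taken from any particular SoC).
	 */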
182352a76235SJose Abreu 	rxfifosz /= rx_channels_count;
182452a76235SJose Abreu 	txfifosz /= tx_channels_count;
182511fbf811SThierry Reding 
18266deee222SJoao Pinto 	if (priv->plat->force_thresh_dma_mode) {
18276deee222SJoao Pinto 		txmode = tc;
18286deee222SJoao Pinto 		rxmode = tc;
18296deee222SJoao Pinto 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
18307ac6653aSJeff Kirsher 		/*
18317ac6653aSJeff Kirsher 		 * In case of GMAC, SF mode can be enabled
18327ac6653aSJeff Kirsher 		 * to perform the TX COE in HW. This depends on:
18337ac6653aSJeff Kirsher 		 * 1) TX COE if actually supported
18347ac6653aSJeff Kirsher 		 * 2) There is no bugged Jumbo frame support
18357ac6653aSJeff Kirsher 		 *    that requires the csum not to be inserted in the TDES.
18367ac6653aSJeff Kirsher 		 */
18376deee222SJoao Pinto 		txmode = SF_DMA_MODE;
18386deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
1839b2dec116SSonic Zhang 		priv->xstats.threshold = SF_DMA_MODE;
18406deee222SJoao Pinto 	} else {
18416deee222SJoao Pinto 		txmode = tc;
18426deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
18436deee222SJoao Pinto 	}
18446deee222SJoao Pinto 
18456deee222SJoao Pinto 	/* configure all channels */
1846a0daae13SJose Abreu 	for (chan = 0; chan < rx_channels_count; chan++) {
1847a0daae13SJose Abreu 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
18486deee222SJoao Pinto 
1849a4e887faSJose Abreu 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1850a0daae13SJose Abreu 				rxfifosz, qmode);
18514205c88eSJose Abreu 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
18524205c88eSJose Abreu 				chan);
1853a0daae13SJose Abreu 	}
1854a0daae13SJose Abreu 
1855a0daae13SJose Abreu 	for (chan = 0; chan < tx_channels_count; chan++) {
1856a0daae13SJose Abreu 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1857a0daae13SJose Abreu 
1858a4e887faSJose Abreu 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1859a0daae13SJose Abreu 				txfifosz, qmode);
1860a0daae13SJose Abreu 	}
18617ac6653aSJeff Kirsher }
18627ac6653aSJeff Kirsher 
18637ac6653aSJeff Kirsher /**
1864732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_clean - to manage the transmission completion
186532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
1866ce736788SJoao Pinto  * @queue: TX queue index
1867732fdf0eSGiuseppe CAVALLARO  * Description: it reclaims the transmit resources after transmission completes.
18687ac6653aSJeff Kirsher  */
18698fce3331SJose Abreu static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
18707ac6653aSJeff Kirsher {
1871ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
187238979574SBeniamino Galvani 	unsigned int bytes_compl = 0, pkts_compl = 0;
18738fce3331SJose Abreu 	unsigned int entry, count = 0;
18747ac6653aSJeff Kirsher 
18758fce3331SJose Abreu 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1876a9097a96SGiuseppe CAVALLARO 
18779125cdd1SGiuseppe CAVALLARO 	priv->xstats.tx_clean++;
18789125cdd1SGiuseppe CAVALLARO 
18798d5f4b07SBernd Edlinger 	entry = tx_q->dirty_tx;
18808fce3331SJose Abreu 	while ((entry != tx_q->cur_tx) && (count < budget)) {
1881ce736788SJoao Pinto 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1882c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
1883c363b658SFabrice Gasnier 		int status;
1884c24602efSGiuseppe CAVALLARO 
1885c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
1886ce736788SJoao Pinto 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1887c24602efSGiuseppe CAVALLARO 		else
1888ce736788SJoao Pinto 			p = tx_q->dma_tx + entry;
18897ac6653aSJeff Kirsher 
189042de047dSJose Abreu 		status = stmmac_tx_status(priv, &priv->dev->stats,
189142de047dSJose Abreu 				&priv->xstats, p, priv->ioaddr);
1892c363b658SFabrice Gasnier 		/* Check if the descriptor is owned by the DMA */
1893c363b658SFabrice Gasnier 		if (unlikely(status & tx_dma_own))
1894c363b658SFabrice Gasnier 			break;
1895c363b658SFabrice Gasnier 
18968fce3331SJose Abreu 		count++;
18978fce3331SJose Abreu 
1898a6b25da5SNiklas Cassel 		/* Make sure descriptor fields are read after reading
1899a6b25da5SNiklas Cassel 		 * the own bit.
1900a6b25da5SNiklas Cassel 		 */
1901a6b25da5SNiklas Cassel 		dma_rmb();
1902a6b25da5SNiklas Cassel 
1903c363b658SFabrice Gasnier 		/* Just consider the last segment and ...*/
1904c363b658SFabrice Gasnier 		if (likely(!(status & tx_not_ls))) {
1905c363b658SFabrice Gasnier 			/* ... verify the status error condition */
1906c363b658SFabrice Gasnier 			if (unlikely(status & tx_err)) {
1907c363b658SFabrice Gasnier 				priv->dev->stats.tx_errors++;
1908c363b658SFabrice Gasnier 			} else {
19097ac6653aSJeff Kirsher 				priv->dev->stats.tx_packets++;
19107ac6653aSJeff Kirsher 				priv->xstats.tx_pkt_n++;
1911c363b658SFabrice Gasnier 			}
1912ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_tx_hwtstamp(priv, p, skb);
19137ac6653aSJeff Kirsher 		}
19147ac6653aSJeff Kirsher 
1915ce736788SJoao Pinto 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1916ce736788SJoao Pinto 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1917362b37beSGiuseppe CAVALLARO 				dma_unmap_page(priv->device,
1918ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].buf,
1919ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].len,
19207ac6653aSJeff Kirsher 					       DMA_TO_DEVICE);
1921362b37beSGiuseppe CAVALLARO 			else
1922362b37beSGiuseppe CAVALLARO 				dma_unmap_single(priv->device,
1923ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].buf,
1924ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].len,
1925362b37beSGiuseppe CAVALLARO 						 DMA_TO_DEVICE);
1926ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].buf = 0;
1927ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].len = 0;
1928ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1929cf32deecSRayagond Kokatanur 		}
1930f748be53SAlexandre TORGUE 
19312c520b1cSJose Abreu 		stmmac_clean_desc3(priv, tx_q, p);
1932f748be53SAlexandre TORGUE 
1933ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1934ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
19357ac6653aSJeff Kirsher 
19367ac6653aSJeff Kirsher 		if (likely(skb != NULL)) {
193738979574SBeniamino Galvani 			pkts_compl++;
193838979574SBeniamino Galvani 			bytes_compl += skb->len;
19397c565c33SEric W. Biederman 			dev_consume_skb_any(skb);
1940ce736788SJoao Pinto 			tx_q->tx_skbuff[entry] = NULL;
19417ac6653aSJeff Kirsher 		}
19427ac6653aSJeff Kirsher 
194342de047dSJose Abreu 		stmmac_release_tx_desc(priv, p, priv->mode);
19447ac6653aSJeff Kirsher 
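		/* Advance to the next descriptor; STMMAC_GET_ENTRY() wraps the
		 * ring index around at DMA_TX_SIZE.
		 */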
1945e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
19467ac6653aSJeff Kirsher 	}
1947ce736788SJoao Pinto 	tx_q->dirty_tx = entry;
194838979574SBeniamino Galvani 
1949c22a3f48SJoao Pinto 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1950c22a3f48SJoao Pinto 				  pkts_compl, bytes_compl);
195138979574SBeniamino Galvani 
1952c22a3f48SJoao Pinto 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1953c22a3f48SJoao Pinto 								queue))) &&
1954c22a3f48SJoao Pinto 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1955c22a3f48SJoao Pinto 
1956b3e51069SLABBE Corentin 		netif_dbg(priv, tx_done, priv->dev,
1957b3e51069SLABBE Corentin 			  "%s: restart transmit\n", __func__);
1958c22a3f48SJoao Pinto 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
19597ac6653aSJeff Kirsher 	}
1960d765955dSGiuseppe CAVALLARO 
1961d765955dSGiuseppe CAVALLARO 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1962d765955dSGiuseppe CAVALLARO 		stmmac_enable_eee_mode(priv);
1963f5351ef7SGiuseppe CAVALLARO 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1964d765955dSGiuseppe CAVALLARO 	}
19658fce3331SJose Abreu 
19664ccb4585SJose Abreu 	/* We still have pending packets, let's call for a new scheduling */
19674ccb4585SJose Abreu 	if (tx_q->dirty_tx != tx_q->cur_tx)
19684ccb4585SJose Abreu 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
19694ccb4585SJose Abreu 
19708fce3331SJose Abreu 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
19718fce3331SJose Abreu 
19728fce3331SJose Abreu 	return count;
19737ac6653aSJeff Kirsher }
19747ac6653aSJeff Kirsher 
19757ac6653aSJeff Kirsher /**
1976732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
197732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
19785bacd778SLABBE Corentin  * @chan: channel index
19797ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
1980732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
19817ac6653aSJeff Kirsher  */
19825bacd778SLABBE Corentin static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
19837ac6653aSJeff Kirsher {
1984ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1985c24602efSGiuseppe CAVALLARO 	int i;
1986ce736788SJoao Pinto 
1987c22a3f48SJoao Pinto 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
19887ac6653aSJeff Kirsher 
1989ae4f0d46SJoao Pinto 	stmmac_stop_tx_dma(priv, chan);
1990ce736788SJoao Pinto 	dma_free_tx_skbufs(priv, chan);
1991e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_TX_SIZE; i++)
1992c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
199342de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
199442de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1995c24602efSGiuseppe CAVALLARO 		else
199642de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
199742de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1998ce736788SJoao Pinto 	tx_q->dirty_tx = 0;
1999ce736788SJoao Pinto 	tx_q->cur_tx = 0;
20008d212a9eSNiklas Cassel 	tx_q->mss = 0;
2001c22a3f48SJoao Pinto 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2002ae4f0d46SJoao Pinto 	stmmac_start_tx_dma(priv, chan);
20037ac6653aSJeff Kirsher 
20047ac6653aSJeff Kirsher 	priv->dev->stats.tx_errors++;
2005c22a3f48SJoao Pinto 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
20067ac6653aSJeff Kirsher }
20077ac6653aSJeff Kirsher 
200832ceabcaSGiuseppe CAVALLARO /**
20096deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
20106deee222SJoao Pinto  *  @priv: driver private structure
20116deee222SJoao Pinto  *  @txmode: TX operating mode
20126deee222SJoao Pinto  *  @rxmode: RX operating mode
20136deee222SJoao Pinto  *  @chan: channel index
20146deee222SJoao Pinto  *  Description: it is used for configuring the DMA operation mode at
20156deee222SJoao Pinto  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
20166deee222SJoao Pinto  *  mode.
20176deee222SJoao Pinto  */
20186deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
20196deee222SJoao Pinto 					  u32 rxmode, u32 chan)
20206deee222SJoao Pinto {
2021a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2022a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
202352a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
202452a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
20256deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
202652a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
20276deee222SJoao Pinto 
20286deee222SJoao Pinto 	if (rxfifosz == 0)
20296deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
203052a76235SJose Abreu 	if (txfifosz == 0)
203152a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
203252a76235SJose Abreu 
203352a76235SJose Abreu 	/* Adjust for real per queue fifo size */
203452a76235SJose Abreu 	rxfifosz /= rx_channels_count;
203552a76235SJose Abreu 	txfifosz /= tx_channels_count;
20366deee222SJoao Pinto 
2037ab0204e3SJose Abreu 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2038ab0204e3SJose Abreu 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
20396deee222SJoao Pinto }
20406deee222SJoao Pinto 
20418bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
20428bf993a5SJose Abreu {
204363a550fcSJose Abreu 	int ret;
20448bf993a5SJose Abreu 
2045c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
20468bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2047c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
20488bf993a5SJose Abreu 		stmmac_global_err(priv);
2049c10d4c82SJose Abreu 		return true;
2050c10d4c82SJose Abreu 	}
2051c10d4c82SJose Abreu 
2052c10d4c82SJose Abreu 	return false;
20538bf993a5SJose Abreu }
20548bf993a5SJose Abreu 
20558fce3331SJose Abreu static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
20568fce3331SJose Abreu {
20578fce3331SJose Abreu 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
20588fce3331SJose Abreu 						 &priv->xstats, chan);
20598fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[chan];
20608fce3331SJose Abreu 
20614ccb4585SJose Abreu 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
20628fce3331SJose Abreu 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
20634ccb4585SJose Abreu 		napi_schedule_irqoff(&ch->rx_napi);
20644ccb4585SJose Abreu 	}
20654ccb4585SJose Abreu 
20664ccb4585SJose Abreu 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
20674ccb4585SJose Abreu 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
20684ccb4585SJose Abreu 		napi_schedule_irqoff(&ch->tx_napi);
20698fce3331SJose Abreu 	}
20708fce3331SJose Abreu 
20718fce3331SJose Abreu 	return status;
20728fce3331SJose Abreu }
20738fce3331SJose Abreu 
20746deee222SJoao Pinto /**
2075732fdf0eSGiuseppe CAVALLARO  * stmmac_dma_interrupt - DMA ISR
207632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
207732ceabcaSGiuseppe CAVALLARO  * Description: this is the DMA ISR. It is called by the main ISR.
2078732fdf0eSGiuseppe CAVALLARO  * It calls the dwmac dma routine and schedules the poll method in case
2079732fdf0eSGiuseppe CAVALLARO  * some work can be done.
208032ceabcaSGiuseppe CAVALLARO  */
20817ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv)
20827ac6653aSJeff Kirsher {
2083d62a107aSJoao Pinto 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
20845a6a0445SNiklas Cassel 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
20855a6a0445SNiklas Cassel 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
20865a6a0445SNiklas Cassel 				tx_channel_count : rx_channel_count;
2087d62a107aSJoao Pinto 	u32 chan;
20888ac60ffbSKees Cook 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
20898ac60ffbSKees Cook 
20908ac60ffbSKees Cook 	/* Make sure we never check beyond our status buffer. */
20918ac60ffbSKees Cook 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
20928ac60ffbSKees Cook 		channels_to_check = ARRAY_SIZE(status);
209368e5cfafSJoao Pinto 
20945a6a0445SNiklas Cassel 	for (chan = 0; chan < channels_to_check; chan++)
20958fce3331SJose Abreu 		status[chan] = stmmac_napi_check(priv, chan);
2096d62a107aSJoao Pinto 
20975a6a0445SNiklas Cassel 	for (chan = 0; chan < tx_channel_count; chan++) {
20985a6a0445SNiklas Cassel 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
20997ac6653aSJeff Kirsher 			/* Try to bump up the dma threshold on this failure */
2100b2dec116SSonic Zhang 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2101b2dec116SSonic Zhang 			    (tc <= 256)) {
21027ac6653aSJeff Kirsher 				tc += 64;
2103c405abe2SSonic Zhang 				if (priv->plat->force_thresh_dma_mode)
2104d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2105d62a107aSJoao Pinto 								      tc,
2106d62a107aSJoao Pinto 								      tc,
2107d62a107aSJoao Pinto 								      chan);
2108c405abe2SSonic Zhang 				else
2109d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2110d62a107aSJoao Pinto 								    tc,
2111d62a107aSJoao Pinto 								    SF_DMA_MODE,
2112d62a107aSJoao Pinto 								    chan);
21137ac6653aSJeff Kirsher 				priv->xstats.threshold = tc;
21147ac6653aSJeff Kirsher 			}
21155a6a0445SNiklas Cassel 		} else if (unlikely(status[chan] == tx_hard_error)) {
21164e593262SJoao Pinto 			stmmac_tx_err(priv, chan);
21177ac6653aSJeff Kirsher 		}
2118d62a107aSJoao Pinto 	}
2119d62a107aSJoao Pinto }
21207ac6653aSJeff Kirsher 
212132ceabcaSGiuseppe CAVALLARO /**
212232ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup: setup the MAC Management Counters (MMC)
212332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
212432ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq because the counters are managed in SW.
212532ceabcaSGiuseppe CAVALLARO  */
21261c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
21271c901a46SGiuseppe CAVALLARO {
21281c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
21291c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
21301c901a46SGiuseppe CAVALLARO 
213136ff7c1eSAlexandre TORGUE 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
21324f795b25SGiuseppe CAVALLARO 
21334f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
213436ff7c1eSAlexandre TORGUE 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
21351c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
21364f795b25SGiuseppe CAVALLARO 	} else
213738ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
21381c901a46SGiuseppe CAVALLARO }
21391c901a46SGiuseppe CAVALLARO 
2140732fdf0eSGiuseppe CAVALLARO /**
2141732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
214232ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
214319e30c14SGiuseppe CAVALLARO  * Description:
214419e30c14SGiuseppe CAVALLARO  *  newer GMAC chip generations have a register to indicate the
2145e7434821SGiuseppe CAVALLARO  *  presence of the optional features/functions.
214619e30c14SGiuseppe CAVALLARO  *  This can also be used to override the values passed through the
214719e30c14SGiuseppe CAVALLARO  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2148e7434821SGiuseppe CAVALLARO  */
2149e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2150e7434821SGiuseppe CAVALLARO {
2151a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2152e7434821SGiuseppe CAVALLARO }
2153e7434821SGiuseppe CAVALLARO 
215432ceabcaSGiuseppe CAVALLARO /**
2155732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
215632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
215732ceabcaSGiuseppe CAVALLARO  * Description:
215832ceabcaSGiuseppe CAVALLARO  * it verifies that the MAC address is valid; in case it is not, it
215932ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
216032ceabcaSGiuseppe CAVALLARO  */
2161bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2162bfab27a1SGiuseppe CAVALLARO {
2163bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2164c10d4c82SJose Abreu 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2165bfab27a1SGiuseppe CAVALLARO 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2166f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
216738ddc59dSLABBE Corentin 		netdev_info(priv->dev, "device MAC address %pM\n",
2168bfab27a1SGiuseppe CAVALLARO 			    priv->dev->dev_addr);
2169bfab27a1SGiuseppe CAVALLARO 	}
2170c88460b7SHans de Goede }
2171bfab27a1SGiuseppe CAVALLARO 
217232ceabcaSGiuseppe CAVALLARO /**
2173732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
217432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
217532ceabcaSGiuseppe CAVALLARO  * Description:
217632ceabcaSGiuseppe CAVALLARO  * It inits the DMA invoking the specific MAC/GMAC callback.
217732ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
217832ceabcaSGiuseppe CAVALLARO  * if they are not passed, a default is kept for the MAC or GMAC.
217932ceabcaSGiuseppe CAVALLARO  */
21800f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv)
21810f1f88a8SGiuseppe CAVALLARO {
218247f2a9ceSJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
218347f2a9ceSJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
218424aaed0cSJose Abreu 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
218554139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q;
2186ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
218747f2a9ceSJoao Pinto 	u32 chan = 0;
2188c24602efSGiuseppe CAVALLARO 	int atds = 0;
2189495db273SGiuseppe Cavallaro 	int ret = 0;
21900f1f88a8SGiuseppe CAVALLARO 
2191a332e2faSNiklas Cassel 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2192a332e2faSNiklas Cassel 		dev_err(priv->device, "Invalid DMA configuration\n");
219389ab75bfSNiklas Cassel 		return -EINVAL;
21940f1f88a8SGiuseppe CAVALLARO 	}
21950f1f88a8SGiuseppe CAVALLARO 
2196c24602efSGiuseppe CAVALLARO 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2197c24602efSGiuseppe CAVALLARO 		atds = 1;
2198c24602efSGiuseppe CAVALLARO 
2199a4e887faSJose Abreu 	ret = stmmac_reset(priv, priv->ioaddr);
2200495db273SGiuseppe Cavallaro 	if (ret) {
2201495db273SGiuseppe Cavallaro 		dev_err(priv->device, "Failed to reset the dma\n");
2202495db273SGiuseppe Cavallaro 		return ret;
2203495db273SGiuseppe Cavallaro 	}
2204495db273SGiuseppe Cavallaro 
22057d9e6c5aSJose Abreu 	/* DMA Configuration */
22067d9e6c5aSJose Abreu 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
22077d9e6c5aSJose Abreu 
22087d9e6c5aSJose Abreu 	if (priv->plat->axi)
22097d9e6c5aSJose Abreu 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
22107d9e6c5aSJose Abreu 
221147f2a9ceSJoao Pinto 	/* DMA RX Channel Configuration */
221247f2a9ceSJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++) {
221354139cf3SJoao Pinto 		rx_q = &priv->rx_queue[chan];
221454139cf3SJoao Pinto 
221524aaed0cSJose Abreu 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
221624aaed0cSJose Abreu 				    rx_q->dma_rx_phy, chan);
221747f2a9ceSJoao Pinto 
221854139cf3SJoao Pinto 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2219f748be53SAlexandre TORGUE 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2220a4e887faSJose Abreu 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2221a4e887faSJose Abreu 				       rx_q->rx_tail_addr, chan);
222247f2a9ceSJoao Pinto 	}
222347f2a9ceSJoao Pinto 
222447f2a9ceSJoao Pinto 	/* DMA TX Channel Configuration */
222547f2a9ceSJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++) {
2226ce736788SJoao Pinto 		tx_q = &priv->tx_queue[chan];
2227ce736788SJoao Pinto 
222824aaed0cSJose Abreu 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
222924aaed0cSJose Abreu 				    tx_q->dma_tx_phy, chan);
2230f748be53SAlexandre TORGUE 
22310431100bSJose Abreu 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2232a4e887faSJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2233a4e887faSJose Abreu 				       tx_q->tx_tail_addr, chan);
223447f2a9ceSJoao Pinto 	}
223524aaed0cSJose Abreu 
223624aaed0cSJose Abreu 	/* DMA CSR Channel configuration */
223724aaed0cSJose Abreu 	for (chan = 0; chan < dma_csr_ch; chan++)
223824aaed0cSJose Abreu 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
223924aaed0cSJose Abreu 
2240495db273SGiuseppe Cavallaro 	return ret;
22410f1f88a8SGiuseppe CAVALLARO }
22420f1f88a8SGiuseppe CAVALLARO 
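/**
 *  stmmac_tx_timer_arm - (re)arm the TX coalescing timer
 *  @priv: driver private structure
 *  @queue: TX queue index
 *  Description: it rearms the TX mitigation timer of the given queue
 *  according to the configured tx_coal_timer value.
 */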
22438fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
22448fce3331SJose Abreu {
22458fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
22468fce3331SJose Abreu 
22478fce3331SJose Abreu 	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
22488fce3331SJose Abreu }
22498fce3331SJose Abreu 
2250bfab27a1SGiuseppe CAVALLARO /**
2251732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
22529125cdd1SGiuseppe CAVALLARO  * @t: pointer to the TX queue coalescing timer
22539125cdd1SGiuseppe CAVALLARO  * Description:
22549125cdd1SGiuseppe CAVALLARO  * This is the timer handler that schedules the TX NAPI poll to run the tx clean.
22559125cdd1SGiuseppe CAVALLARO  */
2256e99e88a9SKees Cook static void stmmac_tx_timer(struct timer_list *t)
22579125cdd1SGiuseppe CAVALLARO {
22588fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
22598fce3331SJose Abreu 	struct stmmac_priv *priv = tx_q->priv_data;
22608fce3331SJose Abreu 	struct stmmac_channel *ch;
22619125cdd1SGiuseppe CAVALLARO 
22628fce3331SJose Abreu 	ch = &priv->channel[tx_q->queue_index];
22638fce3331SJose Abreu 
22644ccb4585SJose Abreu 	/*
22654ccb4585SJose Abreu 	 * If NAPI is already running we can miss some events. Let's rearm
22664ccb4585SJose Abreu 	 * the timer and try again.
22674ccb4585SJose Abreu 	 */
22684ccb4585SJose Abreu 	if (likely(napi_schedule_prep(&ch->tx_napi)))
22694ccb4585SJose Abreu 		__napi_schedule(&ch->tx_napi);
22704ccb4585SJose Abreu 	else
22714ccb4585SJose Abreu 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
22729125cdd1SGiuseppe CAVALLARO }
22739125cdd1SGiuseppe CAVALLARO 
22749125cdd1SGiuseppe CAVALLARO /**
2275732fdf0eSGiuseppe CAVALLARO  * stmmac_init_tx_coalesce - init tx mitigation options.
227632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
22779125cdd1SGiuseppe CAVALLARO  * Description:
22789125cdd1SGiuseppe CAVALLARO  * This inits the transmit coalesce parameters: i.e. timer rate,
22799125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
22809125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
22819125cdd1SGiuseppe CAVALLARO  */
22829125cdd1SGiuseppe CAVALLARO static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
22839125cdd1SGiuseppe CAVALLARO {
22848fce3331SJose Abreu 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
22858fce3331SJose Abreu 	u32 chan;
22868fce3331SJose Abreu 
22879125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
22889125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
22898fce3331SJose Abreu 
22908fce3331SJose Abreu 	for (chan = 0; chan < tx_channel_count; chan++) {
22918fce3331SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
22928fce3331SJose Abreu 
22938fce3331SJose Abreu 		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
22948fce3331SJose Abreu 	}
22959125cdd1SGiuseppe CAVALLARO }
22969125cdd1SGiuseppe CAVALLARO 
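/**
 *  stmmac_set_rings_length - set the TX/RX descriptor ring lengths
 *  @priv: driver private structure
 *  Description: it programs the DMA with the TX and RX ring lengths for
 *  all the channels in use.
 */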
22974854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
22984854ab99SJoao Pinto {
22994854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
23004854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
23014854ab99SJoao Pinto 	u32 chan;
23024854ab99SJoao Pinto 
23034854ab99SJoao Pinto 	/* set TX ring length */
23044854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2305a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
23064854ab99SJoao Pinto 				(DMA_TX_SIZE - 1), chan);
23074854ab99SJoao Pinto 
23084854ab99SJoao Pinto 	/* set RX ring length */
23094854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2310a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
23114854ab99SJoao Pinto 				(DMA_RX_SIZE - 1), chan);
23124854ab99SJoao Pinto }
23134854ab99SJoao Pinto 
23149125cdd1SGiuseppe CAVALLARO /**
23156a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
23166a3a7193SJoao Pinto  *  @priv: driver private structure
23176a3a7193SJoao Pinto  *  Description: It is used for setting the TX queue weights
23186a3a7193SJoao Pinto  */
23196a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
23206a3a7193SJoao Pinto {
23216a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
23226a3a7193SJoao Pinto 	u32 weight;
23236a3a7193SJoao Pinto 	u32 queue;
23246a3a7193SJoao Pinto 
23256a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
23266a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
2327c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
23286a3a7193SJoao Pinto 	}
23296a3a7193SJoao Pinto }
23306a3a7193SJoao Pinto 
23316a3a7193SJoao Pinto /**
233219d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
233319d91873SJoao Pinto  *  @priv: driver private structure
233419d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
233519d91873SJoao Pinto  */
233619d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
233719d91873SJoao Pinto {
233819d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
233919d91873SJoao Pinto 	u32 mode_to_use;
234019d91873SJoao Pinto 	u32 queue;
234119d91873SJoao Pinto 
234244781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
234344781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
234419d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
234519d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
234619d91873SJoao Pinto 			continue;
234719d91873SJoao Pinto 
2348c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
234919d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
235019d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
235119d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
235219d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
235319d91873SJoao Pinto 				queue);
235419d91873SJoao Pinto 	}
235519d91873SJoao Pinto }
235619d91873SJoao Pinto 
235719d91873SJoao Pinto /**
2358d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2359d43042f4SJoao Pinto  *  @priv: driver private structure
2360d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
2361d43042f4SJoao Pinto  */
2362d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2363d43042f4SJoao Pinto {
2364d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2365d43042f4SJoao Pinto 	u32 queue;
2366d43042f4SJoao Pinto 	u32 chan;
2367d43042f4SJoao Pinto 
2368d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2369d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
2370c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2371d43042f4SJoao Pinto 	}
2372d43042f4SJoao Pinto }
2373d43042f4SJoao Pinto 
2374d43042f4SJoao Pinto /**
2375a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2376a8f5102aSJoao Pinto  *  @priv: driver private structure
2377a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
2378a8f5102aSJoao Pinto  */
2379a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2380a8f5102aSJoao Pinto {
2381a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2382a8f5102aSJoao Pinto 	u32 queue;
2383a8f5102aSJoao Pinto 	u32 prio;
2384a8f5102aSJoao Pinto 
2385a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2386a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2387a8f5102aSJoao Pinto 			continue;
2388a8f5102aSJoao Pinto 
2389a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
2390c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2391a8f5102aSJoao Pinto 	}
2392a8f5102aSJoao Pinto }
2393a8f5102aSJoao Pinto 
2394a8f5102aSJoao Pinto /**
2395a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2396a8f5102aSJoao Pinto  *  @priv: driver private structure
2397a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
2398a8f5102aSJoao Pinto  */
2399a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2400a8f5102aSJoao Pinto {
2401a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2402a8f5102aSJoao Pinto 	u32 queue;
2403a8f5102aSJoao Pinto 	u32 prio;
2404a8f5102aSJoao Pinto 
2405a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
2406a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2407a8f5102aSJoao Pinto 			continue;
2408a8f5102aSJoao Pinto 
2409a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
2410c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2411a8f5102aSJoao Pinto 	}
2412a8f5102aSJoao Pinto }
2413a8f5102aSJoao Pinto 
2414a8f5102aSJoao Pinto /**
2415abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2416abe80fdcSJoao Pinto  *  @priv: driver private structure
2417abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
2418abe80fdcSJoao Pinto  */
2419abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2420abe80fdcSJoao Pinto {
2421abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2422abe80fdcSJoao Pinto 	u32 queue;
2423abe80fdcSJoao Pinto 	u8 packet;
2424abe80fdcSJoao Pinto 
2425abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2426abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
2427abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2428abe80fdcSJoao Pinto 			continue;
2429abe80fdcSJoao Pinto 
2430abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2431c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2432abe80fdcSJoao Pinto 	}
2433abe80fdcSJoao Pinto }
2434abe80fdcSJoao Pinto 
2435abe80fdcSJoao Pinto /**
2436d0a9c9f9SJoao Pinto  *  stmmac_mtl_configuration - Configure MTL
2437d0a9c9f9SJoao Pinto  *  @priv: driver private structure
2438d0a9c9f9SJoao Pinto  *  Description: It is used for configuring MTL
2439d0a9c9f9SJoao Pinto  */
2440d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2441d0a9c9f9SJoao Pinto {
2442d0a9c9f9SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2443d0a9c9f9SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2444d0a9c9f9SJoao Pinto 
2445c10d4c82SJose Abreu 	if (tx_queues_count > 1)
24466a3a7193SJoao Pinto 		stmmac_set_tx_queue_weight(priv);
24476a3a7193SJoao Pinto 
2448d0a9c9f9SJoao Pinto 	/* Configure MTL RX algorithms */
2449c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2450c10d4c82SJose Abreu 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2451d0a9c9f9SJoao Pinto 				priv->plat->rx_sched_algorithm);
2452d0a9c9f9SJoao Pinto 
2453d0a9c9f9SJoao Pinto 	/* Configure MTL TX algorithms */
2454c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2455c10d4c82SJose Abreu 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2456d0a9c9f9SJoao Pinto 				priv->plat->tx_sched_algorithm);
2457d0a9c9f9SJoao Pinto 
245819d91873SJoao Pinto 	/* Configure CBS in AVB TX queues */
2459c10d4c82SJose Abreu 	if (tx_queues_count > 1)
246019d91873SJoao Pinto 		stmmac_configure_cbs(priv);
246119d91873SJoao Pinto 
2462d43042f4SJoao Pinto 	/* Map RX MTL to DMA channels */
2463d43042f4SJoao Pinto 	stmmac_rx_queue_dma_chan_map(priv);
2464d43042f4SJoao Pinto 
2465d0a9c9f9SJoao Pinto 	/* Enable MAC RX Queues */
2466d0a9c9f9SJoao Pinto 	stmmac_mac_enable_rx_queues(priv);
24676deee222SJoao Pinto 
2468a8f5102aSJoao Pinto 	/* Set RX priorities */
2469c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2470a8f5102aSJoao Pinto 		stmmac_mac_config_rx_queues_prio(priv);
2471a8f5102aSJoao Pinto 
2472a8f5102aSJoao Pinto 	/* Set TX priorities */
2473c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2474a8f5102aSJoao Pinto 		stmmac_mac_config_tx_queues_prio(priv);
2475abe80fdcSJoao Pinto 
2476abe80fdcSJoao Pinto 	/* Set RX routing */
2477c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2478abe80fdcSJoao Pinto 		stmmac_mac_config_rx_queues_routing(priv);
2479d0a9c9f9SJoao Pinto }
2480d0a9c9f9SJoao Pinto 
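/**
 *  stmmac_safety_feat_configuration - configure the HW safety features
 *  @priv: driver private structure
 *  Description: it enables the safety features when the HW advertises
 *  support for them (dma_cap.asp), otherwise it only reports their absence.
 */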
24818bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
24828bf993a5SJose Abreu {
2483c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
24848bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
2485c10d4c82SJose Abreu 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
24868bf993a5SJose Abreu 	} else {
24878bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
24888bf993a5SJose Abreu 	}
24898bf993a5SJose Abreu }
24908bf993a5SJose Abreu 
2491d0a9c9f9SJoao Pinto /**
2492732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_setup - setup mac in a usable state.
2493523f11b5SSrinivas Kandagatla  *  @dev : pointer to the device structure.
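 *  @init_ptp: if true, enable the PTP reference clock and initialize PTP support.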
2494523f11b5SSrinivas Kandagatla  *  Description:
2495732fdf0eSGiuseppe CAVALLARO  *  this is the main function to setup the HW in a usable state: the
2496732fdf0eSGiuseppe CAVALLARO  *  dma engine is reset, the core registers are configured (e.g. AXI,
2497732fdf0eSGiuseppe CAVALLARO  *  Checksum features, timers) and the DMA is then ready to start
2498732fdf0eSGiuseppe CAVALLARO  *  receiving and transmitting.
2499523f11b5SSrinivas Kandagatla  *  Return value:
2500523f11b5SSrinivas Kandagatla  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2501523f11b5SSrinivas Kandagatla  *  file on failure.
2502523f11b5SSrinivas Kandagatla  */
2503fe131929SHuacai Chen static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2504523f11b5SSrinivas Kandagatla {
2505523f11b5SSrinivas Kandagatla 	struct stmmac_priv *priv = netdev_priv(dev);
25063c55d4d0SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2507146617b8SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2508146617b8SJoao Pinto 	u32 chan;
2509523f11b5SSrinivas Kandagatla 	int ret;
2510523f11b5SSrinivas Kandagatla 
2511523f11b5SSrinivas Kandagatla 	/* DMA initialization and SW reset */
2512523f11b5SSrinivas Kandagatla 	ret = stmmac_init_dma_engine(priv);
2513523f11b5SSrinivas Kandagatla 	if (ret < 0) {
251438ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
251538ddc59dSLABBE Corentin 			   __func__);
2516523f11b5SSrinivas Kandagatla 		return ret;
2517523f11b5SSrinivas Kandagatla 	}
2518523f11b5SSrinivas Kandagatla 
2519523f11b5SSrinivas Kandagatla 	/* Copy the MAC addr into the HW  */
2520c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2521523f11b5SSrinivas Kandagatla 
252202e57b9dSGiuseppe CAVALLARO 	/* PS and related bits will be programmed according to the speed */
252302e57b9dSGiuseppe CAVALLARO 	if (priv->hw->pcs) {
252402e57b9dSGiuseppe CAVALLARO 		int speed = priv->plat->mac_port_sel_speed;
252502e57b9dSGiuseppe CAVALLARO 
252602e57b9dSGiuseppe CAVALLARO 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
252702e57b9dSGiuseppe CAVALLARO 		    (speed == SPEED_1000)) {
252802e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = speed;
252902e57b9dSGiuseppe CAVALLARO 		} else {
253002e57b9dSGiuseppe CAVALLARO 			dev_warn(priv->device, "invalid port speed\n");
253102e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = 0;
253202e57b9dSGiuseppe CAVALLARO 		}
253302e57b9dSGiuseppe CAVALLARO 	}
253402e57b9dSGiuseppe CAVALLARO 
2535523f11b5SSrinivas Kandagatla 	/* Initialize the MAC Core */
2536c10d4c82SJose Abreu 	stmmac_core_init(priv, priv->hw, dev);
2537523f11b5SSrinivas Kandagatla 
2538d0a9c9f9SJoao Pinto 	/* Initialize MTL */
2539d0a9c9f9SJoao Pinto 	stmmac_mtl_configuration(priv);
25409eb12474Sjpinto 
25418bf993a5SJose Abreu 	/* Initialize Safety Features */
25428bf993a5SJose Abreu 	stmmac_safety_feat_configuration(priv);
25438bf993a5SJose Abreu 
2544c10d4c82SJose Abreu 	ret = stmmac_rx_ipc(priv, priv->hw);
2545978aded4SGiuseppe CAVALLARO 	if (!ret) {
254638ddc59dSLABBE Corentin 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2547978aded4SGiuseppe CAVALLARO 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2548d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
2549978aded4SGiuseppe CAVALLARO 	}
2550978aded4SGiuseppe CAVALLARO 
2551523f11b5SSrinivas Kandagatla 	/* Enable the MAC Rx/Tx */
2552c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
2553523f11b5SSrinivas Kandagatla 
2554b4f0a661SJoao Pinto 	/* Set the HW DMA mode and the COE */
2555b4f0a661SJoao Pinto 	stmmac_dma_operation_mode(priv);
2556b4f0a661SJoao Pinto 
2557523f11b5SSrinivas Kandagatla 	stmmac_mmc_setup(priv);
2558523f11b5SSrinivas Kandagatla 
2559fe131929SHuacai Chen 	if (init_ptp) {
25600ad2be79SThierry Reding 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
25610ad2be79SThierry Reding 		if (ret < 0)
25620ad2be79SThierry Reding 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
25630ad2be79SThierry Reding 
2564523f11b5SSrinivas Kandagatla 		ret = stmmac_init_ptp(priv);
2565722eef28SHeiner Kallweit 		if (ret == -EOPNOTSUPP)
2566722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2567722eef28SHeiner Kallweit 		else if (ret)
2568722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP init failed\n");
2569fe131929SHuacai Chen 	}
2570523f11b5SSrinivas Kandagatla 
2571523f11b5SSrinivas Kandagatla 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2572523f11b5SSrinivas Kandagatla 
2573a4e887faSJose Abreu 	if (priv->use_riwt) {
2574a4e887faSJose Abreu 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2575a4e887faSJose Abreu 		if (!ret)
2576523f11b5SSrinivas Kandagatla 			priv->rx_riwt = MAX_DMA_RIWT;
2577523f11b5SSrinivas Kandagatla 	}
2578523f11b5SSrinivas Kandagatla 
2579c10d4c82SJose Abreu 	if (priv->hw->pcs)
2580c10d4c82SJose Abreu 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2581523f11b5SSrinivas Kandagatla 
25824854ab99SJoao Pinto 	/* set TX and RX rings length */
25834854ab99SJoao Pinto 	stmmac_set_rings_length(priv);
25844854ab99SJoao Pinto 
2585f748be53SAlexandre TORGUE 	/* Enable TSO */
2586146617b8SJoao Pinto 	if (priv->tso) {
2587146617b8SJoao Pinto 		for (chan = 0; chan < tx_cnt; chan++)
2588a4e887faSJose Abreu 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2589146617b8SJoao Pinto 	}
2590f748be53SAlexandre TORGUE 
25917d9e6c5aSJose Abreu 	/* Start the ball rolling... */
25927d9e6c5aSJose Abreu 	stmmac_start_all_dma(priv);
25937d9e6c5aSJose Abreu 
2594523f11b5SSrinivas Kandagatla 	return 0;
2595523f11b5SSrinivas Kandagatla }
2596523f11b5SSrinivas Kandagatla 
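/**
 *  stmmac_hw_teardown - undo the HW setup on the open error path
 *  @dev: device pointer
 *  Description: it disables the PTP reference clock that was enabled by
 *  stmmac_hw_setup.
 */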
2597c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev)
2598c66f6c37SThierry Reding {
2599c66f6c37SThierry Reding 	struct stmmac_priv *priv = netdev_priv(dev);
2600c66f6c37SThierry Reding 
2601c66f6c37SThierry Reding 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2602c66f6c37SThierry Reding }
2603c66f6c37SThierry Reding 
2604523f11b5SSrinivas Kandagatla /**
26057ac6653aSJeff Kirsher  *  stmmac_open - open entry point of the driver
26067ac6653aSJeff Kirsher  *  @dev : pointer to the device structure.
26077ac6653aSJeff Kirsher  *  Description:
26087ac6653aSJeff Kirsher  *  This function is the open entry point of the driver.
26097ac6653aSJeff Kirsher  *  Return value:
26107ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
26117ac6653aSJeff Kirsher  *  file on failure.
26127ac6653aSJeff Kirsher  */
26137ac6653aSJeff Kirsher static int stmmac_open(struct net_device *dev)
26147ac6653aSJeff Kirsher {
26157ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
26168fce3331SJose Abreu 	u32 chan;
26177ac6653aSJeff Kirsher 	int ret;
26187ac6653aSJeff Kirsher 
26194bfcbd7aSFrancesco Virlinzi 	stmmac_check_ether_addr(priv);
26204bfcbd7aSFrancesco Virlinzi 
26213fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
26223fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
26233fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
26247ac6653aSJeff Kirsher 		ret = stmmac_init_phy(dev);
2625e58bb43fSGiuseppe CAVALLARO 		if (ret) {
262638ddc59dSLABBE Corentin 			netdev_err(priv->dev,
262738ddc59dSLABBE Corentin 				   "%s: Cannot attach to PHY (error: %d)\n",
2628e58bb43fSGiuseppe CAVALLARO 				   __func__, ret);
262989df20d9SHans de Goede 			return ret;
26307ac6653aSJeff Kirsher 		}
2631e58bb43fSGiuseppe CAVALLARO 	}
26327ac6653aSJeff Kirsher 
2633523f11b5SSrinivas Kandagatla 	/* Extra statistics */
2634523f11b5SSrinivas Kandagatla 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2635523f11b5SSrinivas Kandagatla 	priv->xstats.threshold = tc;
2636523f11b5SSrinivas Kandagatla 
26375bacd778SLABBE Corentin 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
263822ad3838SGiuseppe Cavallaro 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
263956329137SBartlomiej Zolnierkiewicz 
26405bacd778SLABBE Corentin 	ret = alloc_dma_desc_resources(priv);
26415bacd778SLABBE Corentin 	if (ret < 0) {
26425bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
26435bacd778SLABBE Corentin 			   __func__);
26445bacd778SLABBE Corentin 		goto dma_desc_error;
26455bacd778SLABBE Corentin 	}
26465bacd778SLABBE Corentin 
26475bacd778SLABBE Corentin 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
26485bacd778SLABBE Corentin 	if (ret < 0) {
26495bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
26505bacd778SLABBE Corentin 			   __func__);
26515bacd778SLABBE Corentin 		goto init_error;
26525bacd778SLABBE Corentin 	}
26535bacd778SLABBE Corentin 
2654fe131929SHuacai Chen 	ret = stmmac_hw_setup(dev, true);
265556329137SBartlomiej Zolnierkiewicz 	if (ret < 0) {
265638ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2657c9324d18SGiuseppe CAVALLARO 		goto init_error;
26587ac6653aSJeff Kirsher 	}
26597ac6653aSJeff Kirsher 
2660777da230SGiuseppe CAVALLARO 	stmmac_init_tx_coalesce(priv);
2661777da230SGiuseppe CAVALLARO 
2662d6d50c7eSPhilippe Reynes 	if (dev->phydev)
2663d6d50c7eSPhilippe Reynes 		phy_start(dev->phydev);
26647ac6653aSJeff Kirsher 
26657ac6653aSJeff Kirsher 	/* Request the IRQ lines */
26667ac6653aSJeff Kirsher 	ret = request_irq(dev->irq, stmmac_interrupt,
26677ac6653aSJeff Kirsher 			  IRQF_SHARED, dev->name, dev);
26687ac6653aSJeff Kirsher 	if (unlikely(ret < 0)) {
266938ddc59dSLABBE Corentin 		netdev_err(priv->dev,
267038ddc59dSLABBE Corentin 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
26717ac6653aSJeff Kirsher 			   __func__, dev->irq, ret);
26726c1e5abeSThierry Reding 		goto irq_error;
26737ac6653aSJeff Kirsher 	}
26747ac6653aSJeff Kirsher 
26757a13f8f5SFrancesco Virlinzi 	/* Request the Wake IRQ in case another line is used for WoL */
26767a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq) {
26777a13f8f5SFrancesco Virlinzi 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
26787a13f8f5SFrancesco Virlinzi 				  IRQF_SHARED, dev->name, dev);
26797a13f8f5SFrancesco Virlinzi 		if (unlikely(ret < 0)) {
268038ddc59dSLABBE Corentin 			netdev_err(priv->dev,
268138ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2682ceb69499SGiuseppe CAVALLARO 				   __func__, priv->wol_irq, ret);
2683c9324d18SGiuseppe CAVALLARO 			goto wolirq_error;
26847a13f8f5SFrancesco Virlinzi 		}
26857a13f8f5SFrancesco Virlinzi 	}
26867a13f8f5SFrancesco Virlinzi 
2687d765955dSGiuseppe CAVALLARO 	/* Request the LPI IRQ in case a separate line is used */
2688d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0) {
2689d765955dSGiuseppe CAVALLARO 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2690d765955dSGiuseppe CAVALLARO 				  dev->name, dev);
2691d765955dSGiuseppe CAVALLARO 		if (unlikely(ret < 0)) {
269238ddc59dSLABBE Corentin 			netdev_err(priv->dev,
269338ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2694d765955dSGiuseppe CAVALLARO 				   __func__, priv->lpi_irq, ret);
2695c9324d18SGiuseppe CAVALLARO 			goto lpiirq_error;
2696d765955dSGiuseppe CAVALLARO 		}
2697d765955dSGiuseppe CAVALLARO 	}
2698d765955dSGiuseppe CAVALLARO 
2699c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
2700c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
27017ac6653aSJeff Kirsher 
27027ac6653aSJeff Kirsher 	return 0;
27037ac6653aSJeff Kirsher 
2704c9324d18SGiuseppe CAVALLARO lpiirq_error:
2705d765955dSGiuseppe CAVALLARO 	if (priv->wol_irq != dev->irq)
2706d765955dSGiuseppe CAVALLARO 		free_irq(priv->wol_irq, dev);
2707c9324d18SGiuseppe CAVALLARO wolirq_error:
27087a13f8f5SFrancesco Virlinzi 	free_irq(dev->irq, dev);
27096c1e5abeSThierry Reding irq_error:
27106c1e5abeSThierry Reding 	if (dev->phydev)
27116c1e5abeSThierry Reding 		phy_stop(dev->phydev);
27127a13f8f5SFrancesco Virlinzi 
27138fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
27148fce3331SJose Abreu 		del_timer_sync(&priv->tx_queue[chan].txtimer);
27158fce3331SJose Abreu 
2716c66f6c37SThierry Reding 	stmmac_hw_teardown(dev);
2717c9324d18SGiuseppe CAVALLARO init_error:
2718c9324d18SGiuseppe CAVALLARO 	free_dma_desc_resources(priv);
27195bacd778SLABBE Corentin dma_desc_error:
2720d6d50c7eSPhilippe Reynes 	if (dev->phydev)
2721d6d50c7eSPhilippe Reynes 		phy_disconnect(dev->phydev);
27224bfcbd7aSFrancesco Virlinzi 
27237ac6653aSJeff Kirsher 	return ret;
27247ac6653aSJeff Kirsher }
27257ac6653aSJeff Kirsher 
27267ac6653aSJeff Kirsher /**
27277ac6653aSJeff Kirsher  *  stmmac_release - close entry point of the driver
27287ac6653aSJeff Kirsher  *  @dev : device pointer.
27297ac6653aSJeff Kirsher  *  Description:
27307ac6653aSJeff Kirsher  *  This is the stop entry point of the driver.
27317ac6653aSJeff Kirsher  */
27327ac6653aSJeff Kirsher static int stmmac_release(struct net_device *dev)
27337ac6653aSJeff Kirsher {
27347ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
27358fce3331SJose Abreu 	u32 chan;
27367ac6653aSJeff Kirsher 
2737d765955dSGiuseppe CAVALLARO 	if (priv->eee_enabled)
2738d765955dSGiuseppe CAVALLARO 		del_timer_sync(&priv->eee_ctrl_timer);
2739d765955dSGiuseppe CAVALLARO 
27407ac6653aSJeff Kirsher 	/* Stop and disconnect the PHY */
2741d6d50c7eSPhilippe Reynes 	if (dev->phydev) {
2742d6d50c7eSPhilippe Reynes 		phy_stop(dev->phydev);
2743d6d50c7eSPhilippe Reynes 		phy_disconnect(dev->phydev);
27447ac6653aSJeff Kirsher 	}
27457ac6653aSJeff Kirsher 
2746c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
27477ac6653aSJeff Kirsher 
2748c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
27497ac6653aSJeff Kirsher 
27508fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
27518fce3331SJose Abreu 		del_timer_sync(&priv->tx_queue[chan].txtimer);
27529125cdd1SGiuseppe CAVALLARO 
27537ac6653aSJeff Kirsher 	/* Free the IRQ lines */
27547ac6653aSJeff Kirsher 	free_irq(dev->irq, dev);
27557a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq)
27567a13f8f5SFrancesco Virlinzi 		free_irq(priv->wol_irq, dev);
2757d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0)
2758d765955dSGiuseppe CAVALLARO 		free_irq(priv->lpi_irq, dev);
27597ac6653aSJeff Kirsher 
27607ac6653aSJeff Kirsher 	/* Stop TX/RX DMA and clear the descriptors */
2761ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
27627ac6653aSJeff Kirsher 
27637ac6653aSJeff Kirsher 	/* Release and free the Rx/Tx resources */
27647ac6653aSJeff Kirsher 	free_dma_desc_resources(priv);
27657ac6653aSJeff Kirsher 
27667ac6653aSJeff Kirsher 	/* Disable the MAC Rx/Tx */
2767c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
27687ac6653aSJeff Kirsher 
27697ac6653aSJeff Kirsher 	netif_carrier_off(dev);
27707ac6653aSJeff Kirsher 
277192ba6888SRayagond Kokatanur 	stmmac_release_ptp(priv);
277292ba6888SRayagond Kokatanur 
27737ac6653aSJeff Kirsher 	return 0;
27747ac6653aSJeff Kirsher }
27757ac6653aSJeff Kirsher 
27767ac6653aSJeff Kirsher /**
2777f748be53SAlexandre TORGUE  *  stmmac_tso_allocator - allocate and fill the TSO descriptors
2778f748be53SAlexandre TORGUE  *  @priv: driver private structure
2779f748be53SAlexandre TORGUE  *  @des: buffer start address
2780f748be53SAlexandre TORGUE  *  @total_len: total length to fill in descriptors
2781f748be53SAlexandre TORGUE  *  @last_segment: condition for the last descriptor
2782ce736788SJoao Pinto  *  @queue: TX queue index
2783f748be53SAlexandre TORGUE  *  Description:
2784f748be53SAlexandre TORGUE  *  This function fills the current descriptor and requests new descriptors
2785f748be53SAlexandre TORGUE  *  according to the buffer length to fill
2786f748be53SAlexandre TORGUE  */
2787f748be53SAlexandre TORGUE static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2788ce736788SJoao Pinto 				 int total_len, bool last_segment, u32 queue)
2789f748be53SAlexandre TORGUE {
2790ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2791f748be53SAlexandre TORGUE 	struct dma_desc *desc;
27925bacd778SLABBE Corentin 	u32 buff_size;
2793ce736788SJoao Pinto 	int tmp_len;
2794f748be53SAlexandre TORGUE 
2795f748be53SAlexandre TORGUE 	tmp_len = total_len;
2796f748be53SAlexandre TORGUE 
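	/* Fill one descriptor per iteration, at most TSO_MAX_BUFF_SIZE bytes each */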
2797f748be53SAlexandre TORGUE 	while (tmp_len > 0) {
2798ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2799b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2800ce736788SJoao Pinto 		desc = tx_q->dma_tx + tx_q->cur_tx;
2801f748be53SAlexandre TORGUE 
2802f8be0d78SMichael Weiser 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2803f748be53SAlexandre TORGUE 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2804f748be53SAlexandre TORGUE 			    TSO_MAX_BUFF_SIZE : tmp_len;
2805f748be53SAlexandre TORGUE 
280642de047dSJose Abreu 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2807f748be53SAlexandre TORGUE 				0, 1,
2808426849e6SNiklas Cassel 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2809f748be53SAlexandre TORGUE 				0, 0);
2810f748be53SAlexandre TORGUE 
2811f748be53SAlexandre TORGUE 		tmp_len -= TSO_MAX_BUFF_SIZE;
2812f748be53SAlexandre TORGUE 	}
2813f748be53SAlexandre TORGUE }
2814f748be53SAlexandre TORGUE 
2815f748be53SAlexandre TORGUE /**
2816f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2817f748be53SAlexandre TORGUE  *  @skb : the socket buffer
2818f748be53SAlexandre TORGUE  *  @dev : device pointer
2819f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
2820f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
2821f748be53SAlexandre TORGUE  *  The diagram below shows the ring programming in case of TSO frames:
2822f748be53SAlexandre TORGUE  *
2823f748be53SAlexandre TORGUE  *  First Descriptor
2824f748be53SAlexandre TORGUE  *   --------
2825f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
2826f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
2827f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
2828f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2829f748be53SAlexandre TORGUE  *   --------
2830f748be53SAlexandre TORGUE  *	|
2831f748be53SAlexandre TORGUE  *     ...
2832f748be53SAlexandre TORGUE  *	|
2833f748be53SAlexandre TORGUE  *   --------
2834f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2835f748be53SAlexandre TORGUE  *   | DES1 | --|
2836f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
2837f748be53SAlexandre TORGUE  *   | DES3 |
2838f748be53SAlexandre TORGUE  *   --------
2839f748be53SAlexandre TORGUE  *
2840f748be53SAlexandre TORGUE  * mss is fixed when tso is enabled, so the TDES3 ctx field is only programmed when mss changes.
2841f748be53SAlexandre TORGUE  */
2842f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2843f748be53SAlexandre TORGUE {
2844ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
2845f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
2846f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
2847ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
2848f748be53SAlexandre TORGUE 	unsigned int first_entry, des;
2849ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
2850ce736788SJoao Pinto 	int tmp_pay_len = 0;
2851ce736788SJoao Pinto 	u32 pay_len, mss;
2852f748be53SAlexandre TORGUE 	u8 proto_hdr_len;
2853f748be53SAlexandre TORGUE 	int i;
2854f748be53SAlexandre TORGUE 
2855ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
2856ce736788SJoao Pinto 
2857f748be53SAlexandre TORGUE 	/* Compute header lengths */
2858f748be53SAlexandre TORGUE 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2859f748be53SAlexandre TORGUE 
2860f748be53SAlexandre TORGUE 	/* Desc availability based on threshold should be safe enough */
2861ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
2862f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2863c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2864c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2865c22a3f48SJoao Pinto 								queue));
2866f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
286738ddc59dSLABBE Corentin 			netdev_err(priv->dev,
286838ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
286938ddc59dSLABBE Corentin 				   __func__);
2870f748be53SAlexandre TORGUE 		}
2871f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
2872f748be53SAlexandre TORGUE 	}
2873f748be53SAlexandre TORGUE 
2874f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2875f748be53SAlexandre TORGUE 
2876f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
2877f748be53SAlexandre TORGUE 
2878f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
28798d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
2880ce736788SJoao Pinto 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
288142de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
28828d212a9eSNiklas Cassel 		tx_q->mss = mss;
2883ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2884b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2885f748be53SAlexandre TORGUE 	}
2886f748be53SAlexandre TORGUE 
2887f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
2888f748be53SAlexandre TORGUE 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2889f748be53SAlexandre TORGUE 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2890f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2891f748be53SAlexandre TORGUE 			skb->data_len);
2892f748be53SAlexandre TORGUE 	}
2893f748be53SAlexandre TORGUE 
2894ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
2895b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2896f748be53SAlexandre TORGUE 
2897ce736788SJoao Pinto 	desc = tx_q->dma_tx + first_entry;
2898f748be53SAlexandre TORGUE 	first = desc;
2899f748be53SAlexandre TORGUE 
2900f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
2901f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2902f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
2903f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
2904f748be53SAlexandre TORGUE 		goto dma_map_err;
2905f748be53SAlexandre TORGUE 
2906ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2907ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2908f748be53SAlexandre TORGUE 
2909f8be0d78SMichael Weiser 	first->des0 = cpu_to_le32(des);
2910f748be53SAlexandre TORGUE 
2911f748be53SAlexandre TORGUE 	/* Fill start of payload in buff2 of first descriptor */
2912f748be53SAlexandre TORGUE 	if (pay_len)
2913f8be0d78SMichael Weiser 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2914f748be53SAlexandre TORGUE 
2915f748be53SAlexandre TORGUE 	/* If needed take extra descriptors to fill the remaining payload */
2916f748be53SAlexandre TORGUE 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2917f748be53SAlexandre TORGUE 
2918ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2919f748be53SAlexandre TORGUE 
2920f748be53SAlexandre TORGUE 	/* Prepare fragments */
2921f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
2922f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2923f748be53SAlexandre TORGUE 
2924f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
2925f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
2926f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
2927937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
2928937071c1SThierry Reding 			goto dma_map_err;
2929f748be53SAlexandre TORGUE 
2930f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2931ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
2932f748be53SAlexandre TORGUE 
2933ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2934ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2935ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2936f748be53SAlexandre TORGUE 	}
2937f748be53SAlexandre TORGUE 
2938ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2939f748be53SAlexandre TORGUE 
294005cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
294105cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
294205cf0d1bSNiklas Cassel 
294305cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
294405cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
294505cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
294605cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
294705cf0d1bSNiklas Cassel 	 */
2948ce736788SJoao Pinto 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2949f748be53SAlexandre TORGUE 
2950ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2951b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
295238ddc59dSLABBE Corentin 			  __func__);
2953c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2954f748be53SAlexandre TORGUE 	}
2955f748be53SAlexandre TORGUE 
2956f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
2957f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
2958f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
2959f748be53SAlexandre TORGUE 
2960f748be53SAlexandre TORGUE 	/* Manage tx mitigation */
29618fce3331SJose Abreu 	tx_q->tx_count_frames += nfrags + 1;
29628fce3331SJose Abreu 	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
296342de047dSJose Abreu 		stmmac_set_tx_ic(priv, desc);
2964f748be53SAlexandre TORGUE 		priv->xstats.tx_set_ic_bit++;
29658fce3331SJose Abreu 		tx_q->tx_count_frames = 0;
29668fce3331SJose Abreu 	} else {
29678fce3331SJose Abreu 		stmmac_tx_timer_arm(priv, queue);
2968f748be53SAlexandre TORGUE 	}
2969f748be53SAlexandre TORGUE 
2970f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
2971f748be53SAlexandre TORGUE 
2972f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2973f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
2974f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
2975f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
297642de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
2977f748be53SAlexandre TORGUE 	}
2978f748be53SAlexandre TORGUE 
2979f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
298042de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2981f748be53SAlexandre TORGUE 			proto_hdr_len,
2982f748be53SAlexandre TORGUE 			pay_len,
2983ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2984f748be53SAlexandre TORGUE 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2985f748be53SAlexandre TORGUE 
2986f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
298715d2ee42SNiklas Cassel 	if (mss_desc) {
298815d2ee42SNiklas Cassel 		/* Make sure that first descriptor has been completely
298915d2ee42SNiklas Cassel 		 * written, including its own bit. This is because MSS is
299015d2ee42SNiklas Cassel 		 * actually before first descriptor, so we need to make
299115d2ee42SNiklas Cassel 		 * sure that MSS's own bit is the last thing written.
299215d2ee42SNiklas Cassel 		 */
299315d2ee42SNiklas Cassel 		dma_wmb();
299442de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
299515d2ee42SNiklas Cassel 	}
2996f748be53SAlexandre TORGUE 
2997f748be53SAlexandre TORGUE 	/* The own bit must be the latest setting done when preparing the
2998f748be53SAlexandre TORGUE 	 * descriptor, and then a barrier is needed to make sure that
2999f748be53SAlexandre TORGUE 	 * all is coherent before granting the DMA engine.
3000f748be53SAlexandre TORGUE 	 */
300195eb930aSNiklas Cassel 	wmb();
3002f748be53SAlexandre TORGUE 
3003f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
3004f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3005ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3006ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
3007f748be53SAlexandre TORGUE 
300842de047dSJose Abreu 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3009f748be53SAlexandre TORGUE 
3010f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
3011f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
3012f748be53SAlexandre TORGUE 	}
3013f748be53SAlexandre TORGUE 
3014c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3015f748be53SAlexandre TORGUE 
30160431100bSJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3017a4e887faSJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3018f748be53SAlexandre TORGUE 
3019f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3020f748be53SAlexandre TORGUE 
3021f748be53SAlexandre TORGUE dma_map_err:
3022f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
3023f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
3024f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
3025f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3026f748be53SAlexandre TORGUE }
3027f748be53SAlexandre TORGUE 
3028f748be53SAlexandre TORGUE /**
3029732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
30307ac6653aSJeff Kirsher  *  @skb : the socket buffer
30317ac6653aSJeff Kirsher  *  @dev : device pointer
303232ceabcaSGiuseppe CAVALLARO  *  Description : this is the tx entry point of the driver.
303332ceabcaSGiuseppe CAVALLARO  *  It programs the chain or the ring and supports oversized frames
303432ceabcaSGiuseppe CAVALLARO  *  and the SG feature.
30357ac6653aSJeff Kirsher  */
30367ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
30377ac6653aSJeff Kirsher {
30387ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
30390e80bdc9SGiuseppe Cavallaro 	unsigned int nopaged_len = skb_headlen(skb);
30404a7d666aSGiuseppe CAVALLARO 	int i, csum_insertion = 0, is_jumbo = 0;
3041ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
30427ac6653aSJeff Kirsher 	int nfrags = skb_shinfo(skb)->nr_frags;
304359423815SColin Ian King 	int entry;
304459423815SColin Ian King 	unsigned int first_entry;
30457ac6653aSJeff Kirsher 	struct dma_desc *desc, *first;
3046ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
30470e80bdc9SGiuseppe Cavallaro 	unsigned int enh_desc;
3048f748be53SAlexandre TORGUE 	unsigned int des;
3049f748be53SAlexandre TORGUE 
3050ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
3051ce736788SJoao Pinto 
3052e2cd682dSJose Abreu 	if (priv->tx_path_in_lpi_mode)
3053e2cd682dSJose Abreu 		stmmac_disable_eee_mode(priv);
3054e2cd682dSJose Abreu 
3055f748be53SAlexandre TORGUE 	/* Manage oversized TCP frames for GMAC4 device */
3056f748be53SAlexandre TORGUE 	if (skb_is_gso(skb) && priv->tso) {
3057c5acdbeeSJose Abreu 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3058c5acdbeeSJose Abreu 			/*
3059c5acdbeeSJose Abreu 			 * There is no way to determine the number of TSO
3060c5acdbeeSJose Abreu 			 * capable Queues. Let's always use Queue 0
3061c5acdbeeSJose Abreu 			 * because if TSO is supported then at least this
3062c5acdbeeSJose Abreu 			 * one will be capable.
3063c5acdbeeSJose Abreu 			 */
3064c5acdbeeSJose Abreu 			skb_set_queue_mapping(skb, 0);
3065c5acdbeeSJose Abreu 
3066f748be53SAlexandre TORGUE 			return stmmac_tso_xmit(skb, dev);
3067f748be53SAlexandre TORGUE 		}
3068c5acdbeeSJose Abreu 	}
30697ac6653aSJeff Kirsher 
3070ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3071c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3072c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3073c22a3f48SJoao Pinto 								queue));
30747ac6653aSJeff Kirsher 			/* This is a hard error, log it. */
307538ddc59dSLABBE Corentin 			netdev_err(priv->dev,
307638ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
307738ddc59dSLABBE Corentin 				   __func__);
30787ac6653aSJeff Kirsher 		}
30797ac6653aSJeff Kirsher 		return NETDEV_TX_BUSY;
30807ac6653aSJeff Kirsher 	}
30817ac6653aSJeff Kirsher 
3082ce736788SJoao Pinto 	entry = tx_q->cur_tx;
30830e80bdc9SGiuseppe Cavallaro 	first_entry = entry;
3084b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
30857ac6653aSJeff Kirsher 
30867ac6653aSJeff Kirsher 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
30877ac6653aSJeff Kirsher 
30880e80bdc9SGiuseppe Cavallaro 	if (likely(priv->extend_desc))
3089ce736788SJoao Pinto 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3090c24602efSGiuseppe CAVALLARO 	else
3091ce736788SJoao Pinto 		desc = tx_q->dma_tx + entry;
3092c24602efSGiuseppe CAVALLARO 
30937ac6653aSJeff Kirsher 	first = desc;
30947ac6653aSJeff Kirsher 
30950e80bdc9SGiuseppe Cavallaro 	enh_desc = priv->plat->enh_desc;
30964a7d666aSGiuseppe CAVALLARO 	/* To program the descriptors according to the size of the frame */
309729896a67SGiuseppe CAVALLARO 	if (enh_desc)
30982c520b1cSJose Abreu 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
309929896a67SGiuseppe CAVALLARO 
310063a550fcSJose Abreu 	if (unlikely(is_jumbo)) {
31012c520b1cSJose Abreu 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
310263a550fcSJose Abreu 		if (unlikely(entry < 0) && (entry != -EINVAL))
3103362b37beSGiuseppe CAVALLARO 			goto dma_map_err;
310429896a67SGiuseppe CAVALLARO 	}
31057ac6653aSJeff Kirsher 
31067ac6653aSJeff Kirsher 	for (i = 0; i < nfrags; i++) {
31079e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
31089e903e08SEric Dumazet 		int len = skb_frag_size(frag);
3109be434d50SGiuseppe Cavallaro 		bool last_segment = (i == (nfrags - 1));
31107ac6653aSJeff Kirsher 
3111e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3112b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[entry]);
3113e3ad57c9SGiuseppe Cavallaro 
31140e80bdc9SGiuseppe Cavallaro 		if (likely(priv->extend_desc))
3115ce736788SJoao Pinto 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3116c24602efSGiuseppe CAVALLARO 		else
3117ce736788SJoao Pinto 			desc = tx_q->dma_tx + entry;
31187ac6653aSJeff Kirsher 
3119f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3120f722380dSIan Campbell 				       DMA_TO_DEVICE);
3121f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
3122362b37beSGiuseppe CAVALLARO 			goto dma_map_err; /* should reuse desc w/o issues */
3123362b37beSGiuseppe CAVALLARO 
3124ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].buf = des;
31256844171dSJose Abreu 
31266844171dSJose Abreu 		stmmac_set_desc_addr(priv, desc, des);
3127f748be53SAlexandre TORGUE 
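		/* Bookkeeping used later by the TX clean path: how this
		 * buffer was mapped (as a page fragment), its length and
		 * whether it closes the frame.
		 */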
3128ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3129ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].len = len;
3130ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
31310e80bdc9SGiuseppe Cavallaro 
31320e80bdc9SGiuseppe Cavallaro 		/* Prepare the descriptor and set the own bit too */
313342de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
313442de047dSJose Abreu 				priv->mode, 1, last_segment, skb->len);
31357ac6653aSJeff Kirsher 	}
31367ac6653aSJeff Kirsher 
313705cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
313805cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[entry] = skb;
3139e3ad57c9SGiuseppe Cavallaro 
314005cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
314105cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
314205cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
314305cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
314405cf0d1bSNiklas Cassel 	 */
314505cf0d1bSNiklas Cassel 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3146ce736788SJoao Pinto 	tx_q->cur_tx = entry;
31477ac6653aSJeff Kirsher 
31487ac6653aSJeff Kirsher 	if (netif_msg_pktdata(priv)) {
3149d0225e7dSAlexandre TORGUE 		void *tx_head;
3150d0225e7dSAlexandre TORGUE 
315138ddc59dSLABBE Corentin 		netdev_dbg(priv->dev,
315238ddc59dSLABBE Corentin 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3153ce736788SJoao Pinto 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
31540e80bdc9SGiuseppe Cavallaro 			   entry, first, nfrags);
315583d7af64SGiuseppe CAVALLARO 
3156c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
3157ce736788SJoao Pinto 			tx_head = (void *)tx_q->dma_etx;
3158c24602efSGiuseppe CAVALLARO 		else
3159ce736788SJoao Pinto 			tx_head = (void *)tx_q->dma_tx;
3160d0225e7dSAlexandre TORGUE 
316142de047dSJose Abreu 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3162c24602efSGiuseppe CAVALLARO 
316338ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
31647ac6653aSJeff Kirsher 		print_pkt(skb->data, skb->len);
31657ac6653aSJeff Kirsher 	}
31660e80bdc9SGiuseppe Cavallaro 
3167ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3168b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3169b3e51069SLABBE Corentin 			  __func__);
3170c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
31717ac6653aSJeff Kirsher 	}
31727ac6653aSJeff Kirsher 
31737ac6653aSJeff Kirsher 	dev->stats.tx_bytes += skb->len;
31747ac6653aSJeff Kirsher 
31750e80bdc9SGiuseppe Cavallaro 	/* According to the coalesce parameter, the IC bit for the latest
31760e80bdc9SGiuseppe Cavallaro 	 * segment is reset and the timer is re-armed to clean the tx status.
31770e80bdc9SGiuseppe Cavallaro 	 * This approach takes care of the fragments: desc is the first
31780e80bdc9SGiuseppe Cavallaro 	 * element in case of no SG.
31790e80bdc9SGiuseppe Cavallaro 	 */
31808fce3331SJose Abreu 	tx_q->tx_count_frames += nfrags + 1;
31818fce3331SJose Abreu 	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
318242de047dSJose Abreu 		stmmac_set_tx_ic(priv, desc);
31830e80bdc9SGiuseppe Cavallaro 		priv->xstats.tx_set_ic_bit++;
31848fce3331SJose Abreu 		tx_q->tx_count_frames = 0;
31858fce3331SJose Abreu 	} else {
31868fce3331SJose Abreu 		stmmac_tx_timer_arm(priv, queue);
31870e80bdc9SGiuseppe Cavallaro 	}
31880e80bdc9SGiuseppe Cavallaro 
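	/* Take the software transmit timestamp before ownership of the
	 * first descriptor is handed over to the hardware.
	 */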
31890e80bdc9SGiuseppe Cavallaro 	skb_tx_timestamp(skb);
31900e80bdc9SGiuseppe Cavallaro 
31910e80bdc9SGiuseppe Cavallaro 	/* Ready to fill the first descriptor and set the OWN bit without any
31920e80bdc9SGiuseppe Cavallaro 	 * problem because all the other descriptors are already prepared to
31930e80bdc9SGiuseppe Cavallaro 	 * be passed to the DMA engine.
31940e80bdc9SGiuseppe Cavallaro 	 */
31950e80bdc9SGiuseppe Cavallaro 	if (likely(!is_jumbo)) {
31960e80bdc9SGiuseppe Cavallaro 		bool last_segment = (nfrags == 0);
31970e80bdc9SGiuseppe Cavallaro 
3198f748be53SAlexandre TORGUE 		des = dma_map_single(priv->device, skb->data,
31990e80bdc9SGiuseppe Cavallaro 				     nopaged_len, DMA_TO_DEVICE);
3200f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
32010e80bdc9SGiuseppe Cavallaro 			goto dma_map_err;
32020e80bdc9SGiuseppe Cavallaro 
3203ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].buf = des;
32046844171dSJose Abreu 
32056844171dSJose Abreu 		stmmac_set_desc_addr(priv, first, des);
3206f748be53SAlexandre TORGUE 
3207ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3208ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
32090e80bdc9SGiuseppe Cavallaro 
3210891434b1SRayagond Kokatanur 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3211891434b1SRayagond Kokatanur 			     priv->hwts_tx_en)) {
3212891434b1SRayagond Kokatanur 			/* declare that device is doing timestamping */
3213891434b1SRayagond Kokatanur 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
321442de047dSJose Abreu 			stmmac_enable_tx_timestamp(priv, first);
3215891434b1SRayagond Kokatanur 		}
3216891434b1SRayagond Kokatanur 
32170e80bdc9SGiuseppe Cavallaro 		/* Prepare the first descriptor setting the OWN bit too */
321842de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
321942de047dSJose Abreu 				csum_insertion, priv->mode, 1, last_segment,
322042de047dSJose Abreu 				skb->len);
322180acbed9SAaro Koskinen 	} else {
322280acbed9SAaro Koskinen 		stmmac_set_tx_owner(priv, first);
322380acbed9SAaro Koskinen 	}
32240e80bdc9SGiuseppe Cavallaro 
32250e80bdc9SGiuseppe Cavallaro 	/* The own bit must be the latest setting done when preparing the
32260e80bdc9SGiuseppe Cavallaro 	 * descriptor, and then a barrier is needed to make sure that
32270e80bdc9SGiuseppe Cavallaro 	 * everything is coherent before granting the DMA engine.
32280e80bdc9SGiuseppe Cavallaro 	 */
322995eb930aSNiklas Cassel 	wmb();
32307ac6653aSJeff Kirsher 
3231c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3232f748be53SAlexandre TORGUE 
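	/* Notify the hardware that new descriptors are available: older
	 * cores are kicked via the transmit poll demand, while GMAC4/XGMAC
	 * cores are told by updating the queue tail pointer below.
	 */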
3233a4e887faSJose Abreu 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
32348fce3331SJose Abreu 
32350431100bSJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3236f1565c60SJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
32377ac6653aSJeff Kirsher 
3238362b37beSGiuseppe CAVALLARO 	return NETDEV_TX_OK;
3239a9097a96SGiuseppe CAVALLARO 
3240362b37beSGiuseppe CAVALLARO dma_map_err:
324138ddc59dSLABBE Corentin 	netdev_err(priv->dev, "Tx DMA map failed\n");
3242362b37beSGiuseppe CAVALLARO 	dev_kfree_skb(skb);
3243362b37beSGiuseppe CAVALLARO 	priv->dev->stats.tx_dropped++;
32447ac6653aSJeff Kirsher 	return NETDEV_TX_OK;
32457ac6653aSJeff Kirsher }
32467ac6653aSJeff Kirsher 
3247b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3248b9381985SVince Bridgers {
3249ab188e8fSElad Nachman 	struct vlan_ethhdr *veth;
3250ab188e8fSElad Nachman 	__be16 vlan_proto;
3251b9381985SVince Bridgers 	u16 vlanid;
3252b9381985SVince Bridgers 
3253ab188e8fSElad Nachman 	veth = (struct vlan_ethhdr *)skb->data;
3254ab188e8fSElad Nachman 	vlan_proto = veth->h_vlan_proto;
3255ab188e8fSElad Nachman 
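	/* Strip the tag in software only when the matching VLAN RX
	 * acceleration feature (CTAG or STAG) is enabled on the netdev.
	 */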
3256ab188e8fSElad Nachman 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3257ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3258ab188e8fSElad Nachman 	    (vlan_proto == htons(ETH_P_8021AD) &&
3259ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3260b9381985SVince Bridgers 		/* pop the vlan tag */
3261ab188e8fSElad Nachman 		vlanid = ntohs(veth->h_vlan_TCI);
3262ab188e8fSElad Nachman 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3263b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
3264ab188e8fSElad Nachman 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3265b9381985SVince Bridgers 	}
3266b9381985SVince Bridgers }
3267b9381985SVince Bridgers 
3268b9381985SVince Bridgers 
326954139cf3SJoao Pinto static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3270120e87f9SGiuseppe Cavallaro {
327154139cf3SJoao Pinto 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3272120e87f9SGiuseppe Cavallaro 		return 0;
3273120e87f9SGiuseppe Cavallaro 
3274120e87f9SGiuseppe Cavallaro 	return 1;
3275120e87f9SGiuseppe Cavallaro }
3276120e87f9SGiuseppe Cavallaro 
327732ceabcaSGiuseppe CAVALLARO /**
3278732fdf0eSGiuseppe CAVALLARO  * stmmac_rx_refill - refill the used preallocated skb buffers
327932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
328054139cf3SJoao Pinto  * @queue: RX queue index
328132ceabcaSGiuseppe CAVALLARO  * Description : this reallocates the skbs for the reception process
328232ceabcaSGiuseppe CAVALLARO  * that is based on zero-copy.
328332ceabcaSGiuseppe CAVALLARO  */
328454139cf3SJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
32857ac6653aSJeff Kirsher {
328654139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
328754139cf3SJoao Pinto 	int dirty = stmmac_rx_dirty(priv, queue);
328854139cf3SJoao Pinto 	unsigned int entry = rx_q->dirty_rx;
328954139cf3SJoao Pinto 
32907ac6653aSJeff Kirsher 	int bfsize = priv->dma_buf_sz;
32917ac6653aSJeff Kirsher 
3292e3ad57c9SGiuseppe Cavallaro 	while (dirty-- > 0) {
3293c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
3294c24602efSGiuseppe CAVALLARO 
3295c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
329654139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3297c24602efSGiuseppe CAVALLARO 		else
329854139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3299c24602efSGiuseppe CAVALLARO 
330054139cf3SJoao Pinto 		if (likely(!rx_q->rx_skbuff[entry])) {
33017ac6653aSJeff Kirsher 			struct sk_buff *skb;
33027ac6653aSJeff Kirsher 
3303acb600deSEric Dumazet 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3304120e87f9SGiuseppe Cavallaro 			if (unlikely(!skb)) {
3305120e87f9SGiuseppe Cavallaro 				/* so for a while no zero-copy! */
330654139cf3SJoao Pinto 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3307120e87f9SGiuseppe Cavallaro 				if (unlikely(net_ratelimit()))
3308120e87f9SGiuseppe Cavallaro 					dev_err(priv->device,
3309120e87f9SGiuseppe Cavallaro 						"fail to alloc skb entry %d\n",
3310120e87f9SGiuseppe Cavallaro 						entry);
33117ac6653aSJeff Kirsher 				break;
3312120e87f9SGiuseppe Cavallaro 			}
33137ac6653aSJeff Kirsher 
331454139cf3SJoao Pinto 			rx_q->rx_skbuff[entry] = skb;
331554139cf3SJoao Pinto 			rx_q->rx_skbuff_dma[entry] =
33167ac6653aSJeff Kirsher 			    dma_map_single(priv->device, skb->data, bfsize,
33177ac6653aSJeff Kirsher 					   DMA_FROM_DEVICE);
3318362b37beSGiuseppe CAVALLARO 			if (dma_mapping_error(priv->device,
331954139cf3SJoao Pinto 					      rx_q->rx_skbuff_dma[entry])) {
332038ddc59dSLABBE Corentin 				netdev_err(priv->dev, "Rx DMA map failed\n");
3321362b37beSGiuseppe CAVALLARO 				dev_kfree_skb(skb);
3322362b37beSGiuseppe CAVALLARO 				break;
3323362b37beSGiuseppe CAVALLARO 			}
3324286a8372SGiuseppe CAVALLARO 
33256844171dSJose Abreu 			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
33262c520b1cSJose Abreu 			stmmac_refill_desc3(priv, rx_q, p);
3327286a8372SGiuseppe CAVALLARO 
332854139cf3SJoao Pinto 			if (rx_q->rx_zeroc_thresh > 0)
332954139cf3SJoao Pinto 				rx_q->rx_zeroc_thresh--;
3330120e87f9SGiuseppe Cavallaro 
3331b3e51069SLABBE Corentin 			netif_dbg(priv, rx_status, priv->dev,
333238ddc59dSLABBE Corentin 				  "refill entry #%d\n", entry);
33337ac6653aSJeff Kirsher 		}
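		/* Make sure the new buffer address is visible to the device
		 * before the OWN bit is set below.
		 */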
3334ad688cdbSPavel Machek 		dma_wmb();
3335f748be53SAlexandre TORGUE 
3336357951cdSJose Abreu 		stmmac_set_rx_owner(priv, p, priv->use_riwt);
3337f748be53SAlexandre TORGUE 
3338ad688cdbSPavel Machek 		dma_wmb();
3339e3ad57c9SGiuseppe Cavallaro 
3340e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
33417ac6653aSJeff Kirsher 	}
334254139cf3SJoao Pinto 	rx_q->dirty_rx = entry;
33437ac6653aSJeff Kirsher }
33447ac6653aSJeff Kirsher 
334532ceabcaSGiuseppe CAVALLARO /**
3346732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
334732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
334854139cf3SJoao Pinto  * @limit: napi budget
334954139cf3SJoao Pinto  * @queue: RX queue index.
335032ceabcaSGiuseppe CAVALLARO  * Description :  this is the function called by the napi poll method.
335132ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
335232ceabcaSGiuseppe CAVALLARO  */
335354139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
33547ac6653aSJeff Kirsher {
335554139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
33568fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[queue];
335707b39753SAaro Koskinen 	unsigned int next_entry = rx_q->cur_rx;
335854139cf3SJoao Pinto 	int coe = priv->hw->rx_csum;
33597ac6653aSJeff Kirsher 	unsigned int count = 0;
33607d9e6c5aSJose Abreu 	bool xmac;
33617d9e6c5aSJose Abreu 
33627d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
33637ac6653aSJeff Kirsher 
336483d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
3365d0225e7dSAlexandre TORGUE 		void *rx_head;
3366d0225e7dSAlexandre TORGUE 
336738ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3368c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
336954139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
3370c24602efSGiuseppe CAVALLARO 		else
337154139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
3372d0225e7dSAlexandre TORGUE 
337342de047dSJose Abreu 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
33747ac6653aSJeff Kirsher 	}
3375c24602efSGiuseppe CAVALLARO 	while (count < limit) {
337607b39753SAaro Koskinen 		int entry, status;
33779401bb5cSGiuseppe CAVALLARO 		struct dma_desc *p;
3378ba1ffd74SGiuseppe CAVALLARO 		struct dma_desc *np;
33797ac6653aSJeff Kirsher 
338007b39753SAaro Koskinen 		entry = next_entry;
338107b39753SAaro Koskinen 
3382c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
338354139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3384c24602efSGiuseppe CAVALLARO 		else
338554139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3386c24602efSGiuseppe CAVALLARO 
3387c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
338842de047dSJose Abreu 		status = stmmac_rx_status(priv, &priv->dev->stats,
3389c1fa3212SFabrice Gasnier 				&priv->xstats, p);
3390c1fa3212SFabrice Gasnier 		/* check if managed by the DMA otherwise go ahead */
3391c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
33927ac6653aSJeff Kirsher 			break;
33937ac6653aSJeff Kirsher 
33947ac6653aSJeff Kirsher 		count++;
33957ac6653aSJeff Kirsher 
339654139cf3SJoao Pinto 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
339754139cf3SJoao Pinto 		next_entry = rx_q->cur_rx;
3398e3ad57c9SGiuseppe Cavallaro 
3399c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
340054139cf3SJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3401c24602efSGiuseppe CAVALLARO 		else
340254139cf3SJoao Pinto 			np = rx_q->dma_rx + next_entry;
3403ba1ffd74SGiuseppe CAVALLARO 
3404ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
34057ac6653aSJeff Kirsher 
340642de047dSJose Abreu 		if (priv->extend_desc)
340742de047dSJose Abreu 			stmmac_rx_extended_status(priv, &priv->dev->stats,
340842de047dSJose Abreu 					&priv->xstats, rx_q->dma_erx + entry);
3409891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
34107ac6653aSJeff Kirsher 			priv->dev->stats.rx_errors++;
3411891434b1SRayagond Kokatanur 			if (priv->hwts_rx_en && !priv->extend_desc) {
34128d45e42bSLABBE Corentin 				/* DESC2 & DESC3 will be overwritten by the
3413891434b1SRayagond Kokatanur 				 * device with the timestamp value, hence
3414891434b1SRayagond Kokatanur 				 * reinitialize them in stmmac_rx_refill() so
3415891434b1SRayagond Kokatanur 				 * that the device can reuse them.
3416891434b1SRayagond Kokatanur 				 */
34179c8080d0SJose Abreu 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
341854139cf3SJoao Pinto 				rx_q->rx_skbuff[entry] = NULL;
3419891434b1SRayagond Kokatanur 				dma_unmap_single(priv->device,
342054139cf3SJoao Pinto 						 rx_q->rx_skbuff_dma[entry],
3421ceb69499SGiuseppe CAVALLARO 						 priv->dma_buf_sz,
3422ceb69499SGiuseppe CAVALLARO 						 DMA_FROM_DEVICE);
3423891434b1SRayagond Kokatanur 			}
3424891434b1SRayagond Kokatanur 		} else {
34257ac6653aSJeff Kirsher 			struct sk_buff *skb;
34267ac6653aSJeff Kirsher 			int frame_len;
3427f748be53SAlexandre TORGUE 			unsigned int des;
3428f748be53SAlexandre TORGUE 
3429d2df9ea0SJose Abreu 			stmmac_get_desc_addr(priv, p, &des);
343042de047dSJose Abreu 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3431ceb69499SGiuseppe CAVALLARO 
34328d45e42bSLABBE Corentin 			/*  If frame length is greater than skb buffer size
3433f748be53SAlexandre TORGUE 			 *  (preallocated during init) then the packet is
3434f748be53SAlexandre TORGUE 			 *  ignored
3435f748be53SAlexandre TORGUE 			 */
3436e527c4a7SGiuseppe CAVALLARO 			if (frame_len > priv->dma_buf_sz) {
3437972c9be7SAaro Koskinen 				if (net_ratelimit())
343838ddc59dSLABBE Corentin 					netdev_err(priv->dev,
343938ddc59dSLABBE Corentin 						   "len %d larger than size (%d)\n",
344038ddc59dSLABBE Corentin 						   frame_len, priv->dma_buf_sz);
3441e527c4a7SGiuseppe CAVALLARO 				priv->dev->stats.rx_length_errors++;
344207b39753SAaro Koskinen 				continue;
3443e527c4a7SGiuseppe CAVALLARO 			}
3444e527c4a7SGiuseppe CAVALLARO 
34457ac6653aSJeff Kirsher 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3446ceb69499SGiuseppe CAVALLARO 			 * Type frames (LLC/LLC-SNAP)
3447565020aaSJose Abreu 			 *
3448565020aaSJose Abreu 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3449565020aaSJose Abreu 			 * feature is always disabled and packets need to be
3450565020aaSJose Abreu 			 * stripped manually.
3451ceb69499SGiuseppe CAVALLARO 			 */
3452565020aaSJose Abreu 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3453565020aaSJose Abreu 			    unlikely(status != llc_snap))
34547ac6653aSJeff Kirsher 				frame_len -= ETH_FCS_LEN;
34557ac6653aSJeff Kirsher 
345683d7af64SGiuseppe CAVALLARO 			if (netif_msg_rx_status(priv)) {
345738ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3458f748be53SAlexandre TORGUE 					   p, entry, des);
345938ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
346083d7af64SGiuseppe CAVALLARO 					   frame_len, status);
346183d7af64SGiuseppe CAVALLARO 			}
346222ad3838SGiuseppe Cavallaro 
3463f748be53SAlexandre TORGUE 			/* Zero-copy is always used, for all frame sizes,
3464f748be53SAlexandre TORGUE 			 * in the case of GMAC4 because it always needs
3465f748be53SAlexandre TORGUE 			 * to refill the used descriptors.
3466f748be53SAlexandre TORGUE 			 */
34677d9e6c5aSJose Abreu 			if (unlikely(!xmac &&
3468f748be53SAlexandre TORGUE 				     ((frame_len < priv->rx_copybreak) ||
346954139cf3SJoao Pinto 				     stmmac_rx_threshold_count(rx_q)))) {
347022ad3838SGiuseppe Cavallaro 				skb = netdev_alloc_skb_ip_align(priv->dev,
347122ad3838SGiuseppe Cavallaro 								frame_len);
347222ad3838SGiuseppe Cavallaro 				if (unlikely(!skb)) {
347322ad3838SGiuseppe Cavallaro 					if (net_ratelimit())
347422ad3838SGiuseppe Cavallaro 						dev_warn(priv->device,
347522ad3838SGiuseppe Cavallaro 							 "packet dropped\n");
347622ad3838SGiuseppe Cavallaro 					priv->dev->stats.rx_dropped++;
347707b39753SAaro Koskinen 					continue;
347822ad3838SGiuseppe Cavallaro 				}
347922ad3838SGiuseppe Cavallaro 
348022ad3838SGiuseppe Cavallaro 				dma_sync_single_for_cpu(priv->device,
348154139cf3SJoao Pinto 							rx_q->rx_skbuff_dma
348222ad3838SGiuseppe Cavallaro 							[entry], frame_len,
348322ad3838SGiuseppe Cavallaro 							DMA_FROM_DEVICE);
348422ad3838SGiuseppe Cavallaro 				skb_copy_to_linear_data(skb,
348554139cf3SJoao Pinto 							rx_q->
348622ad3838SGiuseppe Cavallaro 							rx_skbuff[entry]->data,
348722ad3838SGiuseppe Cavallaro 							frame_len);
348822ad3838SGiuseppe Cavallaro 
348922ad3838SGiuseppe Cavallaro 				skb_put(skb, frame_len);
349022ad3838SGiuseppe Cavallaro 				dma_sync_single_for_device(priv->device,
349154139cf3SJoao Pinto 							   rx_q->rx_skbuff_dma
349222ad3838SGiuseppe Cavallaro 							   [entry], frame_len,
349322ad3838SGiuseppe Cavallaro 							   DMA_FROM_DEVICE);
349422ad3838SGiuseppe Cavallaro 			} else {
349554139cf3SJoao Pinto 				skb = rx_q->rx_skbuff[entry];
34967ac6653aSJeff Kirsher 				if (unlikely(!skb)) {
3497972c9be7SAaro Koskinen 					if (net_ratelimit())
349838ddc59dSLABBE Corentin 						netdev_err(priv->dev,
349938ddc59dSLABBE Corentin 							   "%s: Inconsistent Rx chain\n",
35007ac6653aSJeff Kirsher 							   priv->dev->name);
35017ac6653aSJeff Kirsher 					priv->dev->stats.rx_dropped++;
350207b39753SAaro Koskinen 					continue;
35037ac6653aSJeff Kirsher 				}
35047ac6653aSJeff Kirsher 				prefetch(skb->data - NET_IP_ALIGN);
350554139cf3SJoao Pinto 				rx_q->rx_skbuff[entry] = NULL;
350654139cf3SJoao Pinto 				rx_q->rx_zeroc_thresh++;
35077ac6653aSJeff Kirsher 
35087ac6653aSJeff Kirsher 				skb_put(skb, frame_len);
35097ac6653aSJeff Kirsher 				dma_unmap_single(priv->device,
351054139cf3SJoao Pinto 						 rx_q->rx_skbuff_dma[entry],
351122ad3838SGiuseppe Cavallaro 						 priv->dma_buf_sz,
351222ad3838SGiuseppe Cavallaro 						 DMA_FROM_DEVICE);
351322ad3838SGiuseppe Cavallaro 			}
351422ad3838SGiuseppe Cavallaro 
35157ac6653aSJeff Kirsher 			if (netif_msg_pktdata(priv)) {
351638ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "frame received (%dbytes)",
351738ddc59dSLABBE Corentin 					   frame_len);
35187ac6653aSJeff Kirsher 				print_pkt(skb->data, frame_len);
35197ac6653aSJeff Kirsher 			}
352083d7af64SGiuseppe CAVALLARO 
3521ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3522ba1ffd74SGiuseppe CAVALLARO 
3523b9381985SVince Bridgers 			stmmac_rx_vlan(priv->dev, skb);
3524b9381985SVince Bridgers 
35257ac6653aSJeff Kirsher 			skb->protocol = eth_type_trans(skb, priv->dev);
35267ac6653aSJeff Kirsher 
3527ceb69499SGiuseppe CAVALLARO 			if (unlikely(!coe))
35287ac6653aSJeff Kirsher 				skb_checksum_none_assert(skb);
352962a2ab93SGiuseppe CAVALLARO 			else
35307ac6653aSJeff Kirsher 				skb->ip_summed = CHECKSUM_UNNECESSARY;
353162a2ab93SGiuseppe CAVALLARO 
35324ccb4585SJose Abreu 			napi_gro_receive(&ch->rx_napi, skb);
35337ac6653aSJeff Kirsher 
35347ac6653aSJeff Kirsher 			priv->dev->stats.rx_packets++;
35357ac6653aSJeff Kirsher 			priv->dev->stats.rx_bytes += frame_len;
35367ac6653aSJeff Kirsher 		}
35377ac6653aSJeff Kirsher 	}
35387ac6653aSJeff Kirsher 
353954139cf3SJoao Pinto 	stmmac_rx_refill(priv, queue);
35407ac6653aSJeff Kirsher 
35417ac6653aSJeff Kirsher 	priv->xstats.rx_pkt_n += count;
35427ac6653aSJeff Kirsher 
35437ac6653aSJeff Kirsher 	return count;
35447ac6653aSJeff Kirsher }
35457ac6653aSJeff Kirsher 
35464ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
35477ac6653aSJeff Kirsher {
35488fce3331SJose Abreu 	struct stmmac_channel *ch =
35494ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, rx_napi);
35508fce3331SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
35518fce3331SJose Abreu 	u32 chan = ch->index;
35524ccb4585SJose Abreu 	int work_done;
35537ac6653aSJeff Kirsher 
35549125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
3555ce736788SJoao Pinto 
35564ccb4585SJose Abreu 	work_done = stmmac_rx(priv, budget, chan);
35574ccb4585SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done))
35584ccb4585SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
35594ccb4585SJose Abreu 	return work_done;
35604ccb4585SJose Abreu }
3561ce736788SJoao Pinto 
35624ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
35634ccb4585SJose Abreu {
35644ccb4585SJose Abreu 	struct stmmac_channel *ch =
35654ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, tx_napi);
35664ccb4585SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
35674ccb4585SJose Abreu 	struct stmmac_tx_queue *tx_q;
35684ccb4585SJose Abreu 	u32 chan = ch->index;
35694ccb4585SJose Abreu 	int work_done;
35704ccb4585SJose Abreu 
35714ccb4585SJose Abreu 	priv->xstats.napi_poll++;
35724ccb4585SJose Abreu 
35734ccb4585SJose Abreu 	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3574fa0be0a4SJose Abreu 	work_done = min(work_done, budget);
35758fce3331SJose Abreu 
35764ccb4585SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done))
35778fce3331SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
35784ccb4585SJose Abreu 
35794ccb4585SJose Abreu 	/* Force transmission restart */
35804ccb4585SJose Abreu 	tx_q = &priv->tx_queue[chan];
35814ccb4585SJose Abreu 	if (tx_q->cur_tx != tx_q->dirty_tx) {
35824ccb4585SJose Abreu 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
35834ccb4585SJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
35844ccb4585SJose Abreu 				       chan);
3585fa0be0a4SJose Abreu 	}
35868fce3331SJose Abreu 
35877ac6653aSJeff Kirsher 	return work_done;
35887ac6653aSJeff Kirsher }
35897ac6653aSJeff Kirsher 
35907ac6653aSJeff Kirsher /**
35917ac6653aSJeff Kirsher  *  stmmac_tx_timeout
35927ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
35937ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
35947284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
35957ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
35967ac6653aSJeff Kirsher  *   in order to transmit a new packet.
35977ac6653aSJeff Kirsher  */
35987ac6653aSJeff Kirsher static void stmmac_tx_timeout(struct net_device *dev)
35997ac6653aSJeff Kirsher {
36007ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
36017ac6653aSJeff Kirsher 
360234877a15SJose Abreu 	stmmac_global_err(priv);
36037ac6653aSJeff Kirsher }
36047ac6653aSJeff Kirsher 
36057ac6653aSJeff Kirsher /**
360601789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
36077ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
36087ac6653aSJeff Kirsher  *  Description:
36097ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
36107ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
36117ac6653aSJeff Kirsher  *  Return value:
36127ac6653aSJeff Kirsher  *  void.
36137ac6653aSJeff Kirsher  */
361401789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
36157ac6653aSJeff Kirsher {
36167ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
36177ac6653aSJeff Kirsher 
3618c10d4c82SJose Abreu 	stmmac_set_filter(priv, priv->hw, dev);
36197ac6653aSJeff Kirsher }
36207ac6653aSJeff Kirsher 
36217ac6653aSJeff Kirsher /**
36227ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
36237ac6653aSJeff Kirsher  *  @dev : device pointer.
36247ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
36257ac6653aSJeff Kirsher  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
36267ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
36277ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
36287ac6653aSJeff Kirsher  *  Return value:
36297ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
36307ac6653aSJeff Kirsher  *  file on failure.
36317ac6653aSJeff Kirsher  */
36327ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
36337ac6653aSJeff Kirsher {
363438ddc59dSLABBE Corentin 	struct stmmac_priv *priv = netdev_priv(dev);
363538ddc59dSLABBE Corentin 
36367ac6653aSJeff Kirsher 	if (netif_running(dev)) {
363738ddc59dSLABBE Corentin 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
36387ac6653aSJeff Kirsher 		return -EBUSY;
36397ac6653aSJeff Kirsher 	}
36407ac6653aSJeff Kirsher 
36417ac6653aSJeff Kirsher 	dev->mtu = new_mtu;
3642f748be53SAlexandre TORGUE 
36437ac6653aSJeff Kirsher 	netdev_update_features(dev);
36447ac6653aSJeff Kirsher 
36457ac6653aSJeff Kirsher 	return 0;
36467ac6653aSJeff Kirsher }
36477ac6653aSJeff Kirsher 
3648c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
3649c8f44affSMichał Mirosław 					     netdev_features_t features)
36507ac6653aSJeff Kirsher {
36517ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
36527ac6653aSJeff Kirsher 
365338912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
36547ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
3655d2afb5bdSGiuseppe CAVALLARO 
36567ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
3657a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
36587ac6653aSJeff Kirsher 
36597ac6653aSJeff Kirsher 	/* Some GMAC devices have buggy Jumbo frame support that
36607ac6653aSJeff Kirsher 	 * needs to have the Tx COE disabled for oversized frames
36617ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
3662ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and do not use SF.
3663ceb69499SGiuseppe CAVALLARO 	 */
36647ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3665a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
36667ac6653aSJeff Kirsher 
3667f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
3668f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3669f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
3670f748be53SAlexandre TORGUE 			priv->tso = true;
3671f748be53SAlexandre TORGUE 		else
3672f748be53SAlexandre TORGUE 			priv->tso = false;
3673f748be53SAlexandre TORGUE 	}
3674f748be53SAlexandre TORGUE 
36757ac6653aSJeff Kirsher 	return features;
36767ac6653aSJeff Kirsher }
36777ac6653aSJeff Kirsher 
3678d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
3679d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
3680d2afb5bdSGiuseppe CAVALLARO {
3681d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
3682d2afb5bdSGiuseppe CAVALLARO 
3683d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type in case of csum is supporting */
3684d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
3685d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
3686d2afb5bdSGiuseppe CAVALLARO 	else
3687d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
3688d2afb5bdSGiuseppe CAVALLARO 	/* No check needed because rx_coe has been set before and it will be
3689d2afb5bdSGiuseppe CAVALLARO 	 * fixed in case of issue.
3690d2afb5bdSGiuseppe CAVALLARO 	 */
3691c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
3692d2afb5bdSGiuseppe CAVALLARO 
3693d2afb5bdSGiuseppe CAVALLARO 	return 0;
3694d2afb5bdSGiuseppe CAVALLARO }
3695d2afb5bdSGiuseppe CAVALLARO 
369632ceabcaSGiuseppe CAVALLARO /**
369732ceabcaSGiuseppe CAVALLARO  *  stmmac_interrupt - main ISR
369832ceabcaSGiuseppe CAVALLARO  *  @irq: interrupt number.
369932ceabcaSGiuseppe CAVALLARO  *  @dev_id: to pass the net device pointer.
370032ceabcaSGiuseppe CAVALLARO  *  Description: this is the main driver interrupt service routine.
3701732fdf0eSGiuseppe CAVALLARO  *  It can call:
3702732fdf0eSGiuseppe CAVALLARO  *  o DMA service routine (to manage incoming frame reception and transmission
3703732fdf0eSGiuseppe CAVALLARO  *    status)
3704732fdf0eSGiuseppe CAVALLARO  *  o Core interrupts to manage: remote wake-up, management counter, LPI
370532ceabcaSGiuseppe CAVALLARO  *    interrupts.
370632ceabcaSGiuseppe CAVALLARO  */
37077ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
37087ac6653aSJeff Kirsher {
37097ac6653aSJeff Kirsher 	struct net_device *dev = (struct net_device *)dev_id;
37107ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
37117bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
37127bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
37137bac4e1eSJoao Pinto 	u32 queues_count;
37147bac4e1eSJoao Pinto 	u32 queue;
37157d9e6c5aSJose Abreu 	bool xmac;
37167bac4e1eSJoao Pinto 
37177d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
37187bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
37197ac6653aSJeff Kirsher 
372089f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
372189f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
372289f7f2cfSSrinivas Kandagatla 
37237ac6653aSJeff Kirsher 	if (unlikely(!dev)) {
372438ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
37257ac6653aSJeff Kirsher 		return IRQ_NONE;
37267ac6653aSJeff Kirsher 	}
37277ac6653aSJeff Kirsher 
372834877a15SJose Abreu 	/* Check if adapter is up */
372934877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
373034877a15SJose Abreu 		return IRQ_HANDLED;
37318bf993a5SJose Abreu 	/* Check if a fatal error happened */
37328bf993a5SJose Abreu 	if (stmmac_safety_feat_interrupt(priv))
37338bf993a5SJose Abreu 		return IRQ_HANDLED;
373434877a15SJose Abreu 
37357ac6653aSJeff Kirsher 	/* To handle GMAC own interrupts */
37367d9e6c5aSJose Abreu 	if ((priv->plat->has_gmac) || xmac) {
3737c10d4c82SJose Abreu 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
373861fac60aSJose Abreu 		int mtl_status;
37398f71a88dSJoao Pinto 
3740d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
3741d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
37420982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3743d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
37440982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3745d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
37467bac4e1eSJoao Pinto 		}
37477bac4e1eSJoao Pinto 
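		/* Handle the per-queue MTL interrupts and, on RX FIFO
		 * overflow, restart reception by re-writing the RX tail
		 * pointer.
		 */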
37487bac4e1eSJoao Pinto 		for (queue = 0; queue < queues_count; queue++) {
374961fac60aSJose Abreu 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
375054139cf3SJoao Pinto 
375161fac60aSJose Abreu 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
375261fac60aSJose Abreu 								queue);
375361fac60aSJose Abreu 			if (mtl_status != -EINVAL)
375461fac60aSJose Abreu 				status |= mtl_status;
37557bac4e1eSJoao Pinto 
3756a4e887faSJose Abreu 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
375761fac60aSJose Abreu 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
375854139cf3SJoao Pinto 						       rx_q->rx_tail_addr,
37597bac4e1eSJoao Pinto 						       queue);
37607bac4e1eSJoao Pinto 		}
376170523e63SGiuseppe CAVALLARO 
376270523e63SGiuseppe CAVALLARO 		/* PCS link status */
37633fe5cadbSGiuseppe CAVALLARO 		if (priv->hw->pcs) {
376470523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
376570523e63SGiuseppe CAVALLARO 				netif_carrier_on(dev);
376670523e63SGiuseppe CAVALLARO 			else
376770523e63SGiuseppe CAVALLARO 				netif_carrier_off(dev);
376870523e63SGiuseppe CAVALLARO 		}
3769d765955dSGiuseppe CAVALLARO 	}
3770d765955dSGiuseppe CAVALLARO 
3771d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
37727ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
37737ac6653aSJeff Kirsher 
37747ac6653aSJeff Kirsher 	return IRQ_HANDLED;
37757ac6653aSJeff Kirsher }
37767ac6653aSJeff Kirsher 
37777ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
37787ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools
3779ceb69499SGiuseppe CAVALLARO  * to allow network I/O with interrupts disabled.
3780ceb69499SGiuseppe CAVALLARO  */
37817ac6653aSJeff Kirsher static void stmmac_poll_controller(struct net_device *dev)
37827ac6653aSJeff Kirsher {
37837ac6653aSJeff Kirsher 	disable_irq(dev->irq);
37847ac6653aSJeff Kirsher 	stmmac_interrupt(dev->irq, dev);
37857ac6653aSJeff Kirsher 	enable_irq(dev->irq);
37867ac6653aSJeff Kirsher }
37877ac6653aSJeff Kirsher #endif
37887ac6653aSJeff Kirsher 
37897ac6653aSJeff Kirsher /**
37907ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
37917ac6653aSJeff Kirsher  *  @dev: Device pointer.
37927ac6653aSJeff Kirsher  *  @rq: An IOCTL-specific structure that can contain a pointer to
37937ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
37947ac6653aSJeff Kirsher  *  @cmd: IOCTL command
37957ac6653aSJeff Kirsher  *  Description:
379632ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
37977ac6653aSJeff Kirsher  */
37987ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
37997ac6653aSJeff Kirsher {
3800891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
38017ac6653aSJeff Kirsher 
38027ac6653aSJeff Kirsher 	if (!netif_running(dev))
38037ac6653aSJeff Kirsher 		return -EINVAL;
38047ac6653aSJeff Kirsher 
3805891434b1SRayagond Kokatanur 	switch (cmd) {
3806891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
3807891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
3808891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
3809d6d50c7eSPhilippe Reynes 		if (!dev->phydev)
38107ac6653aSJeff Kirsher 			return -EINVAL;
3811d6d50c7eSPhilippe Reynes 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3812891434b1SRayagond Kokatanur 		break;
3813891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
3814d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_set(dev, rq);
3815d6228b7cSArtem Panfilov 		break;
3816d6228b7cSArtem Panfilov 	case SIOCGHWTSTAMP:
3817d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_get(dev, rq);
3818891434b1SRayagond Kokatanur 		break;
3819891434b1SRayagond Kokatanur 	default:
3820891434b1SRayagond Kokatanur 		break;
3821891434b1SRayagond Kokatanur 	}
38227ac6653aSJeff Kirsher 
38237ac6653aSJeff Kirsher 	return ret;
38247ac6653aSJeff Kirsher }
38257ac6653aSJeff Kirsher 
38264dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
38274dbbe8ddSJose Abreu 				    void *cb_priv)
38284dbbe8ddSJose Abreu {
38294dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
38304dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
38314dbbe8ddSJose Abreu 
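	/* Quiesce all NAPI instances while the classifier offload is
	 * being (re)configured.
	 */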
38324dbbe8ddSJose Abreu 	stmmac_disable_all_queues(priv);
38334dbbe8ddSJose Abreu 
38344dbbe8ddSJose Abreu 	switch (type) {
38354dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
38364dbbe8ddSJose Abreu 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
38374dbbe8ddSJose Abreu 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
38384dbbe8ddSJose Abreu 		break;
38394dbbe8ddSJose Abreu 	default:
38404dbbe8ddSJose Abreu 		break;
38414dbbe8ddSJose Abreu 	}
38424dbbe8ddSJose Abreu 
38434dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
38444dbbe8ddSJose Abreu 	return ret;
38454dbbe8ddSJose Abreu }
38464dbbe8ddSJose Abreu 
38474dbbe8ddSJose Abreu static int stmmac_setup_tc_block(struct stmmac_priv *priv,
38484dbbe8ddSJose Abreu 				 struct tc_block_offload *f)
38494dbbe8ddSJose Abreu {
38504dbbe8ddSJose Abreu 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
38514dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
38524dbbe8ddSJose Abreu 
38534dbbe8ddSJose Abreu 	switch (f->command) {
38544dbbe8ddSJose Abreu 	case TC_BLOCK_BIND:
38554dbbe8ddSJose Abreu 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
385660513bd8SJohn Hurley 				priv, priv, f->extack);
38574dbbe8ddSJose Abreu 	case TC_BLOCK_UNBIND:
38584dbbe8ddSJose Abreu 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
38594dbbe8ddSJose Abreu 		return 0;
38604dbbe8ddSJose Abreu 	default:
38614dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
38624dbbe8ddSJose Abreu 	}
38634dbbe8ddSJose Abreu }
38644dbbe8ddSJose Abreu 
38654dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
38664dbbe8ddSJose Abreu 			   void *type_data)
38674dbbe8ddSJose Abreu {
38684dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
38694dbbe8ddSJose Abreu 
38704dbbe8ddSJose Abreu 	switch (type) {
38714dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
38724dbbe8ddSJose Abreu 		return stmmac_setup_tc_block(priv, type_data);
38731f705bc6SJose Abreu 	case TC_SETUP_QDISC_CBS:
38741f705bc6SJose Abreu 		return stmmac_tc_setup_cbs(priv, priv, type_data);
38754dbbe8ddSJose Abreu 	default:
38764dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
38774dbbe8ddSJose Abreu 	}
38784dbbe8ddSJose Abreu }
38794dbbe8ddSJose Abreu 
3880a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3881a830405eSBhadram Varka {
3882a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
3883a830405eSBhadram Varka 	int ret = 0;
3884a830405eSBhadram Varka 
3885a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
3886a830405eSBhadram Varka 	if (ret)
3887a830405eSBhadram Varka 		return ret;
3888a830405eSBhadram Varka 
3889c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3890a830405eSBhadram Varka 
3891a830405eSBhadram Varka 	return ret;
3892a830405eSBhadram Varka }
3893a830405eSBhadram Varka 
389450fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
38957ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
38967ac29055SGiuseppe CAVALLARO 
3897c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
3898c24602efSGiuseppe CAVALLARO 			       struct seq_file *seq)
38997ac29055SGiuseppe CAVALLARO {
39007ac29055SGiuseppe CAVALLARO 	int i;
3901c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3902c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
39037ac29055SGiuseppe CAVALLARO 
3904c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
3905c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
3906c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3907c24602efSGiuseppe CAVALLARO 				   i, (unsigned int)virt_to_phys(ep),
3908f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
3909f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
3910f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
3911f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
3912c24602efSGiuseppe CAVALLARO 			ep++;
3913c24602efSGiuseppe CAVALLARO 		} else {
3914c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
391566c25f6eSNiklas Cassel 				   i, (unsigned int)virt_to_phys(p),
3916f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3917f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3918c24602efSGiuseppe CAVALLARO 			p++;
3919c24602efSGiuseppe CAVALLARO 		}
39207ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
39217ac29055SGiuseppe CAVALLARO 	}
3922c24602efSGiuseppe CAVALLARO }
39237ac29055SGiuseppe CAVALLARO 
3924fb0d9c63SYangtao Li static int stmmac_rings_status_show(struct seq_file *seq, void *v)
3925c24602efSGiuseppe CAVALLARO {
3926c24602efSGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
3927c24602efSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
392854139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
3929ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
393054139cf3SJoao Pinto 	u32 queue;
393154139cf3SJoao Pinto 
39325f2b8b62SThierry Reding 	if ((dev->flags & IFF_UP) == 0)
39335f2b8b62SThierry Reding 		return 0;
39345f2b8b62SThierry Reding 
393554139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
393654139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
393754139cf3SJoao Pinto 
393854139cf3SJoao Pinto 		seq_printf(seq, "RX Queue %d:\n", queue);
39397ac29055SGiuseppe CAVALLARO 
3940c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc) {
394154139cf3SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
394254139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_erx,
394354139cf3SJoao Pinto 					   DMA_RX_SIZE, 1, seq);
394454139cf3SJoao Pinto 		} else {
394554139cf3SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
394654139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_rx,
394754139cf3SJoao Pinto 					   DMA_RX_SIZE, 0, seq);
394854139cf3SJoao Pinto 		}
394954139cf3SJoao Pinto 	}
395054139cf3SJoao Pinto 
3951ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
3952ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3953ce736788SJoao Pinto 
3954ce736788SJoao Pinto 		seq_printf(seq, "TX Queue %d:\n", queue);
3955ce736788SJoao Pinto 
395654139cf3SJoao Pinto 		if (priv->extend_desc) {
3957ce736788SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
3958ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_etx,
3959ce736788SJoao Pinto 					   DMA_TX_SIZE, 1, seq);
3960c24602efSGiuseppe CAVALLARO 		} else {
3961ce736788SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
3962ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_tx,
3963ce736788SJoao Pinto 					   DMA_TX_SIZE, 0, seq);
3964ce736788SJoao Pinto 		}
39657ac29055SGiuseppe CAVALLARO 	}
39667ac29055SGiuseppe CAVALLARO 
39677ac29055SGiuseppe CAVALLARO 	return 0;
39687ac29055SGiuseppe CAVALLARO }
3969fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
39707ac29055SGiuseppe CAVALLARO 
3971fb0d9c63SYangtao Li static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
3972e7434821SGiuseppe CAVALLARO {
3973e7434821SGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
3974e7434821SGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
3975e7434821SGiuseppe CAVALLARO 
397619e30c14SGiuseppe CAVALLARO 	if (!priv->hw_cap_support) {
3977e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "DMA HW features not supported\n");
3978e7434821SGiuseppe CAVALLARO 		return 0;
3979e7434821SGiuseppe CAVALLARO 	}
3980e7434821SGiuseppe CAVALLARO 
3981e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
3982e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tDMA HW features\n");
3983e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
3984e7434821SGiuseppe CAVALLARO 
398522d3efe5SPavel Machek 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3986e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
398722d3efe5SPavel Machek 	seq_printf(seq, "\t1000 Mbps: %s\n",
3988e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
398922d3efe5SPavel Machek 	seq_printf(seq, "\tHalf duplex: %s\n",
3990e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3991e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tHash Filter: %s\n",
3992e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3993e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3994e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
39958d45e42bSLABBE Corentin 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3996e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pcs) ? "Y" : "N");
3997e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3998e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3999e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
4000e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4001e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
4002e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4003e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRMON module: %s\n",
4004e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rmon) ? "Y" : "N");
4005e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4006e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
4007e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4008e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
400922d3efe5SPavel Machek 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4010e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.eee) ? "Y" : "N");
4011e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4012e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4013e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
4014f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4015f748be53SAlexandre TORGUE 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4016f748be53SAlexandre TORGUE 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
4017f748be53SAlexandre TORGUE 	} else {
4018e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4019e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4020e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4021e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4022f748be53SAlexandre TORGUE 	}
4023e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4024e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4025e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4026e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_rx_channel);
4027e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4028e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_tx_channel);
4029e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4030e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
4031e7434821SGiuseppe CAVALLARO 
4032e7434821SGiuseppe CAVALLARO 	return 0;
4033e7434821SGiuseppe CAVALLARO }
4034fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4035e7434821SGiuseppe CAVALLARO 
40367ac29055SGiuseppe CAVALLARO static int stmmac_init_fs(struct net_device *dev)
40377ac29055SGiuseppe CAVALLARO {
4038466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
40397ac29055SGiuseppe CAVALLARO 
4040466c5ac8SMathieu Olivari 	/* Create per netdev entries */
4041466c5ac8SMathieu Olivari 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4042466c5ac8SMathieu Olivari 
4043466c5ac8SMathieu Olivari 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
404438ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
40457ac29055SGiuseppe CAVALLARO 
40467ac29055SGiuseppe CAVALLARO 		return -ENOMEM;
40477ac29055SGiuseppe CAVALLARO 	}
40487ac29055SGiuseppe CAVALLARO 
40497ac29055SGiuseppe CAVALLARO 	/* Entry to report DMA RX/TX rings */
4050466c5ac8SMathieu Olivari 	priv->dbgfs_rings_status =
4051d3757ba4SJoe Perches 		debugfs_create_file("descriptors_status", 0444,
4052466c5ac8SMathieu Olivari 				    priv->dbgfs_dir, dev,
40537ac29055SGiuseppe CAVALLARO 				    &stmmac_rings_status_fops);
40547ac29055SGiuseppe CAVALLARO 
4055466c5ac8SMathieu Olivari 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
405638ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4057466c5ac8SMathieu Olivari 		debugfs_remove_recursive(priv->dbgfs_dir);
40587ac29055SGiuseppe CAVALLARO 
40597ac29055SGiuseppe CAVALLARO 		return -ENOMEM;
40607ac29055SGiuseppe CAVALLARO 	}
40617ac29055SGiuseppe CAVALLARO 
4062e7434821SGiuseppe CAVALLARO 	/* Entry to report the DMA HW features */
4063d3757ba4SJoe Perches 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4064466c5ac8SMathieu Olivari 						  priv->dbgfs_dir,
4065e7434821SGiuseppe CAVALLARO 						  dev, &stmmac_dma_cap_fops);
4066e7434821SGiuseppe CAVALLARO 
4067466c5ac8SMathieu Olivari 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
406838ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4069466c5ac8SMathieu Olivari 		debugfs_remove_recursive(priv->dbgfs_dir);
4070e7434821SGiuseppe CAVALLARO 
4071e7434821SGiuseppe CAVALLARO 		return -ENOMEM;
4072e7434821SGiuseppe CAVALLARO 	}
4073e7434821SGiuseppe CAVALLARO 
40747ac29055SGiuseppe CAVALLARO 	return 0;
40757ac29055SGiuseppe CAVALLARO }
40767ac29055SGiuseppe CAVALLARO 
4077466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev)
40787ac29055SGiuseppe CAVALLARO {
4079466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
4080466c5ac8SMathieu Olivari 
4081466c5ac8SMathieu Olivari 	debugfs_remove_recursive(priv->dbgfs_dir);
40827ac29055SGiuseppe CAVALLARO }
408350fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
40847ac29055SGiuseppe CAVALLARO 
40857ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = {
40867ac6653aSJeff Kirsher 	.ndo_open = stmmac_open,
40877ac6653aSJeff Kirsher 	.ndo_start_xmit = stmmac_xmit,
40887ac6653aSJeff Kirsher 	.ndo_stop = stmmac_release,
40897ac6653aSJeff Kirsher 	.ndo_change_mtu = stmmac_change_mtu,
40907ac6653aSJeff Kirsher 	.ndo_fix_features = stmmac_fix_features,
4091d2afb5bdSGiuseppe CAVALLARO 	.ndo_set_features = stmmac_set_features,
409201789349SJiri Pirko 	.ndo_set_rx_mode = stmmac_set_rx_mode,
40937ac6653aSJeff Kirsher 	.ndo_tx_timeout = stmmac_tx_timeout,
40947ac6653aSJeff Kirsher 	.ndo_do_ioctl = stmmac_ioctl,
40954dbbe8ddSJose Abreu 	.ndo_setup_tc = stmmac_setup_tc,
40967ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
40977ac6653aSJeff Kirsher 	.ndo_poll_controller = stmmac_poll_controller,
40987ac6653aSJeff Kirsher #endif
4099a830405eSBhadram Varka 	.ndo_set_mac_address = stmmac_set_mac_address,
41007ac6653aSJeff Kirsher };
41017ac6653aSJeff Kirsher 
410234877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv)
410334877a15SJose Abreu {
410434877a15SJose Abreu 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
410534877a15SJose Abreu 		return;
410634877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
410734877a15SJose Abreu 		return;
410834877a15SJose Abreu 
410934877a15SJose Abreu 	netdev_err(priv->dev, "Reset adapter.\n");
411034877a15SJose Abreu 
411134877a15SJose Abreu 	rtnl_lock();
411234877a15SJose Abreu 	netif_trans_update(priv->dev);
411334877a15SJose Abreu 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
411434877a15SJose Abreu 		usleep_range(1000, 2000);
411534877a15SJose Abreu 
411634877a15SJose Abreu 	set_bit(STMMAC_DOWN, &priv->state);
411734877a15SJose Abreu 	dev_close(priv->dev);
411800f54e68SPetr Machata 	dev_open(priv->dev, NULL);
411934877a15SJose Abreu 	clear_bit(STMMAC_DOWN, &priv->state);
412034877a15SJose Abreu 	clear_bit(STMMAC_RESETING, &priv->state);
412134877a15SJose Abreu 	rtnl_unlock();
412234877a15SJose Abreu }
412334877a15SJose Abreu 
412434877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
412534877a15SJose Abreu {
412634877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
412734877a15SJose Abreu 			service_task);
412834877a15SJose Abreu 
412934877a15SJose Abreu 	stmmac_reset_subtask(priv);
413034877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
413134877a15SJose Abreu }
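/*
 * Scheduling sketch, not code from this excerpt: the error paths elsewhere
 * in the driver are expected to request a reset and then queue the service
 * task on priv->wq, roughly as follows (treat this as an assumption about
 * the callers):
 *
 *	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
 *	if (!test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
 *		queue_work(priv->wq, &priv->service_task);
 */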
413234877a15SJose Abreu 
41337ac6653aSJeff Kirsher /**
4134cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
413532ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
4136732fdf0eSGiuseppe CAVALLARO  *  Description: this function configures the MAC device according to
4137732fdf0eSGiuseppe CAVALLARO  *  platform parameters or the HW capability register. It prepares the
4138732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain mode and to set up either enhanced or
4139732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
4140cf3f047bSGiuseppe CAVALLARO  */
4141cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
4142cf3f047bSGiuseppe CAVALLARO {
41435f0456b4SJose Abreu 	int ret;
4144cf3f047bSGiuseppe CAVALLARO 
41459f93ac8dSLABBE Corentin 	/* dwmac-sun8i only works in chain mode */
41469f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i)
41479f93ac8dSLABBE Corentin 		chain_mode = 1;
41485f0456b4SJose Abreu 	priv->chain_mode = chain_mode;
41499f93ac8dSLABBE Corentin 
41505f0456b4SJose Abreu 	/* Initialize HW Interface */
41515f0456b4SJose Abreu 	ret = stmmac_hwif_init(priv);
41525f0456b4SJose Abreu 	if (ret)
41535f0456b4SJose Abreu 		return ret;
41544a7d666aSGiuseppe CAVALLARO 
4155cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capability (GMAC cores newer than 3.50a) */
4156cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4157cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
415838ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
4159cf3f047bSGiuseppe CAVALLARO 
4160cf3f047bSGiuseppe CAVALLARO 		/* We can override some gmac/dma configuration fields that are
4161cf3f047bSGiuseppe CAVALLARO 		 * passed through the platform (e.g. enh_desc, tx_coe) with
4162cf3f047bSGiuseppe CAVALLARO 		 * the values from the HW capability register
4163cf3f047bSGiuseppe CAVALLARO 		 * (if supported).
4164cf3f047bSGiuseppe CAVALLARO 		 */
4165cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4166cf3f047bSGiuseppe CAVALLARO 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
41673fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
416838912bdbSDeepak SIKRI 
4169a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
4170a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
4171a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
4172a8df35d4SEzequiel Garcia 		else
417338912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4174a8df35d4SEzequiel Garcia 
4175f748be53SAlexandre TORGUE 		/* In case of GMAC4 rx_coe is from HW cap register. */
4176f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
417738912bdbSDeepak SIKRI 
417838912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
417938912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
418038912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
418138912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
418238912bdbSDeepak SIKRI 
418338ddc59dSLABBE Corentin 	} else {
418438ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
418538ddc59dSLABBE Corentin 	}
4186cf3f047bSGiuseppe CAVALLARO 
4187d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
4188d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
418938ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4190f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
419138ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4192d2afb5bdSGiuseppe CAVALLARO 	}
4193cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
419438ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
4195cf3f047bSGiuseppe CAVALLARO 
4196cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
419738ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On LAN supported\n");
4198cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
4199cf3f047bSGiuseppe CAVALLARO 	}
4200cf3f047bSGiuseppe CAVALLARO 
4201f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
420238ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
4203f748be53SAlexandre TORGUE 
42047cfde0afSJose Abreu 	/* Run HW quirks, if any */
42057cfde0afSJose Abreu 	if (priv->hwif_quirks) {
42067cfde0afSJose Abreu 		ret = priv->hwif_quirks(priv);
42077cfde0afSJose Abreu 		if (ret)
42087cfde0afSJose Abreu 			return ret;
42097cfde0afSJose Abreu 	}
42107cfde0afSJose Abreu 
42113b509466SJose Abreu 	/* Rx Watchdog is available in cores newer than 3.40.
42123b509466SJose Abreu 	 * In some cases, for example on buggy HW, this feature
42133b509466SJose Abreu 	 * has to be disabled; this can be done by passing the
42143b509466SJose Abreu 	 * riwt_off field from the platform.
42153b509466SJose Abreu 	 */
42163b509466SJose Abreu 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
42173b509466SJose Abreu 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
42183b509466SJose Abreu 		priv->use_riwt = 1;
42193b509466SJose Abreu 		dev_info(priv->device,
42203b509466SJose Abreu 			 "Enable RX Mitigation via HW Watchdog Timer\n");
42213b509466SJose Abreu 	}
42223b509466SJose Abreu 
4223c24602efSGiuseppe CAVALLARO 	return 0;
4224cf3f047bSGiuseppe CAVALLARO }
4225cf3f047bSGiuseppe CAVALLARO 
4226cf3f047bSGiuseppe CAVALLARO /**
4227bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
4228bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
4229ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
4230e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
4231bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function used to
4232bfab27a1SGiuseppe CAVALLARO  * allocate the net_device (via alloc_etherdev) and the private structure.
42339afec6efSAndy Shevchenko  * Return:
423415ffac73SJoachim Eastwood  * 0 on success, otherwise a negative errno.
42357ac6653aSJeff Kirsher  */
423615ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
4237cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
4238e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
42397ac6653aSJeff Kirsher {
4240bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
4241bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
42428fce3331SJose Abreu 	u32 queue, maxq;
4243c22a3f48SJoao Pinto 	int ret = 0;
42447ac6653aSJeff Kirsher 
4245c22a3f48SJoao Pinto 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4246c22a3f48SJoao Pinto 				  MTL_MAX_TX_QUEUES,
4247c22a3f48SJoao Pinto 				  MTL_MAX_RX_QUEUES);
424841de8d4cSJoe Perches 	if (!ndev)
424915ffac73SJoachim Eastwood 		return -ENOMEM;
42507ac6653aSJeff Kirsher 
4251bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
42527ac6653aSJeff Kirsher 
4253bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
4254bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
4255bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
4256bfab27a1SGiuseppe CAVALLARO 
4257bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
4258cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
4259cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
4260e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
4261e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
4262e56788cfSJoachim Eastwood 
4263e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
4264e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
4265e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
4266e56788cfSJoachim Eastwood 
4267e56788cfSJoachim Eastwood 	if (res->mac)
4268e56788cfSJoachim Eastwood 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4269bfab27a1SGiuseppe CAVALLARO 
4270a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
4271803f8fc4SJoachim Eastwood 
4272cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
4273cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
4274cf3f047bSGiuseppe CAVALLARO 
427534877a15SJose Abreu 	/* Allocate workqueue */
427634877a15SJose Abreu 	priv->wq = create_singlethread_workqueue("stmmac_wq");
427734877a15SJose Abreu 	if (!priv->wq) {
427834877a15SJose Abreu 		dev_err(priv->device, "failed to create workqueue\n");
4279b26322d2SDan Carpenter 		ret = -ENOMEM;
428034877a15SJose Abreu 		goto error_wq;
428134877a15SJose Abreu 	}
428234877a15SJose Abreu 
428334877a15SJose Abreu 	INIT_WORK(&priv->service_task, stmmac_service_task);
428434877a15SJose Abreu 
4285cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied XXX CRS XXX
4286ceb69499SGiuseppe CAVALLARO 	 * this needs to have multiple instances
4287ceb69499SGiuseppe CAVALLARO 	 */
4288cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
4289cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
4290cf3f047bSGiuseppe CAVALLARO 
429190f522a2SEugeniy Paltsev 	if (priv->plat->stmmac_rst) {
429290f522a2SEugeniy Paltsev 		ret = reset_control_assert(priv->plat->stmmac_rst);
4293f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
429490f522a2SEugeniy Paltsev 		/* Some reset controllers provide only a reset callback instead of
429590f522a2SEugeniy Paltsev 		 * the assert + deassert callback pair.
429690f522a2SEugeniy Paltsev 		 */
429790f522a2SEugeniy Paltsev 		if (ret == -ENOTSUPP)
429890f522a2SEugeniy Paltsev 			reset_control_reset(priv->plat->stmmac_rst);
429990f522a2SEugeniy Paltsev 	}
4300c5e4ddbdSChen-Yu Tsai 
4301cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
4302c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
4303c24602efSGiuseppe CAVALLARO 	if (ret)
430462866e98SChen-Yu Tsai 		goto error_hw_init;
4305cf3f047bSGiuseppe CAVALLARO 
4306c22a3f48SJoao Pinto 	/* Configure real RX and TX queues */
4307c02b7a91SJoao Pinto 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4308c02b7a91SJoao Pinto 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4309c22a3f48SJoao Pinto 
4310cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
4311cf3f047bSGiuseppe CAVALLARO 
4312cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4313cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
4314f748be53SAlexandre TORGUE 
43154dbbe8ddSJose Abreu 	ret = stmmac_tc_init(priv, priv);
43164dbbe8ddSJose Abreu 	if (!ret)
43174dbbe8ddSJose Abreu 		ndev->hw_features |= NETIF_F_HW_TC;
43194dbbe8ddSJose Abreu 
4320f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
43219edfa7daSNiklas Cassel 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4322f748be53SAlexandre TORGUE 		priv->tso = true;
432338ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
4324f748be53SAlexandre TORGUE 	}
4325bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4326bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
43277ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
43287ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
4329ab188e8fSElad Nachman 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
43307ac6653aSJeff Kirsher #endif
43317ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
43327ac6653aSJeff Kirsher 
433344770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
433444770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
433544770e11SJarod Wilson 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
433644770e11SJarod Wilson 		ndev->max_mtu = JUMBO_LEN;
43377d9e6c5aSJose Abreu 	else if (priv->plat->has_xgmac)
43387d9e6c5aSJose Abreu 		ndev->max_mtu = XGMAC_JUMBO_LEN;
433944770e11SJarod Wilson 	else
434044770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4341a2cd64f3SKweh, Hock Leong 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4342a2cd64f3SKweh, Hock Leong 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4343a2cd64f3SKweh, Hock Leong 	 */
4344a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4345a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
434644770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
4347a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
4348b618ab45SHeiner Kallweit 		dev_warn(priv->device,
4349a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu having invalid value (%d)\n",
4350a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
435144770e11SJarod Wilson 
43527ac6653aSJeff Kirsher 	if (flow_ctrl)
43537ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
43547ac6653aSJeff Kirsher 
43558fce3331SJose Abreu 	/* Setup channels NAPI */
43568fce3331SJose Abreu 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4357c22a3f48SJoao Pinto 
43588fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
43598fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
43608fce3331SJose Abreu 
43618fce3331SJose Abreu 		ch->priv_data = priv;
43628fce3331SJose Abreu 		ch->index = queue;
43638fce3331SJose Abreu 
43644ccb4585SJose Abreu 		if (queue < priv->plat->rx_queues_to_use) {
43654ccb4585SJose Abreu 			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
43668fce3331SJose Abreu 				       NAPI_POLL_WEIGHT);
4367c22a3f48SJoao Pinto 		}
43684ccb4585SJose Abreu 		if (queue < priv->plat->tx_queues_to_use) {
43694ccb4585SJose Abreu 			netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
43704ccb4585SJose Abreu 				       NAPI_POLL_WEIGHT);
43714ccb4585SJose Abreu 		}
43724ccb4585SJose Abreu 	}
43737ac6653aSJeff Kirsher 
437429555fa3SThierry Reding 	mutex_init(&priv->lock);
43757ac6653aSJeff Kirsher 
4376cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform
4377cd7201f4SGiuseppe CAVALLARO 	 * this means that the CSR Clock Range selection cannot be
4378cd7201f4SGiuseppe CAVALLARO 	 * changed at run-time and it is fixed. Otherwise, the driver will try to
4379cd7201f4SGiuseppe CAVALLARO 	 * set the MDC clock dynamically according to the actual csr
4380cd7201f4SGiuseppe CAVALLARO 	 * clock input.
4381cd7201f4SGiuseppe CAVALLARO 	 */
4382cd7201f4SGiuseppe CAVALLARO 	if (!priv->plat->clk_csr)
4383cd7201f4SGiuseppe CAVALLARO 		stmmac_clk_csr_set(priv);
4384cd7201f4SGiuseppe CAVALLARO 	else
4385cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
4386cd7201f4SGiuseppe CAVALLARO 
4387e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
4388e58bb43fSGiuseppe CAVALLARO 
43893fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
43903fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
43913fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
43924bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
43934bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
43944bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
4395b618ab45SHeiner Kallweit 			dev_err(priv->device,
439638ddc59dSLABBE Corentin 				"%s: MDIO bus (id: %d) registration failed\n",
43974bfcbd7aSFrancesco Virlinzi 				__func__, priv->plat->bus_id);
43986a81c26fSViresh Kumar 			goto error_mdio_register;
43994bfcbd7aSFrancesco Virlinzi 		}
4400e58bb43fSGiuseppe CAVALLARO 	}
44014bfcbd7aSFrancesco Virlinzi 
440257016590SFlorian Fainelli 	ret = register_netdev(ndev);
4403b2eb09afSFlorian Fainelli 	if (ret) {
4404b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
440557016590SFlorian Fainelli 			__func__, ret);
4406b2eb09afSFlorian Fainelli 		goto error_netdev_register;
4407b2eb09afSFlorian Fainelli 	}
44087ac6653aSJeff Kirsher 
44095f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
44105f2b8b62SThierry Reding 	ret = stmmac_init_fs(ndev);
44115f2b8b62SThierry Reding 	if (ret < 0)
44125f2b8b62SThierry Reding 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
44135f2b8b62SThierry Reding 			    __func__);
44145f2b8b62SThierry Reding #endif
44155f2b8b62SThierry Reding 
441657016590SFlorian Fainelli 	return ret;
44177ac6653aSJeff Kirsher 
44186a81c26fSViresh Kumar error_netdev_register:
4419b2eb09afSFlorian Fainelli 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4420b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4421b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4422b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
44237ac6653aSJeff Kirsher error_mdio_register:
44248fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
44258fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
4426c22a3f48SJoao Pinto 
44274ccb4585SJose Abreu 		if (queue < priv->plat->rx_queues_to_use)
44284ccb4585SJose Abreu 			netif_napi_del(&ch->rx_napi);
44294ccb4585SJose Abreu 		if (queue < priv->plat->tx_queues_to_use)
44304ccb4585SJose Abreu 			netif_napi_del(&ch->tx_napi);
4431c22a3f48SJoao Pinto 	}
443262866e98SChen-Yu Tsai error_hw_init:
443334877a15SJose Abreu 	destroy_workqueue(priv->wq);
443434877a15SJose Abreu error_wq:
44357ac6653aSJeff Kirsher 	free_netdev(ndev);
44367ac6653aSJeff Kirsher 
443715ffac73SJoachim Eastwood 	return ret;
44387ac6653aSJeff Kirsher }
4439b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
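/*
 * Illustrative sketch only: a platform "glue" back-end is expected to fill
 * in a struct stmmac_resources (base address, IRQs, optional MAC address)
 * and a struct plat_stmmacenet_data, then hand both to stmmac_dvr_probe().
 * The "foo_dwmac" name is made up, and the use of the common platform
 * helpers stmmac_get_platform_resources()/stmmac_probe_config_dt() is an
 * assumption about the surrounding glue code, not part of this file:
 *
 *	static int foo_dwmac_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat;
 *		struct stmmac_resources res;
 *		int ret;
 *
 *		ret = stmmac_get_platform_resources(pdev, &res);
 *		if (ret)
 *			return ret;
 *
 *		plat = stmmac_probe_config_dt(pdev, &res.mac);
 *		if (IS_ERR(plat))
 *			return PTR_ERR(plat);
 *
 *		return stmmac_dvr_probe(&pdev->dev, plat, &res);
 *	}
 */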
44407ac6653aSJeff Kirsher 
44417ac6653aSJeff Kirsher /**
44427ac6653aSJeff Kirsher  * stmmac_dvr_remove
4443f4e7bd81SJoachim Eastwood  * @dev: device pointer
44447ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4445bfab27a1SGiuseppe CAVALLARO  * changes the link status and releases the DMA descriptor rings.
44467ac6653aSJeff Kirsher  */
4447f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev)
44487ac6653aSJeff Kirsher {
4449f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
44507ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
44517ac6653aSJeff Kirsher 
445238ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
44537ac6653aSJeff Kirsher 
44545f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
44555f2b8b62SThierry Reding 	stmmac_exit_fs(ndev);
44565f2b8b62SThierry Reding #endif
4457ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
44587ac6653aSJeff Kirsher 
4459c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
44607ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
44617ac6653aSJeff Kirsher 	unregister_netdev(ndev);
4462f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
4463f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
4464f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->pclk);
4465f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->stmmac_clk);
44663fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
44673fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
44683fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4469e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
447034877a15SJose Abreu 	destroy_workqueue(priv->wq);
447129555fa3SThierry Reding 	mutex_destroy(&priv->lock);
44727ac6653aSJeff Kirsher 	free_netdev(ndev);
44737ac6653aSJeff Kirsher 
44747ac6653aSJeff Kirsher 	return 0;
44757ac6653aSJeff Kirsher }
4476b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
44777ac6653aSJeff Kirsher 
4478732fdf0eSGiuseppe CAVALLARO /**
4479732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
4480f4e7bd81SJoachim Eastwood  * @dev: device pointer
4481732fdf0eSGiuseppe CAVALLARO  * Description: this function suspends the device; it is called by the
4482732fdf0eSGiuseppe CAVALLARO  * platform driver to stop the network queues, program the PMT register
4483732fdf0eSGiuseppe CAVALLARO  * (for WoL) and release the driver resources.
4484732fdf0eSGiuseppe CAVALLARO  */
4485f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
44867ac6653aSJeff Kirsher {
4487f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
44887ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
44897ac6653aSJeff Kirsher 
44907ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
44917ac6653aSJeff Kirsher 		return 0;
44927ac6653aSJeff Kirsher 
4493d6d50c7eSPhilippe Reynes 	if (ndev->phydev)
4494d6d50c7eSPhilippe Reynes 		phy_stop(ndev->phydev);
4495102463b1SFrancesco Virlinzi 
449629555fa3SThierry Reding 	mutex_lock(&priv->lock);
44977ac6653aSJeff Kirsher 
44987ac6653aSJeff Kirsher 	netif_device_detach(ndev);
4499c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
45007ac6653aSJeff Kirsher 
4501c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
45027ac6653aSJeff Kirsher 
45037ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
4504ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
4505c24602efSGiuseppe CAVALLARO 
45067ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
450789f7f2cfSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
4508c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, priv->wolopts);
450989f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
451089f7f2cfSSrinivas Kandagatla 	} else {
4511c10d4c82SJose Abreu 		stmmac_mac_set(priv, priv->ioaddr, false);
4512db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
4513ba1377ffSGiuseppe CAVALLARO 		/* Disable the clocks when WoL (PMT) is not used */
4514f573c0b9Sjpinto 		clk_disable(priv->plat->pclk);
4515f573c0b9Sjpinto 		clk_disable(priv->plat->stmmac_clk);
4516ba1377ffSGiuseppe CAVALLARO 	}
451729555fa3SThierry Reding 	mutex_unlock(&priv->lock);
45182d871aa0SVince Bridgers 
45194d869b03SLABBE Corentin 	priv->oldlink = false;
4520bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
4521bd00632cSLABBE Corentin 	priv->oldduplex = DUPLEX_UNKNOWN;
45227ac6653aSJeff Kirsher 	return 0;
45237ac6653aSJeff Kirsher }
4524b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
45257ac6653aSJeff Kirsher 
4526732fdf0eSGiuseppe CAVALLARO /**
452754139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
452854139cf3SJoao Pinto  * @priv: driver private structure
452954139cf3SJoao Pinto  */
453054139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
453154139cf3SJoao Pinto {
453254139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4533ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
453454139cf3SJoao Pinto 	u32 queue;
453554139cf3SJoao Pinto 
453654139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
453754139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
453854139cf3SJoao Pinto 
453954139cf3SJoao Pinto 		rx_q->cur_rx = 0;
454054139cf3SJoao Pinto 		rx_q->dirty_rx = 0;
454154139cf3SJoao Pinto 	}
454254139cf3SJoao Pinto 
4543ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
4544ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4545ce736788SJoao Pinto 
4546ce736788SJoao Pinto 		tx_q->cur_tx = 0;
4547ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
45488d212a9eSNiklas Cassel 		tx_q->mss = 0;
4549ce736788SJoao Pinto 	}
455054139cf3SJoao Pinto }
455154139cf3SJoao Pinto 
455254139cf3SJoao Pinto /**
4553732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
4554f4e7bd81SJoachim Eastwood  * @dev: device pointer
4555732fdf0eSGiuseppe CAVALLARO  * Description: this function is invoked on resume to set up the DMA and the core
4556732fdf0eSGiuseppe CAVALLARO  * in a usable state.
4557732fdf0eSGiuseppe CAVALLARO  */
4558f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
45597ac6653aSJeff Kirsher {
4560f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
45617ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
45627ac6653aSJeff Kirsher 
45637ac6653aSJeff Kirsher 	if (!netif_running(ndev))
45647ac6653aSJeff Kirsher 		return 0;
45657ac6653aSJeff Kirsher 
45667ac6653aSJeff Kirsher 	/* The Power-Down bit in the PMT register is cleared
45677ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
45687ac6653aSJeff Kirsher 	 * is received. Anyway, it's better to manually clear
45697ac6653aSJeff Kirsher 	 * this bit because it can generate problems while resuming
4570ceb69499SGiuseppe CAVALLARO 	 * from other devices (e.g. a serial console).
4571ceb69499SGiuseppe CAVALLARO 	 */
4572623997fbSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
457329555fa3SThierry Reding 		mutex_lock(&priv->lock);
4574c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, 0);
457529555fa3SThierry Reding 		mutex_unlock(&priv->lock);
457689f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
4577623997fbSSrinivas Kandagatla 	} else {
4578db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
45798d45e42bSLABBE Corentin 		/* enable the clocks previously disabled */
4580f573c0b9Sjpinto 		clk_enable(priv->plat->stmmac_clk);
4581f573c0b9Sjpinto 		clk_enable(priv->plat->pclk);
4582623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
4583623997fbSSrinivas Kandagatla 		if (priv->mii)
4584623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
4585623997fbSSrinivas Kandagatla 	}
45867ac6653aSJeff Kirsher 
45877ac6653aSJeff Kirsher 	netif_device_attach(ndev);
45887ac6653aSJeff Kirsher 
458929555fa3SThierry Reding 	mutex_lock(&priv->lock);
4590f55d84b0SVincent Palatin 
459154139cf3SJoao Pinto 	stmmac_reset_queues_param(priv);
459254139cf3SJoao Pinto 
4593ae79a639SGiuseppe CAVALLARO 	stmmac_clear_descriptors(priv);
4594ae79a639SGiuseppe CAVALLARO 
4595fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
4596777da230SGiuseppe CAVALLARO 	stmmac_init_tx_coalesce(priv);
4597ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
45987ac6653aSJeff Kirsher 
4599c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
46007ac6653aSJeff Kirsher 
4601c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
46027ac6653aSJeff Kirsher 
460329555fa3SThierry Reding 	mutex_unlock(&priv->lock);
4604102463b1SFrancesco Virlinzi 
4605d6d50c7eSPhilippe Reynes 	if (ndev->phydev)
4606d6d50c7eSPhilippe Reynes 		phy_start(ndev->phydev);
4607102463b1SFrancesco Virlinzi 
46087ac6653aSJeff Kirsher 	return 0;
46097ac6653aSJeff Kirsher }
4610b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
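/*
 * Illustrative sketch only: a platform back-end would typically wire the
 * suspend/resume helpers above into its dev_pm_ops, for instance (the
 * "foo_dwmac_pm_ops" name is made up):
 *
 *	static SIMPLE_DEV_PM_OPS(foo_dwmac_pm_ops,
 *				 stmmac_suspend, stmmac_resume);
 *
 * and point its platform_driver .driver.pm field at &foo_dwmac_pm_ops.
 */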
4611ba27ec66SGiuseppe CAVALLARO 
46127ac6653aSJeff Kirsher #ifndef MODULE
46137ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
46147ac6653aSJeff Kirsher {
46157ac6653aSJeff Kirsher 	char *opt;
46167ac6653aSJeff Kirsher 
46177ac6653aSJeff Kirsher 	if (!str || !*str)
46187ac6653aSJeff Kirsher 		return -EINVAL;
46197ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
46207ac6653aSJeff Kirsher 		if (!strncmp(opt, "debug:", 6)) {
4621ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
46227ac6653aSJeff Kirsher 				goto err;
46237ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4624ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
46257ac6653aSJeff Kirsher 				goto err;
46267ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4627ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
46287ac6653aSJeff Kirsher 				goto err;
46297ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "tc:", 3)) {
4630ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
46317ac6653aSJeff Kirsher 				goto err;
46327ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "watchdog:", 9)) {
4633ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
46347ac6653aSJeff Kirsher 				goto err;
46357ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4636ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
46377ac6653aSJeff Kirsher 				goto err;
46387ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "pause:", 6)) {
4639ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
46407ac6653aSJeff Kirsher 				goto err;
4641506f669cSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4642d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
4643d765955dSGiuseppe CAVALLARO 				goto err;
46444a7d666aSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "chain_mode:", 11)) {
46454a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
46464a7d666aSGiuseppe CAVALLARO 				goto err;
46477ac6653aSJeff Kirsher 		}
46487ac6653aSJeff Kirsher 	}
46497ac6653aSJeff Kirsher 	return 0;
46507ac6653aSJeff Kirsher 
46517ac6653aSJeff Kirsher err:
46527ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
46537ac6653aSJeff Kirsher 	return -EINVAL;
46547ac6653aSJeff Kirsher }
46557ac6653aSJeff Kirsher 
46567ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
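/*
 * Example of the option string parsed above, passed on the kernel command
 * line when the driver is built in (values are illustrative only):
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,flow_ctrl:3,chain_mode:1
 */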
4657ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
46586fc0d0f2SGiuseppe Cavallaro 
4659466c5ac8SMathieu Olivari static int __init stmmac_init(void)
4660466c5ac8SMathieu Olivari {
4661466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4662466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
4663466c5ac8SMathieu Olivari 	if (!stmmac_fs_dir) {
4664466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4665466c5ac8SMathieu Olivari 
4666466c5ac8SMathieu Olivari 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4667466c5ac8SMathieu Olivari 			pr_err("ERROR %s, debugfs create directory failed\n",
4668466c5ac8SMathieu Olivari 			       STMMAC_RESOURCE_NAME);
4669466c5ac8SMathieu Olivari 
4670466c5ac8SMathieu Olivari 			return -ENOMEM;
4671466c5ac8SMathieu Olivari 		}
4672466c5ac8SMathieu Olivari 	}
4673466c5ac8SMathieu Olivari #endif
4674466c5ac8SMathieu Olivari 
4675466c5ac8SMathieu Olivari 	return 0;
4676466c5ac8SMathieu Olivari }
4677466c5ac8SMathieu Olivari 
4678466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
4679466c5ac8SMathieu Olivari {
4680466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4681466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
4682466c5ac8SMathieu Olivari #endif
4683466c5ac8SMathieu Olivari }
4684466c5ac8SMathieu Olivari 
4685466c5ac8SMathieu Olivari module_init(stmmac_init)
4686466c5ac8SMathieu Olivari module_exit(stmmac_exit)
4687466c5ac8SMathieu Olivari 
46886fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
46896fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
46906fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
4691