17ac6653aSJeff Kirsher /*******************************************************************************
27ac6653aSJeff Kirsher   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
37ac6653aSJeff Kirsher   ST Ethernet IPs are built around a Synopsys IP Core.
47ac6653aSJeff Kirsher 
5286a8372SGiuseppe CAVALLARO 	Copyright(C) 2007-2011 STMicroelectronics Ltd
67ac6653aSJeff Kirsher 
77ac6653aSJeff Kirsher   This program is free software; you can redistribute it and/or modify it
87ac6653aSJeff Kirsher   under the terms and conditions of the GNU General Public License,
97ac6653aSJeff Kirsher   version 2, as published by the Free Software Foundation.
107ac6653aSJeff Kirsher 
117ac6653aSJeff Kirsher   This program is distributed in the hope it will be useful, but WITHOUT
127ac6653aSJeff Kirsher   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
137ac6653aSJeff Kirsher   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
147ac6653aSJeff Kirsher   more details.
157ac6653aSJeff Kirsher 
167ac6653aSJeff Kirsher   The full GNU General Public License is included in this distribution in
177ac6653aSJeff Kirsher   the file called "COPYING".
187ac6653aSJeff Kirsher 
197ac6653aSJeff Kirsher   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
207ac6653aSJeff Kirsher 
217ac6653aSJeff Kirsher   Documentation available at:
227ac6653aSJeff Kirsher 	http://www.stlinux.com
237ac6653aSJeff Kirsher   Support available at:
247ac6653aSJeff Kirsher 	https://bugzilla.stlinux.com/
257ac6653aSJeff Kirsher *******************************************************************************/
267ac6653aSJeff Kirsher 
276a81c26fSViresh Kumar #include <linux/clk.h>
287ac6653aSJeff Kirsher #include <linux/kernel.h>
297ac6653aSJeff Kirsher #include <linux/interrupt.h>
307ac6653aSJeff Kirsher #include <linux/ip.h>
317ac6653aSJeff Kirsher #include <linux/tcp.h>
327ac6653aSJeff Kirsher #include <linux/skbuff.h>
337ac6653aSJeff Kirsher #include <linux/ethtool.h>
347ac6653aSJeff Kirsher #include <linux/if_ether.h>
357ac6653aSJeff Kirsher #include <linux/crc32.h>
367ac6653aSJeff Kirsher #include <linux/mii.h>
3701789349SJiri Pirko #include <linux/if.h>
387ac6653aSJeff Kirsher #include <linux/if_vlan.h>
397ac6653aSJeff Kirsher #include <linux/dma-mapping.h>
407ac6653aSJeff Kirsher #include <linux/slab.h>
417ac6653aSJeff Kirsher #include <linux/prefetch.h>
42db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h>
4350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
447ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h>
457ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h>
4650fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
47891434b1SRayagond Kokatanur #include <linux/net_tstamp.h>
48eeef2f6bSJose Abreu #include <linux/phylink.h>
494dbbe8ddSJose Abreu #include <net/pkt_cls.h>
50891434b1SRayagond Kokatanur #include "stmmac_ptp.h"
51286a8372SGiuseppe CAVALLARO #include "stmmac.h"
52c5e4ddbdSChen-Yu Tsai #include <linux/reset.h>
535790cf3cSMathieu Olivari #include <linux/of_mdio.h>
5419d857c9SPhil Reid #include "dwmac1000.h"
557d9e6c5aSJose Abreu #include "dwxgmac2.h"
5642de047dSJose Abreu #include "hwif.h"
577ac6653aSJeff Kirsher 
589939a46dSEugeniy Paltsev #define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
59f748be53SAlexandre TORGUE #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
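/* A short illustration of the macro above: STMMAC_ALIGN() rounds a length up
 * to the CPU cache line size, so, assuming 64-byte cache lines,
 * STMMAC_ALIGN(1500) evaluates to 1536.
 */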
607ac6653aSJeff Kirsher 
617ac6653aSJeff Kirsher /* Module parameters */
6232ceabcaSGiuseppe CAVALLARO #define TX_TIMEO	5000
637ac6653aSJeff Kirsher static int watchdog = TX_TIMEO;
64d3757ba4SJoe Perches module_param(watchdog, int, 0644);
6532ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
667ac6653aSJeff Kirsher 
6732ceabcaSGiuseppe CAVALLARO static int debug = -1;
68d3757ba4SJoe Perches module_param(debug, int, 0644);
6932ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
707ac6653aSJeff Kirsher 
7147d1f71fSstephen hemminger static int phyaddr = -1;
72d3757ba4SJoe Perches module_param(phyaddr, int, 0444);
737ac6653aSJeff Kirsher MODULE_PARM_DESC(phyaddr, "Physical device address");
747ac6653aSJeff Kirsher 
75e3ad57c9SGiuseppe Cavallaro #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
76120e87f9SGiuseppe Cavallaro #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
777ac6653aSJeff Kirsher 
78e9989339SJose Abreu static int flow_ctrl = FLOW_AUTO;
79d3757ba4SJoe Perches module_param(flow_ctrl, int, 0644);
807ac6653aSJeff Kirsher MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
817ac6653aSJeff Kirsher 
827ac6653aSJeff Kirsher static int pause = PAUSE_TIME;
83d3757ba4SJoe Perches module_param(pause, int, 0644);
847ac6653aSJeff Kirsher MODULE_PARM_DESC(pause, "Flow Control Pause Time");
857ac6653aSJeff Kirsher 
867ac6653aSJeff Kirsher #define TC_DEFAULT 64
877ac6653aSJeff Kirsher static int tc = TC_DEFAULT;
88d3757ba4SJoe Perches module_param(tc, int, 0644);
897ac6653aSJeff Kirsher MODULE_PARM_DESC(tc, "DMA threshold control value");
907ac6653aSJeff Kirsher 
91d916701cSGiuseppe CAVALLARO #define	DEFAULT_BUFSIZE	1536
92d916701cSGiuseppe CAVALLARO static int buf_sz = DEFAULT_BUFSIZE;
93d3757ba4SJoe Perches module_param(buf_sz, int, 0644);
947ac6653aSJeff Kirsher MODULE_PARM_DESC(buf_sz, "DMA buffer size");
957ac6653aSJeff Kirsher 
9622ad3838SGiuseppe Cavallaro #define	STMMAC_RX_COPYBREAK	256
9722ad3838SGiuseppe Cavallaro 
987ac6653aSJeff Kirsher static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
997ac6653aSJeff Kirsher 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
1007ac6653aSJeff Kirsher 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
1017ac6653aSJeff Kirsher 
102d765955dSGiuseppe CAVALLARO #define STMMAC_DEFAULT_LPI_TIMER	1000
103d765955dSGiuseppe CAVALLARO static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
104d3757ba4SJoe Perches module_param(eee_timer, int, 0644);
105d765955dSGiuseppe CAVALLARO MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
106f5351ef7SGiuseppe CAVALLARO #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
107d765955dSGiuseppe CAVALLARO 
10822d3efe5SPavel Machek /* By default the driver uses the ring mode to manage tx and rx descriptors,
10922d3efe5SPavel Machek  * but the user can force the use of chain mode instead of the ring.
1104a7d666aSGiuseppe CAVALLARO  */
1114a7d666aSGiuseppe CAVALLARO static unsigned int chain_mode;
112d3757ba4SJoe Perches module_param(chain_mode, int, 0444);
1134a7d666aSGiuseppe CAVALLARO MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
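/* A minimal usage sketch (assuming the driver is built as the "stmmac"
 * module): the parameters above can be passed at load time, e.g.
 * "modprobe stmmac chain_mode=1 buf_sz=4096 debug=16", or, when built in,
 * on the kernel command line as "stmmac.chain_mode=1".
 */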
1144a7d666aSGiuseppe CAVALLARO 
1157ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
1167ac6653aSJeff Kirsher 
11750fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
118bfab27a1SGiuseppe CAVALLARO static int stmmac_init_fs(struct net_device *dev);
119466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev);
120bfab27a1SGiuseppe CAVALLARO #endif
121bfab27a1SGiuseppe CAVALLARO 
1229125cdd1SGiuseppe CAVALLARO #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
1239125cdd1SGiuseppe CAVALLARO 
1247ac6653aSJeff Kirsher /**
1257ac6653aSJeff Kirsher  * stmmac_verify_args - verify the driver parameters.
126732fdf0eSGiuseppe CAVALLARO  * Description: it checks the driver parameters and sets default values in
127732fdf0eSGiuseppe CAVALLARO  * case of errors.
1287ac6653aSJeff Kirsher  */
1297ac6653aSJeff Kirsher static void stmmac_verify_args(void)
1307ac6653aSJeff Kirsher {
1317ac6653aSJeff Kirsher 	if (unlikely(watchdog < 0))
1327ac6653aSJeff Kirsher 		watchdog = TX_TIMEO;
133d916701cSGiuseppe CAVALLARO 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
134d916701cSGiuseppe CAVALLARO 		buf_sz = DEFAULT_BUFSIZE;
1357ac6653aSJeff Kirsher 	if (unlikely(flow_ctrl > 1))
1367ac6653aSJeff Kirsher 		flow_ctrl = FLOW_AUTO;
1377ac6653aSJeff Kirsher 	else if (likely(flow_ctrl < 0))
1387ac6653aSJeff Kirsher 		flow_ctrl = FLOW_OFF;
1397ac6653aSJeff Kirsher 	if (unlikely((pause < 0) || (pause > 0xffff)))
1407ac6653aSJeff Kirsher 		pause = PAUSE_TIME;
141d765955dSGiuseppe CAVALLARO 	if (eee_timer < 0)
142d765955dSGiuseppe CAVALLARO 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
1437ac6653aSJeff Kirsher }
1447ac6653aSJeff Kirsher 
14532ceabcaSGiuseppe CAVALLARO /**
146c22a3f48SJoao Pinto  * stmmac_disable_all_queues - Disable all queues
147c22a3f48SJoao Pinto  * @priv: driver private structure
148c22a3f48SJoao Pinto  */
149c22a3f48SJoao Pinto static void stmmac_disable_all_queues(struct stmmac_priv *priv)
150c22a3f48SJoao Pinto {
151c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
1528fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
1538fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
154c22a3f48SJoao Pinto 	u32 queue;
155c22a3f48SJoao Pinto 
1568fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
1578fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
158c22a3f48SJoao Pinto 
1594ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
1604ccb4585SJose Abreu 			napi_disable(&ch->rx_napi);
1614ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
1624ccb4585SJose Abreu 			napi_disable(&ch->tx_napi);
163c22a3f48SJoao Pinto 	}
164c22a3f48SJoao Pinto }
165c22a3f48SJoao Pinto 
166c22a3f48SJoao Pinto /**
167c22a3f48SJoao Pinto  * stmmac_enable_all_queues - Enable all queues
168c22a3f48SJoao Pinto  * @priv: driver private structure
169c22a3f48SJoao Pinto  */
170c22a3f48SJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv)
171c22a3f48SJoao Pinto {
172c22a3f48SJoao Pinto 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
1738fce3331SJose Abreu 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
1748fce3331SJose Abreu 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
175c22a3f48SJoao Pinto 	u32 queue;
176c22a3f48SJoao Pinto 
1778fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
1788fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
179c22a3f48SJoao Pinto 
1804ccb4585SJose Abreu 		if (queue < rx_queues_cnt)
1814ccb4585SJose Abreu 			napi_enable(&ch->rx_napi);
1824ccb4585SJose Abreu 		if (queue < tx_queues_cnt)
1834ccb4585SJose Abreu 			napi_enable(&ch->tx_napi);
184c22a3f48SJoao Pinto 	}
185c22a3f48SJoao Pinto }
186c22a3f48SJoao Pinto 
187c22a3f48SJoao Pinto /**
188c22a3f48SJoao Pinto  * stmmac_stop_all_queues - Stop all queues
189c22a3f48SJoao Pinto  * @priv: driver private structure
190c22a3f48SJoao Pinto  */
191c22a3f48SJoao Pinto static void stmmac_stop_all_queues(struct stmmac_priv *priv)
192c22a3f48SJoao Pinto {
193c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
194c22a3f48SJoao Pinto 	u32 queue;
195c22a3f48SJoao Pinto 
196c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
197c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
198c22a3f48SJoao Pinto }
199c22a3f48SJoao Pinto 
200c22a3f48SJoao Pinto /**
201c22a3f48SJoao Pinto  * stmmac_start_all_queues - Start all queues
202c22a3f48SJoao Pinto  * @priv: driver private structure
203c22a3f48SJoao Pinto  */
204c22a3f48SJoao Pinto static void stmmac_start_all_queues(struct stmmac_priv *priv)
205c22a3f48SJoao Pinto {
206c22a3f48SJoao Pinto 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207c22a3f48SJoao Pinto 	u32 queue;
208c22a3f48SJoao Pinto 
209c22a3f48SJoao Pinto 	for (queue = 0; queue < tx_queues_cnt; queue++)
210c22a3f48SJoao Pinto 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
211c22a3f48SJoao Pinto }
212c22a3f48SJoao Pinto 
21334877a15SJose Abreu static void stmmac_service_event_schedule(struct stmmac_priv *priv)
21434877a15SJose Abreu {
21534877a15SJose Abreu 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
21634877a15SJose Abreu 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
21734877a15SJose Abreu 		queue_work(priv->wq, &priv->service_task);
21834877a15SJose Abreu }
21934877a15SJose Abreu 
22034877a15SJose Abreu static void stmmac_global_err(struct stmmac_priv *priv)
22134877a15SJose Abreu {
22234877a15SJose Abreu 	netif_carrier_off(priv->dev);
22334877a15SJose Abreu 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
22434877a15SJose Abreu 	stmmac_service_event_schedule(priv);
22534877a15SJose Abreu }
22634877a15SJose Abreu 
227c22a3f48SJoao Pinto /**
22832ceabcaSGiuseppe CAVALLARO  * stmmac_clk_csr_set - dynamically set the MDC clock
22932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
23032ceabcaSGiuseppe CAVALLARO  * Description: this is to dynamically set the MDC clock according to the csr
23132ceabcaSGiuseppe CAVALLARO  * clock input.
23232ceabcaSGiuseppe CAVALLARO  * Note:
23332ceabcaSGiuseppe CAVALLARO  *	If a specific clk_csr value is passed from the platform
23432ceabcaSGiuseppe CAVALLARO  *	this means that the CSR Clock Range selection cannot be
23532ceabcaSGiuseppe CAVALLARO  *	changed at run-time and it is fixed (as reported in the driver
23632ceabcaSGiuseppe CAVALLARO  *	documentation). Otherwise, the driver will try to set the MDC
23732ceabcaSGiuseppe CAVALLARO  *	clock dynamically according to the actual clock input.
23832ceabcaSGiuseppe CAVALLARO  */
239cd7201f4SGiuseppe CAVALLARO static void stmmac_clk_csr_set(struct stmmac_priv *priv)
240cd7201f4SGiuseppe CAVALLARO {
241cd7201f4SGiuseppe CAVALLARO 	u32 clk_rate;
242cd7201f4SGiuseppe CAVALLARO 
243f573c0b9Sjpinto 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
244cd7201f4SGiuseppe CAVALLARO 
245cd7201f4SGiuseppe CAVALLARO 	/* The platform-provided default clk_csr is assumed valid in all
246ceb69499SGiuseppe CAVALLARO 	 * cases except for the ones handled below.
247ceb69499SGiuseppe CAVALLARO 	 * For values higher than the IEEE 802.3 specified frequency
248ceb69499SGiuseppe CAVALLARO 	 * we cannot estimate the proper divider, as the frequency of
249ceb69499SGiuseppe CAVALLARO 	 * clk_csr_i is not known, so we do not change the default
250ceb69499SGiuseppe CAVALLARO 	 * divider.
251ceb69499SGiuseppe CAVALLARO 	 */
252cd7201f4SGiuseppe CAVALLARO 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
253cd7201f4SGiuseppe CAVALLARO 		if (clk_rate < CSR_F_35M)
254cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_20_35M;
255cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
256cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_35_60M;
257cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
258cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_60_100M;
259cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
260cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_100_150M;
261cd7201f4SGiuseppe CAVALLARO 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
262cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_150_250M;
26319d857c9SPhil Reid 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
264cd7201f4SGiuseppe CAVALLARO 			priv->clk_csr = STMMAC_CSR_250_300M;
265ceb69499SGiuseppe CAVALLARO 	}
2669f93ac8dSLABBE Corentin 
2679f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i) {
2689f93ac8dSLABBE Corentin 		if (clk_rate > 160000000)
2699f93ac8dSLABBE Corentin 			priv->clk_csr = 0x03;
2709f93ac8dSLABBE Corentin 		else if (clk_rate > 80000000)
2719f93ac8dSLABBE Corentin 			priv->clk_csr = 0x02;
2729f93ac8dSLABBE Corentin 		else if (clk_rate > 40000000)
2739f93ac8dSLABBE Corentin 			priv->clk_csr = 0x01;
2749f93ac8dSLABBE Corentin 		else
2759f93ac8dSLABBE Corentin 			priv->clk_csr = 0;
2769f93ac8dSLABBE Corentin 	}
2777d9e6c5aSJose Abreu 
2787d9e6c5aSJose Abreu 	if (priv->plat->has_xgmac) {
2797d9e6c5aSJose Abreu 		if (clk_rate > 400000000)
2807d9e6c5aSJose Abreu 			priv->clk_csr = 0x5;
2817d9e6c5aSJose Abreu 		else if (clk_rate > 350000000)
2827d9e6c5aSJose Abreu 			priv->clk_csr = 0x4;
2837d9e6c5aSJose Abreu 		else if (clk_rate > 300000000)
2847d9e6c5aSJose Abreu 			priv->clk_csr = 0x3;
2857d9e6c5aSJose Abreu 		else if (clk_rate > 250000000)
2867d9e6c5aSJose Abreu 			priv->clk_csr = 0x2;
2877d9e6c5aSJose Abreu 		else if (clk_rate > 150000000)
2887d9e6c5aSJose Abreu 			priv->clk_csr = 0x1;
2897d9e6c5aSJose Abreu 		else
2907d9e6c5aSJose Abreu 			priv->clk_csr = 0x0;
2917d9e6c5aSJose Abreu 	}
292cd7201f4SGiuseppe CAVALLARO }
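/* A worked example of the selection above (a sketch, not an exhaustive
 * table): with no platform-provided clk_csr and a 62.5 MHz csr clock, the
 * range check picks STMMAC_CSR_60_100M; the sun8i and xgmac variants above
 * program raw divider values instead of the STMMAC_CSR_* encodings.
 */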
293cd7201f4SGiuseppe CAVALLARO 
2947ac6653aSJeff Kirsher static void print_pkt(unsigned char *buf, int len)
2957ac6653aSJeff Kirsher {
296424c4f78SAndy Shevchenko 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
297424c4f78SAndy Shevchenko 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
2987ac6653aSJeff Kirsher }
2997ac6653aSJeff Kirsher 
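/**
 * stmmac_tx_avail - Get number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: it computes the entries still available in the TX ring; one
 * descriptor is kept as a gap between dirty_tx and cur_tx so that a full
 * ring can be told apart from an empty one.
 */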
300ce736788SJoao Pinto static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
3017ac6653aSJeff Kirsher {
302ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
303a6a3e026SLABBE Corentin 	u32 avail;
304e3ad57c9SGiuseppe Cavallaro 
305ce736788SJoao Pinto 	if (tx_q->dirty_tx > tx_q->cur_tx)
306ce736788SJoao Pinto 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
307e3ad57c9SGiuseppe Cavallaro 	else
308ce736788SJoao Pinto 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
309e3ad57c9SGiuseppe Cavallaro 
310e3ad57c9SGiuseppe Cavallaro 	return avail;
311e3ad57c9SGiuseppe Cavallaro }
312e3ad57c9SGiuseppe Cavallaro 
31354139cf3SJoao Pinto /**
31454139cf3SJoao Pinto  * stmmac_rx_dirty - Get RX queue dirty
31554139cf3SJoao Pinto  * @priv: driver private structure
31654139cf3SJoao Pinto  * @queue: RX queue index
31754139cf3SJoao Pinto  */
31854139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
319e3ad57c9SGiuseppe Cavallaro {
32054139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
321a6a3e026SLABBE Corentin 	u32 dirty;
322e3ad57c9SGiuseppe Cavallaro 
32354139cf3SJoao Pinto 	if (rx_q->dirty_rx <= rx_q->cur_rx)
32454139cf3SJoao Pinto 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
325e3ad57c9SGiuseppe Cavallaro 	else
32654139cf3SJoao Pinto 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
327e3ad57c9SGiuseppe Cavallaro 
328e3ad57c9SGiuseppe Cavallaro 	return dirty;
3297ac6653aSJeff Kirsher }
3307ac6653aSJeff Kirsher 
33132ceabcaSGiuseppe CAVALLARO /**
332732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_fix_mac_speed - callback for speed selection
33332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
3348d45e42bSLABBE Corentin  * Description: on some platforms (e.g. ST), some HW system configuration
33532ceabcaSGiuseppe CAVALLARO  * registers have to be set according to the link speed negotiated.
3367ac6653aSJeff Kirsher  */
3377ac6653aSJeff Kirsher static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
3387ac6653aSJeff Kirsher {
339d6d50c7eSPhilippe Reynes 	struct net_device *ndev = priv->dev;
340d6d50c7eSPhilippe Reynes 	struct phy_device *phydev = ndev->phydev;
3417ac6653aSJeff Kirsher 
3427ac6653aSJeff Kirsher 	if (likely(priv->plat->fix_mac_speed))
343ceb69499SGiuseppe CAVALLARO 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
3447ac6653aSJeff Kirsher }
3457ac6653aSJeff Kirsher 
34632ceabcaSGiuseppe CAVALLARO /**
347732fdf0eSGiuseppe CAVALLARO  * stmmac_enable_eee_mode - check and enter LPI mode
34832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
349732fdf0eSGiuseppe CAVALLARO  * Description: this function checks that all TX queues have completed their
350732fdf0eSGiuseppe CAVALLARO  * work and, if so, enters LPI mode (EEE).
35132ceabcaSGiuseppe CAVALLARO  */
352d765955dSGiuseppe CAVALLARO static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
353d765955dSGiuseppe CAVALLARO {
354ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
355ce736788SJoao Pinto 	u32 queue;
356ce736788SJoao Pinto 
357ce736788SJoao Pinto 	/* check if all TX queues have the work finished */
358ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
359ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
360ce736788SJoao Pinto 
361ce736788SJoao Pinto 		if (tx_q->dirty_tx != tx_q->cur_tx)
362ce736788SJoao Pinto 			return; /* still unfinished work */
363ce736788SJoao Pinto 	}
364ce736788SJoao Pinto 
365d765955dSGiuseppe CAVALLARO 	/* Check and enter in LPI mode */
366ce736788SJoao Pinto 	if (!priv->tx_path_in_lpi_mode)
367c10d4c82SJose Abreu 		stmmac_set_eee_mode(priv, priv->hw,
368b4b7b772Sjpinto 				priv->plat->en_tx_lpi_clockgating);
369d765955dSGiuseppe CAVALLARO }
370d765955dSGiuseppe CAVALLARO 
37132ceabcaSGiuseppe CAVALLARO /**
372732fdf0eSGiuseppe CAVALLARO  * stmmac_disable_eee_mode - disable and exit from LPI mode
37332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
37432ceabcaSGiuseppe CAVALLARO  * Description: this function exits and disables EEE when the LPI state
37532ceabcaSGiuseppe CAVALLARO  * is true. It is called by the xmit path.
37632ceabcaSGiuseppe CAVALLARO  */
377d765955dSGiuseppe CAVALLARO void stmmac_disable_eee_mode(struct stmmac_priv *priv)
378d765955dSGiuseppe CAVALLARO {
379c10d4c82SJose Abreu 	stmmac_reset_eee_mode(priv, priv->hw);
380d765955dSGiuseppe CAVALLARO 	del_timer_sync(&priv->eee_ctrl_timer);
381d765955dSGiuseppe CAVALLARO 	priv->tx_path_in_lpi_mode = false;
382d765955dSGiuseppe CAVALLARO }
383d765955dSGiuseppe CAVALLARO 
384d765955dSGiuseppe CAVALLARO /**
385732fdf0eSGiuseppe CAVALLARO  * stmmac_eee_ctrl_timer - EEE TX SW timer.
386d765955dSGiuseppe CAVALLARO  * @t: timer_list struct containing the private data
387d765955dSGiuseppe CAVALLARO  * Description:
38832ceabcaSGiuseppe CAVALLARO  *  if there is no data transfer and if we are not in LPI state,
389d765955dSGiuseppe CAVALLARO  *  then MAC Transmitter can be moved to LPI state.
390d765955dSGiuseppe CAVALLARO  */
391e99e88a9SKees Cook static void stmmac_eee_ctrl_timer(struct timer_list *t)
392d765955dSGiuseppe CAVALLARO {
393e99e88a9SKees Cook 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
394d765955dSGiuseppe CAVALLARO 
395d765955dSGiuseppe CAVALLARO 	stmmac_enable_eee_mode(priv);
396f5351ef7SGiuseppe CAVALLARO 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
397d765955dSGiuseppe CAVALLARO }
398d765955dSGiuseppe CAVALLARO 
399d765955dSGiuseppe CAVALLARO /**
400732fdf0eSGiuseppe CAVALLARO  * stmmac_eee_init - init EEE
40132ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
402d765955dSGiuseppe CAVALLARO  * Description:
403732fdf0eSGiuseppe CAVALLARO  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
404732fdf0eSGiuseppe CAVALLARO  *  can also manage EEE, this function enables the LPI state and starts the
405732fdf0eSGiuseppe CAVALLARO  *  related timer.
406d765955dSGiuseppe CAVALLARO  */
407d765955dSGiuseppe CAVALLARO bool stmmac_eee_init(struct stmmac_priv *priv)
408d765955dSGiuseppe CAVALLARO {
409d6d50c7eSPhilippe Reynes 	struct net_device *ndev = priv->dev;
410879626e3SJerome Brunet 	int interface = priv->plat->interface;
411d765955dSGiuseppe CAVALLARO 	bool ret = false;
412d765955dSGiuseppe CAVALLARO 
413879626e3SJerome Brunet 	if ((interface != PHY_INTERFACE_MODE_MII) &&
414879626e3SJerome Brunet 	    (interface != PHY_INTERFACE_MODE_GMII) &&
415879626e3SJerome Brunet 	    !phy_interface_mode_is_rgmii(interface))
416879626e3SJerome Brunet 		goto out;
417879626e3SJerome Brunet 
418f5351ef7SGiuseppe CAVALLARO 	/* Using PCS we cannot deal with the phy registers at this stage
419f5351ef7SGiuseppe CAVALLARO 	 * so we do not support extra features like EEE.
420f5351ef7SGiuseppe CAVALLARO 	 */
4213fe5cadbSGiuseppe CAVALLARO 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
4223fe5cadbSGiuseppe CAVALLARO 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
4233fe5cadbSGiuseppe CAVALLARO 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
424f5351ef7SGiuseppe CAVALLARO 		goto out;
425f5351ef7SGiuseppe CAVALLARO 
426d765955dSGiuseppe CAVALLARO 	/* MAC core supports the EEE feature. */
427d765955dSGiuseppe CAVALLARO 	if (priv->dma_cap.eee) {
42883bf79b6SGiuseppe CAVALLARO 		int tx_lpi_timer = priv->tx_lpi_timer;
429d765955dSGiuseppe CAVALLARO 
43083bf79b6SGiuseppe CAVALLARO 		/* Check if the PHY supports EEE */
431d6d50c7eSPhilippe Reynes 		if (phy_init_eee(ndev->phydev, 1)) {
43283bf79b6SGiuseppe CAVALLARO 			/* Handle, at run-time, the case where EEE cannot be
43383bf79b6SGiuseppe CAVALLARO 			 * supported anymore (for example because the lp caps
43483bf79b6SGiuseppe CAVALLARO 			 * have been changed).
43583bf79b6SGiuseppe CAVALLARO 			 * In that case the driver disables its own timers.
43683bf79b6SGiuseppe CAVALLARO 			 */
43729555fa3SThierry Reding 			mutex_lock(&priv->lock);
43883bf79b6SGiuseppe CAVALLARO 			if (priv->eee_active) {
43938ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "disable EEE\n");
44083bf79b6SGiuseppe CAVALLARO 				del_timer_sync(&priv->eee_ctrl_timer);
441c10d4c82SJose Abreu 				stmmac_set_eee_timer(priv, priv->hw, 0,
44283bf79b6SGiuseppe CAVALLARO 						tx_lpi_timer);
44383bf79b6SGiuseppe CAVALLARO 			}
44483bf79b6SGiuseppe CAVALLARO 			priv->eee_active = 0;
44529555fa3SThierry Reding 			mutex_unlock(&priv->lock);
44683bf79b6SGiuseppe CAVALLARO 			goto out;
44783bf79b6SGiuseppe CAVALLARO 		}
44883bf79b6SGiuseppe CAVALLARO 		/* Activate the EEE and start timers */
44929555fa3SThierry Reding 		mutex_lock(&priv->lock);
450f5351ef7SGiuseppe CAVALLARO 		if (!priv->eee_active) {
451d765955dSGiuseppe CAVALLARO 			priv->eee_active = 1;
452e99e88a9SKees Cook 			timer_setup(&priv->eee_ctrl_timer,
453e99e88a9SKees Cook 				    stmmac_eee_ctrl_timer, 0);
454ccb36da1SVaishali Thakkar 			mod_timer(&priv->eee_ctrl_timer,
455ccb36da1SVaishali Thakkar 				  STMMAC_LPI_T(eee_timer));
456d765955dSGiuseppe CAVALLARO 
457c10d4c82SJose Abreu 			stmmac_set_eee_timer(priv, priv->hw,
458c10d4c82SJose Abreu 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
45971965352SGiuseppe CAVALLARO 		}
460f5351ef7SGiuseppe CAVALLARO 		/* Set HW EEE according to the speed */
461c10d4c82SJose Abreu 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
462d765955dSGiuseppe CAVALLARO 
463d765955dSGiuseppe CAVALLARO 		ret = true;
46429555fa3SThierry Reding 		mutex_unlock(&priv->lock);
4654741cf9cSGiuseppe CAVALLARO 
46638ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
467d765955dSGiuseppe CAVALLARO 	}
468d765955dSGiuseppe CAVALLARO out:
469d765955dSGiuseppe CAVALLARO 	return ret;
470d765955dSGiuseppe CAVALLARO }
471d765955dSGiuseppe CAVALLARO 
472732fdf0eSGiuseppe CAVALLARO /* stmmac_get_tx_hwtstamp - get HW TX timestamps
47332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
474ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
475891434b1SRayagond Kokatanur  * @skb : the socket buffer
476891434b1SRayagond Kokatanur  * Description :
477891434b1SRayagond Kokatanur  * This function will read the timestamp from the descriptor and pass it to
478891434b1SRayagond Kokatanur  * the stack. It also performs some sanity checks.
479891434b1SRayagond Kokatanur  */
480891434b1SRayagond Kokatanur static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
481ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *p, struct sk_buff *skb)
482891434b1SRayagond Kokatanur {
483891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps shhwtstamp;
484df103170SNathan Chancellor 	u64 ns = 0;
485891434b1SRayagond Kokatanur 
486891434b1SRayagond Kokatanur 	if (!priv->hwts_tx_en)
487891434b1SRayagond Kokatanur 		return;
488891434b1SRayagond Kokatanur 
489ceb69499SGiuseppe CAVALLARO 	/* exit if skb doesn't support hw tstamp */
49075e4364fSdamuzi000 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
491891434b1SRayagond Kokatanur 		return;
492891434b1SRayagond Kokatanur 
493891434b1SRayagond Kokatanur 	/* check tx tstamp status */
49442de047dSJose Abreu 	if (stmmac_get_tx_timestamp_status(priv, p)) {
495891434b1SRayagond Kokatanur 		/* get the valid tstamp */
49642de047dSJose Abreu 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
497891434b1SRayagond Kokatanur 
498891434b1SRayagond Kokatanur 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
499891434b1SRayagond Kokatanur 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
500ba1ffd74SGiuseppe CAVALLARO 
50133d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
502891434b1SRayagond Kokatanur 		/* pass tstamp to stack */
503891434b1SRayagond Kokatanur 		skb_tstamp_tx(skb, &shhwtstamp);
504ba1ffd74SGiuseppe CAVALLARO 	}
505891434b1SRayagond Kokatanur 
506891434b1SRayagond Kokatanur 	return;
507891434b1SRayagond Kokatanur }
508891434b1SRayagond Kokatanur 
509732fdf0eSGiuseppe CAVALLARO /* stmmac_get_rx_hwtstamp - get HW RX timestamps
51032ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
511ba1ffd74SGiuseppe CAVALLARO  * @p : descriptor pointer
512ba1ffd74SGiuseppe CAVALLARO  * @np : next descriptor pointer
513891434b1SRayagond Kokatanur  * @skb : the socket buffer
514891434b1SRayagond Kokatanur  * Description :
515891434b1SRayagond Kokatanur  * This function will read received packet's timestamp from the descriptor
516891434b1SRayagond Kokatanur  * and pass it to the stack. It also performs some sanity checks.
517891434b1SRayagond Kokatanur  */
518ba1ffd74SGiuseppe CAVALLARO static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
519ba1ffd74SGiuseppe CAVALLARO 				   struct dma_desc *np, struct sk_buff *skb)
520891434b1SRayagond Kokatanur {
521891434b1SRayagond Kokatanur 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
52298870943SJose Abreu 	struct dma_desc *desc = p;
523df103170SNathan Chancellor 	u64 ns = 0;
524891434b1SRayagond Kokatanur 
525891434b1SRayagond Kokatanur 	if (!priv->hwts_rx_en)
526891434b1SRayagond Kokatanur 		return;
527ba1ffd74SGiuseppe CAVALLARO 	/* For GMAC4, the valid timestamp is from CTX next desc. */
5287d9e6c5aSJose Abreu 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
52998870943SJose Abreu 		desc = np;
530891434b1SRayagond Kokatanur 
53198870943SJose Abreu 	/* Check if timestamp is available */
53242de047dSJose Abreu 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
53342de047dSJose Abreu 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
53433d4c482SMario Molitor 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
535891434b1SRayagond Kokatanur 		shhwtstamp = skb_hwtstamps(skb);
536891434b1SRayagond Kokatanur 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
537891434b1SRayagond Kokatanur 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
538ba1ffd74SGiuseppe CAVALLARO 	} else  {
53933d4c482SMario Molitor 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
540ba1ffd74SGiuseppe CAVALLARO 	}
541891434b1SRayagond Kokatanur }
542891434b1SRayagond Kokatanur 
543891434b1SRayagond Kokatanur /**
544d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_set - control hardware timestamping.
545891434b1SRayagond Kokatanur  *  @dev: device pointer.
5468d45e42bSLABBE Corentin  *  @ifr: An IOCTL specific structure, that can contain a pointer to
547891434b1SRayagond Kokatanur  *  a proprietary structure used to pass information to the driver.
548891434b1SRayagond Kokatanur  *  Description:
549891434b1SRayagond Kokatanur  *  This function configures the MAC to enable/disable both outgoing (TX)
550891434b1SRayagond Kokatanur  *  and incoming (RX) packet timestamping based on user input.
551891434b1SRayagond Kokatanur  *  Return Value:
552891434b1SRayagond Kokatanur  *  0 on success and an appropriate -ve integer on failure.
553891434b1SRayagond Kokatanur  */
554d6228b7cSArtem Panfilov static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
555891434b1SRayagond Kokatanur {
556891434b1SRayagond Kokatanur 	struct stmmac_priv *priv = netdev_priv(dev);
557891434b1SRayagond Kokatanur 	struct hwtstamp_config config;
5580a624155SArnd Bergmann 	struct timespec64 now;
559891434b1SRayagond Kokatanur 	u64 temp = 0;
560891434b1SRayagond Kokatanur 	u32 ptp_v2 = 0;
561891434b1SRayagond Kokatanur 	u32 tstamp_all = 0;
562891434b1SRayagond Kokatanur 	u32 ptp_over_ipv4_udp = 0;
563891434b1SRayagond Kokatanur 	u32 ptp_over_ipv6_udp = 0;
564891434b1SRayagond Kokatanur 	u32 ptp_over_ethernet = 0;
565891434b1SRayagond Kokatanur 	u32 snap_type_sel = 0;
566891434b1SRayagond Kokatanur 	u32 ts_master_en = 0;
567891434b1SRayagond Kokatanur 	u32 ts_event_en = 0;
568df103170SNathan Chancellor 	u32 sec_inc = 0;
569891434b1SRayagond Kokatanur 	u32 value = 0;
5707d9e6c5aSJose Abreu 	bool xmac;
5717d9e6c5aSJose Abreu 
5727d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
573891434b1SRayagond Kokatanur 
574891434b1SRayagond Kokatanur 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
575891434b1SRayagond Kokatanur 		netdev_alert(priv->dev, "No support for HW time stamping\n");
576891434b1SRayagond Kokatanur 		priv->hwts_tx_en = 0;
577891434b1SRayagond Kokatanur 		priv->hwts_rx_en = 0;
578891434b1SRayagond Kokatanur 
579891434b1SRayagond Kokatanur 		return -EOPNOTSUPP;
580891434b1SRayagond Kokatanur 	}
581891434b1SRayagond Kokatanur 
582891434b1SRayagond Kokatanur 	if (copy_from_user(&config, ifr->ifr_data,
583d6228b7cSArtem Panfilov 			   sizeof(config)))
584891434b1SRayagond Kokatanur 		return -EFAULT;
585891434b1SRayagond Kokatanur 
58638ddc59dSLABBE Corentin 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
587891434b1SRayagond Kokatanur 		   __func__, config.flags, config.tx_type, config.rx_filter);
588891434b1SRayagond Kokatanur 
589891434b1SRayagond Kokatanur 	/* reserved for future extensions */
590891434b1SRayagond Kokatanur 	if (config.flags)
591891434b1SRayagond Kokatanur 		return -EINVAL;
592891434b1SRayagond Kokatanur 
5935f3da328SBen Hutchings 	if (config.tx_type != HWTSTAMP_TX_OFF &&
5945f3da328SBen Hutchings 	    config.tx_type != HWTSTAMP_TX_ON)
595891434b1SRayagond Kokatanur 		return -ERANGE;
596891434b1SRayagond Kokatanur 
597891434b1SRayagond Kokatanur 	if (priv->adv_ts) {
598891434b1SRayagond Kokatanur 		switch (config.rx_filter) {
599891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_NONE:
600ceb69499SGiuseppe CAVALLARO 			/* time stamp no incoming packet at all */
601891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_NONE;
602891434b1SRayagond Kokatanur 			break;
603891434b1SRayagond Kokatanur 
604891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
605ceb69499SGiuseppe CAVALLARO 			/* PTP v1, UDP, any kind of event packet */
606891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
6077d8e249fSIlias Apalodimas 			/* 'xmac' hardware can support Sync, Pdelay_Req and
6087d8e249fSIlias Apalodimas 			 * Pdelay_resp by setting bit 14 and bits 17/16 to 01.
6097d8e249fSIlias Apalodimas 			 * This leaves Delay_Req timestamps out.
6107d8e249fSIlias Apalodimas 			 * Enable all events *and* general purpose message
6117d8e249fSIlias Apalodimas 			 * timestamping
6127d8e249fSIlias Apalodimas 			 */
613891434b1SRayagond Kokatanur 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
614891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
615891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
616891434b1SRayagond Kokatanur 			break;
617891434b1SRayagond Kokatanur 
618891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
619ceb69499SGiuseppe CAVALLARO 			/* PTP v1, UDP, Sync packet */
620891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
621891434b1SRayagond Kokatanur 			/* take time stamp for SYNC messages only */
622891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
623891434b1SRayagond Kokatanur 
624891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
625891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
626891434b1SRayagond Kokatanur 			break;
627891434b1SRayagond Kokatanur 
628891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
629ceb69499SGiuseppe CAVALLARO 			/* PTP v1, UDP, Delay_req packet */
630891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
631891434b1SRayagond Kokatanur 			/* take time stamp for Delay_Req messages only */
632891434b1SRayagond Kokatanur 			ts_master_en = PTP_TCR_TSMSTRENA;
633891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
634891434b1SRayagond Kokatanur 
635891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
636891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
637891434b1SRayagond Kokatanur 			break;
638891434b1SRayagond Kokatanur 
639891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
640ceb69499SGiuseppe CAVALLARO 			/* PTP v2, UDP, any kind of event packet */
641891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
642891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
643891434b1SRayagond Kokatanur 			/* take time stamp for all event messages */
644891434b1SRayagond Kokatanur 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
645891434b1SRayagond Kokatanur 
646891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
647891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
648891434b1SRayagond Kokatanur 			break;
649891434b1SRayagond Kokatanur 
650891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
651ceb69499SGiuseppe CAVALLARO 			/* PTP v2, UDP, Sync packet */
652891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
653891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
654891434b1SRayagond Kokatanur 			/* take time stamp for SYNC messages only */
655891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
656891434b1SRayagond Kokatanur 
657891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
658891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
659891434b1SRayagond Kokatanur 			break;
660891434b1SRayagond Kokatanur 
661891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
662ceb69499SGiuseppe CAVALLARO 			/* PTP v2, UDP, Delay_req packet */
663891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
664891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
665891434b1SRayagond Kokatanur 			/* take time stamp for Delay_Req messages only */
666891434b1SRayagond Kokatanur 			ts_master_en = PTP_TCR_TSMSTRENA;
667891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
668891434b1SRayagond Kokatanur 
669891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
670891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
671891434b1SRayagond Kokatanur 			break;
672891434b1SRayagond Kokatanur 
673891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
674ceb69499SGiuseppe CAVALLARO 			/* PTP v2/802.AS1 any layer, any kind of event packet */
675891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
676891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
677891434b1SRayagond Kokatanur 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
678891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
679891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
680891434b1SRayagond Kokatanur 			ptp_over_ethernet = PTP_TCR_TSIPENA;
681891434b1SRayagond Kokatanur 			break;
682891434b1SRayagond Kokatanur 
683891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
684ceb69499SGiuseppe CAVALLARO 			/* PTP v2/802.AS1, any layer, Sync packet */
685891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
686891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
687891434b1SRayagond Kokatanur 			/* take time stamp for SYNC messages only */
688891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
689891434b1SRayagond Kokatanur 
690891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
691891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
692891434b1SRayagond Kokatanur 			ptp_over_ethernet = PTP_TCR_TSIPENA;
693891434b1SRayagond Kokatanur 			break;
694891434b1SRayagond Kokatanur 
695891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
696ceb69499SGiuseppe CAVALLARO 			/* PTP v2/802.AS1, any layer, Delay_req packet */
697891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
698891434b1SRayagond Kokatanur 			ptp_v2 = PTP_TCR_TSVER2ENA;
699891434b1SRayagond Kokatanur 			/* take time stamp for Delay_Req messages only */
700891434b1SRayagond Kokatanur 			ts_master_en = PTP_TCR_TSMSTRENA;
701891434b1SRayagond Kokatanur 			ts_event_en = PTP_TCR_TSEVNTENA;
702891434b1SRayagond Kokatanur 
703891434b1SRayagond Kokatanur 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
704891434b1SRayagond Kokatanur 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
705891434b1SRayagond Kokatanur 			ptp_over_ethernet = PTP_TCR_TSIPENA;
706891434b1SRayagond Kokatanur 			break;
707891434b1SRayagond Kokatanur 
708e3412575SMiroslav Lichvar 		case HWTSTAMP_FILTER_NTP_ALL:
709891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_ALL:
710ceb69499SGiuseppe CAVALLARO 			/* time stamp any incoming packet */
711891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_ALL;
712891434b1SRayagond Kokatanur 			tstamp_all = PTP_TCR_TSENALL;
713891434b1SRayagond Kokatanur 			break;
714891434b1SRayagond Kokatanur 
715891434b1SRayagond Kokatanur 		default:
716891434b1SRayagond Kokatanur 			return -ERANGE;
717891434b1SRayagond Kokatanur 		}
718891434b1SRayagond Kokatanur 	} else {
719891434b1SRayagond Kokatanur 		switch (config.rx_filter) {
720891434b1SRayagond Kokatanur 		case HWTSTAMP_FILTER_NONE:
721891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_NONE;
722891434b1SRayagond Kokatanur 			break;
723891434b1SRayagond Kokatanur 		default:
724891434b1SRayagond Kokatanur 			/* PTP v1, UDP, any kind of event packet */
725891434b1SRayagond Kokatanur 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
726891434b1SRayagond Kokatanur 			break;
727891434b1SRayagond Kokatanur 		}
728891434b1SRayagond Kokatanur 	}
729891434b1SRayagond Kokatanur 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
7305f3da328SBen Hutchings 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
731891434b1SRayagond Kokatanur 
732891434b1SRayagond Kokatanur 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
733cc4c9001SJose Abreu 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
734891434b1SRayagond Kokatanur 	else {
735891434b1SRayagond Kokatanur 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
736891434b1SRayagond Kokatanur 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
737891434b1SRayagond Kokatanur 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
738891434b1SRayagond Kokatanur 			 ts_master_en | snap_type_sel);
739cc4c9001SJose Abreu 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
740891434b1SRayagond Kokatanur 
741891434b1SRayagond Kokatanur 		/* program Sub Second Increment reg */
742cc4c9001SJose Abreu 		stmmac_config_sub_second_increment(priv,
743f573c0b9Sjpinto 				priv->ptpaddr, priv->plat->clk_ptp_rate,
7447d9e6c5aSJose Abreu 				xmac, &sec_inc);
74519d857c9SPhil Reid 		temp = div_u64(1000000000ULL, sec_inc);
746891434b1SRayagond Kokatanur 
7479a8a02c9SJose Abreu 		/* Store sub second increment and flags for later use */
7489a8a02c9SJose Abreu 		priv->sub_second_inc = sec_inc;
7499a8a02c9SJose Abreu 		priv->systime_flags = value;
7509a8a02c9SJose Abreu 
751891434b1SRayagond Kokatanur 		/* calculate default added value:
752891434b1SRayagond Kokatanur 		 * formula is :
753891434b1SRayagond Kokatanur 		 * addend = (2^32)/freq_div_ratio;
75419d857c9SPhil Reid 		 * where, freq_div_ratio = 1e9ns/sec_inc
755891434b1SRayagond Kokatanur 		 */
75619d857c9SPhil Reid 		temp = (u64)(temp << 32);
757f573c0b9Sjpinto 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
758cc4c9001SJose Abreu 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
759891434b1SRayagond Kokatanur 
760891434b1SRayagond Kokatanur 		/* initialize system time */
7610a624155SArnd Bergmann 		ktime_get_real_ts64(&now);
7620a624155SArnd Bergmann 
7630a624155SArnd Bergmann 		/* lower 32 bits of tv_sec are safe until y2106 */
764cc4c9001SJose Abreu 		stmmac_init_systime(priv, priv->ptpaddr,
765cc4c9001SJose Abreu 				(u32)now.tv_sec, now.tv_nsec);
766891434b1SRayagond Kokatanur 	}
767891434b1SRayagond Kokatanur 
768d6228b7cSArtem Panfilov 	memcpy(&priv->tstamp_config, &config, sizeof(config));
769d6228b7cSArtem Panfilov 
770891434b1SRayagond Kokatanur 	return copy_to_user(ifr->ifr_data, &config,
771d6228b7cSArtem Panfilov 			    sizeof(config)) ? -EFAULT : 0;
772d6228b7cSArtem Panfilov }
773d6228b7cSArtem Panfilov 
774d6228b7cSArtem Panfilov /**
775d6228b7cSArtem Panfilov  *  stmmac_hwtstamp_get - read hardware timestamping.
776d6228b7cSArtem Panfilov  *  @dev: device pointer.
777d6228b7cSArtem Panfilov  *  @ifr: An IOCTL specific structure, that can contain a pointer to
778d6228b7cSArtem Panfilov  *  a proprietary structure used to pass information to the driver.
779d6228b7cSArtem Panfilov  *  Description:
780d6228b7cSArtem Panfilov  *  This function obtains the current hardware timestamping settings
781d6228b7cSArtem Panfilov  *  as requested.
782d6228b7cSArtem Panfilov  */
783d6228b7cSArtem Panfilov static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
784d6228b7cSArtem Panfilov {
785d6228b7cSArtem Panfilov 	struct stmmac_priv *priv = netdev_priv(dev);
786d6228b7cSArtem Panfilov 	struct hwtstamp_config *config = &priv->tstamp_config;
787d6228b7cSArtem Panfilov 
788d6228b7cSArtem Panfilov 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
789d6228b7cSArtem Panfilov 		return -EOPNOTSUPP;
790d6228b7cSArtem Panfilov 
791d6228b7cSArtem Panfilov 	return copy_to_user(ifr->ifr_data, config,
792d6228b7cSArtem Panfilov 			    sizeof(*config)) ? -EFAULT : 0;
793891434b1SRayagond Kokatanur }
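/* A minimal userspace sketch (illustrative only, not part of the driver) of
 * how the two handlers above are typically reached through the standard
 * SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls on a socket fd:
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_ALL;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);	-> stmmac_hwtstamp_set()
 *	ioctl(sock_fd, SIOCGHWTSTAMP, &ifr);	-> stmmac_hwtstamp_get()
 */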
794891434b1SRayagond Kokatanur 
79532ceabcaSGiuseppe CAVALLARO /**
796732fdf0eSGiuseppe CAVALLARO  * stmmac_init_ptp - init PTP
79732ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
798732fdf0eSGiuseppe CAVALLARO  * Description: this is to verify if the HW supports PTPv1 or PTPv2.
79932ceabcaSGiuseppe CAVALLARO  * This is done by looking at the HW cap. register.
800732fdf0eSGiuseppe CAVALLARO  * This function also registers the ptp driver.
80132ceabcaSGiuseppe CAVALLARO  */
80292ba6888SRayagond Kokatanur static int stmmac_init_ptp(struct stmmac_priv *priv)
803891434b1SRayagond Kokatanur {
8047d9e6c5aSJose Abreu 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
8057d9e6c5aSJose Abreu 
80692ba6888SRayagond Kokatanur 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
80792ba6888SRayagond Kokatanur 		return -EOPNOTSUPP;
80892ba6888SRayagond Kokatanur 
809891434b1SRayagond Kokatanur 	priv->adv_ts = 0;
8107d9e6c5aSJose Abreu 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
8117d9e6c5aSJose Abreu 	if (xmac && priv->dma_cap.atime_stamp)
812be9b3174SGiuseppe CAVALLARO 		priv->adv_ts = 1;
813be9b3174SGiuseppe CAVALLARO 	/* Dwmac 3.x core with extend_desc can support adv_ts */
814be9b3174SGiuseppe CAVALLARO 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
815891434b1SRayagond Kokatanur 		priv->adv_ts = 1;
8167cd01399SVince Bridgers 
817be9b3174SGiuseppe CAVALLARO 	if (priv->dma_cap.time_stamp)
818be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
8197cd01399SVince Bridgers 
820be9b3174SGiuseppe CAVALLARO 	if (priv->adv_ts)
821be9b3174SGiuseppe CAVALLARO 		netdev_info(priv->dev,
822be9b3174SGiuseppe CAVALLARO 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
823891434b1SRayagond Kokatanur 
824891434b1SRayagond Kokatanur 	priv->hwts_tx_en = 0;
825891434b1SRayagond Kokatanur 	priv->hwts_rx_en = 0;
82692ba6888SRayagond Kokatanur 
827c30a70d3SGiuseppe CAVALLARO 	stmmac_ptp_register(priv);
828c30a70d3SGiuseppe CAVALLARO 
829c30a70d3SGiuseppe CAVALLARO 	return 0;
83092ba6888SRayagond Kokatanur }
83192ba6888SRayagond Kokatanur 
83292ba6888SRayagond Kokatanur static void stmmac_release_ptp(struct stmmac_priv *priv)
83392ba6888SRayagond Kokatanur {
834f573c0b9Sjpinto 	if (priv->plat->clk_ptp_ref)
835f573c0b9Sjpinto 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
83692ba6888SRayagond Kokatanur 	stmmac_ptp_unregister(priv);
837891434b1SRayagond Kokatanur }
838891434b1SRayagond Kokatanur 
8397ac6653aSJeff Kirsher /**
84029feff39SJoao Pinto  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
84129feff39SJoao Pinto  *  @priv: driver private structure
84229feff39SJoao Pinto  *  Description: It is used for configuring the flow control in all queues
84329feff39SJoao Pinto  */
84429feff39SJoao Pinto static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
84529feff39SJoao Pinto {
84629feff39SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
84729feff39SJoao Pinto 
848c10d4c82SJose Abreu 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
84929feff39SJoao Pinto 			priv->pause, tx_cnt);
85029feff39SJoao Pinto }
85129feff39SJoao Pinto 
852eeef2f6bSJose Abreu static void stmmac_validate(struct phylink_config *config,
853eeef2f6bSJose Abreu 			    unsigned long *supported,
854eeef2f6bSJose Abreu 			    struct phylink_link_state *state)
855eeef2f6bSJose Abreu {
856eeef2f6bSJose Abreu 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
857eeef2f6bSJose Abreu 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
858eeef2f6bSJose Abreu 	int tx_cnt = priv->plat->tx_queues_to_use;
859eeef2f6bSJose Abreu 	int max_speed = priv->plat->max_speed;
860eeef2f6bSJose Abreu 
861eeef2f6bSJose Abreu 	/* Cut down 1G if asked to */
862eeef2f6bSJose Abreu 	if ((max_speed > 0) && (max_speed < 1000)) {
863eeef2f6bSJose Abreu 		phylink_set(mask, 1000baseT_Full);
864eeef2f6bSJose Abreu 		phylink_set(mask, 1000baseX_Full);
865eeef2f6bSJose Abreu 	}
866eeef2f6bSJose Abreu 
867eeef2f6bSJose Abreu 	/* Half-Duplex can only work with single queue */
868eeef2f6bSJose Abreu 	if (tx_cnt > 1) {
869eeef2f6bSJose Abreu 		phylink_set(mask, 10baseT_Half);
870eeef2f6bSJose Abreu 		phylink_set(mask, 100baseT_Half);
871eeef2f6bSJose Abreu 		phylink_set(mask, 1000baseT_Half);
872eeef2f6bSJose Abreu 	}
873eeef2f6bSJose Abreu 
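	/* "mask" collects the link modes to be dropped: clear them from both
	 * the supported and the advertising bitmaps.
	 */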
874eeef2f6bSJose Abreu 	bitmap_andnot(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
875eeef2f6bSJose Abreu 	bitmap_andnot(state->advertising, state->advertising, mask,
876eeef2f6bSJose Abreu 		      __ETHTOOL_LINK_MODE_MASK_NBITS);
877eeef2f6bSJose Abreu }
878eeef2f6bSJose Abreu 
879eeef2f6bSJose Abreu static int stmmac_mac_link_state(struct phylink_config *config,
880eeef2f6bSJose Abreu 				 struct phylink_link_state *state)
881eeef2f6bSJose Abreu {
882eeef2f6bSJose Abreu 	return -EOPNOTSUPP;
883eeef2f6bSJose Abreu }
884eeef2f6bSJose Abreu 
8859ad372fcSJose Abreu static void stmmac_mac_config(struct net_device *dev)
8869ad372fcSJose Abreu {
8879ad372fcSJose Abreu 	struct stmmac_priv *priv = netdev_priv(dev);
8889ad372fcSJose Abreu 	struct phy_device *phydev = dev->phydev;
8899ad372fcSJose Abreu 	u32 ctrl;
8909ad372fcSJose Abreu 
8919ad372fcSJose Abreu 	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
8929ad372fcSJose Abreu 
8939ad372fcSJose Abreu 	if (phydev->speed != priv->speed) {
8949ad372fcSJose Abreu 		ctrl &= ~priv->hw->link.speed_mask;
8959ad372fcSJose Abreu 
8969ad372fcSJose Abreu 		switch (phydev->speed) {
8979ad372fcSJose Abreu 		case SPEED_1000:
8989ad372fcSJose Abreu 			ctrl |= priv->hw->link.speed1000;
8999ad372fcSJose Abreu 			break;
9009ad372fcSJose Abreu 		case SPEED_100:
9019ad372fcSJose Abreu 			ctrl |= priv->hw->link.speed100;
9029ad372fcSJose Abreu 			break;
9039ad372fcSJose Abreu 		case SPEED_10:
9049ad372fcSJose Abreu 			ctrl |= priv->hw->link.speed10;
9059ad372fcSJose Abreu 			break;
9069ad372fcSJose Abreu 		default:
9079ad372fcSJose Abreu 			netif_warn(priv, link, priv->dev,
9089ad372fcSJose Abreu 				   "broken speed: %d\n", phydev->speed);
9099ad372fcSJose Abreu 			phydev->speed = SPEED_UNKNOWN;
9109ad372fcSJose Abreu 			break;
9119ad372fcSJose Abreu 		}
9129ad372fcSJose Abreu 
9139ad372fcSJose Abreu 		if (phydev->speed != SPEED_UNKNOWN)
9149ad372fcSJose Abreu 			stmmac_hw_fix_mac_speed(priv);
9159ad372fcSJose Abreu 
9169ad372fcSJose Abreu 		priv->speed = phydev->speed;
9179ad372fcSJose Abreu 	}
9189ad372fcSJose Abreu 
9199ad372fcSJose Abreu 	/* Now we make sure that we can be in full duplex mode.
9209ad372fcSJose Abreu 	 * If not, we operate in half-duplex mode. */
9219ad372fcSJose Abreu 	if (phydev->duplex != priv->oldduplex) {
9229ad372fcSJose Abreu 		if (!phydev->duplex)
9239ad372fcSJose Abreu 			ctrl &= ~priv->hw->link.duplex;
9249ad372fcSJose Abreu 		else
9259ad372fcSJose Abreu 			ctrl |= priv->hw->link.duplex;
9269ad372fcSJose Abreu 
9279ad372fcSJose Abreu 		priv->oldduplex = phydev->duplex;
9289ad372fcSJose Abreu 	}
9299ad372fcSJose Abreu 
9309ad372fcSJose Abreu 	/* Flow Control operation */
9319ad372fcSJose Abreu 	if (phydev->pause)
9329ad372fcSJose Abreu 		stmmac_mac_flow_ctrl(priv, phydev->duplex);
9339ad372fcSJose Abreu 
9349ad372fcSJose Abreu 	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
9359ad372fcSJose Abreu }
9369ad372fcSJose Abreu 
937eeef2f6bSJose Abreu static void stmmac_mac_an_restart(struct phylink_config *config)
938eeef2f6bSJose Abreu {
939eeef2f6bSJose Abreu 	/* Not Supported */
940eeef2f6bSJose Abreu }
941eeef2f6bSJose Abreu 
9429ad372fcSJose Abreu static void stmmac_mac_link_down(struct net_device *dev, bool autoneg)
9439ad372fcSJose Abreu {
9449ad372fcSJose Abreu 	struct stmmac_priv *priv = netdev_priv(dev);
9459ad372fcSJose Abreu 
9469ad372fcSJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
9479ad372fcSJose Abreu }
9489ad372fcSJose Abreu 
9499ad372fcSJose Abreu static void stmmac_mac_link_up(struct net_device *dev, bool autoneg)
9509ad372fcSJose Abreu {
9519ad372fcSJose Abreu 	struct stmmac_priv *priv = netdev_priv(dev);
9529ad372fcSJose Abreu 
9539ad372fcSJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
9549ad372fcSJose Abreu }
9559ad372fcSJose Abreu 
956eeef2f6bSJose Abreu static const struct phylink_mac_ops __maybe_unused stmmac_phylink_mac_ops = {
957eeef2f6bSJose Abreu 	.validate = stmmac_validate,
958eeef2f6bSJose Abreu 	.mac_link_state = stmmac_mac_link_state,
959eeef2f6bSJose Abreu 	.mac_config = NULL, /* TO BE FILLED */
960eeef2f6bSJose Abreu 	.mac_an_restart = stmmac_mac_an_restart,
961eeef2f6bSJose Abreu 	.mac_link_down = NULL, /* TO BE FILLED */
962eeef2f6bSJose Abreu 	.mac_link_up = NULL, /* TO BE FILLED */
963eeef2f6bSJose Abreu };
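
/* A minimal sketch (assuming the remaining callbacks get filled in and that
 * priv->phylink/priv->phylink_config fields exist; both are assumptions, not
 * part of this file yet) of how these operations would be handed to phylink
 * from the probe path:
 *
 *	priv->phylink = phylink_create(&priv->phylink_config,
 *				       of_fwnode_handle(priv->plat->phy_node),
 *				       priv->plat->interface,
 *				       &stmmac_phylink_mac_ops);
 */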
964eeef2f6bSJose Abreu 
96529feff39SJoao Pinto /**
966732fdf0eSGiuseppe CAVALLARO  * stmmac_adjust_link - adjusts the link parameters
9677ac6653aSJeff Kirsher  * @dev: net device structure
968732fdf0eSGiuseppe CAVALLARO  * Description: this is the helper called by the physical abstraction layer
969732fdf0eSGiuseppe CAVALLARO  * drivers to communicate the phy link status. According to the speed and
970732fdf0eSGiuseppe CAVALLARO  * duplex this driver can invoke registered glue-logic as well.
971732fdf0eSGiuseppe CAVALLARO  * It also invokes the eee initialization because it could happen when
972732fdf0eSGiuseppe CAVALLARO  * switching between different networks (that are eee capable).
9737ac6653aSJeff Kirsher  */
9747ac6653aSJeff Kirsher static void stmmac_adjust_link(struct net_device *dev)
9757ac6653aSJeff Kirsher {
9767ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
977d6d50c7eSPhilippe Reynes 	struct phy_device *phydev = dev->phydev;
97899a4cca2SLABBE Corentin 	bool new_state = false;
9797ac6653aSJeff Kirsher 
980662ec2b7SLABBE Corentin 	if (!phydev)
9817ac6653aSJeff Kirsher 		return;
9827ac6653aSJeff Kirsher 
98329555fa3SThierry Reding 	mutex_lock(&priv->lock);
984d765955dSGiuseppe CAVALLARO 
9857ac6653aSJeff Kirsher 	if (phydev->link) {
9869ad372fcSJose Abreu 		stmmac_mac_config(dev);
9877ac6653aSJeff Kirsher 
9887ac6653aSJeff Kirsher 		if (!priv->oldlink) {
98999a4cca2SLABBE Corentin 			new_state = true;
9904d869b03SLABBE Corentin 			priv->oldlink = true;
9917ac6653aSJeff Kirsher 		}
9927ac6653aSJeff Kirsher 	} else if (priv->oldlink) {
99399a4cca2SLABBE Corentin 		new_state = true;
9944d869b03SLABBE Corentin 		priv->oldlink = false;
995bd00632cSLABBE Corentin 		priv->speed = SPEED_UNKNOWN;
996bd00632cSLABBE Corentin 		priv->oldduplex = DUPLEX_UNKNOWN;
9977ac6653aSJeff Kirsher 	}
9987ac6653aSJeff Kirsher 
9999ad372fcSJose Abreu 	if (phydev->link)
10009ad372fcSJose Abreu 		stmmac_mac_link_up(dev, false);
10019ad372fcSJose Abreu 	else
10029ad372fcSJose Abreu 		stmmac_mac_link_down(dev, false);
10039ad372fcSJose Abreu 
10047ac6653aSJeff Kirsher 	if (new_state && netif_msg_link(priv))
10057ac6653aSJeff Kirsher 		phy_print_status(phydev);
10067ac6653aSJeff Kirsher 
100729555fa3SThierry Reding 	mutex_unlock(&priv->lock);
10084741cf9cSGiuseppe CAVALLARO 
100952f95bbfSGiuseppe CAVALLARO 	if (phydev->is_pseudo_fixed_link)
101052f95bbfSGiuseppe CAVALLARO 		/* Stop the PHY layer from calling the hook to adjust the
101152f95bbfSGiuseppe CAVALLARO 		 * link in case a switch is attached to the stmmac driver.
101252f95bbfSGiuseppe CAVALLARO 		 */
101352f95bbfSGiuseppe CAVALLARO 		phydev->irq = PHY_IGNORE_INTERRUPT;
101452f95bbfSGiuseppe CAVALLARO 	else
101552f95bbfSGiuseppe CAVALLARO 		/* At this stage, init the EEE if supported.
101652f95bbfSGiuseppe CAVALLARO 		 * Never called in case of fixed_link.
1017f5351ef7SGiuseppe CAVALLARO 		 */
1018f5351ef7SGiuseppe CAVALLARO 		priv->eee_enabled = stmmac_eee_init(priv);
10197ac6653aSJeff Kirsher }
10207ac6653aSJeff Kirsher 
102132ceabcaSGiuseppe CAVALLARO /**
1022732fdf0eSGiuseppe CAVALLARO  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
102332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
102432ceabcaSGiuseppe CAVALLARO  * Description: this verifies whether the HW supports the Physical Coding
102532ceabcaSGiuseppe CAVALLARO  * Sublayer (PCS), an interface that can be used when the MAC is
102632ceabcaSGiuseppe CAVALLARO  * configured for the TBI, RTBI, or SGMII PHY interface.
102732ceabcaSGiuseppe CAVALLARO  */
1028e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1029e58bb43fSGiuseppe CAVALLARO {
1030e58bb43fSGiuseppe CAVALLARO 	int interface = priv->plat->interface;
1031e58bb43fSGiuseppe CAVALLARO 
1032e58bb43fSGiuseppe CAVALLARO 	if (priv->dma_cap.pcs) {
10330d909dcdSByungho An 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
10340d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
10350d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
10360d909dcdSByungho An 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
103738ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
10383fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_RGMII;
10390d909dcdSByungho An 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
104038ddc59dSLABBE Corentin 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
10413fe5cadbSGiuseppe CAVALLARO 			priv->hw->pcs = STMMAC_PCS_SGMII;
1042e58bb43fSGiuseppe CAVALLARO 		}
1043e58bb43fSGiuseppe CAVALLARO 	}
1044e58bb43fSGiuseppe CAVALLARO }
1045e58bb43fSGiuseppe CAVALLARO 
10467ac6653aSJeff Kirsher /**
10477ac6653aSJeff Kirsher  * stmmac_init_phy - PHY initialization
10487ac6653aSJeff Kirsher  * @dev: net device structure
10497ac6653aSJeff Kirsher  * Description: it initializes the driver's PHY state, and attaches the PHY
10507ac6653aSJeff Kirsher  * to the mac driver.
10517ac6653aSJeff Kirsher  *  Return value:
10527ac6653aSJeff Kirsher  *  0 on success
10537ac6653aSJeff Kirsher  */
10547ac6653aSJeff Kirsher static int stmmac_init_phy(struct net_device *dev)
10557ac6653aSJeff Kirsher {
10567ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
1057b6cfffa7SBhadram Varka 	u32 tx_cnt = priv->plat->tx_queues_to_use;
10587ac6653aSJeff Kirsher 	struct phy_device *phydev;
1059d765955dSGiuseppe CAVALLARO 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
10607ac6653aSJeff Kirsher 	char bus_id[MII_BUS_ID_SIZE];
106179ee1dc3SSrinivas Kandagatla 	int interface = priv->plat->interface;
10629cbadf09SSrinivas Kandagatla 	int max_speed = priv->plat->max_speed;
10634d869b03SLABBE Corentin 	priv->oldlink = false;
1064bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
1065bd00632cSLABBE Corentin 	priv->oldduplex = DUPLEX_UNKNOWN;
10667ac6653aSJeff Kirsher 
10675790cf3cSMathieu Olivari 	if (priv->plat->phy_node) {
10685790cf3cSMathieu Olivari 		phydev = of_phy_connect(dev, priv->plat->phy_node,
10695790cf3cSMathieu Olivari 					&stmmac_adjust_link, 0, interface);
10705790cf3cSMathieu Olivari 	} else {
1071f142af2eSSrinivas Kandagatla 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
1072f142af2eSSrinivas Kandagatla 			 priv->plat->bus_id);
1073f142af2eSSrinivas Kandagatla 
1074d765955dSGiuseppe CAVALLARO 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
10757ac6653aSJeff Kirsher 			 priv->plat->phy_addr);
1076de9a2165SLABBE Corentin 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
10775790cf3cSMathieu Olivari 			   phy_id_fmt);
10787ac6653aSJeff Kirsher 
10795790cf3cSMathieu Olivari 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
10805790cf3cSMathieu Olivari 				     interface);
10815790cf3cSMathieu Olivari 	}
10827ac6653aSJeff Kirsher 
1083dfc50fcaSAlexey Brodkin 	if (IS_ERR_OR_NULL(phydev)) {
108438ddc59dSLABBE Corentin 		netdev_err(priv->dev, "Could not attach to PHY\n");
1085dfc50fcaSAlexey Brodkin 		if (!phydev)
1086dfc50fcaSAlexey Brodkin 			return -ENODEV;
1087dfc50fcaSAlexey Brodkin 
10887ac6653aSJeff Kirsher 		return PTR_ERR(phydev);
10897ac6653aSJeff Kirsher 	}
10907ac6653aSJeff Kirsher 
109179ee1dc3SSrinivas Kandagatla 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
1092c5b9b4e4SSrinivas Kandagatla 	if ((interface == PHY_INTERFACE_MODE_MII) ||
10939cbadf09SSrinivas Kandagatla 	    (interface == PHY_INTERFACE_MODE_RMII) ||
10949cbadf09SSrinivas Kandagatla 		(max_speed < 1000 && max_speed > 0))
109558056c1eSAndrew Lunn 		phy_set_max_speed(phydev, SPEED_100);
109679ee1dc3SSrinivas Kandagatla 
10977ac6653aSJeff Kirsher 	/*
1098b6cfffa7SBhadram Varka 	 * Half-duplex mode is not supported with multiqueue:
1099b6cfffa7SBhadram Varka 	 * half-duplex can only work with a single queue.
1100b6cfffa7SBhadram Varka 	 */
110141124fa6SAndrew Lunn 	if (tx_cnt > 1) {
110241124fa6SAndrew Lunn 		phy_remove_link_mode(phydev,
110341124fa6SAndrew Lunn 				     ETHTOOL_LINK_MODE_10baseT_Half_BIT);
110441124fa6SAndrew Lunn 		phy_remove_link_mode(phydev,
110541124fa6SAndrew Lunn 				     ETHTOOL_LINK_MODE_100baseT_Half_BIT);
110641124fa6SAndrew Lunn 		phy_remove_link_mode(phydev,
110741124fa6SAndrew Lunn 				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
110841124fa6SAndrew Lunn 	}
1109b6cfffa7SBhadram Varka 
1110b6cfffa7SBhadram Varka 	/*
11117ac6653aSJeff Kirsher 	 * Broken HW is sometimes missing the pull-up resistor on the
11127ac6653aSJeff Kirsher 	 * MDIO line, which results in reads to non-existent devices returning
11137ac6653aSJeff Kirsher 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
11147ac6653aSJeff Kirsher 	 * device as well.
11157ac6653aSJeff Kirsher 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
11167ac6653aSJeff Kirsher 	 */
111727732381SMathieu Olivari 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
11187ac6653aSJeff Kirsher 		phy_disconnect(phydev);
11197ac6653aSJeff Kirsher 		return -ENODEV;
11207ac6653aSJeff Kirsher 	}
11218e99fc5fSGiuseppe Cavallaro 
1122c51e424dSFlorian Fainelli 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
1123c51e424dSFlorian Fainelli 	 * subsequent PHY polling; make sure we force a link transition if
1124c51e424dSFlorian Fainelli 	 * we have a UP/DOWN/UP transition
1125c51e424dSFlorian Fainelli 	 */
1126c51e424dSFlorian Fainelli 	if (phydev->is_pseudo_fixed_link)
1127c51e424dSFlorian Fainelli 		phydev->irq = PHY_POLL;
1128c51e424dSFlorian Fainelli 
1129b05c76a1SLABBE Corentin 	phy_attached_info(phydev);
11307ac6653aSJeff Kirsher 	return 0;
11317ac6653aSJeff Kirsher }
11327ac6653aSJeff Kirsher 
113371fedb01SJoao Pinto static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1134c24602efSGiuseppe CAVALLARO {
113554139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
113671fedb01SJoao Pinto 	void *head_rx;
113754139cf3SJoao Pinto 	u32 queue;
113854139cf3SJoao Pinto 
113954139cf3SJoao Pinto 	/* Display RX rings */
114054139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
114154139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
114254139cf3SJoao Pinto 
114354139cf3SJoao Pinto 		pr_info("\tRX Queue %u rings\n", queue);
1144d0225e7dSAlexandre TORGUE 
114571fedb01SJoao Pinto 		if (priv->extend_desc)
114654139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_erx;
114771fedb01SJoao Pinto 		else
114854139cf3SJoao Pinto 			head_rx = (void *)rx_q->dma_rx;
114971fedb01SJoao Pinto 
115071fedb01SJoao Pinto 		/* Display RX ring */
115142de047dSJose Abreu 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
11525bacd778SLABBE Corentin 	}
115354139cf3SJoao Pinto }
1154d0225e7dSAlexandre TORGUE 
115571fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv)
115671fedb01SJoao Pinto {
1157ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
115871fedb01SJoao Pinto 	void *head_tx;
1159ce736788SJoao Pinto 	u32 queue;
1160ce736788SJoao Pinto 
1161ce736788SJoao Pinto 	/* Display TX rings */
1162ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
1163ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1164ce736788SJoao Pinto 
1165ce736788SJoao Pinto 		pr_info("\tTX Queue %u rings\n", queue);
116671fedb01SJoao Pinto 
116771fedb01SJoao Pinto 		if (priv->extend_desc)
1168ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_etx;
116971fedb01SJoao Pinto 		else
1170ce736788SJoao Pinto 			head_tx = (void *)tx_q->dma_tx;
117171fedb01SJoao Pinto 
117242de047dSJose Abreu 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1173c24602efSGiuseppe CAVALLARO 	}
1174ce736788SJoao Pinto }
1175c24602efSGiuseppe CAVALLARO 
117671fedb01SJoao Pinto static void stmmac_display_rings(struct stmmac_priv *priv)
117771fedb01SJoao Pinto {
117871fedb01SJoao Pinto 	/* Display RX ring */
117971fedb01SJoao Pinto 	stmmac_display_rx_rings(priv);
118071fedb01SJoao Pinto 
118171fedb01SJoao Pinto 	/* Display TX ring */
118271fedb01SJoao Pinto 	stmmac_display_tx_rings(priv);
118371fedb01SJoao Pinto }
118471fedb01SJoao Pinto 
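/**
 * stmmac_set_bfsize - select the RX DMA buffer size for a given MTU
 * @mtu: MTU of the net device
 * @bufsize: current buffer size (the MTU-based choice below takes precedence)
 * Description: returns 8 KiB for MTUs of 4 KiB and above, 4 KiB for MTUs of
 * 2 KiB and above, 2 KiB for MTUs larger than the default buffer, and
 * DEFAULT_BUFSIZE otherwise.
 */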
1185286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize)
1186286a8372SGiuseppe CAVALLARO {
1187286a8372SGiuseppe CAVALLARO 	int ret = bufsize;
1188286a8372SGiuseppe CAVALLARO 
1189286a8372SGiuseppe CAVALLARO 	if (mtu >= BUF_SIZE_4KiB)
1190286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_8KiB;
1191286a8372SGiuseppe CAVALLARO 	else if (mtu >= BUF_SIZE_2KiB)
1192286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_4KiB;
1193d916701cSGiuseppe CAVALLARO 	else if (mtu > DEFAULT_BUFSIZE)
1194286a8372SGiuseppe CAVALLARO 		ret = BUF_SIZE_2KiB;
1195286a8372SGiuseppe CAVALLARO 	else
1196d916701cSGiuseppe CAVALLARO 		ret = DEFAULT_BUFSIZE;
1197286a8372SGiuseppe CAVALLARO 
1198286a8372SGiuseppe CAVALLARO 	return ret;
1199286a8372SGiuseppe CAVALLARO }
1200286a8372SGiuseppe CAVALLARO 
120132ceabcaSGiuseppe CAVALLARO /**
120271fedb01SJoao Pinto  * stmmac_clear_rx_descriptors - clear RX descriptors
120332ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
120454139cf3SJoao Pinto  * @queue: RX queue index
120571fedb01SJoao Pinto  * Description: this function is called to clear the RX descriptors
120632ceabcaSGiuseppe CAVALLARO  * whether basic or extended descriptors are used.
120732ceabcaSGiuseppe CAVALLARO  */
120854139cf3SJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1209c24602efSGiuseppe CAVALLARO {
121054139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
12115bacd778SLABBE Corentin 	int i;
1212c24602efSGiuseppe CAVALLARO 
121371fedb01SJoao Pinto 	/* Clear the RX descriptors */
12145bacd778SLABBE Corentin 	for (i = 0; i < DMA_RX_SIZE; i++)
12155bacd778SLABBE Corentin 		if (priv->extend_desc)
121642de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
12175bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1218583e6361SAaro Koskinen 					(i == DMA_RX_SIZE - 1),
1219583e6361SAaro Koskinen 					priv->dma_buf_sz);
12205bacd778SLABBE Corentin 		else
122142de047dSJose Abreu 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
12225bacd778SLABBE Corentin 					priv->use_riwt, priv->mode,
1223583e6361SAaro Koskinen 					(i == DMA_RX_SIZE - 1),
1224583e6361SAaro Koskinen 					priv->dma_buf_sz);
122571fedb01SJoao Pinto }
122671fedb01SJoao Pinto 
122771fedb01SJoao Pinto /**
122871fedb01SJoao Pinto  * stmmac_clear_tx_descriptors - clear tx descriptors
122971fedb01SJoao Pinto  * @priv: driver private structure
1230ce736788SJoao Pinto  * @queue: TX queue index.
123171fedb01SJoao Pinto  * Description: this function is called to clear the TX descriptors
123271fedb01SJoao Pinto  * whether basic or extended descriptors are used.
123371fedb01SJoao Pinto  */
1234ce736788SJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
123571fedb01SJoao Pinto {
1236ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
123771fedb01SJoao Pinto 	int i;
123871fedb01SJoao Pinto 
123971fedb01SJoao Pinto 	/* Clear the TX descriptors */
12405bacd778SLABBE Corentin 	for (i = 0; i < DMA_TX_SIZE; i++)
12415bacd778SLABBE Corentin 		if (priv->extend_desc)
124242de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
124342de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
12445bacd778SLABBE Corentin 		else
124542de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
124642de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
1247c24602efSGiuseppe CAVALLARO }
1248c24602efSGiuseppe CAVALLARO 
1249732fdf0eSGiuseppe CAVALLARO /**
125071fedb01SJoao Pinto  * stmmac_clear_descriptors - clear descriptors
125171fedb01SJoao Pinto  * @priv: driver private structure
125271fedb01SJoao Pinto  * Description: this function is called to clear the TX and RX descriptors
125371fedb01SJoao Pinto  * whether basic or extended descriptors are used.
125471fedb01SJoao Pinto  */
125571fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv)
125671fedb01SJoao Pinto {
125754139cf3SJoao Pinto 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1258ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
125954139cf3SJoao Pinto 	u32 queue;
126054139cf3SJoao Pinto 
126171fedb01SJoao Pinto 	/* Clear the RX descriptors */
126254139cf3SJoao Pinto 	for (queue = 0; queue < rx_queue_cnt; queue++)
126354139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
126471fedb01SJoao Pinto 
126571fedb01SJoao Pinto 	/* Clear the TX descriptors */
1266ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++)
1267ce736788SJoao Pinto 		stmmac_clear_tx_descriptors(priv, queue);
126871fedb01SJoao Pinto }
126971fedb01SJoao Pinto 
127071fedb01SJoao Pinto /**
1271732fdf0eSGiuseppe CAVALLARO  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1272732fdf0eSGiuseppe CAVALLARO  * @priv: driver private structure
1273732fdf0eSGiuseppe CAVALLARO  * @p: descriptor pointer
1274732fdf0eSGiuseppe CAVALLARO  * @i: descriptor index
127554139cf3SJoao Pinto  * @flags: gfp flag
127654139cf3SJoao Pinto  * @queue: RX queue index
1277732fdf0eSGiuseppe CAVALLARO  * Description: this function is called to allocate a receive buffer, perform
1278732fdf0eSGiuseppe CAVALLARO  * the DMA mapping and init the descriptor.
1279732fdf0eSGiuseppe CAVALLARO  */
1280c24602efSGiuseppe CAVALLARO static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
128154139cf3SJoao Pinto 				  int i, gfp_t flags, u32 queue)
1282c24602efSGiuseppe CAVALLARO {
128354139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1284c24602efSGiuseppe CAVALLARO 	struct sk_buff *skb;
1285c24602efSGiuseppe CAVALLARO 
12864ec49a37SVineet Gupta 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
128756329137SBartlomiej Zolnierkiewicz 	if (!skb) {
128838ddc59dSLABBE Corentin 		netdev_err(priv->dev,
128938ddc59dSLABBE Corentin 			   "%s: Rx init fails; skb is NULL\n", __func__);
129056329137SBartlomiej Zolnierkiewicz 		return -ENOMEM;
1291c24602efSGiuseppe CAVALLARO 	}
129254139cf3SJoao Pinto 	rx_q->rx_skbuff[i] = skb;
129354139cf3SJoao Pinto 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1294c24602efSGiuseppe CAVALLARO 						priv->dma_buf_sz,
1295c24602efSGiuseppe CAVALLARO 						DMA_FROM_DEVICE);
129654139cf3SJoao Pinto 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
129738ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
129856329137SBartlomiej Zolnierkiewicz 		dev_kfree_skb_any(skb);
129956329137SBartlomiej Zolnierkiewicz 		return -EINVAL;
130056329137SBartlomiej Zolnierkiewicz 	}
1301c24602efSGiuseppe CAVALLARO 
13026844171dSJose Abreu 	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1303c24602efSGiuseppe CAVALLARO 
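	/* With 16 KiB buffers the frame spans two buffer pointers in ring
	 * mode, so let the mode-specific hook program the second descriptor
	 * address (des3) as well.
	 */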
13042c520b1cSJose Abreu 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
13052c520b1cSJose Abreu 		stmmac_init_desc3(priv, p);
1306c24602efSGiuseppe CAVALLARO 
1307c24602efSGiuseppe CAVALLARO 	return 0;
1308c24602efSGiuseppe CAVALLARO }
1309c24602efSGiuseppe CAVALLARO 
131071fedb01SJoao Pinto /**
131171fedb01SJoao Pinto  * stmmac_free_rx_buffer - free an RX dma buffer
131271fedb01SJoao Pinto  * @priv: private structure
131354139cf3SJoao Pinto  * @queue: RX queue index
131471fedb01SJoao Pinto  * @i: buffer index.
131571fedb01SJoao Pinto  */
131654139cf3SJoao Pinto static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
131756329137SBartlomiej Zolnierkiewicz {
131854139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
131954139cf3SJoao Pinto 
132054139cf3SJoao Pinto 	if (rx_q->rx_skbuff[i]) {
132154139cf3SJoao Pinto 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
132256329137SBartlomiej Zolnierkiewicz 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
132354139cf3SJoao Pinto 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
132456329137SBartlomiej Zolnierkiewicz 	}
132554139cf3SJoao Pinto 	rx_q->rx_skbuff[i] = NULL;
132656329137SBartlomiej Zolnierkiewicz }
132756329137SBartlomiej Zolnierkiewicz 
13287ac6653aSJeff Kirsher /**
132971fedb01SJoao Pinto  * stmmac_free_tx_buffer - free a TX dma buffer
133071fedb01SJoao Pinto  * @priv: private structure
1331ce736788SJoao Pinto  * @queue: TX queue index
133271fedb01SJoao Pinto  * @i: buffer index.
133371fedb01SJoao Pinto  */
1334ce736788SJoao Pinto static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
133571fedb01SJoao Pinto {
1336ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1337ce736788SJoao Pinto 
1338ce736788SJoao Pinto 	if (tx_q->tx_skbuff_dma[i].buf) {
1339ce736788SJoao Pinto 		if (tx_q->tx_skbuff_dma[i].map_as_page)
134071fedb01SJoao Pinto 			dma_unmap_page(priv->device,
1341ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].buf,
1342ce736788SJoao Pinto 				       tx_q->tx_skbuff_dma[i].len,
134371fedb01SJoao Pinto 				       DMA_TO_DEVICE);
134471fedb01SJoao Pinto 		else
134571fedb01SJoao Pinto 			dma_unmap_single(priv->device,
1346ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].buf,
1347ce736788SJoao Pinto 					 tx_q->tx_skbuff_dma[i].len,
134871fedb01SJoao Pinto 					 DMA_TO_DEVICE);
134971fedb01SJoao Pinto 	}
135071fedb01SJoao Pinto 
1351ce736788SJoao Pinto 	if (tx_q->tx_skbuff[i]) {
1352ce736788SJoao Pinto 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1353ce736788SJoao Pinto 		tx_q->tx_skbuff[i] = NULL;
1354ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].buf = 0;
1355ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[i].map_as_page = false;
135671fedb01SJoao Pinto 	}
135771fedb01SJoao Pinto }
135871fedb01SJoao Pinto 
135971fedb01SJoao Pinto /**
136071fedb01SJoao Pinto  * init_dma_rx_desc_rings - init the RX descriptor rings
13617ac6653aSJeff Kirsher  * @dev: net device structure
13625bacd778SLABBE Corentin  * @flags: gfp flag.
136371fedb01SJoao Pinto  * Description: this function initializes the DMA RX descriptors
13645bacd778SLABBE Corentin  * and allocates the socket buffers. It supports the chained and ring
1365286a8372SGiuseppe CAVALLARO  * modes.
13667ac6653aSJeff Kirsher  */
136771fedb01SJoao Pinto static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
13687ac6653aSJeff Kirsher {
13697ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
137054139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
13715bacd778SLABBE Corentin 	int ret = -ENOMEM;
13722c520b1cSJose Abreu 	int bfsize = 0;
13731d3028f4SColin Ian King 	int queue;
137454139cf3SJoao Pinto 	int i;
13757ac6653aSJeff Kirsher 
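	/* Let the mode-specific callback decide first whether 16 KiB buffers
	 * are needed for this MTU; otherwise fall back to the generic
	 * MTU-based buffer sizing.
	 */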
13762c520b1cSJose Abreu 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
13772c520b1cSJose Abreu 	if (bfsize < 0)
13782c520b1cSJose Abreu 		bfsize = 0;
13795bacd778SLABBE Corentin 
13805bacd778SLABBE Corentin 	if (bfsize < BUF_SIZE_16KiB)
13815bacd778SLABBE Corentin 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
13825bacd778SLABBE Corentin 
13835bacd778SLABBE Corentin 	priv->dma_buf_sz = bfsize;
13842618abb7SVince Bridgers 
138554139cf3SJoao Pinto 	/* RX INITIALIZATION */
13865bacd778SLABBE Corentin 	netif_dbg(priv, probe, priv->dev,
13875bacd778SLABBE Corentin 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
13885bacd778SLABBE Corentin 
138954139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
139054139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
139154139cf3SJoao Pinto 
139254139cf3SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
139354139cf3SJoao Pinto 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
139454139cf3SJoao Pinto 			  (u32)rx_q->dma_rx_phy);
139554139cf3SJoao Pinto 
13965bacd778SLABBE Corentin 		for (i = 0; i < DMA_RX_SIZE; i++) {
13975bacd778SLABBE Corentin 			struct dma_desc *p;
13985bacd778SLABBE Corentin 
139954139cf3SJoao Pinto 			if (priv->extend_desc)
140054139cf3SJoao Pinto 				p = &((rx_q->dma_erx + i)->basic);
140154139cf3SJoao Pinto 			else
140254139cf3SJoao Pinto 				p = rx_q->dma_rx + i;
140354139cf3SJoao Pinto 
140454139cf3SJoao Pinto 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
140554139cf3SJoao Pinto 						     queue);
14065bacd778SLABBE Corentin 			if (ret)
14075bacd778SLABBE Corentin 				goto err_init_rx_buffers;
14085bacd778SLABBE Corentin 
14095bacd778SLABBE Corentin 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
141054139cf3SJoao Pinto 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
141154139cf3SJoao Pinto 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
14125bacd778SLABBE Corentin 		}
141354139cf3SJoao Pinto 
141454139cf3SJoao Pinto 		rx_q->cur_rx = 0;
141554139cf3SJoao Pinto 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
141654139cf3SJoao Pinto 
141754139cf3SJoao Pinto 		stmmac_clear_rx_descriptors(priv, queue);
14187ac6653aSJeff Kirsher 
1419c24602efSGiuseppe CAVALLARO 		/* Setup the chained descriptor addresses */
1420c24602efSGiuseppe CAVALLARO 		if (priv->mode == STMMAC_CHAIN_MODE) {
142171fedb01SJoao Pinto 			if (priv->extend_desc)
14222c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_erx,
14232c520b1cSJose Abreu 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
142471fedb01SJoao Pinto 			else
14252c520b1cSJose Abreu 				stmmac_mode_init(priv, rx_q->dma_rx,
14262c520b1cSJose Abreu 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
142771fedb01SJoao Pinto 		}
142854139cf3SJoao Pinto 	}
142954139cf3SJoao Pinto 
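	/* Publish the size actually programmed into the descriptors through
	 * the module-level buf_sz variable.
	 */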
143054139cf3SJoao Pinto 	buf_sz = bfsize;
143171fedb01SJoao Pinto 
143271fedb01SJoao Pinto 	return 0;
143354139cf3SJoao Pinto 
143471fedb01SJoao Pinto err_init_rx_buffers:
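	/* Unwind the allocations done so far: free the buffers of the queue
	 * that failed part-way, then those of every queue initialized before
	 * it.
	 */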
143554139cf3SJoao Pinto 	while (queue >= 0) {
143671fedb01SJoao Pinto 		while (--i >= 0)
143754139cf3SJoao Pinto 			stmmac_free_rx_buffer(priv, queue, i);
143854139cf3SJoao Pinto 
143954139cf3SJoao Pinto 		if (queue == 0)
144054139cf3SJoao Pinto 			break;
144154139cf3SJoao Pinto 
144254139cf3SJoao Pinto 		i = DMA_RX_SIZE;
144354139cf3SJoao Pinto 		queue--;
144454139cf3SJoao Pinto 	}
144554139cf3SJoao Pinto 
144671fedb01SJoao Pinto 	return ret;
144771fedb01SJoao Pinto }
144871fedb01SJoao Pinto 
144971fedb01SJoao Pinto /**
145071fedb01SJoao Pinto  * init_dma_tx_desc_rings - init the TX descriptor rings
145171fedb01SJoao Pinto  * @dev: net device structure.
145271fedb01SJoao Pinto  * Description: this function initializes the DMA TX descriptors
145371fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
145471fedb01SJoao Pinto  * modes.
145571fedb01SJoao Pinto  */
145671fedb01SJoao Pinto static int init_dma_tx_desc_rings(struct net_device *dev)
145771fedb01SJoao Pinto {
145871fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
1459ce736788SJoao Pinto 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1460ce736788SJoao Pinto 	u32 queue;
146171fedb01SJoao Pinto 	int i;
146271fedb01SJoao Pinto 
1463ce736788SJoao Pinto 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1464ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1465ce736788SJoao Pinto 
146671fedb01SJoao Pinto 		netif_dbg(priv, probe, priv->dev,
1467ce736788SJoao Pinto 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1468ce736788SJoao Pinto 			 (u32)tx_q->dma_tx_phy);
146971fedb01SJoao Pinto 
147071fedb01SJoao Pinto 		/* Setup the chained descriptor addresses */
147171fedb01SJoao Pinto 		if (priv->mode == STMMAC_CHAIN_MODE) {
147271fedb01SJoao Pinto 			if (priv->extend_desc)
14732c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_etx,
14742c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
147571fedb01SJoao Pinto 			else
14762c520b1cSJose Abreu 				stmmac_mode_init(priv, tx_q->dma_tx,
14772c520b1cSJose Abreu 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1478c24602efSGiuseppe CAVALLARO 		}
1479286a8372SGiuseppe CAVALLARO 
1480e3ad57c9SGiuseppe Cavallaro 		for (i = 0; i < DMA_TX_SIZE; i++) {
1481c24602efSGiuseppe CAVALLARO 			struct dma_desc *p;
1482c24602efSGiuseppe CAVALLARO 			if (priv->extend_desc)
1483ce736788SJoao Pinto 				p = &((tx_q->dma_etx + i)->basic);
1484c24602efSGiuseppe CAVALLARO 			else
1485ce736788SJoao Pinto 				p = tx_q->dma_tx + i;
1486f748be53SAlexandre TORGUE 
148744c67f85SJose Abreu 			stmmac_clear_desc(priv, p);
1488f748be53SAlexandre TORGUE 
1489ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].buf = 0;
1490ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1491ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].len = 0;
1492ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[i].last_segment = false;
1493ce736788SJoao Pinto 			tx_q->tx_skbuff[i] = NULL;
14944a7d666aSGiuseppe CAVALLARO 		}
1495c24602efSGiuseppe CAVALLARO 
1496ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
1497ce736788SJoao Pinto 		tx_q->cur_tx = 0;
14988d212a9eSNiklas Cassel 		tx_q->mss = 0;
1499ce736788SJoao Pinto 
1500c22a3f48SJoao Pinto 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1501c22a3f48SJoao Pinto 	}
15027ac6653aSJeff Kirsher 
150371fedb01SJoao Pinto 	return 0;
150471fedb01SJoao Pinto }
150571fedb01SJoao Pinto 
150671fedb01SJoao Pinto /**
150771fedb01SJoao Pinto  * init_dma_desc_rings - init the RX/TX descriptor rings
150871fedb01SJoao Pinto  * @dev: net device structure
150971fedb01SJoao Pinto  * @flags: gfp flag.
151071fedb01SJoao Pinto  * Description: this function initializes the DMA RX/TX descriptors
151171fedb01SJoao Pinto  * and allocates the socket buffers. It supports the chained and ring
151271fedb01SJoao Pinto  * modes.
151371fedb01SJoao Pinto  */
151471fedb01SJoao Pinto static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
151571fedb01SJoao Pinto {
151671fedb01SJoao Pinto 	struct stmmac_priv *priv = netdev_priv(dev);
151771fedb01SJoao Pinto 	int ret;
151871fedb01SJoao Pinto 
151971fedb01SJoao Pinto 	ret = init_dma_rx_desc_rings(dev, flags);
152071fedb01SJoao Pinto 	if (ret)
152171fedb01SJoao Pinto 		return ret;
152271fedb01SJoao Pinto 
152371fedb01SJoao Pinto 	ret = init_dma_tx_desc_rings(dev);
152471fedb01SJoao Pinto 
15255bacd778SLABBE Corentin 	stmmac_clear_descriptors(priv);
15267ac6653aSJeff Kirsher 
1527c24602efSGiuseppe CAVALLARO 	if (netif_msg_hw(priv))
1528c24602efSGiuseppe CAVALLARO 		stmmac_display_rings(priv);
152956329137SBartlomiej Zolnierkiewicz 
153056329137SBartlomiej Zolnierkiewicz 	return ret;
15317ac6653aSJeff Kirsher }
15327ac6653aSJeff Kirsher 
153371fedb01SJoao Pinto /**
153471fedb01SJoao Pinto  * dma_free_rx_skbufs - free RX dma buffers
153571fedb01SJoao Pinto  * @priv: private structure
153654139cf3SJoao Pinto  * @queue: RX queue index
153771fedb01SJoao Pinto  */
153854139cf3SJoao Pinto static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
15397ac6653aSJeff Kirsher {
15407ac6653aSJeff Kirsher 	int i;
15417ac6653aSJeff Kirsher 
1542e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_RX_SIZE; i++)
154354139cf3SJoao Pinto 		stmmac_free_rx_buffer(priv, queue, i);
15447ac6653aSJeff Kirsher }
15457ac6653aSJeff Kirsher 
154671fedb01SJoao Pinto /**
154771fedb01SJoao Pinto  * dma_free_tx_skbufs - free TX dma buffers
154871fedb01SJoao Pinto  * @priv: private structure
1549ce736788SJoao Pinto  * @queue: TX queue index
155071fedb01SJoao Pinto  */
1551ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
15527ac6653aSJeff Kirsher {
15537ac6653aSJeff Kirsher 	int i;
15547ac6653aSJeff Kirsher 
155571fedb01SJoao Pinto 	for (i = 0; i < DMA_TX_SIZE; i++)
1556ce736788SJoao Pinto 		stmmac_free_tx_buffer(priv, queue, i);
15577ac6653aSJeff Kirsher }
15587ac6653aSJeff Kirsher 
1559732fdf0eSGiuseppe CAVALLARO /**
156054139cf3SJoao Pinto  * free_dma_rx_desc_resources - free RX dma desc resources
156154139cf3SJoao Pinto  * @priv: private structure
156254139cf3SJoao Pinto  */
156354139cf3SJoao Pinto static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
156454139cf3SJoao Pinto {
156554139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
156654139cf3SJoao Pinto 	u32 queue;
156754139cf3SJoao Pinto 
156854139cf3SJoao Pinto 	/* Free RX queue resources */
156954139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
157054139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
157154139cf3SJoao Pinto 
157254139cf3SJoao Pinto 		/* Release the DMA RX socket buffers */
157354139cf3SJoao Pinto 		dma_free_rx_skbufs(priv, queue);
157454139cf3SJoao Pinto 
157554139cf3SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
157654139cf3SJoao Pinto 		if (!priv->extend_desc)
157754139cf3SJoao Pinto 			dma_free_coherent(priv->device,
157854139cf3SJoao Pinto 					  DMA_RX_SIZE * sizeof(struct dma_desc),
157954139cf3SJoao Pinto 					  rx_q->dma_rx, rx_q->dma_rx_phy);
158054139cf3SJoao Pinto 		else
158154139cf3SJoao Pinto 			dma_free_coherent(priv->device, DMA_RX_SIZE *
158254139cf3SJoao Pinto 					  sizeof(struct dma_extended_desc),
158354139cf3SJoao Pinto 					  rx_q->dma_erx, rx_q->dma_rx_phy);
158454139cf3SJoao Pinto 
158554139cf3SJoao Pinto 		kfree(rx_q->rx_skbuff_dma);
158654139cf3SJoao Pinto 		kfree(rx_q->rx_skbuff);
158754139cf3SJoao Pinto 	}
158854139cf3SJoao Pinto }
158954139cf3SJoao Pinto 
159054139cf3SJoao Pinto /**
1591ce736788SJoao Pinto  * free_dma_tx_desc_resources - free TX dma desc resources
1592ce736788SJoao Pinto  * @priv: private structure
1593ce736788SJoao Pinto  */
1594ce736788SJoao Pinto static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1595ce736788SJoao Pinto {
1596ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
159762242260SChristophe Jaillet 	u32 queue;
1598ce736788SJoao Pinto 
1599ce736788SJoao Pinto 	/* Free TX queue resources */
1600ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1601ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1602ce736788SJoao Pinto 
1603ce736788SJoao Pinto 		/* Release the DMA TX socket buffers */
1604ce736788SJoao Pinto 		dma_free_tx_skbufs(priv, queue);
1605ce736788SJoao Pinto 
1606ce736788SJoao Pinto 		/* Free DMA regions of consistent memory previously allocated */
1607ce736788SJoao Pinto 		if (!priv->extend_desc)
1608ce736788SJoao Pinto 			dma_free_coherent(priv->device,
1609ce736788SJoao Pinto 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1610ce736788SJoao Pinto 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1611ce736788SJoao Pinto 		else
1612ce736788SJoao Pinto 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1613ce736788SJoao Pinto 					  sizeof(struct dma_extended_desc),
1614ce736788SJoao Pinto 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1615ce736788SJoao Pinto 
1616ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff_dma);
1617ce736788SJoao Pinto 		kfree(tx_q->tx_skbuff);
1618ce736788SJoao Pinto 	}
1619ce736788SJoao Pinto }
1620ce736788SJoao Pinto 
1621ce736788SJoao Pinto /**
162271fedb01SJoao Pinto  * alloc_dma_rx_desc_resources - alloc RX resources.
1623732fdf0eSGiuseppe CAVALLARO  * @priv: private structure
1624732fdf0eSGiuseppe CAVALLARO  * Description: according to which descriptor can be used (extended or basic)
1625732fdf0eSGiuseppe CAVALLARO  * this function allocates the resources for the RX path. It pre-allocates
1626732fdf0eSGiuseppe CAVALLARO  * the RX socket buffers in order to allow the zero-copy mechanism.
1628732fdf0eSGiuseppe CAVALLARO  */
162971fedb01SJoao Pinto static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
163009f8d696SSrinivas Kandagatla {
163154139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
16325bacd778SLABBE Corentin 	int ret = -ENOMEM;
163354139cf3SJoao Pinto 	u32 queue;
163409f8d696SSrinivas Kandagatla 
163554139cf3SJoao Pinto 	/* RX queues buffers and DMA */
163654139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
163754139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
163854139cf3SJoao Pinto 
163954139cf3SJoao Pinto 		rx_q->queue_index = queue;
164054139cf3SJoao Pinto 		rx_q->priv_data = priv;
164154139cf3SJoao Pinto 
164254139cf3SJoao Pinto 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
164354139cf3SJoao Pinto 						    sizeof(dma_addr_t),
16445bacd778SLABBE Corentin 						    GFP_KERNEL);
164554139cf3SJoao Pinto 		if (!rx_q->rx_skbuff_dma)
164663c3aa6bSChristophe Jaillet 			goto err_dma;
16475bacd778SLABBE Corentin 
164854139cf3SJoao Pinto 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
164954139cf3SJoao Pinto 						sizeof(struct sk_buff *),
16505bacd778SLABBE Corentin 						GFP_KERNEL);
165154139cf3SJoao Pinto 		if (!rx_q->rx_skbuff)
165254139cf3SJoao Pinto 			goto err_dma;
16535bacd778SLABBE Corentin 
16545bacd778SLABBE Corentin 		if (priv->extend_desc) {
1655750afb08SLuis Chamberlain 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1656750afb08SLuis Chamberlain 							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
165754139cf3SJoao Pinto 							   &rx_q->dma_rx_phy,
16585bacd778SLABBE Corentin 							   GFP_KERNEL);
165954139cf3SJoao Pinto 			if (!rx_q->dma_erx)
16605bacd778SLABBE Corentin 				goto err_dma;
16615bacd778SLABBE Corentin 
166271fedb01SJoao Pinto 		} else {
1663750afb08SLuis Chamberlain 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1664750afb08SLuis Chamberlain 							  DMA_RX_SIZE * sizeof(struct dma_desc),
166554139cf3SJoao Pinto 							  &rx_q->dma_rx_phy,
166671fedb01SJoao Pinto 							  GFP_KERNEL);
166754139cf3SJoao Pinto 			if (!rx_q->dma_rx)
166871fedb01SJoao Pinto 				goto err_dma;
166971fedb01SJoao Pinto 		}
167054139cf3SJoao Pinto 	}
167171fedb01SJoao Pinto 
167271fedb01SJoao Pinto 	return 0;
167371fedb01SJoao Pinto 
167471fedb01SJoao Pinto err_dma:
167554139cf3SJoao Pinto 	free_dma_rx_desc_resources(priv);
167654139cf3SJoao Pinto 
167771fedb01SJoao Pinto 	return ret;
167871fedb01SJoao Pinto }
167971fedb01SJoao Pinto 
168071fedb01SJoao Pinto /**
168171fedb01SJoao Pinto  * alloc_dma_tx_desc_resources - alloc TX resources.
168271fedb01SJoao Pinto  * @priv: private structure
168371fedb01SJoao Pinto  * Description: according to which descriptor can be used (extended or basic)
168471fedb01SJoao Pinto  * this function allocates the resources for the TX path: the TX descriptor
168571fedb01SJoao Pinto  * rings and the per-descriptor arrays used to track the transmitted socket
168671fedb01SJoao Pinto  * buffers and their DMA mappings.
168771fedb01SJoao Pinto  */
168871fedb01SJoao Pinto static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
168971fedb01SJoao Pinto {
1690ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
169171fedb01SJoao Pinto 	int ret = -ENOMEM;
1692ce736788SJoao Pinto 	u32 queue;
169371fedb01SJoao Pinto 
1694ce736788SJoao Pinto 	/* TX queues buffers and DMA */
1695ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
1696ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1697ce736788SJoao Pinto 
1698ce736788SJoao Pinto 		tx_q->queue_index = queue;
1699ce736788SJoao Pinto 		tx_q->priv_data = priv;
1700ce736788SJoao Pinto 
1701ce736788SJoao Pinto 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1702ce736788SJoao Pinto 						    sizeof(*tx_q->tx_skbuff_dma),
170371fedb01SJoao Pinto 						    GFP_KERNEL);
1704ce736788SJoao Pinto 		if (!tx_q->tx_skbuff_dma)
170562242260SChristophe Jaillet 			goto err_dma;
170671fedb01SJoao Pinto 
1707ce736788SJoao Pinto 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1708ce736788SJoao Pinto 						sizeof(struct sk_buff *),
170971fedb01SJoao Pinto 						GFP_KERNEL);
1710ce736788SJoao Pinto 		if (!tx_q->tx_skbuff)
171162242260SChristophe Jaillet 			goto err_dma;
171271fedb01SJoao Pinto 
171371fedb01SJoao Pinto 		if (priv->extend_desc) {
1714750afb08SLuis Chamberlain 			tx_q->dma_etx = dma_alloc_coherent(priv->device,
1715750afb08SLuis Chamberlain 							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1716ce736788SJoao Pinto 							   &tx_q->dma_tx_phy,
17175bacd778SLABBE Corentin 							   GFP_KERNEL);
1718ce736788SJoao Pinto 			if (!tx_q->dma_etx)
171962242260SChristophe Jaillet 				goto err_dma;
17205bacd778SLABBE Corentin 		} else {
1721750afb08SLuis Chamberlain 			tx_q->dma_tx = dma_alloc_coherent(priv->device,
1722750afb08SLuis Chamberlain 							  DMA_TX_SIZE * sizeof(struct dma_desc),
1723ce736788SJoao Pinto 							  &tx_q->dma_tx_phy,
17245bacd778SLABBE Corentin 							  GFP_KERNEL);
1725ce736788SJoao Pinto 			if (!tx_q->dma_tx)
172662242260SChristophe Jaillet 				goto err_dma;
1727ce736788SJoao Pinto 		}
17285bacd778SLABBE Corentin 	}
17295bacd778SLABBE Corentin 
17305bacd778SLABBE Corentin 	return 0;
17315bacd778SLABBE Corentin 
173262242260SChristophe Jaillet err_dma:
1733ce736788SJoao Pinto 	free_dma_tx_desc_resources(priv);
1734ce736788SJoao Pinto 
173509f8d696SSrinivas Kandagatla 	return ret;
17365bacd778SLABBE Corentin }
173709f8d696SSrinivas Kandagatla 
173871fedb01SJoao Pinto /**
173971fedb01SJoao Pinto  * alloc_dma_desc_resources - alloc TX/RX resources.
174071fedb01SJoao Pinto  * @priv: private structure
174171fedb01SJoao Pinto  * Description: according to which descriptor can be used (extended or basic)
174271fedb01SJoao Pinto  * this function allocates the resources for the TX and RX paths. In case of
174371fedb01SJoao Pinto  * reception, for example, it pre-allocates the RX socket buffers in order to
174471fedb01SJoao Pinto  * allow the zero-copy mechanism.
174571fedb01SJoao Pinto  */
174671fedb01SJoao Pinto static int alloc_dma_desc_resources(struct stmmac_priv *priv)
17475bacd778SLABBE Corentin {
174854139cf3SJoao Pinto 	/* RX Allocation */
174971fedb01SJoao Pinto 	int ret = alloc_dma_rx_desc_resources(priv);
175071fedb01SJoao Pinto 
175171fedb01SJoao Pinto 	if (ret)
175271fedb01SJoao Pinto 		return ret;
175371fedb01SJoao Pinto 
175471fedb01SJoao Pinto 	ret = alloc_dma_tx_desc_resources(priv);
175571fedb01SJoao Pinto 
175671fedb01SJoao Pinto 	return ret;
175771fedb01SJoao Pinto }
175871fedb01SJoao Pinto 
175971fedb01SJoao Pinto /**
176071fedb01SJoao Pinto  * free_dma_desc_resources - free dma desc resources
176171fedb01SJoao Pinto  * @priv: private structure
176271fedb01SJoao Pinto  */
176371fedb01SJoao Pinto static void free_dma_desc_resources(struct stmmac_priv *priv)
176471fedb01SJoao Pinto {
176571fedb01SJoao Pinto 	/* Release the DMA RX socket buffers */
176671fedb01SJoao Pinto 	free_dma_rx_desc_resources(priv);
176771fedb01SJoao Pinto 
176871fedb01SJoao Pinto 	/* Release the DMA TX socket buffers */
176971fedb01SJoao Pinto 	free_dma_tx_desc_resources(priv);
177071fedb01SJoao Pinto }
177171fedb01SJoao Pinto 
177271fedb01SJoao Pinto /**
17739eb12474Sjpinto  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
17749eb12474Sjpinto  *  @priv: driver private structure
17759eb12474Sjpinto  *  Description: It is used for enabling the rx queues in the MAC
17769eb12474Sjpinto  */
17779eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
17789eb12474Sjpinto {
17794f6046f5SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
17804f6046f5SJoao Pinto 	int queue;
17814f6046f5SJoao Pinto 	u8 mode;
17829eb12474Sjpinto 
17834f6046f5SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
17844f6046f5SJoao Pinto 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1785c10d4c82SJose Abreu 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
17864f6046f5SJoao Pinto 	}
17879eb12474Sjpinto }
17889eb12474Sjpinto 
17899eb12474Sjpinto /**
1790ae4f0d46SJoao Pinto  * stmmac_start_rx_dma - start RX DMA channel
1791ae4f0d46SJoao Pinto  * @priv: driver private structure
1792ae4f0d46SJoao Pinto  * @chan: RX channel index
1793ae4f0d46SJoao Pinto  * Description:
1794ae4f0d46SJoao Pinto  * This starts a RX DMA channel
1795ae4f0d46SJoao Pinto  */
1796ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1797ae4f0d46SJoao Pinto {
1798ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1799a4e887faSJose Abreu 	stmmac_start_rx(priv, priv->ioaddr, chan);
1800ae4f0d46SJoao Pinto }
1801ae4f0d46SJoao Pinto 
1802ae4f0d46SJoao Pinto /**
1803ae4f0d46SJoao Pinto  * stmmac_start_tx_dma - start TX DMA channel
1804ae4f0d46SJoao Pinto  * @priv: driver private structure
1805ae4f0d46SJoao Pinto  * @chan: TX channel index
1806ae4f0d46SJoao Pinto  * Description:
1807ae4f0d46SJoao Pinto  * This starts a TX DMA channel
1808ae4f0d46SJoao Pinto  */
1809ae4f0d46SJoao Pinto static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1810ae4f0d46SJoao Pinto {
1811ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1812a4e887faSJose Abreu 	stmmac_start_tx(priv, priv->ioaddr, chan);
1813ae4f0d46SJoao Pinto }
1814ae4f0d46SJoao Pinto 
1815ae4f0d46SJoao Pinto /**
1816ae4f0d46SJoao Pinto  * stmmac_stop_rx_dma - stop RX DMA channel
1817ae4f0d46SJoao Pinto  * @priv: driver private structure
1818ae4f0d46SJoao Pinto  * @chan: RX channel index
1819ae4f0d46SJoao Pinto  * Description:
1820ae4f0d46SJoao Pinto  * This stops a RX DMA channel
1821ae4f0d46SJoao Pinto  */
1822ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1823ae4f0d46SJoao Pinto {
1824ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1825a4e887faSJose Abreu 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1826ae4f0d46SJoao Pinto }
1827ae4f0d46SJoao Pinto 
1828ae4f0d46SJoao Pinto /**
1829ae4f0d46SJoao Pinto  * stmmac_stop_tx_dma - stop TX DMA channel
1830ae4f0d46SJoao Pinto  * @priv: driver private structure
1831ae4f0d46SJoao Pinto  * @chan: TX channel index
1832ae4f0d46SJoao Pinto  * Description:
1833ae4f0d46SJoao Pinto  * This stops a TX DMA channel
1834ae4f0d46SJoao Pinto  */
1835ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1836ae4f0d46SJoao Pinto {
1837ae4f0d46SJoao Pinto 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1838a4e887faSJose Abreu 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1839ae4f0d46SJoao Pinto }
1840ae4f0d46SJoao Pinto 
1841ae4f0d46SJoao Pinto /**
1842ae4f0d46SJoao Pinto  * stmmac_start_all_dma - start all RX and TX DMA channels
1843ae4f0d46SJoao Pinto  * @priv: driver private structure
1844ae4f0d46SJoao Pinto  * Description:
1845ae4f0d46SJoao Pinto  * This starts all the RX and TX DMA channels
1846ae4f0d46SJoao Pinto  */
1847ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv)
1848ae4f0d46SJoao Pinto {
1849ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1850ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1851ae4f0d46SJoao Pinto 	u32 chan = 0;
1852ae4f0d46SJoao Pinto 
1853ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1854ae4f0d46SJoao Pinto 		stmmac_start_rx_dma(priv, chan);
1855ae4f0d46SJoao Pinto 
1856ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1857ae4f0d46SJoao Pinto 		stmmac_start_tx_dma(priv, chan);
1858ae4f0d46SJoao Pinto }
1859ae4f0d46SJoao Pinto 
1860ae4f0d46SJoao Pinto /**
1861ae4f0d46SJoao Pinto  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1862ae4f0d46SJoao Pinto  * @priv: driver private structure
1863ae4f0d46SJoao Pinto  * Description:
1864ae4f0d46SJoao Pinto  * This stops the RX and TX DMA channels
1865ae4f0d46SJoao Pinto  */
1866ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1867ae4f0d46SJoao Pinto {
1868ae4f0d46SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1869ae4f0d46SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1870ae4f0d46SJoao Pinto 	u32 chan = 0;
1871ae4f0d46SJoao Pinto 
1872ae4f0d46SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
1873ae4f0d46SJoao Pinto 		stmmac_stop_rx_dma(priv, chan);
1874ae4f0d46SJoao Pinto 
1875ae4f0d46SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
1876ae4f0d46SJoao Pinto 		stmmac_stop_tx_dma(priv, chan);
1877ae4f0d46SJoao Pinto }
1878ae4f0d46SJoao Pinto 
1879ae4f0d46SJoao Pinto /**
18807ac6653aSJeff Kirsher  *  stmmac_dma_operation_mode - HW DMA operation mode
188132ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
1882732fdf0eSGiuseppe CAVALLARO  *  Description: it is used for configuring the DMA operation mode register in
1883732fdf0eSGiuseppe CAVALLARO  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
18847ac6653aSJeff Kirsher  */
18857ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
18867ac6653aSJeff Kirsher {
18876deee222SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
18886deee222SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1889f88203a2SVince Bridgers 	int rxfifosz = priv->plat->rx_fifo_size;
189052a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
18916deee222SJoao Pinto 	u32 txmode = 0;
18926deee222SJoao Pinto 	u32 rxmode = 0;
18936deee222SJoao Pinto 	u32 chan = 0;
1894a0daae13SJose Abreu 	u8 qmode = 0;
1895f88203a2SVince Bridgers 
189611fbf811SThierry Reding 	if (rxfifosz == 0)
189711fbf811SThierry Reding 		rxfifosz = priv->dma_cap.rx_fifo_size;
189852a76235SJose Abreu 	if (txfifosz == 0)
189952a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
190052a76235SJose Abreu 
190152a76235SJose Abreu 	/* Adjust for real per queue fifo size */
190252a76235SJose Abreu 	rxfifosz /= rx_channels_count;
190352a76235SJose Abreu 	txfifosz /= tx_channels_count;
190411fbf811SThierry Reding 
19056deee222SJoao Pinto 	if (priv->plat->force_thresh_dma_mode) {
19066deee222SJoao Pinto 		txmode = tc;
19076deee222SJoao Pinto 		rxmode = tc;
19086deee222SJoao Pinto 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
19097ac6653aSJeff Kirsher 		/*
19107ac6653aSJeff Kirsher 		 * In case of GMAC, SF mode can be enabled
19117ac6653aSJeff Kirsher 		 * to perform the TX COE in HW. This depends on:
19127ac6653aSJeff Kirsher 		 * 1) TX COE if actually supported
19137ac6653aSJeff Kirsher 		 * 2) There is no bugged Jumbo frame support
19147ac6653aSJeff Kirsher 		 *    that needs to not insert csum in the TDES.
19157ac6653aSJeff Kirsher 		 */
19166deee222SJoao Pinto 		txmode = SF_DMA_MODE;
19176deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
1918b2dec116SSonic Zhang 		priv->xstats.threshold = SF_DMA_MODE;
19196deee222SJoao Pinto 	} else {
19206deee222SJoao Pinto 		txmode = tc;
19216deee222SJoao Pinto 		rxmode = SF_DMA_MODE;
19226deee222SJoao Pinto 	}
19236deee222SJoao Pinto 
19246deee222SJoao Pinto 	/* configure all channels */
1925a0daae13SJose Abreu 	for (chan = 0; chan < rx_channels_count; chan++) {
1926a0daae13SJose Abreu 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
19276deee222SJoao Pinto 
1928a4e887faSJose Abreu 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1929a0daae13SJose Abreu 				rxfifosz, qmode);
19304205c88eSJose Abreu 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
19314205c88eSJose Abreu 				chan);
1932a0daae13SJose Abreu 	}
1933a0daae13SJose Abreu 
1934a0daae13SJose Abreu 	for (chan = 0; chan < tx_channels_count; chan++) {
1935a0daae13SJose Abreu 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1936a0daae13SJose Abreu 
1937a4e887faSJose Abreu 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1938a0daae13SJose Abreu 				txfifosz, qmode);
1939a0daae13SJose Abreu 	}
19407ac6653aSJeff Kirsher }
19417ac6653aSJeff Kirsher 
19427ac6653aSJeff Kirsher /**
1943732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_clean - to manage the transmission completion
194432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
1945ce736788SJoao Pinto  * @queue: TX queue index
1946732fdf0eSGiuseppe CAVALLARO  * Description: it reclaims the transmit resources after transmission completes.
19477ac6653aSJeff Kirsher  */
19488fce3331SJose Abreu static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
19497ac6653aSJeff Kirsher {
1950ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
195138979574SBeniamino Galvani 	unsigned int bytes_compl = 0, pkts_compl = 0;
19528fce3331SJose Abreu 	unsigned int entry, count = 0;
19537ac6653aSJeff Kirsher 
19548fce3331SJose Abreu 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1955a9097a96SGiuseppe CAVALLARO 
19569125cdd1SGiuseppe CAVALLARO 	priv->xstats.tx_clean++;
19579125cdd1SGiuseppe CAVALLARO 
19588d5f4b07SBernd Edlinger 	entry = tx_q->dirty_tx;
19598fce3331SJose Abreu 	while ((entry != tx_q->cur_tx) && (count < budget)) {
1960ce736788SJoao Pinto 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1961c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
1962c363b658SFabrice Gasnier 		int status;
1963c24602efSGiuseppe CAVALLARO 
1964c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
1965ce736788SJoao Pinto 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1966c24602efSGiuseppe CAVALLARO 		else
1967ce736788SJoao Pinto 			p = tx_q->dma_tx + entry;
19687ac6653aSJeff Kirsher 
196942de047dSJose Abreu 		status = stmmac_tx_status(priv, &priv->dev->stats,
197042de047dSJose Abreu 				&priv->xstats, p, priv->ioaddr);
1971c363b658SFabrice Gasnier 		/* Check if the descriptor is owned by the DMA */
1972c363b658SFabrice Gasnier 		if (unlikely(status & tx_dma_own))
1973c363b658SFabrice Gasnier 			break;
1974c363b658SFabrice Gasnier 
19758fce3331SJose Abreu 		count++;
19768fce3331SJose Abreu 
1977a6b25da5SNiklas Cassel 		/* Make sure descriptor fields are read after reading
1978a6b25da5SNiklas Cassel 		 * the own bit.
1979a6b25da5SNiklas Cassel 		 */
1980a6b25da5SNiklas Cassel 		dma_rmb();
1981a6b25da5SNiklas Cassel 
1982c363b658SFabrice Gasnier 		/* Just consider the last segment and ...*/
1983c363b658SFabrice Gasnier 		if (likely(!(status & tx_not_ls))) {
1984c363b658SFabrice Gasnier 			/* ... verify the status error condition */
1985c363b658SFabrice Gasnier 			if (unlikely(status & tx_err)) {
1986c363b658SFabrice Gasnier 				priv->dev->stats.tx_errors++;
1987c363b658SFabrice Gasnier 			} else {
19887ac6653aSJeff Kirsher 				priv->dev->stats.tx_packets++;
19897ac6653aSJeff Kirsher 				priv->xstats.tx_pkt_n++;
1990c363b658SFabrice Gasnier 			}
1991ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_tx_hwtstamp(priv, p, skb);
19927ac6653aSJeff Kirsher 		}
19937ac6653aSJeff Kirsher 
1994ce736788SJoao Pinto 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1995ce736788SJoao Pinto 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1996362b37beSGiuseppe CAVALLARO 				dma_unmap_page(priv->device,
1997ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].buf,
1998ce736788SJoao Pinto 					       tx_q->tx_skbuff_dma[entry].len,
19997ac6653aSJeff Kirsher 					       DMA_TO_DEVICE);
2000362b37beSGiuseppe CAVALLARO 			else
2001362b37beSGiuseppe CAVALLARO 				dma_unmap_single(priv->device,
2002ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].buf,
2003ce736788SJoao Pinto 						 tx_q->tx_skbuff_dma[entry].len,
2004362b37beSGiuseppe CAVALLARO 						 DMA_TO_DEVICE);
2005ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].buf = 0;
2006ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].len = 0;
2007ce736788SJoao Pinto 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2008cf32deecSRayagond Kokatanur 		}
2009f748be53SAlexandre TORGUE 
20102c520b1cSJose Abreu 		stmmac_clean_desc3(priv, tx_q, p);
2011f748be53SAlexandre TORGUE 
2012ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2013ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
20147ac6653aSJeff Kirsher 
20157ac6653aSJeff Kirsher 		if (likely(skb != NULL)) {
201638979574SBeniamino Galvani 			pkts_compl++;
201738979574SBeniamino Galvani 			bytes_compl += skb->len;
20187c565c33SEric W. Biederman 			dev_consume_skb_any(skb);
2019ce736788SJoao Pinto 			tx_q->tx_skbuff[entry] = NULL;
20207ac6653aSJeff Kirsher 		}
20217ac6653aSJeff Kirsher 
202242de047dSJose Abreu 		stmmac_release_tx_desc(priv, p, priv->mode);
20237ac6653aSJeff Kirsher 
2024e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
20257ac6653aSJeff Kirsher 	}
2026ce736788SJoao Pinto 	tx_q->dirty_tx = entry;
202738979574SBeniamino Galvani 
2028c22a3f48SJoao Pinto 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2029c22a3f48SJoao Pinto 				  pkts_compl, bytes_compl);
203038979574SBeniamino Galvani 
2031c22a3f48SJoao Pinto 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2032c22a3f48SJoao Pinto 								queue))) &&
2033c22a3f48SJoao Pinto 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
2034c22a3f48SJoao Pinto 
2035b3e51069SLABBE Corentin 		netif_dbg(priv, tx_done, priv->dev,
2036b3e51069SLABBE Corentin 			  "%s: restart transmit\n", __func__);
2037c22a3f48SJoao Pinto 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
20387ac6653aSJeff Kirsher 	}
2039d765955dSGiuseppe CAVALLARO 
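	/* TX resources were just reclaimed: if EEE is enabled and the TX path
	 * is not already in low-power idle, try to enter LPI and re-arm the
	 * expiry timer.
	 */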
2040d765955dSGiuseppe CAVALLARO 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
2041d765955dSGiuseppe CAVALLARO 		stmmac_enable_eee_mode(priv);
2042f5351ef7SGiuseppe CAVALLARO 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
2043d765955dSGiuseppe CAVALLARO 	}
20448fce3331SJose Abreu 
20454ccb4585SJose Abreu 	/* We still have pending packets, let's call for a new scheduling */
20464ccb4585SJose Abreu 	if (tx_q->dirty_tx != tx_q->cur_tx)
20474ccb4585SJose Abreu 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
20484ccb4585SJose Abreu 
20498fce3331SJose Abreu 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
20508fce3331SJose Abreu 
20518fce3331SJose Abreu 	return count;
20527ac6653aSJeff Kirsher }
20537ac6653aSJeff Kirsher 
20547ac6653aSJeff Kirsher /**
2055732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_err - to manage the tx error
205632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
20575bacd778SLABBE Corentin  * @chan: channel index
20587ac6653aSJeff Kirsher  * Description: it cleans the descriptors and restarts the transmission
2059732fdf0eSGiuseppe CAVALLARO  * in case of transmission errors.
20607ac6653aSJeff Kirsher  */
20615bacd778SLABBE Corentin static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
20627ac6653aSJeff Kirsher {
2063ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2064c24602efSGiuseppe CAVALLARO 	int i;
2065ce736788SJoao Pinto 
2066c22a3f48SJoao Pinto 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
20677ac6653aSJeff Kirsher 
2068ae4f0d46SJoao Pinto 	stmmac_stop_tx_dma(priv, chan);
2069ce736788SJoao Pinto 	dma_free_tx_skbufs(priv, chan);
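	/* Re-initialize every descriptor so the ring restarts from a clean
	 * state.
	 */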
2070e3ad57c9SGiuseppe Cavallaro 	for (i = 0; i < DMA_TX_SIZE; i++)
2071c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
207242de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
207342de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
2074c24602efSGiuseppe CAVALLARO 		else
207542de047dSJose Abreu 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
207642de047dSJose Abreu 					priv->mode, (i == DMA_TX_SIZE - 1));
2077ce736788SJoao Pinto 	tx_q->dirty_tx = 0;
2078ce736788SJoao Pinto 	tx_q->cur_tx = 0;
20798d212a9eSNiklas Cassel 	tx_q->mss = 0;
2080c22a3f48SJoao Pinto 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2081ae4f0d46SJoao Pinto 	stmmac_start_tx_dma(priv, chan);
20827ac6653aSJeff Kirsher 
20837ac6653aSJeff Kirsher 	priv->dev->stats.tx_errors++;
2084c22a3f48SJoao Pinto 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
20857ac6653aSJeff Kirsher }
20867ac6653aSJeff Kirsher 
208732ceabcaSGiuseppe CAVALLARO /**
20886deee222SJoao Pinto  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
20896deee222SJoao Pinto  *  @priv: driver private structure
20906deee222SJoao Pinto  *  @txmode: TX operating mode
20916deee222SJoao Pinto  *  @rxmode: RX operating mode
20926deee222SJoao Pinto  *  @chan: channel index
20936deee222SJoao Pinto  *  Description: it is used to configure the DMA operation mode at runtime
20946deee222SJoao Pinto  *  in order to program the tx/rx DMA thresholds or the Store-And-Forward
20956deee222SJoao Pinto  *  mode.
20966deee222SJoao Pinto  */
20976deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
20986deee222SJoao Pinto 					  u32 rxmode, u32 chan)
20996deee222SJoao Pinto {
2100a0daae13SJose Abreu 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2101a0daae13SJose Abreu 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
210252a76235SJose Abreu 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
210352a76235SJose Abreu 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
21046deee222SJoao Pinto 	int rxfifosz = priv->plat->rx_fifo_size;
210552a76235SJose Abreu 	int txfifosz = priv->plat->tx_fifo_size;
21066deee222SJoao Pinto 
21076deee222SJoao Pinto 	if (rxfifosz == 0)
21086deee222SJoao Pinto 		rxfifosz = priv->dma_cap.rx_fifo_size;
210952a76235SJose Abreu 	if (txfifosz == 0)
211052a76235SJose Abreu 		txfifosz = priv->dma_cap.tx_fifo_size;
211152a76235SJose Abreu 
211252a76235SJose Abreu 	/* Adjust for real per queue fifo size */
211352a76235SJose Abreu 	rxfifosz /= rx_channels_count;
211452a76235SJose Abreu 	txfifosz /= tx_channels_count;
21156deee222SJoao Pinto 
2116ab0204e3SJose Abreu 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2117ab0204e3SJose Abreu 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
21186deee222SJoao Pinto }
21196deee222SJoao Pinto 
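/**
 * stmmac_safety_feat_interrupt - handle the safety feature interrupt
 * @priv: driver private structure
 * Description: checks the safety feature IRQ status and, when a condition
 * that needs recovery is reported, raises a global error. Returns true if
 * a global error was signalled, false otherwise.
 */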
21208bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
21218bf993a5SJose Abreu {
212263a550fcSJose Abreu 	int ret;
21238bf993a5SJose Abreu 
2124c10d4c82SJose Abreu 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
21258bf993a5SJose Abreu 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2126c10d4c82SJose Abreu 	if (ret && (ret != -EINVAL)) {
21278bf993a5SJose Abreu 		stmmac_global_err(priv);
2128c10d4c82SJose Abreu 		return true;
2129c10d4c82SJose Abreu 	}
2130c10d4c82SJose Abreu 
2131c10d4c82SJose Abreu 	return false;
21328bf993a5SJose Abreu }
21338bf993a5SJose Abreu 
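/**
 * stmmac_napi_check - check the DMA status of a channel and schedule NAPI
 * @priv: driver private structure
 * @chan: channel index
 * Description: reads the per-channel DMA interrupt status and, when there is
 * work to do, disables the channel DMA interrupts and schedules the RX
 * and/or TX NAPI instance of the corresponding channel. Returns the status.
 */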
21348fce3331SJose Abreu static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
21358fce3331SJose Abreu {
21368fce3331SJose Abreu 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
21378fce3331SJose Abreu 						 &priv->xstats, chan);
21388fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[chan];
21398fce3331SJose Abreu 
2140a976ca79SJose Abreu 	if (status)
2141a976ca79SJose Abreu 		status |= handle_rx | handle_tx;
2142a976ca79SJose Abreu 
21434ccb4585SJose Abreu 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
21448fce3331SJose Abreu 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
21454ccb4585SJose Abreu 		napi_schedule_irqoff(&ch->rx_napi);
21464ccb4585SJose Abreu 	}
21474ccb4585SJose Abreu 
21484ccb4585SJose Abreu 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
21494ccb4585SJose Abreu 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
21504ccb4585SJose Abreu 		napi_schedule_irqoff(&ch->tx_napi);
21518fce3331SJose Abreu 	}
21528fce3331SJose Abreu 
21538fce3331SJose Abreu 	return status;
21548fce3331SJose Abreu }
21558fce3331SJose Abreu 
21566deee222SJoao Pinto /**
2157732fdf0eSGiuseppe CAVALLARO  * stmmac_dma_interrupt - DMA ISR
215832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
215932ceabcaSGiuseppe CAVALLARO  * Description: this is the DMA ISR. It is called by the main ISR.
2160732fdf0eSGiuseppe CAVALLARO  * It calls the dwmac dma routine and schedules the poll method in case
2161732fdf0eSGiuseppe CAVALLARO  * some work can be done.
216232ceabcaSGiuseppe CAVALLARO  */
21637ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv)
21647ac6653aSJeff Kirsher {
2165d62a107aSJoao Pinto 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
21665a6a0445SNiklas Cassel 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
21675a6a0445SNiklas Cassel 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
21685a6a0445SNiklas Cassel 				tx_channel_count : rx_channel_count;
2169d62a107aSJoao Pinto 	u32 chan;
21708ac60ffbSKees Cook 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
21718ac60ffbSKees Cook 
21728ac60ffbSKees Cook 	/* Make sure we never check beyond our status buffer. */
21738ac60ffbSKees Cook 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
21748ac60ffbSKees Cook 		channels_to_check = ARRAY_SIZE(status);
217568e5cfafSJoao Pinto 
21765a6a0445SNiklas Cassel 	for (chan = 0; chan < channels_to_check; chan++)
21778fce3331SJose Abreu 		status[chan] = stmmac_napi_check(priv, chan);
2178d62a107aSJoao Pinto 
21795a6a0445SNiklas Cassel 	for (chan = 0; chan < tx_channel_count; chan++) {
21805a6a0445SNiklas Cassel 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
21817ac6653aSJeff Kirsher 			/* Try to bump up the dma threshold on this failure */
2182b2dec116SSonic Zhang 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2183b2dec116SSonic Zhang 			    (tc <= 256)) {
21847ac6653aSJeff Kirsher 				tc += 64;
2185c405abe2SSonic Zhang 				if (priv->plat->force_thresh_dma_mode)
2186d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2187d62a107aSJoao Pinto 								      tc,
2188d62a107aSJoao Pinto 								      tc,
2189d62a107aSJoao Pinto 								      chan);
2190c405abe2SSonic Zhang 				else
2191d62a107aSJoao Pinto 					stmmac_set_dma_operation_mode(priv,
2192d62a107aSJoao Pinto 								    tc,
2193d62a107aSJoao Pinto 								    SF_DMA_MODE,
2194d62a107aSJoao Pinto 								    chan);
21957ac6653aSJeff Kirsher 				priv->xstats.threshold = tc;
21967ac6653aSJeff Kirsher 			}
21975a6a0445SNiklas Cassel 		} else if (unlikely(status[chan] == tx_hard_error)) {
21984e593262SJoao Pinto 			stmmac_tx_err(priv, chan);
21997ac6653aSJeff Kirsher 		}
2200d62a107aSJoao Pinto 	}
2201d62a107aSJoao Pinto }
22027ac6653aSJeff Kirsher 
220332ceabcaSGiuseppe CAVALLARO /**
220432ceabcaSGiuseppe CAVALLARO  * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
220532ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
220632ceabcaSGiuseppe CAVALLARO  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
220732ceabcaSGiuseppe CAVALLARO  */
22081c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv)
22091c901a46SGiuseppe CAVALLARO {
22101c901a46SGiuseppe CAVALLARO 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
22111c901a46SGiuseppe CAVALLARO 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
22121c901a46SGiuseppe CAVALLARO 
22133b1dd2c5SJose Abreu 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
22144f795b25SGiuseppe CAVALLARO 
22154f795b25SGiuseppe CAVALLARO 	if (priv->dma_cap.rmon) {
22163b1dd2c5SJose Abreu 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
22171c901a46SGiuseppe CAVALLARO 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
22184f795b25SGiuseppe CAVALLARO 	} else
221938ddc59dSLABBE Corentin 		netdev_info(priv->dev, "No MAC Management Counters available\n");
22201c901a46SGiuseppe CAVALLARO }
22211c901a46SGiuseppe CAVALLARO 
2222732fdf0eSGiuseppe CAVALLARO /**
2223732fdf0eSGiuseppe CAVALLARO  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
222432ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
222519e30c14SGiuseppe CAVALLARO  * Description:
222619e30c14SGiuseppe CAVALLARO  *  new GMAC chip generations have a new register to indicate the
2227e7434821SGiuseppe CAVALLARO  *  presence of the optional feature/functions.
222819e30c14SGiuseppe CAVALLARO  *  This can also be used to override the value passed through the
222919e30c14SGiuseppe CAVALLARO  *  platform and is necessary for old MAC10/100 and GMAC chips.
2230e7434821SGiuseppe CAVALLARO  */
2231e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv)
2232e7434821SGiuseppe CAVALLARO {
2233a4e887faSJose Abreu 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2234e7434821SGiuseppe CAVALLARO }
2235e7434821SGiuseppe CAVALLARO 
223632ceabcaSGiuseppe CAVALLARO /**
2237732fdf0eSGiuseppe CAVALLARO  * stmmac_check_ether_addr - check if the MAC addr is valid
223832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
223932ceabcaSGiuseppe CAVALLARO  * Description:
224032ceabcaSGiuseppe CAVALLARO  * it verifies that the MAC address is valid; in case of failure it
224132ceabcaSGiuseppe CAVALLARO  * generates a random MAC address
224232ceabcaSGiuseppe CAVALLARO  */
2243bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2244bfab27a1SGiuseppe CAVALLARO {
2245bfab27a1SGiuseppe CAVALLARO 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2246c10d4c82SJose Abreu 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2247bfab27a1SGiuseppe CAVALLARO 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2248f2cedb63SDanny Kukawka 			eth_hw_addr_random(priv->dev);
2249af649352SJisheng Zhang 		dev_info(priv->device, "device MAC address %pM\n",
2250bfab27a1SGiuseppe CAVALLARO 			 priv->dev->dev_addr);
2251bfab27a1SGiuseppe CAVALLARO 	}
2252c88460b7SHans de Goede }
2253bfab27a1SGiuseppe CAVALLARO 
225432ceabcaSGiuseppe CAVALLARO /**
2255732fdf0eSGiuseppe CAVALLARO  * stmmac_init_dma_engine - DMA init.
225632ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
225732ceabcaSGiuseppe CAVALLARO  * Description:
225832ceabcaSGiuseppe CAVALLARO  * It inits the DMA by invoking the specific MAC/GMAC callback.
225932ceabcaSGiuseppe CAVALLARO  * Some DMA parameters can be passed from the platform;
226032ceabcaSGiuseppe CAVALLARO  * if these are not passed, a default is kept for the MAC or GMAC.
226132ceabcaSGiuseppe CAVALLARO  */
22620f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv)
22630f1f88a8SGiuseppe CAVALLARO {
226447f2a9ceSJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
226547f2a9ceSJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
226624aaed0cSJose Abreu 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
226754139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q;
2268ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
226947f2a9ceSJoao Pinto 	u32 chan = 0;
2270c24602efSGiuseppe CAVALLARO 	int atds = 0;
2271495db273SGiuseppe Cavallaro 	int ret = 0;
22720f1f88a8SGiuseppe CAVALLARO 
2273a332e2faSNiklas Cassel 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2274a332e2faSNiklas Cassel 		dev_err(priv->device, "Invalid DMA configuration\n");
227589ab75bfSNiklas Cassel 		return -EINVAL;
22760f1f88a8SGiuseppe CAVALLARO 	}
22770f1f88a8SGiuseppe CAVALLARO 
2278c24602efSGiuseppe CAVALLARO 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2279c24602efSGiuseppe CAVALLARO 		atds = 1;
2280c24602efSGiuseppe CAVALLARO 
2281a4e887faSJose Abreu 	ret = stmmac_reset(priv, priv->ioaddr);
2282495db273SGiuseppe Cavallaro 	if (ret) {
2283495db273SGiuseppe Cavallaro 		dev_err(priv->device, "Failed to reset the dma\n");
2284495db273SGiuseppe Cavallaro 		return ret;
2285495db273SGiuseppe Cavallaro 	}
2286495db273SGiuseppe Cavallaro 
22877d9e6c5aSJose Abreu 	/* DMA Configuration */
22887d9e6c5aSJose Abreu 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
22897d9e6c5aSJose Abreu 
22907d9e6c5aSJose Abreu 	if (priv->plat->axi)
22917d9e6c5aSJose Abreu 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
22927d9e6c5aSJose Abreu 
2293af8f3fb7SWeifeng Voon 	/* DMA CSR Channel configuration */
2294af8f3fb7SWeifeng Voon 	for (chan = 0; chan < dma_csr_ch; chan++)
2295af8f3fb7SWeifeng Voon 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2296af8f3fb7SWeifeng Voon 
229747f2a9ceSJoao Pinto 	/* DMA RX Channel Configuration */
229847f2a9ceSJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++) {
229954139cf3SJoao Pinto 		rx_q = &priv->rx_queue[chan];
230054139cf3SJoao Pinto 
230124aaed0cSJose Abreu 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
230224aaed0cSJose Abreu 				    rx_q->dma_rx_phy, chan);
230347f2a9ceSJoao Pinto 
230454139cf3SJoao Pinto 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2305f748be53SAlexandre TORGUE 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2306a4e887faSJose Abreu 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2307a4e887faSJose Abreu 				       rx_q->rx_tail_addr, chan);
230847f2a9ceSJoao Pinto 	}
230947f2a9ceSJoao Pinto 
231047f2a9ceSJoao Pinto 	/* DMA TX Channel Configuration */
231147f2a9ceSJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++) {
2312ce736788SJoao Pinto 		tx_q = &priv->tx_queue[chan];
2313ce736788SJoao Pinto 
231424aaed0cSJose Abreu 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
231524aaed0cSJose Abreu 				    tx_q->dma_tx_phy, chan);
2316f748be53SAlexandre TORGUE 
23170431100bSJose Abreu 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2318a4e887faSJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2319a4e887faSJose Abreu 				       tx_q->tx_tail_addr, chan);
232047f2a9ceSJoao Pinto 	}
232124aaed0cSJose Abreu 
2322495db273SGiuseppe Cavallaro 	return ret;
23230f1f88a8SGiuseppe CAVALLARO }
23240f1f88a8SGiuseppe CAVALLARO 
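/* Re-arm the per-queue TX coalescing timer with the configured timeout */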
23258fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
23268fce3331SJose Abreu {
23278fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
23288fce3331SJose Abreu 
23298fce3331SJose Abreu 	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
23308fce3331SJose Abreu }
23318fce3331SJose Abreu 
2332bfab27a1SGiuseppe CAVALLARO /**
2333732fdf0eSGiuseppe CAVALLARO  * stmmac_tx_timer - mitigation sw timer for tx.
23349125cdd1SGiuseppe CAVALLARO  * @t: timer_list pointer (embedded in the TX queue)
23359125cdd1SGiuseppe CAVALLARO  * Description:
23369125cdd1SGiuseppe CAVALLARO  * This is the timer handler used to schedule the TX NAPI (and so stmmac_tx_clean).
23379125cdd1SGiuseppe CAVALLARO  */
2338e99e88a9SKees Cook static void stmmac_tx_timer(struct timer_list *t)
23399125cdd1SGiuseppe CAVALLARO {
23408fce3331SJose Abreu 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
23418fce3331SJose Abreu 	struct stmmac_priv *priv = tx_q->priv_data;
23428fce3331SJose Abreu 	struct stmmac_channel *ch;
23439125cdd1SGiuseppe CAVALLARO 
23448fce3331SJose Abreu 	ch = &priv->channel[tx_q->queue_index];
23458fce3331SJose Abreu 
23464ccb4585SJose Abreu 	/*
23474ccb4585SJose Abreu 	 * If NAPI is already running we can miss some events. Let's rearm
23484ccb4585SJose Abreu 	 * the timer and try again.
23494ccb4585SJose Abreu 	 */
23504ccb4585SJose Abreu 	if (likely(napi_schedule_prep(&ch->tx_napi)))
23514ccb4585SJose Abreu 		__napi_schedule(&ch->tx_napi);
23524ccb4585SJose Abreu 	else
23534ccb4585SJose Abreu 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
23549125cdd1SGiuseppe CAVALLARO }
23559125cdd1SGiuseppe CAVALLARO 
23569125cdd1SGiuseppe CAVALLARO /**
2357732fdf0eSGiuseppe CAVALLARO  * stmmac_init_tx_coalesce - init tx mitigation options.
235832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
23599125cdd1SGiuseppe CAVALLARO  * Description:
23609125cdd1SGiuseppe CAVALLARO  * This inits the transmit coalesce parameters: i.e. timer rate,
23619125cdd1SGiuseppe CAVALLARO  * timer handler and default threshold used for enabling the
23629125cdd1SGiuseppe CAVALLARO  * interrupt on completion bit.
23639125cdd1SGiuseppe CAVALLARO  */
23649125cdd1SGiuseppe CAVALLARO static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
23659125cdd1SGiuseppe CAVALLARO {
23668fce3331SJose Abreu 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
23678fce3331SJose Abreu 	u32 chan;
23688fce3331SJose Abreu 
23699125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
23709125cdd1SGiuseppe CAVALLARO 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
23718fce3331SJose Abreu 
23728fce3331SJose Abreu 	for (chan = 0; chan < tx_channel_count; chan++) {
23738fce3331SJose Abreu 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
23748fce3331SJose Abreu 
23758fce3331SJose Abreu 		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
23768fce3331SJose Abreu 	}
23779125cdd1SGiuseppe CAVALLARO }
23789125cdd1SGiuseppe CAVALLARO 
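/**
 * stmmac_set_rings_length - program the TX/RX descriptor ring lengths
 * @priv: driver private structure
 * Description: tells the DMA, for each channel in use, how many descriptors
 * the TX and RX rings contain.
 */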
23794854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv)
23804854ab99SJoao Pinto {
23814854ab99SJoao Pinto 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
23824854ab99SJoao Pinto 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
23834854ab99SJoao Pinto 	u32 chan;
23844854ab99SJoao Pinto 
23854854ab99SJoao Pinto 	/* set TX ring length */
23864854ab99SJoao Pinto 	for (chan = 0; chan < tx_channels_count; chan++)
2387a4e887faSJose Abreu 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
23884854ab99SJoao Pinto 				(DMA_TX_SIZE - 1), chan);
23894854ab99SJoao Pinto 
23904854ab99SJoao Pinto 	/* set RX ring length */
23914854ab99SJoao Pinto 	for (chan = 0; chan < rx_channels_count; chan++)
2392a4e887faSJose Abreu 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
23934854ab99SJoao Pinto 				(DMA_RX_SIZE - 1), chan);
23944854ab99SJoao Pinto }
23954854ab99SJoao Pinto 
23969125cdd1SGiuseppe CAVALLARO /**
23976a3a7193SJoao Pinto  *  stmmac_set_tx_queue_weight - Set TX queue weight
23986a3a7193SJoao Pinto  *  @priv: driver private structure
23996a3a7193SJoao Pinto  *  Description: It is used for setting the weight of each TX queue
24006a3a7193SJoao Pinto  */
24016a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
24026a3a7193SJoao Pinto {
24036a3a7193SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
24046a3a7193SJoao Pinto 	u32 weight;
24056a3a7193SJoao Pinto 	u32 queue;
24066a3a7193SJoao Pinto 
24076a3a7193SJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
24086a3a7193SJoao Pinto 		weight = priv->plat->tx_queues_cfg[queue].weight;
2409c10d4c82SJose Abreu 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
24106a3a7193SJoao Pinto 	}
24116a3a7193SJoao Pinto }
24126a3a7193SJoao Pinto 
24136a3a7193SJoao Pinto /**
241419d91873SJoao Pinto  *  stmmac_configure_cbs - Configure CBS in TX queue
241519d91873SJoao Pinto  *  @priv: driver private structure
241619d91873SJoao Pinto  *  Description: It is used for configuring CBS in AVB TX queues
241719d91873SJoao Pinto  */
241819d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv)
241919d91873SJoao Pinto {
242019d91873SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
242119d91873SJoao Pinto 	u32 mode_to_use;
242219d91873SJoao Pinto 	u32 queue;
242319d91873SJoao Pinto 
242444781fefSJoao Pinto 	/* queue 0 is reserved for legacy traffic */
242544781fefSJoao Pinto 	for (queue = 1; queue < tx_queues_count; queue++) {
242619d91873SJoao Pinto 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
242719d91873SJoao Pinto 		if (mode_to_use == MTL_QUEUE_DCB)
242819d91873SJoao Pinto 			continue;
242919d91873SJoao Pinto 
2430c10d4c82SJose Abreu 		stmmac_config_cbs(priv, priv->hw,
243119d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].send_slope,
243219d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].idle_slope,
243319d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].high_credit,
243419d91873SJoao Pinto 				priv->plat->tx_queues_cfg[queue].low_credit,
243519d91873SJoao Pinto 				queue);
243619d91873SJoao Pinto 	}
243719d91873SJoao Pinto }
243819d91873SJoao Pinto 
243919d91873SJoao Pinto /**
2440d43042f4SJoao Pinto  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2441d43042f4SJoao Pinto  *  @priv: driver private structure
2442d43042f4SJoao Pinto  *  Description: It is used for mapping RX queues to RX dma channels
2443d43042f4SJoao Pinto  */
2444d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2445d43042f4SJoao Pinto {
2446d43042f4SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2447d43042f4SJoao Pinto 	u32 queue;
2448d43042f4SJoao Pinto 	u32 chan;
2449d43042f4SJoao Pinto 
2450d43042f4SJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2451d43042f4SJoao Pinto 		chan = priv->plat->rx_queues_cfg[queue].chan;
2452c10d4c82SJose Abreu 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2453d43042f4SJoao Pinto 	}
2454d43042f4SJoao Pinto }
2455d43042f4SJoao Pinto 
2456d43042f4SJoao Pinto /**
2457a8f5102aSJoao Pinto  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2458a8f5102aSJoao Pinto  *  @priv: driver private structure
2459a8f5102aSJoao Pinto  *  Description: It is used for configuring the RX Queue Priority
2460a8f5102aSJoao Pinto  */
2461a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2462a8f5102aSJoao Pinto {
2463a8f5102aSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2464a8f5102aSJoao Pinto 	u32 queue;
2465a8f5102aSJoao Pinto 	u32 prio;
2466a8f5102aSJoao Pinto 
2467a8f5102aSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2468a8f5102aSJoao Pinto 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2469a8f5102aSJoao Pinto 			continue;
2470a8f5102aSJoao Pinto 
2471a8f5102aSJoao Pinto 		prio = priv->plat->rx_queues_cfg[queue].prio;
2472c10d4c82SJose Abreu 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2473a8f5102aSJoao Pinto 	}
2474a8f5102aSJoao Pinto }
2475a8f5102aSJoao Pinto 
2476a8f5102aSJoao Pinto /**
2477a8f5102aSJoao Pinto  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2478a8f5102aSJoao Pinto  *  @priv: driver private structure
2479a8f5102aSJoao Pinto  *  Description: It is used for configuring the TX Queue Priority
2480a8f5102aSJoao Pinto  */
2481a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2482a8f5102aSJoao Pinto {
2483a8f5102aSJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2484a8f5102aSJoao Pinto 	u32 queue;
2485a8f5102aSJoao Pinto 	u32 prio;
2486a8f5102aSJoao Pinto 
2487a8f5102aSJoao Pinto 	for (queue = 0; queue < tx_queues_count; queue++) {
2488a8f5102aSJoao Pinto 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2489a8f5102aSJoao Pinto 			continue;
2490a8f5102aSJoao Pinto 
2491a8f5102aSJoao Pinto 		prio = priv->plat->tx_queues_cfg[queue].prio;
2492c10d4c82SJose Abreu 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2493a8f5102aSJoao Pinto 	}
2494a8f5102aSJoao Pinto }
2495a8f5102aSJoao Pinto 
2496a8f5102aSJoao Pinto /**
2497abe80fdcSJoao Pinto  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2498abe80fdcSJoao Pinto  *  @priv: driver private structure
2499abe80fdcSJoao Pinto  *  Description: It is used for configuring the RX queue routing
2500abe80fdcSJoao Pinto  */
2501abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2502abe80fdcSJoao Pinto {
2503abe80fdcSJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2504abe80fdcSJoao Pinto 	u32 queue;
2505abe80fdcSJoao Pinto 	u8 packet;
2506abe80fdcSJoao Pinto 
2507abe80fdcSJoao Pinto 	for (queue = 0; queue < rx_queues_count; queue++) {
2508abe80fdcSJoao Pinto 		/* no specific packet type routing specified for the queue */
2509abe80fdcSJoao Pinto 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2510abe80fdcSJoao Pinto 			continue;
2511abe80fdcSJoao Pinto 
2512abe80fdcSJoao Pinto 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2513c10d4c82SJose Abreu 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2514abe80fdcSJoao Pinto 	}
2515abe80fdcSJoao Pinto }
2516abe80fdcSJoao Pinto 
2517abe80fdcSJoao Pinto /**
2518d0a9c9f9SJoao Pinto  *  stmmac_mtl_configuration - Configure MTL
2519d0a9c9f9SJoao Pinto  *  @priv: driver private structure
2520d0a9c9f9SJoao Pinto  *  Description: It is used for configuring the MTL
2521d0a9c9f9SJoao Pinto  */
2522d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2523d0a9c9f9SJoao Pinto {
2524d0a9c9f9SJoao Pinto 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2525d0a9c9f9SJoao Pinto 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2526d0a9c9f9SJoao Pinto 
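	/* Set the TX queue weights (only meaningful with multiple TX queues) */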
2527c10d4c82SJose Abreu 	if (tx_queues_count > 1)
25286a3a7193SJoao Pinto 		stmmac_set_tx_queue_weight(priv);
25296a3a7193SJoao Pinto 
2530d0a9c9f9SJoao Pinto 	/* Configure MTL RX algorithms */
2531c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2532c10d4c82SJose Abreu 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2533d0a9c9f9SJoao Pinto 				priv->plat->rx_sched_algorithm);
2534d0a9c9f9SJoao Pinto 
2535d0a9c9f9SJoao Pinto 	/* Configure MTL TX algorithms */
2536c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2537c10d4c82SJose Abreu 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2538d0a9c9f9SJoao Pinto 				priv->plat->tx_sched_algorithm);
2539d0a9c9f9SJoao Pinto 
254019d91873SJoao Pinto 	/* Configure CBS in AVB TX queues */
2541c10d4c82SJose Abreu 	if (tx_queues_count > 1)
254219d91873SJoao Pinto 		stmmac_configure_cbs(priv);
254319d91873SJoao Pinto 
2544d43042f4SJoao Pinto 	/* Map RX MTL to DMA channels */
2545d43042f4SJoao Pinto 	stmmac_rx_queue_dma_chan_map(priv);
2546d43042f4SJoao Pinto 
2547d0a9c9f9SJoao Pinto 	/* Enable MAC RX Queues */
2548d0a9c9f9SJoao Pinto 	stmmac_mac_enable_rx_queues(priv);
25496deee222SJoao Pinto 
2550a8f5102aSJoao Pinto 	/* Set RX priorities */
2551c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2552a8f5102aSJoao Pinto 		stmmac_mac_config_rx_queues_prio(priv);
2553a8f5102aSJoao Pinto 
2554a8f5102aSJoao Pinto 	/* Set TX priorities */
2555c10d4c82SJose Abreu 	if (tx_queues_count > 1)
2556a8f5102aSJoao Pinto 		stmmac_mac_config_tx_queues_prio(priv);
2557abe80fdcSJoao Pinto 
2558abe80fdcSJoao Pinto 	/* Set RX routing */
2559c10d4c82SJose Abreu 	if (rx_queues_count > 1)
2560abe80fdcSJoao Pinto 		stmmac_mac_config_rx_queues_routing(priv);
2561d0a9c9f9SJoao Pinto }
2562d0a9c9f9SJoao Pinto 
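/**
 * stmmac_safety_feat_configuration - enable the HW safety features
 * @priv: driver private structure
 * Description: if the capability register reports safety feature support
 * (dma_cap.asp), configure and enable it; otherwise only log that it is
 * not available.
 */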
25638bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
25648bf993a5SJose Abreu {
2565c10d4c82SJose Abreu 	if (priv->dma_cap.asp) {
25668bf993a5SJose Abreu 		netdev_info(priv->dev, "Enabling Safety Features\n");
2567c10d4c82SJose Abreu 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
25688bf993a5SJose Abreu 	} else {
25698bf993a5SJose Abreu 		netdev_info(priv->dev, "No Safety Features support found\n");
25708bf993a5SJose Abreu 	}
25718bf993a5SJose Abreu }
25728bf993a5SJose Abreu 
2573d0a9c9f9SJoao Pinto /**
2574732fdf0eSGiuseppe CAVALLARO  * stmmac_hw_setup - setup mac in a usable state.
2575523f11b5SSrinivas Kandagatla  *  @dev : pointer to the device structure.
2576523f11b5SSrinivas Kandagatla  *  Description:
2577732fdf0eSGiuseppe CAVALLARO  *  this is the main function to setup the HW in a usable state: the
2578732fdf0eSGiuseppe CAVALLARO  *  dma engine is reset, the core registers are configured (e.g. AXI,
2579732fdf0eSGiuseppe CAVALLARO  *  Checksum features, timers). The DMA is ready to start receiving and
2580732fdf0eSGiuseppe CAVALLARO  *  transmitting.
2581523f11b5SSrinivas Kandagatla  *  Return value:
2582523f11b5SSrinivas Kandagatla  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2583523f11b5SSrinivas Kandagatla  *  file on failure.
2584523f11b5SSrinivas Kandagatla  */
2585fe131929SHuacai Chen static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2586523f11b5SSrinivas Kandagatla {
2587523f11b5SSrinivas Kandagatla 	struct stmmac_priv *priv = netdev_priv(dev);
25883c55d4d0SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2589146617b8SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2590146617b8SJoao Pinto 	u32 chan;
2591523f11b5SSrinivas Kandagatla 	int ret;
2592523f11b5SSrinivas Kandagatla 
2593523f11b5SSrinivas Kandagatla 	/* DMA initialization and SW reset */
2594523f11b5SSrinivas Kandagatla 	ret = stmmac_init_dma_engine(priv);
2595523f11b5SSrinivas Kandagatla 	if (ret < 0) {
259638ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
259738ddc59dSLABBE Corentin 			   __func__);
2598523f11b5SSrinivas Kandagatla 		return ret;
2599523f11b5SSrinivas Kandagatla 	}
2600523f11b5SSrinivas Kandagatla 
2601523f11b5SSrinivas Kandagatla 	/* Copy the MAC addr into the HW  */
2602c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2603523f11b5SSrinivas Kandagatla 
260402e57b9dSGiuseppe CAVALLARO 	/* PS and related bits will be programmed according to the speed */
260502e57b9dSGiuseppe CAVALLARO 	if (priv->hw->pcs) {
260602e57b9dSGiuseppe CAVALLARO 		int speed = priv->plat->mac_port_sel_speed;
260702e57b9dSGiuseppe CAVALLARO 
260802e57b9dSGiuseppe CAVALLARO 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
260902e57b9dSGiuseppe CAVALLARO 		    (speed == SPEED_1000)) {
261002e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = speed;
261102e57b9dSGiuseppe CAVALLARO 		} else {
261202e57b9dSGiuseppe CAVALLARO 			dev_warn(priv->device, "invalid port speed\n");
261302e57b9dSGiuseppe CAVALLARO 			priv->hw->ps = 0;
261402e57b9dSGiuseppe CAVALLARO 		}
261502e57b9dSGiuseppe CAVALLARO 	}
261602e57b9dSGiuseppe CAVALLARO 
2617523f11b5SSrinivas Kandagatla 	/* Initialize the MAC Core */
2618c10d4c82SJose Abreu 	stmmac_core_init(priv, priv->hw, dev);
2619523f11b5SSrinivas Kandagatla 
2620d0a9c9f9SJoao Pinto 	/* Initialize MTL*/
2621d0a9c9f9SJoao Pinto 	stmmac_mtl_configuration(priv);
26229eb12474Sjpinto 
26238bf993a5SJose Abreu 	/* Initialize Safety Features */
26248bf993a5SJose Abreu 	stmmac_safety_feat_configuration(priv);
26258bf993a5SJose Abreu 
2626c10d4c82SJose Abreu 	ret = stmmac_rx_ipc(priv, priv->hw);
2627978aded4SGiuseppe CAVALLARO 	if (!ret) {
262838ddc59dSLABBE Corentin 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2629978aded4SGiuseppe CAVALLARO 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2630d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
2631978aded4SGiuseppe CAVALLARO 	}
2632978aded4SGiuseppe CAVALLARO 
2633523f11b5SSrinivas Kandagatla 	/* Enable the MAC Rx/Tx */
2634c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, true);
2635523f11b5SSrinivas Kandagatla 
2636b4f0a661SJoao Pinto 	/* Set the HW DMA mode and the COE */
2637b4f0a661SJoao Pinto 	stmmac_dma_operation_mode(priv);
2638b4f0a661SJoao Pinto 
2639523f11b5SSrinivas Kandagatla 	stmmac_mmc_setup(priv);
2640523f11b5SSrinivas Kandagatla 
2641fe131929SHuacai Chen 	if (init_ptp) {
26420ad2be79SThierry Reding 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
26430ad2be79SThierry Reding 		if (ret < 0)
26440ad2be79SThierry Reding 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
26450ad2be79SThierry Reding 
2646523f11b5SSrinivas Kandagatla 		ret = stmmac_init_ptp(priv);
2647722eef28SHeiner Kallweit 		if (ret == -EOPNOTSUPP)
2648722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2649722eef28SHeiner Kallweit 		else if (ret)
2650722eef28SHeiner Kallweit 			netdev_warn(priv->dev, "PTP init failed\n");
2651fe131929SHuacai Chen 	}
2652523f11b5SSrinivas Kandagatla 
2653523f11b5SSrinivas Kandagatla 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2654523f11b5SSrinivas Kandagatla 
2655a4e887faSJose Abreu 	if (priv->use_riwt) {
2656a4e887faSJose Abreu 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2657a4e887faSJose Abreu 		if (!ret)
2658523f11b5SSrinivas Kandagatla 			priv->rx_riwt = MAX_DMA_RIWT;
2659523f11b5SSrinivas Kandagatla 	}
2660523f11b5SSrinivas Kandagatla 
2661c10d4c82SJose Abreu 	if (priv->hw->pcs)
2662c10d4c82SJose Abreu 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2663523f11b5SSrinivas Kandagatla 
26644854ab99SJoao Pinto 	/* set TX and RX rings length */
26654854ab99SJoao Pinto 	stmmac_set_rings_length(priv);
26664854ab99SJoao Pinto 
2667f748be53SAlexandre TORGUE 	/* Enable TSO */
2668146617b8SJoao Pinto 	if (priv->tso) {
2669146617b8SJoao Pinto 		for (chan = 0; chan < tx_cnt; chan++)
2670a4e887faSJose Abreu 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2671146617b8SJoao Pinto 	}
2672f748be53SAlexandre TORGUE 
26737d9e6c5aSJose Abreu 	/* Start the ball rolling... */
26747d9e6c5aSJose Abreu 	stmmac_start_all_dma(priv);
26757d9e6c5aSJose Abreu 
2676523f11b5SSrinivas Kandagatla 	return 0;
2677523f11b5SSrinivas Kandagatla }
2678523f11b5SSrinivas Kandagatla 
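/* Partial teardown of stmmac_hw_setup(): disable the PTP reference clock */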
2679c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev)
2680c66f6c37SThierry Reding {
2681c66f6c37SThierry Reding 	struct stmmac_priv *priv = netdev_priv(dev);
2682c66f6c37SThierry Reding 
2683c66f6c37SThierry Reding 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2684c66f6c37SThierry Reding }
2685c66f6c37SThierry Reding 
2686523f11b5SSrinivas Kandagatla /**
26877ac6653aSJeff Kirsher  *  stmmac_open - open entry point of the driver
26887ac6653aSJeff Kirsher  *  @dev : pointer to the device structure.
26897ac6653aSJeff Kirsher  *  Description:
26907ac6653aSJeff Kirsher  *  This function is the open entry point of the driver.
26917ac6653aSJeff Kirsher  *  Return value:
26927ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
26937ac6653aSJeff Kirsher  *  file on failure.
26947ac6653aSJeff Kirsher  */
26957ac6653aSJeff Kirsher static int stmmac_open(struct net_device *dev)
26967ac6653aSJeff Kirsher {
26977ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
26988fce3331SJose Abreu 	u32 chan;
26997ac6653aSJeff Kirsher 	int ret;
27007ac6653aSJeff Kirsher 
27013fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
27023fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
27033fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
27047ac6653aSJeff Kirsher 		ret = stmmac_init_phy(dev);
2705e58bb43fSGiuseppe CAVALLARO 		if (ret) {
270638ddc59dSLABBE Corentin 			netdev_err(priv->dev,
270738ddc59dSLABBE Corentin 				   "%s: Cannot attach to PHY (error: %d)\n",
2708e58bb43fSGiuseppe CAVALLARO 				   __func__, ret);
270989df20d9SHans de Goede 			return ret;
27107ac6653aSJeff Kirsher 		}
2711e58bb43fSGiuseppe CAVALLARO 	}
27127ac6653aSJeff Kirsher 
2713523f11b5SSrinivas Kandagatla 	/* Extra statistics */
2714523f11b5SSrinivas Kandagatla 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2715523f11b5SSrinivas Kandagatla 	priv->xstats.threshold = tc;
2716523f11b5SSrinivas Kandagatla 
27175bacd778SLABBE Corentin 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
271822ad3838SGiuseppe Cavallaro 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
271956329137SBartlomiej Zolnierkiewicz 
27205bacd778SLABBE Corentin 	ret = alloc_dma_desc_resources(priv);
27215bacd778SLABBE Corentin 	if (ret < 0) {
27225bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
27235bacd778SLABBE Corentin 			   __func__);
27245bacd778SLABBE Corentin 		goto dma_desc_error;
27255bacd778SLABBE Corentin 	}
27265bacd778SLABBE Corentin 
27275bacd778SLABBE Corentin 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
27285bacd778SLABBE Corentin 	if (ret < 0) {
27295bacd778SLABBE Corentin 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
27305bacd778SLABBE Corentin 			   __func__);
27315bacd778SLABBE Corentin 		goto init_error;
27325bacd778SLABBE Corentin 	}
27335bacd778SLABBE Corentin 
2734fe131929SHuacai Chen 	ret = stmmac_hw_setup(dev, true);
273556329137SBartlomiej Zolnierkiewicz 	if (ret < 0) {
273638ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2737c9324d18SGiuseppe CAVALLARO 		goto init_error;
27387ac6653aSJeff Kirsher 	}
27397ac6653aSJeff Kirsher 
2740777da230SGiuseppe CAVALLARO 	stmmac_init_tx_coalesce(priv);
2741777da230SGiuseppe CAVALLARO 
2742d6d50c7eSPhilippe Reynes 	if (dev->phydev)
2743d6d50c7eSPhilippe Reynes 		phy_start(dev->phydev);
27447ac6653aSJeff Kirsher 
27457ac6653aSJeff Kirsher 	/* Request the IRQ lines */
27467ac6653aSJeff Kirsher 	ret = request_irq(dev->irq, stmmac_interrupt,
27477ac6653aSJeff Kirsher 			  IRQF_SHARED, dev->name, dev);
27487ac6653aSJeff Kirsher 	if (unlikely(ret < 0)) {
274938ddc59dSLABBE Corentin 		netdev_err(priv->dev,
275038ddc59dSLABBE Corentin 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
27517ac6653aSJeff Kirsher 			   __func__, dev->irq, ret);
27526c1e5abeSThierry Reding 		goto irq_error;
27537ac6653aSJeff Kirsher 	}
27547ac6653aSJeff Kirsher 
27557a13f8f5SFrancesco Virlinzi 	/* Request the Wake IRQ in case of another line is used for WoL */
27567a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq) {
27577a13f8f5SFrancesco Virlinzi 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
27587a13f8f5SFrancesco Virlinzi 				  IRQF_SHARED, dev->name, dev);
27597a13f8f5SFrancesco Virlinzi 		if (unlikely(ret < 0)) {
276038ddc59dSLABBE Corentin 			netdev_err(priv->dev,
276138ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2762ceb69499SGiuseppe CAVALLARO 				   __func__, priv->wol_irq, ret);
2763c9324d18SGiuseppe CAVALLARO 			goto wolirq_error;
27647a13f8f5SFrancesco Virlinzi 		}
27657a13f8f5SFrancesco Virlinzi 	}
27667a13f8f5SFrancesco Virlinzi 
2767d765955dSGiuseppe CAVALLARO 	/* Request the IRQ lines */
2768d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0) {
2769d765955dSGiuseppe CAVALLARO 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2770d765955dSGiuseppe CAVALLARO 				  dev->name, dev);
2771d765955dSGiuseppe CAVALLARO 		if (unlikely(ret < 0)) {
277238ddc59dSLABBE Corentin 			netdev_err(priv->dev,
277338ddc59dSLABBE Corentin 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2774d765955dSGiuseppe CAVALLARO 				   __func__, priv->lpi_irq, ret);
2775c9324d18SGiuseppe CAVALLARO 			goto lpiirq_error;
2776d765955dSGiuseppe CAVALLARO 		}
2777d765955dSGiuseppe CAVALLARO 	}
2778d765955dSGiuseppe CAVALLARO 
2779c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
2780c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
27817ac6653aSJeff Kirsher 
27827ac6653aSJeff Kirsher 	return 0;
27837ac6653aSJeff Kirsher 
2784c9324d18SGiuseppe CAVALLARO lpiirq_error:
2785d765955dSGiuseppe CAVALLARO 	if (priv->wol_irq != dev->irq)
2786d765955dSGiuseppe CAVALLARO 		free_irq(priv->wol_irq, dev);
2787c9324d18SGiuseppe CAVALLARO wolirq_error:
27887a13f8f5SFrancesco Virlinzi 	free_irq(dev->irq, dev);
27896c1e5abeSThierry Reding irq_error:
27906c1e5abeSThierry Reding 	if (dev->phydev)
27916c1e5abeSThierry Reding 		phy_stop(dev->phydev);
27927a13f8f5SFrancesco Virlinzi 
27938fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
27948fce3331SJose Abreu 		del_timer_sync(&priv->tx_queue[chan].txtimer);
27958fce3331SJose Abreu 
2796c66f6c37SThierry Reding 	stmmac_hw_teardown(dev);
2797c9324d18SGiuseppe CAVALLARO init_error:
2798c9324d18SGiuseppe CAVALLARO 	free_dma_desc_resources(priv);
27995bacd778SLABBE Corentin dma_desc_error:
2800d6d50c7eSPhilippe Reynes 	if (dev->phydev)
2801d6d50c7eSPhilippe Reynes 		phy_disconnect(dev->phydev);
28024bfcbd7aSFrancesco Virlinzi 
28037ac6653aSJeff Kirsher 	return ret;
28047ac6653aSJeff Kirsher }
28057ac6653aSJeff Kirsher 
28067ac6653aSJeff Kirsher /**
28077ac6653aSJeff Kirsher  *  stmmac_release - close entry point of the driver
28087ac6653aSJeff Kirsher  *  @dev : device pointer.
28097ac6653aSJeff Kirsher  *  Description:
28107ac6653aSJeff Kirsher  *  This is the stop entry point of the driver.
28117ac6653aSJeff Kirsher  */
28127ac6653aSJeff Kirsher static int stmmac_release(struct net_device *dev)
28137ac6653aSJeff Kirsher {
28147ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
28158fce3331SJose Abreu 	u32 chan;
28167ac6653aSJeff Kirsher 
2817d765955dSGiuseppe CAVALLARO 	if (priv->eee_enabled)
2818d765955dSGiuseppe CAVALLARO 		del_timer_sync(&priv->eee_ctrl_timer);
2819d765955dSGiuseppe CAVALLARO 
28207ac6653aSJeff Kirsher 	/* Stop and disconnect the PHY */
2821d6d50c7eSPhilippe Reynes 	if (dev->phydev) {
2822d6d50c7eSPhilippe Reynes 		phy_stop(dev->phydev);
2823d6d50c7eSPhilippe Reynes 		phy_disconnect(dev->phydev);
28247ac6653aSJeff Kirsher 	}
28257ac6653aSJeff Kirsher 
2826c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
28277ac6653aSJeff Kirsher 
2828c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
28297ac6653aSJeff Kirsher 
28308fce3331SJose Abreu 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
28318fce3331SJose Abreu 		del_timer_sync(&priv->tx_queue[chan].txtimer);
28329125cdd1SGiuseppe CAVALLARO 
28337ac6653aSJeff Kirsher 	/* Free the IRQ lines */
28347ac6653aSJeff Kirsher 	free_irq(dev->irq, dev);
28357a13f8f5SFrancesco Virlinzi 	if (priv->wol_irq != dev->irq)
28367a13f8f5SFrancesco Virlinzi 		free_irq(priv->wol_irq, dev);
2837d7ec8584SChen-Yu Tsai 	if (priv->lpi_irq > 0)
2838d765955dSGiuseppe CAVALLARO 		free_irq(priv->lpi_irq, dev);
28397ac6653aSJeff Kirsher 
28407ac6653aSJeff Kirsher 	/* Stop TX/RX DMA and clear the descriptors */
2841ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
28427ac6653aSJeff Kirsher 
28437ac6653aSJeff Kirsher 	/* Release and free the Rx/Tx resources */
28447ac6653aSJeff Kirsher 	free_dma_desc_resources(priv);
28457ac6653aSJeff Kirsher 
28467ac6653aSJeff Kirsher 	/* Disable the MAC Rx/Tx */
2847c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
28487ac6653aSJeff Kirsher 
28497ac6653aSJeff Kirsher 	netif_carrier_off(dev);
28507ac6653aSJeff Kirsher 
285192ba6888SRayagond Kokatanur 	stmmac_release_ptp(priv);
285292ba6888SRayagond Kokatanur 
28537ac6653aSJeff Kirsher 	return 0;
28547ac6653aSJeff Kirsher }
28557ac6653aSJeff Kirsher 
28567ac6653aSJeff Kirsher /**
2857f748be53SAlexandre TORGUE  *  stmmac_tso_allocator - fill the TX descriptors for a TSO payload
2858f748be53SAlexandre TORGUE  *  @priv: driver private structure
2859f748be53SAlexandre TORGUE  *  @des: buffer start address
2860f748be53SAlexandre TORGUE  *  @total_len: total length to fill in descriptors
2861f748be53SAlexandre TORGUE  *  @last_segment: condition for the last descriptor
2862ce736788SJoao Pinto  *  @queue: TX queue index
2863f748be53SAlexandre TORGUE  *  Description:
2864f748be53SAlexandre TORGUE  *  This function fills the descriptors and requests new ones according to
2865f748be53SAlexandre TORGUE  *  the buffer length to fill
2866f748be53SAlexandre TORGUE  */
2867f748be53SAlexandre TORGUE static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2868ce736788SJoao Pinto 				 int total_len, bool last_segment, u32 queue)
2869f748be53SAlexandre TORGUE {
2870ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2871f748be53SAlexandre TORGUE 	struct dma_desc *desc;
28725bacd778SLABBE Corentin 	u32 buff_size;
2873ce736788SJoao Pinto 	int tmp_len;
2874f748be53SAlexandre TORGUE 
2875f748be53SAlexandre TORGUE 	tmp_len = total_len;
2876f748be53SAlexandre TORGUE 
2877f748be53SAlexandre TORGUE 	while (tmp_len > 0) {
2878ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2879b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2880ce736788SJoao Pinto 		desc = tx_q->dma_tx + tx_q->cur_tx;
2881f748be53SAlexandre TORGUE 
2882f8be0d78SMichael Weiser 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2883f748be53SAlexandre TORGUE 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2884f748be53SAlexandre TORGUE 			    TSO_MAX_BUFF_SIZE : tmp_len;
2885f748be53SAlexandre TORGUE 
288642de047dSJose Abreu 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2887f748be53SAlexandre TORGUE 				0, 1,
2888426849e6SNiklas Cassel 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2889f748be53SAlexandre TORGUE 				0, 0);
2890f748be53SAlexandre TORGUE 
2891f748be53SAlexandre TORGUE 		tmp_len -= TSO_MAX_BUFF_SIZE;
2892f748be53SAlexandre TORGUE 	}
2893f748be53SAlexandre TORGUE }
2894f748be53SAlexandre TORGUE 
2895f748be53SAlexandre TORGUE /**
2896f748be53SAlexandre TORGUE  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2897f748be53SAlexandre TORGUE  *  @skb : the socket buffer
2898f748be53SAlexandre TORGUE  *  @dev : device pointer
2899f748be53SAlexandre TORGUE  *  Description: this is the transmit function that is called on TSO frames
2900f748be53SAlexandre TORGUE  *  (support available on GMAC4 and newer chips).
2901f748be53SAlexandre TORGUE  *  Diagram below show the ring programming in case of TSO frames:
2902f748be53SAlexandre TORGUE  *
2903f748be53SAlexandre TORGUE  *  First Descriptor
2904f748be53SAlexandre TORGUE  *   --------
2905f748be53SAlexandre TORGUE  *   | DES0 |---> buffer1 = L2/L3/L4 header
2906f748be53SAlexandre TORGUE  *   | DES1 |---> TCP Payload (can continue on next descr...)
2907f748be53SAlexandre TORGUE  *   | DES2 |---> buffer 1 and 2 len
2908f748be53SAlexandre TORGUE  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2909f748be53SAlexandre TORGUE  *   --------
2910f748be53SAlexandre TORGUE  *	|
2911f748be53SAlexandre TORGUE  *     ...
2912f748be53SAlexandre TORGUE  *	|
2913f748be53SAlexandre TORGUE  *   --------
2914f748be53SAlexandre TORGUE  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2915f748be53SAlexandre TORGUE  *   | DES1 | --|
2916f748be53SAlexandre TORGUE  *   | DES2 | --> buffer 1 and 2 len
2917f748be53SAlexandre TORGUE  *   | DES3 |
2918f748be53SAlexandre TORGUE  *   --------
2919f748be53SAlexandre TORGUE  *
2920f748be53SAlexandre TORGUE  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
2921f748be53SAlexandre TORGUE  */
2922f748be53SAlexandre TORGUE static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2923f748be53SAlexandre TORGUE {
2924ce736788SJoao Pinto 	struct dma_desc *desc, *first, *mss_desc = NULL;
2925f748be53SAlexandre TORGUE 	struct stmmac_priv *priv = netdev_priv(dev);
2926f748be53SAlexandre TORGUE 	int nfrags = skb_shinfo(skb)->nr_frags;
2927ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
2928f748be53SAlexandre TORGUE 	unsigned int first_entry, des;
2929ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
2930ce736788SJoao Pinto 	int tmp_pay_len = 0;
2931ce736788SJoao Pinto 	u32 pay_len, mss;
2932f748be53SAlexandre TORGUE 	u8 proto_hdr_len;
2933f748be53SAlexandre TORGUE 	int i;
2934f748be53SAlexandre TORGUE 
2935ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
2936ce736788SJoao Pinto 
2937f748be53SAlexandre TORGUE 	/* Compute header lengths */
2938f748be53SAlexandre TORGUE 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2939f748be53SAlexandre TORGUE 
2940f748be53SAlexandre TORGUE 	/* Desc availability based on threshold should be enough safe */
2941ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <
2942f748be53SAlexandre TORGUE 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2943c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2944c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2945c22a3f48SJoao Pinto 								queue));
2946f748be53SAlexandre TORGUE 			/* This is a hard error, log it. */
294738ddc59dSLABBE Corentin 			netdev_err(priv->dev,
294838ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
294938ddc59dSLABBE Corentin 				   __func__);
2950f748be53SAlexandre TORGUE 		}
2951f748be53SAlexandre TORGUE 		return NETDEV_TX_BUSY;
2952f748be53SAlexandre TORGUE 	}
2953f748be53SAlexandre TORGUE 
2954f748be53SAlexandre TORGUE 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2955f748be53SAlexandre TORGUE 
2956f748be53SAlexandre TORGUE 	mss = skb_shinfo(skb)->gso_size;
2957f748be53SAlexandre TORGUE 
2958f748be53SAlexandre TORGUE 	/* set new MSS value if needed */
29598d212a9eSNiklas Cassel 	if (mss != tx_q->mss) {
2960ce736788SJoao Pinto 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
296142de047dSJose Abreu 		stmmac_set_mss(priv, mss_desc, mss);
29628d212a9eSNiklas Cassel 		tx_q->mss = mss;
2963ce736788SJoao Pinto 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2964b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2965f748be53SAlexandre TORGUE 	}
2966f748be53SAlexandre TORGUE 
2967f748be53SAlexandre TORGUE 	if (netif_msg_tx_queued(priv)) {
2968f748be53SAlexandre TORGUE 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2969f748be53SAlexandre TORGUE 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2970f748be53SAlexandre TORGUE 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2971f748be53SAlexandre TORGUE 			skb->data_len);
2972f748be53SAlexandre TORGUE 	}
2973f748be53SAlexandre TORGUE 
2974ce736788SJoao Pinto 	first_entry = tx_q->cur_tx;
2975b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2976f748be53SAlexandre TORGUE 
2977ce736788SJoao Pinto 	desc = tx_q->dma_tx + first_entry;
2978f748be53SAlexandre TORGUE 	first = desc;
2979f748be53SAlexandre TORGUE 
2980f748be53SAlexandre TORGUE 	/* first descriptor: fill Headers on Buf1 */
2981f748be53SAlexandre TORGUE 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2982f748be53SAlexandre TORGUE 			     DMA_TO_DEVICE);
2983f748be53SAlexandre TORGUE 	if (dma_mapping_error(priv->device, des))
2984f748be53SAlexandre TORGUE 		goto dma_map_err;
2985f748be53SAlexandre TORGUE 
2986ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2987ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2988f748be53SAlexandre TORGUE 
2989f8be0d78SMichael Weiser 	first->des0 = cpu_to_le32(des);
2990f748be53SAlexandre TORGUE 
2991f748be53SAlexandre TORGUE 	/* Fill start of payload in buff2 of first descriptor */
2992f748be53SAlexandre TORGUE 	if (pay_len)
2993f8be0d78SMichael Weiser 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2994f748be53SAlexandre TORGUE 
2995f748be53SAlexandre TORGUE 	/* If needed take extra descriptors to fill the remaining payload */
2996f748be53SAlexandre TORGUE 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2997f748be53SAlexandre TORGUE 
2998ce736788SJoao Pinto 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2999f748be53SAlexandre TORGUE 
3000f748be53SAlexandre TORGUE 	/* Prepare fragments */
3001f748be53SAlexandre TORGUE 	for (i = 0; i < nfrags; i++) {
3002f748be53SAlexandre TORGUE 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3003f748be53SAlexandre TORGUE 
3004f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0,
3005f748be53SAlexandre TORGUE 				       skb_frag_size(frag),
3006f748be53SAlexandre TORGUE 				       DMA_TO_DEVICE);
3007937071c1SThierry Reding 		if (dma_mapping_error(priv->device, des))
3008937071c1SThierry Reding 			goto dma_map_err;
3009f748be53SAlexandre TORGUE 
3010f748be53SAlexandre TORGUE 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3011ce736788SJoao Pinto 				     (i == nfrags - 1), queue);
3012f748be53SAlexandre TORGUE 
3013ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3014ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3015ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3016f748be53SAlexandre TORGUE 	}
3017f748be53SAlexandre TORGUE 
3018ce736788SJoao Pinto 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3019f748be53SAlexandre TORGUE 
302005cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
302105cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
302205cf0d1bSNiklas Cassel 
302305cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
302405cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
302505cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
302605cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
302705cf0d1bSNiklas Cassel 	 */
3028ce736788SJoao Pinto 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3029f748be53SAlexandre TORGUE 
3030ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3031b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
303238ddc59dSLABBE Corentin 			  __func__);
3033c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3034f748be53SAlexandre TORGUE 	}
3035f748be53SAlexandre TORGUE 
3036f748be53SAlexandre TORGUE 	dev->stats.tx_bytes += skb->len;
3037f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_frames++;
3038f748be53SAlexandre TORGUE 	priv->xstats.tx_tso_nfrags += nfrags;
3039f748be53SAlexandre TORGUE 
3040f748be53SAlexandre TORGUE 	/* Manage tx mitigation */
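	/* Request an interrupt on completion only once enough frames have
	 * accumulated (tx_coal_frames); otherwise arm the per-queue SW timer
	 * to reap completed packets later.
	 */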
30418fce3331SJose Abreu 	tx_q->tx_count_frames += nfrags + 1;
30428fce3331SJose Abreu 	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
304342de047dSJose Abreu 		stmmac_set_tx_ic(priv, desc);
3044f748be53SAlexandre TORGUE 		priv->xstats.tx_set_ic_bit++;
30458fce3331SJose Abreu 		tx_q->tx_count_frames = 0;
30468fce3331SJose Abreu 	} else {
30478fce3331SJose Abreu 		stmmac_tx_timer_arm(priv, queue);
3048f748be53SAlexandre TORGUE 	}
3049f748be53SAlexandre TORGUE 
3050f748be53SAlexandre TORGUE 	skb_tx_timestamp(skb);
3051f748be53SAlexandre TORGUE 
3052f748be53SAlexandre TORGUE 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3053f748be53SAlexandre TORGUE 		     priv->hwts_tx_en)) {
3054f748be53SAlexandre TORGUE 		/* declare that device is doing timestamping */
3055f748be53SAlexandre TORGUE 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
305642de047dSJose Abreu 		stmmac_enable_tx_timestamp(priv, first);
3057f748be53SAlexandre TORGUE 	}
3058f748be53SAlexandre TORGUE 
3059f748be53SAlexandre TORGUE 	/* Complete the first descriptor before granting the DMA */
306042de047dSJose Abreu 	stmmac_prepare_tso_tx_desc(priv, first, 1,
3061f748be53SAlexandre TORGUE 			proto_hdr_len,
3062f748be53SAlexandre TORGUE 			pay_len,
3063ce736788SJoao Pinto 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3064f748be53SAlexandre TORGUE 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
3065f748be53SAlexandre TORGUE 
3066f748be53SAlexandre TORGUE 	/* If context desc is used to change MSS */
306715d2ee42SNiklas Cassel 	if (mss_desc) {
306815d2ee42SNiklas Cassel 		/* Make sure that first descriptor has been completely
306915d2ee42SNiklas Cassel 		 * written, including its own bit. This is because MSS is
307015d2ee42SNiklas Cassel 		 * actually before first descriptor, so we need to make
307115d2ee42SNiklas Cassel 		 * sure that MSS's own bit is the last thing written.
307215d2ee42SNiklas Cassel 		 */
307315d2ee42SNiklas Cassel 		dma_wmb();
307442de047dSJose Abreu 		stmmac_set_tx_owner(priv, mss_desc);
307515d2ee42SNiklas Cassel 	}
3076f748be53SAlexandre TORGUE 
3077f748be53SAlexandre TORGUE 	/* The own bit must be the latest setting done when prepare the
3078f748be53SAlexandre TORGUE 	 * descriptor and then barrier is needed to make sure that
3079f748be53SAlexandre TORGUE 	 * all is coherent before granting the DMA engine.
3080f748be53SAlexandre TORGUE 	 */
308195eb930aSNiklas Cassel 	wmb();
3082f748be53SAlexandre TORGUE 
3083f748be53SAlexandre TORGUE 	if (netif_msg_pktdata(priv)) {
3084f748be53SAlexandre TORGUE 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3085ce736788SJoao Pinto 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3086ce736788SJoao Pinto 			tx_q->cur_tx, first, nfrags);
3087f748be53SAlexandre TORGUE 
308842de047dSJose Abreu 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3089f748be53SAlexandre TORGUE 
3090f748be53SAlexandre TORGUE 		pr_info(">>> frame to be transmitted: ");
3091f748be53SAlexandre TORGUE 		print_pkt(skb->data, skb_headlen(skb));
3092f748be53SAlexandre TORGUE 	}
3093f748be53SAlexandre TORGUE 
3094c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3095f748be53SAlexandre TORGUE 
30960431100bSJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3097a4e887faSJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3098f748be53SAlexandre TORGUE 
3099f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3100f748be53SAlexandre TORGUE 
3101f748be53SAlexandre TORGUE dma_map_err:
3102f748be53SAlexandre TORGUE 	dev_err(priv->device, "Tx dma map failed\n");
3103f748be53SAlexandre TORGUE 	dev_kfree_skb(skb);
3104f748be53SAlexandre TORGUE 	priv->dev->stats.tx_dropped++;
3105f748be53SAlexandre TORGUE 	return NETDEV_TX_OK;
3106f748be53SAlexandre TORGUE }
3107f748be53SAlexandre TORGUE 
3108f748be53SAlexandre TORGUE /**
3109732fdf0eSGiuseppe CAVALLARO  *  stmmac_xmit - Tx entry point of the driver
31107ac6653aSJeff Kirsher  *  @skb : the socket buffer
31117ac6653aSJeff Kirsher  *  @dev : device pointer
311232ceabcaSGiuseppe CAVALLARO  *  Description : this is the tx entry point of the driver.
311332ceabcaSGiuseppe CAVALLARO  *  It programs the descriptor chain or ring and supports oversized
311432ceabcaSGiuseppe CAVALLARO  *  frames and the SG feature.
31157ac6653aSJeff Kirsher  */
31167ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
31177ac6653aSJeff Kirsher {
31187ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
31190e80bdc9SGiuseppe Cavallaro 	unsigned int nopaged_len = skb_headlen(skb);
31204a7d666aSGiuseppe CAVALLARO 	int i, csum_insertion = 0, is_jumbo = 0;
3121ce736788SJoao Pinto 	u32 queue = skb_get_queue_mapping(skb);
31227ac6653aSJeff Kirsher 	int nfrags = skb_shinfo(skb)->nr_frags;
312359423815SColin Ian King 	int entry;
312459423815SColin Ian King 	unsigned int first_entry;
31257ac6653aSJeff Kirsher 	struct dma_desc *desc, *first;
3126ce736788SJoao Pinto 	struct stmmac_tx_queue *tx_q;
31270e80bdc9SGiuseppe Cavallaro 	unsigned int enh_desc;
3128f748be53SAlexandre TORGUE 	unsigned int des;
3129f748be53SAlexandre TORGUE 
3130ce736788SJoao Pinto 	tx_q = &priv->tx_queue[queue];
3131ce736788SJoao Pinto 
3132e2cd682dSJose Abreu 	if (priv->tx_path_in_lpi_mode)
3133e2cd682dSJose Abreu 		stmmac_disable_eee_mode(priv);
3134e2cd682dSJose Abreu 
3135f748be53SAlexandre TORGUE 	/* Manage oversized TCP frames for GMAC4 device */
3136f748be53SAlexandre TORGUE 	if (skb_is_gso(skb) && priv->tso) {
3137c5acdbeeSJose Abreu 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3138c5acdbeeSJose Abreu 			/*
3139c5acdbeeSJose Abreu 			 * There is no way to determine the number of
3140c5acdbeeSJose Abreu 			 * TSO-capable queues. Always use Queue 0 because,
3141c5acdbeeSJose Abreu 			 * if TSO is supported, at least this one will be
3142c5acdbeeSJose Abreu 			 * capable.
3143c5acdbeeSJose Abreu 			 */
3144c5acdbeeSJose Abreu 			skb_set_queue_mapping(skb, 0);
3145c5acdbeeSJose Abreu 
3146f748be53SAlexandre TORGUE 			return stmmac_tso_xmit(skb, dev);
3147f748be53SAlexandre TORGUE 		}
3148c5acdbeeSJose Abreu 	}
31497ac6653aSJeff Kirsher 
3150ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3151c22a3f48SJoao Pinto 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3152c22a3f48SJoao Pinto 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3153c22a3f48SJoao Pinto 								queue));
31547ac6653aSJeff Kirsher 			/* This is a hard error, log it. */
315538ddc59dSLABBE Corentin 			netdev_err(priv->dev,
315638ddc59dSLABBE Corentin 				   "%s: Tx Ring full when queue awake\n",
315738ddc59dSLABBE Corentin 				   __func__);
31587ac6653aSJeff Kirsher 		}
31597ac6653aSJeff Kirsher 		return NETDEV_TX_BUSY;
31607ac6653aSJeff Kirsher 	}
31617ac6653aSJeff Kirsher 
3162ce736788SJoao Pinto 	entry = tx_q->cur_tx;
31630e80bdc9SGiuseppe Cavallaro 	first_entry = entry;
3164b4c9784cSNiklas Cassel 	WARN_ON(tx_q->tx_skbuff[first_entry]);
31657ac6653aSJeff Kirsher 
31667ac6653aSJeff Kirsher 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
31677ac6653aSJeff Kirsher 
31680e80bdc9SGiuseppe Cavallaro 	if (likely(priv->extend_desc))
3169ce736788SJoao Pinto 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3170c24602efSGiuseppe CAVALLARO 	else
3171ce736788SJoao Pinto 		desc = tx_q->dma_tx + entry;
3172c24602efSGiuseppe CAVALLARO 
31737ac6653aSJeff Kirsher 	first = desc;
31747ac6653aSJeff Kirsher 
31750e80bdc9SGiuseppe Cavallaro 	enh_desc = priv->plat->enh_desc;
31764a7d666aSGiuseppe CAVALLARO 	/* To program the descriptors according to the size of the frame */
317729896a67SGiuseppe CAVALLARO 	if (enh_desc)
31782c520b1cSJose Abreu 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
317929896a67SGiuseppe CAVALLARO 
318063a550fcSJose Abreu 	if (unlikely(is_jumbo)) {
31812c520b1cSJose Abreu 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
318263a550fcSJose Abreu 		if (unlikely(entry < 0) && (entry != -EINVAL))
3183362b37beSGiuseppe CAVALLARO 			goto dma_map_err;
318429896a67SGiuseppe CAVALLARO 	}
31857ac6653aSJeff Kirsher 
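	/* Map each paged fragment and fill one descriptor per fragment;
	 * only the last fragment is marked as the end of the packet.
	 */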
31867ac6653aSJeff Kirsher 	for (i = 0; i < nfrags; i++) {
31879e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
31889e903e08SEric Dumazet 		int len = skb_frag_size(frag);
3189be434d50SGiuseppe Cavallaro 		bool last_segment = (i == (nfrags - 1));
31907ac6653aSJeff Kirsher 
3191e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3192b4c9784cSNiklas Cassel 		WARN_ON(tx_q->tx_skbuff[entry]);
3193e3ad57c9SGiuseppe Cavallaro 
31940e80bdc9SGiuseppe Cavallaro 		if (likely(priv->extend_desc))
3195ce736788SJoao Pinto 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3196c24602efSGiuseppe CAVALLARO 		else
3197ce736788SJoao Pinto 			desc = tx_q->dma_tx + entry;
31987ac6653aSJeff Kirsher 
3199f748be53SAlexandre TORGUE 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3200f722380dSIan Campbell 				       DMA_TO_DEVICE);
3201f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
3202362b37beSGiuseppe CAVALLARO 			goto dma_map_err; /* should reuse desc w/o issues */
3203362b37beSGiuseppe CAVALLARO 
3204ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].buf = des;
32056844171dSJose Abreu 
32066844171dSJose Abreu 		stmmac_set_desc_addr(priv, desc, des);
3207f748be53SAlexandre TORGUE 
3208ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3209ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].len = len;
3210ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
32110e80bdc9SGiuseppe Cavallaro 
32120e80bdc9SGiuseppe Cavallaro 		/* Prepare the descriptor and set the own bit too */
321342de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
321442de047dSJose Abreu 				priv->mode, 1, last_segment, skb->len);
32157ac6653aSJeff Kirsher 	}
32167ac6653aSJeff Kirsher 
321705cf0d1bSNiklas Cassel 	/* Only the last descriptor gets to point to the skb. */
321805cf0d1bSNiklas Cassel 	tx_q->tx_skbuff[entry] = skb;
3219e3ad57c9SGiuseppe Cavallaro 
322005cf0d1bSNiklas Cassel 	/* We've used all descriptors we need for this skb, however,
322105cf0d1bSNiklas Cassel 	 * advance cur_tx so that it references a fresh descriptor.
322205cf0d1bSNiklas Cassel 	 * ndo_start_xmit will fill this descriptor the next time it's
322305cf0d1bSNiklas Cassel 	 * called and stmmac_tx_clean may clean up to this descriptor.
322405cf0d1bSNiklas Cassel 	 */
322505cf0d1bSNiklas Cassel 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3226ce736788SJoao Pinto 	tx_q->cur_tx = entry;
32277ac6653aSJeff Kirsher 
32287ac6653aSJeff Kirsher 	if (netif_msg_pktdata(priv)) {
3229d0225e7dSAlexandre TORGUE 		void *tx_head;
3230d0225e7dSAlexandre TORGUE 
323138ddc59dSLABBE Corentin 		netdev_dbg(priv->dev,
323238ddc59dSLABBE Corentin 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3233ce736788SJoao Pinto 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
32340e80bdc9SGiuseppe Cavallaro 			   entry, first, nfrags);
323583d7af64SGiuseppe CAVALLARO 
3236c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
3237ce736788SJoao Pinto 			tx_head = (void *)tx_q->dma_etx;
3238c24602efSGiuseppe CAVALLARO 		else
3239ce736788SJoao Pinto 			tx_head = (void *)tx_q->dma_tx;
3240d0225e7dSAlexandre TORGUE 
324142de047dSJose Abreu 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3242c24602efSGiuseppe CAVALLARO 
324338ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
32447ac6653aSJeff Kirsher 		print_pkt(skb->data, skb->len);
32457ac6653aSJeff Kirsher 	}
32460e80bdc9SGiuseppe Cavallaro 
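	/* Stop the queue if fewer descriptors remain than a maximally
	 * fragmented skb could need (MAX_SKB_FRAGS + 1).
	 */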
3247ce736788SJoao Pinto 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3248b3e51069SLABBE Corentin 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3249b3e51069SLABBE Corentin 			  __func__);
3250c22a3f48SJoao Pinto 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
32517ac6653aSJeff Kirsher 	}
32527ac6653aSJeff Kirsher 
32537ac6653aSJeff Kirsher 	dev->stats.tx_bytes += skb->len;
32547ac6653aSJeff Kirsher 
32550e80bdc9SGiuseppe Cavallaro 	/* According to the coalesce parameter, the IC bit for the latest
32560e80bdc9SGiuseppe Cavallaro 	 * segment is reset and the timer is re-started to clean the tx status.
32570e80bdc9SGiuseppe Cavallaro 	 * This approach takes care of the fragments: desc is the first
32580e80bdc9SGiuseppe Cavallaro 	 * element when there is no SG.
32590e80bdc9SGiuseppe Cavallaro 	 */
32608fce3331SJose Abreu 	tx_q->tx_count_frames += nfrags + 1;
32618fce3331SJose Abreu 	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
326242de047dSJose Abreu 		stmmac_set_tx_ic(priv, desc);
32630e80bdc9SGiuseppe Cavallaro 		priv->xstats.tx_set_ic_bit++;
32648fce3331SJose Abreu 		tx_q->tx_count_frames = 0;
32658fce3331SJose Abreu 	} else {
32668fce3331SJose Abreu 		stmmac_tx_timer_arm(priv, queue);
32670e80bdc9SGiuseppe Cavallaro 	}
32680e80bdc9SGiuseppe Cavallaro 
32690e80bdc9SGiuseppe Cavallaro 	skb_tx_timestamp(skb);
32700e80bdc9SGiuseppe Cavallaro 
32710e80bdc9SGiuseppe Cavallaro 	/* Ready to fill the first descriptor and set the OWN bit w/o any
32720e80bdc9SGiuseppe Cavallaro 	 * problems because all the descriptors are actually ready to be
32730e80bdc9SGiuseppe Cavallaro 	 * passed to the DMA engine.
32740e80bdc9SGiuseppe Cavallaro 	 */
32750e80bdc9SGiuseppe Cavallaro 	if (likely(!is_jumbo)) {
32760e80bdc9SGiuseppe Cavallaro 		bool last_segment = (nfrags == 0);
32770e80bdc9SGiuseppe Cavallaro 
3278f748be53SAlexandre TORGUE 		des = dma_map_single(priv->device, skb->data,
32790e80bdc9SGiuseppe Cavallaro 				     nopaged_len, DMA_TO_DEVICE);
3280f748be53SAlexandre TORGUE 		if (dma_mapping_error(priv->device, des))
32810e80bdc9SGiuseppe Cavallaro 			goto dma_map_err;
32820e80bdc9SGiuseppe Cavallaro 
3283ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].buf = des;
32846844171dSJose Abreu 
32856844171dSJose Abreu 		stmmac_set_desc_addr(priv, first, des);
3286f748be53SAlexandre TORGUE 
3287ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3288ce736788SJoao Pinto 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
32890e80bdc9SGiuseppe Cavallaro 
3290891434b1SRayagond Kokatanur 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3291891434b1SRayagond Kokatanur 			     priv->hwts_tx_en)) {
3292891434b1SRayagond Kokatanur 			/* declare that device is doing timestamping */
3293891434b1SRayagond Kokatanur 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
329442de047dSJose Abreu 			stmmac_enable_tx_timestamp(priv, first);
3295891434b1SRayagond Kokatanur 		}
3296891434b1SRayagond Kokatanur 
32970e80bdc9SGiuseppe Cavallaro 		/* Prepare the first descriptor setting the OWN bit too */
329842de047dSJose Abreu 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
329942de047dSJose Abreu 				csum_insertion, priv->mode, 1, last_segment,
330042de047dSJose Abreu 				skb->len);
330180acbed9SAaro Koskinen 	} else {
330280acbed9SAaro Koskinen 		stmmac_set_tx_owner(priv, first);
330380acbed9SAaro Koskinen 	}
33040e80bdc9SGiuseppe Cavallaro 
33050e80bdc9SGiuseppe Cavallaro 	/* The own bit must be the last thing set when preparing the
33060e80bdc9SGiuseppe Cavallaro 	 * descriptor, and then a barrier is needed to make sure that
33070e80bdc9SGiuseppe Cavallaro 	 * everything is coherent before granting ownership to the DMA engine.
33080e80bdc9SGiuseppe Cavallaro 	 */
330995eb930aSNiklas Cassel 	wmb();
33107ac6653aSJeff Kirsher 
3311c22a3f48SJoao Pinto 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3312f748be53SAlexandre TORGUE 
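	/* Kick the DMA engine and move the tail pointer to the first unused
	 * descriptor so every queued frame gets fetched.
	 */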
3313a4e887faSJose Abreu 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
33148fce3331SJose Abreu 
33150431100bSJose Abreu 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3316f1565c60SJose Abreu 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
33177ac6653aSJeff Kirsher 
3318362b37beSGiuseppe CAVALLARO 	return NETDEV_TX_OK;
3319a9097a96SGiuseppe CAVALLARO 
3320362b37beSGiuseppe CAVALLARO dma_map_err:
332138ddc59dSLABBE Corentin 	netdev_err(priv->dev, "Tx DMA map failed\n");
3322362b37beSGiuseppe CAVALLARO 	dev_kfree_skb(skb);
3323362b37beSGiuseppe CAVALLARO 	priv->dev->stats.tx_dropped++;
33247ac6653aSJeff Kirsher 	return NETDEV_TX_OK;
33257ac6653aSJeff Kirsher }
33267ac6653aSJeff Kirsher 
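/**
 * stmmac_rx_vlan - strip the VLAN tag found in the packet data
 * @dev: device pointer
 * @skb: the socket buffer
 * Description: if the frame carries an 802.1Q or 802.1AD tag and the
 * matching RX VLAN offload feature is enabled, pop the tag from the packet
 * data and record it via the VLAN hwaccel helpers.
 */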
3327b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3328b9381985SVince Bridgers {
3329ab188e8fSElad Nachman 	struct vlan_ethhdr *veth;
3330ab188e8fSElad Nachman 	__be16 vlan_proto;
3331b9381985SVince Bridgers 	u16 vlanid;
3332b9381985SVince Bridgers 
3333ab188e8fSElad Nachman 	veth = (struct vlan_ethhdr *)skb->data;
3334ab188e8fSElad Nachman 	vlan_proto = veth->h_vlan_proto;
3335ab188e8fSElad Nachman 
3336ab188e8fSElad Nachman 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3337ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3338ab188e8fSElad Nachman 	    (vlan_proto == htons(ETH_P_8021AD) &&
3339ab188e8fSElad Nachman 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3340b9381985SVince Bridgers 		/* pop the vlan tag */
3341ab188e8fSElad Nachman 		vlanid = ntohs(veth->h_vlan_TCI);
3342ab188e8fSElad Nachman 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3343b9381985SVince Bridgers 		skb_pull(skb, VLAN_HLEN);
3344ab188e8fSElad Nachman 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3345b9381985SVince Bridgers 	}
3346b9381985SVince Bridgers }
3347b9381985SVince Bridgers 
3348b9381985SVince Bridgers 
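/* Return 1 when the zero-copy threshold has been reached and received
 * frames should be copied into newly allocated skbs instead of reusing the
 * preallocated ring buffers directly.
 */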
334954139cf3SJoao Pinto static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3350120e87f9SGiuseppe Cavallaro {
335154139cf3SJoao Pinto 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3352120e87f9SGiuseppe Cavallaro 		return 0;
3353120e87f9SGiuseppe Cavallaro 
3354120e87f9SGiuseppe Cavallaro 	return 1;
3355120e87f9SGiuseppe Cavallaro }
3356120e87f9SGiuseppe Cavallaro 
335732ceabcaSGiuseppe CAVALLARO /**
3358732fdf0eSGiuseppe CAVALLARO  * stmmac_rx_refill - refill used skb preallocated buffers
335932ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
336054139cf3SJoao Pinto  * @queue: RX queue index
336132ceabcaSGiuseppe CAVALLARO  * Description : this reallocates the skbs for the reception process
336232ceabcaSGiuseppe CAVALLARO  * that is based on zero-copy.
336332ceabcaSGiuseppe CAVALLARO  */
336454139cf3SJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
33657ac6653aSJeff Kirsher {
336654139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
336754139cf3SJoao Pinto 	int dirty = stmmac_rx_dirty(priv, queue);
336854139cf3SJoao Pinto 	unsigned int entry = rx_q->dirty_rx;
336954139cf3SJoao Pinto 
33707ac6653aSJeff Kirsher 	int bfsize = priv->dma_buf_sz;
33717ac6653aSJeff Kirsher 
3372e3ad57c9SGiuseppe Cavallaro 	while (dirty-- > 0) {
3373c24602efSGiuseppe CAVALLARO 		struct dma_desc *p;
3374c24602efSGiuseppe CAVALLARO 
3375c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
337654139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3377c24602efSGiuseppe CAVALLARO 		else
337854139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3379c24602efSGiuseppe CAVALLARO 
338054139cf3SJoao Pinto 		if (likely(!rx_q->rx_skbuff[entry])) {
33817ac6653aSJeff Kirsher 			struct sk_buff *skb;
33827ac6653aSJeff Kirsher 
3383acb600deSEric Dumazet 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3384120e87f9SGiuseppe Cavallaro 			if (unlikely(!skb)) {
3385120e87f9SGiuseppe Cavallaro 				/* so for a while no zero-copy! */
338654139cf3SJoao Pinto 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3387120e87f9SGiuseppe Cavallaro 				if (unlikely(net_ratelimit()))
3388120e87f9SGiuseppe Cavallaro 					dev_err(priv->device,
3389120e87f9SGiuseppe Cavallaro 						"fail to alloc skb entry %d\n",
3390120e87f9SGiuseppe Cavallaro 						entry);
33917ac6653aSJeff Kirsher 				break;
3392120e87f9SGiuseppe Cavallaro 			}
33937ac6653aSJeff Kirsher 
339454139cf3SJoao Pinto 			rx_q->rx_skbuff[entry] = skb;
339554139cf3SJoao Pinto 			rx_q->rx_skbuff_dma[entry] =
33967ac6653aSJeff Kirsher 			    dma_map_single(priv->device, skb->data, bfsize,
33977ac6653aSJeff Kirsher 					   DMA_FROM_DEVICE);
3398362b37beSGiuseppe CAVALLARO 			if (dma_mapping_error(priv->device,
339954139cf3SJoao Pinto 					      rx_q->rx_skbuff_dma[entry])) {
340038ddc59dSLABBE Corentin 				netdev_err(priv->dev, "Rx DMA map failed\n");
3401362b37beSGiuseppe CAVALLARO 				dev_kfree_skb(skb);
3402362b37beSGiuseppe CAVALLARO 				break;
3403362b37beSGiuseppe CAVALLARO 			}
3404286a8372SGiuseppe CAVALLARO 
34056844171dSJose Abreu 			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
34062c520b1cSJose Abreu 			stmmac_refill_desc3(priv, rx_q, p);
3407286a8372SGiuseppe CAVALLARO 
340854139cf3SJoao Pinto 			if (rx_q->rx_zeroc_thresh > 0)
340954139cf3SJoao Pinto 				rx_q->rx_zeroc_thresh--;
3410120e87f9SGiuseppe Cavallaro 
3411b3e51069SLABBE Corentin 			netif_dbg(priv, rx_status, priv->dev,
341238ddc59dSLABBE Corentin 				  "refill entry #%d\n", entry);
34137ac6653aSJeff Kirsher 		}
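		/* Ensure the refilled buffer address is written before the
		 * descriptor is handed back to the hardware below.
		 */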
3414ad688cdbSPavel Machek 		dma_wmb();
3415f748be53SAlexandre TORGUE 
3416357951cdSJose Abreu 		stmmac_set_rx_owner(priv, p, priv->use_riwt);
3417f748be53SAlexandre TORGUE 
3418ad688cdbSPavel Machek 		dma_wmb();
3419e3ad57c9SGiuseppe Cavallaro 
3420e3ad57c9SGiuseppe Cavallaro 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
34217ac6653aSJeff Kirsher 	}
342254139cf3SJoao Pinto 	rx_q->dirty_rx = entry;
34234523a561SBiao Huang 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
34247ac6653aSJeff Kirsher }
34257ac6653aSJeff Kirsher 
342632ceabcaSGiuseppe CAVALLARO /**
3427732fdf0eSGiuseppe CAVALLARO  * stmmac_rx - manage the receive process
342832ceabcaSGiuseppe CAVALLARO  * @priv: driver private structure
342954139cf3SJoao Pinto  * @limit: napi budget
343054139cf3SJoao Pinto  * @queue: RX queue index.
343132ceabcaSGiuseppe CAVALLARO  * Description : this is the function called by the napi poll method.
343232ceabcaSGiuseppe CAVALLARO  * It gets all the frames inside the ring.
343332ceabcaSGiuseppe CAVALLARO  */
343454139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
34357ac6653aSJeff Kirsher {
343654139cf3SJoao Pinto 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
34378fce3331SJose Abreu 	struct stmmac_channel *ch = &priv->channel[queue];
343807b39753SAaro Koskinen 	unsigned int next_entry = rx_q->cur_rx;
343954139cf3SJoao Pinto 	int coe = priv->hw->rx_csum;
34407ac6653aSJeff Kirsher 	unsigned int count = 0;
34417d9e6c5aSJose Abreu 	bool xmac;
34427d9e6c5aSJose Abreu 
34437d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
34447ac6653aSJeff Kirsher 
344583d7af64SGiuseppe CAVALLARO 	if (netif_msg_rx_status(priv)) {
3446d0225e7dSAlexandre TORGUE 		void *rx_head;
3447d0225e7dSAlexandre TORGUE 
344838ddc59dSLABBE Corentin 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3449c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
345054139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_erx;
3451c24602efSGiuseppe CAVALLARO 		else
345254139cf3SJoao Pinto 			rx_head = (void *)rx_q->dma_rx;
3453d0225e7dSAlexandre TORGUE 
345442de047dSJose Abreu 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
34557ac6653aSJeff Kirsher 	}
3456c24602efSGiuseppe CAVALLARO 	while (count < limit) {
345707b39753SAaro Koskinen 		int entry, status;
34589401bb5cSGiuseppe CAVALLARO 		struct dma_desc *p;
3459ba1ffd74SGiuseppe CAVALLARO 		struct dma_desc *np;
34607ac6653aSJeff Kirsher 
346107b39753SAaro Koskinen 		entry = next_entry;
346207b39753SAaro Koskinen 
3463c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
346454139cf3SJoao Pinto 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3465c24602efSGiuseppe CAVALLARO 		else
346654139cf3SJoao Pinto 			p = rx_q->dma_rx + entry;
3467c24602efSGiuseppe CAVALLARO 
3468c1fa3212SFabrice Gasnier 		/* read the status of the incoming frame */
346942de047dSJose Abreu 		status = stmmac_rx_status(priv, &priv->dev->stats,
3470c1fa3212SFabrice Gasnier 				&priv->xstats, p);
3471c1fa3212SFabrice Gasnier 		/* check if managed by the DMA otherwise go ahead */
3472c1fa3212SFabrice Gasnier 		if (unlikely(status & dma_own))
34737ac6653aSJeff Kirsher 			break;
34747ac6653aSJeff Kirsher 
34757ac6653aSJeff Kirsher 		count++;
34767ac6653aSJeff Kirsher 
347754139cf3SJoao Pinto 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
347854139cf3SJoao Pinto 		next_entry = rx_q->cur_rx;
3479e3ad57c9SGiuseppe Cavallaro 
3480c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc)
348154139cf3SJoao Pinto 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3482c24602efSGiuseppe CAVALLARO 		else
348354139cf3SJoao Pinto 			np = rx_q->dma_rx + next_entry;
3484ba1ffd74SGiuseppe CAVALLARO 
3485ba1ffd74SGiuseppe CAVALLARO 		prefetch(np);
34867ac6653aSJeff Kirsher 
348742de047dSJose Abreu 		if (priv->extend_desc)
348842de047dSJose Abreu 			stmmac_rx_extended_status(priv, &priv->dev->stats,
348942de047dSJose Abreu 					&priv->xstats, rx_q->dma_erx + entry);
3490891434b1SRayagond Kokatanur 		if (unlikely(status == discard_frame)) {
34917ac6653aSJeff Kirsher 			priv->dev->stats.rx_errors++;
3492891434b1SRayagond Kokatanur 			if (priv->hwts_rx_en && !priv->extend_desc) {
34938d45e42bSLABBE Corentin 				/* DESC2 & DESC3 will be overwritten by the
3494891434b1SRayagond Kokatanur 				 * device with the timestamp value, hence
3495891434b1SRayagond Kokatanur 				 * reinitialize them in stmmac_rx_refill() so
3496891434b1SRayagond Kokatanur 				 * that the device can reuse them.
3497891434b1SRayagond Kokatanur 				 */
34989c8080d0SJose Abreu 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
349954139cf3SJoao Pinto 				rx_q->rx_skbuff[entry] = NULL;
3500891434b1SRayagond Kokatanur 				dma_unmap_single(priv->device,
350154139cf3SJoao Pinto 						 rx_q->rx_skbuff_dma[entry],
3502ceb69499SGiuseppe CAVALLARO 						 priv->dma_buf_sz,
3503ceb69499SGiuseppe CAVALLARO 						 DMA_FROM_DEVICE);
3504891434b1SRayagond Kokatanur 			}
3505891434b1SRayagond Kokatanur 		} else {
35067ac6653aSJeff Kirsher 			struct sk_buff *skb;
35077ac6653aSJeff Kirsher 			int frame_len;
3508f748be53SAlexandre TORGUE 			unsigned int des;
3509f748be53SAlexandre TORGUE 
3510d2df9ea0SJose Abreu 			stmmac_get_desc_addr(priv, p, &des);
351142de047dSJose Abreu 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3512ceb69499SGiuseppe CAVALLARO 
35138d45e42bSLABBE Corentin 			/*  If the frame length is greater than the skb buffer
3514f748be53SAlexandre TORGUE 			 *  size (preallocated during init) then the packet is
3515f748be53SAlexandre TORGUE 			 *  ignored.
3516f748be53SAlexandre TORGUE 			 */
3517e527c4a7SGiuseppe CAVALLARO 			if (frame_len > priv->dma_buf_sz) {
3518972c9be7SAaro Koskinen 				if (net_ratelimit())
351938ddc59dSLABBE Corentin 					netdev_err(priv->dev,
352038ddc59dSLABBE Corentin 						   "len %d larger than size (%d)\n",
352138ddc59dSLABBE Corentin 						   frame_len, priv->dma_buf_sz);
3522e527c4a7SGiuseppe CAVALLARO 				priv->dev->stats.rx_length_errors++;
352307b39753SAaro Koskinen 				continue;
3524e527c4a7SGiuseppe CAVALLARO 			}
3525e527c4a7SGiuseppe CAVALLARO 
35267ac6653aSJeff Kirsher 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3527ceb69499SGiuseppe CAVALLARO 			 * Type frames (LLC/LLC-SNAP)
3528565020aaSJose Abreu 			 *
3529565020aaSJose Abreu 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3530565020aaSJose Abreu 			 * feature is always disabled and packets need to be
3531565020aaSJose Abreu 			 * stripped manually.
3532ceb69499SGiuseppe CAVALLARO 			 */
3533565020aaSJose Abreu 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3534565020aaSJose Abreu 			    unlikely(status != llc_snap))
35357ac6653aSJeff Kirsher 				frame_len -= ETH_FCS_LEN;
35367ac6653aSJeff Kirsher 
353783d7af64SGiuseppe CAVALLARO 			if (netif_msg_rx_status(priv)) {
353838ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3539f748be53SAlexandre TORGUE 					   p, entry, des);
354038ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
354183d7af64SGiuseppe CAVALLARO 					   frame_len, status);
354283d7af64SGiuseppe CAVALLARO 			}
354322ad3838SGiuseppe Cavallaro 
3544f748be53SAlexandre TORGUE 			/* Zero-copy is always used for all sizes in the
3545f748be53SAlexandre TORGUE 			 * GMAC4 case because the used descriptors always
3546f748be53SAlexandre TORGUE 			 * need to be refilled.
3547f748be53SAlexandre TORGUE 			 */
35487d9e6c5aSJose Abreu 			if (unlikely(!xmac &&
3549f748be53SAlexandre TORGUE 				     ((frame_len < priv->rx_copybreak) ||
355054139cf3SJoao Pinto 				     stmmac_rx_threshold_count(rx_q)))) {
355122ad3838SGiuseppe Cavallaro 				skb = netdev_alloc_skb_ip_align(priv->dev,
355222ad3838SGiuseppe Cavallaro 								frame_len);
355322ad3838SGiuseppe Cavallaro 				if (unlikely(!skb)) {
355422ad3838SGiuseppe Cavallaro 					if (net_ratelimit())
355522ad3838SGiuseppe Cavallaro 						dev_warn(priv->device,
355622ad3838SGiuseppe Cavallaro 							 "packet dropped\n");
355722ad3838SGiuseppe Cavallaro 					priv->dev->stats.rx_dropped++;
355807b39753SAaro Koskinen 					continue;
355922ad3838SGiuseppe Cavallaro 				}
356022ad3838SGiuseppe Cavallaro 
356122ad3838SGiuseppe Cavallaro 				dma_sync_single_for_cpu(priv->device,
356254139cf3SJoao Pinto 							rx_q->rx_skbuff_dma
356322ad3838SGiuseppe Cavallaro 							[entry], frame_len,
356422ad3838SGiuseppe Cavallaro 							DMA_FROM_DEVICE);
356522ad3838SGiuseppe Cavallaro 				skb_copy_to_linear_data(skb,
356654139cf3SJoao Pinto 							rx_q->
356722ad3838SGiuseppe Cavallaro 							rx_skbuff[entry]->data,
356822ad3838SGiuseppe Cavallaro 							frame_len);
356922ad3838SGiuseppe Cavallaro 
357022ad3838SGiuseppe Cavallaro 				skb_put(skb, frame_len);
357122ad3838SGiuseppe Cavallaro 				dma_sync_single_for_device(priv->device,
357254139cf3SJoao Pinto 							   rx_q->rx_skbuff_dma
357322ad3838SGiuseppe Cavallaro 							   [entry], frame_len,
357422ad3838SGiuseppe Cavallaro 							   DMA_FROM_DEVICE);
357522ad3838SGiuseppe Cavallaro 			} else {
357654139cf3SJoao Pinto 				skb = rx_q->rx_skbuff[entry];
35777ac6653aSJeff Kirsher 				if (unlikely(!skb)) {
3578972c9be7SAaro Koskinen 					if (net_ratelimit())
357938ddc59dSLABBE Corentin 						netdev_err(priv->dev,
358038ddc59dSLABBE Corentin 							   "%s: Inconsistent Rx chain\n",
35817ac6653aSJeff Kirsher 							   priv->dev->name);
35827ac6653aSJeff Kirsher 					priv->dev->stats.rx_dropped++;
358307b39753SAaro Koskinen 					continue;
35847ac6653aSJeff Kirsher 				}
35857ac6653aSJeff Kirsher 				prefetch(skb->data - NET_IP_ALIGN);
358654139cf3SJoao Pinto 				rx_q->rx_skbuff[entry] = NULL;
358754139cf3SJoao Pinto 				rx_q->rx_zeroc_thresh++;
35887ac6653aSJeff Kirsher 
35897ac6653aSJeff Kirsher 				skb_put(skb, frame_len);
35907ac6653aSJeff Kirsher 				dma_unmap_single(priv->device,
359154139cf3SJoao Pinto 						 rx_q->rx_skbuff_dma[entry],
359222ad3838SGiuseppe Cavallaro 						 priv->dma_buf_sz,
359322ad3838SGiuseppe Cavallaro 						 DMA_FROM_DEVICE);
359422ad3838SGiuseppe Cavallaro 			}
359522ad3838SGiuseppe Cavallaro 
35967ac6653aSJeff Kirsher 			if (netif_msg_pktdata(priv)) {
359738ddc59dSLABBE Corentin 				netdev_dbg(priv->dev, "frame received (%dbytes)",
359838ddc59dSLABBE Corentin 					   frame_len);
35997ac6653aSJeff Kirsher 				print_pkt(skb->data, frame_len);
36007ac6653aSJeff Kirsher 			}
360183d7af64SGiuseppe CAVALLARO 
3602ba1ffd74SGiuseppe CAVALLARO 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3603ba1ffd74SGiuseppe CAVALLARO 
3604b9381985SVince Bridgers 			stmmac_rx_vlan(priv->dev, skb);
3605b9381985SVince Bridgers 
36067ac6653aSJeff Kirsher 			skb->protocol = eth_type_trans(skb, priv->dev);
36077ac6653aSJeff Kirsher 
3608ceb69499SGiuseppe CAVALLARO 			if (unlikely(!coe))
36097ac6653aSJeff Kirsher 				skb_checksum_none_assert(skb);
361062a2ab93SGiuseppe CAVALLARO 			else
36117ac6653aSJeff Kirsher 				skb->ip_summed = CHECKSUM_UNNECESSARY;
361262a2ab93SGiuseppe CAVALLARO 
36134ccb4585SJose Abreu 			napi_gro_receive(&ch->rx_napi, skb);
36147ac6653aSJeff Kirsher 
36157ac6653aSJeff Kirsher 			priv->dev->stats.rx_packets++;
36167ac6653aSJeff Kirsher 			priv->dev->stats.rx_bytes += frame_len;
36177ac6653aSJeff Kirsher 		}
36187ac6653aSJeff Kirsher 	}
36197ac6653aSJeff Kirsher 
362054139cf3SJoao Pinto 	stmmac_rx_refill(priv, queue);
36217ac6653aSJeff Kirsher 
36227ac6653aSJeff Kirsher 	priv->xstats.rx_pkt_n += count;
36237ac6653aSJeff Kirsher 
36247ac6653aSJeff Kirsher 	return count;
36257ac6653aSJeff Kirsher }
36267ac6653aSJeff Kirsher 
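/**
 * stmmac_napi_poll_rx - NAPI poll handler for the RX path
 * @napi: napi instance embedded in the channel structure
 * @budget: maximum number of frames to process
 * Description: process up to @budget received frames on this channel and
 * re-enable the channel DMA interrupt once the work done is below the budget.
 */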
36274ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
36287ac6653aSJeff Kirsher {
36298fce3331SJose Abreu 	struct stmmac_channel *ch =
36304ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, rx_napi);
36318fce3331SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
36328fce3331SJose Abreu 	u32 chan = ch->index;
36334ccb4585SJose Abreu 	int work_done;
36347ac6653aSJeff Kirsher 
36359125cdd1SGiuseppe CAVALLARO 	priv->xstats.napi_poll++;
3636ce736788SJoao Pinto 
36374ccb4585SJose Abreu 	work_done = stmmac_rx(priv, budget, chan);
36384ccb4585SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done))
36394ccb4585SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
36404ccb4585SJose Abreu 	return work_done;
36414ccb4585SJose Abreu }
3642ce736788SJoao Pinto 
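/**
 * stmmac_napi_poll_tx - NAPI poll handler for the TX path
 * @napi: napi instance embedded in the channel structure
 * @budget: maximum amount of work to report as completed
 * Description: reclaim completed TX descriptors, re-enable the channel DMA
 * interrupt when done and, if descriptors are still pending, kick the DMA
 * again so transmission restarts.
 */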
36434ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
36444ccb4585SJose Abreu {
36454ccb4585SJose Abreu 	struct stmmac_channel *ch =
36464ccb4585SJose Abreu 		container_of(napi, struct stmmac_channel, tx_napi);
36474ccb4585SJose Abreu 	struct stmmac_priv *priv = ch->priv_data;
36484ccb4585SJose Abreu 	struct stmmac_tx_queue *tx_q;
36494ccb4585SJose Abreu 	u32 chan = ch->index;
36504ccb4585SJose Abreu 	int work_done;
36514ccb4585SJose Abreu 
36524ccb4585SJose Abreu 	priv->xstats.napi_poll++;
36534ccb4585SJose Abreu 
36544ccb4585SJose Abreu 	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3655fa0be0a4SJose Abreu 	work_done = min(work_done, budget);
36568fce3331SJose Abreu 
36574ccb4585SJose Abreu 	if (work_done < budget && napi_complete_done(napi, work_done))
36588fce3331SJose Abreu 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
36594ccb4585SJose Abreu 
36604ccb4585SJose Abreu 	/* Force transmission restart */
36614ccb4585SJose Abreu 	tx_q = &priv->tx_queue[chan];
36624ccb4585SJose Abreu 	if (tx_q->cur_tx != tx_q->dirty_tx) {
36634ccb4585SJose Abreu 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
36644ccb4585SJose Abreu 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
36654ccb4585SJose Abreu 				       chan);
3666fa0be0a4SJose Abreu 	}
36678fce3331SJose Abreu 
36687ac6653aSJeff Kirsher 	return work_done;
36697ac6653aSJeff Kirsher }
36707ac6653aSJeff Kirsher 
36717ac6653aSJeff Kirsher /**
36727ac6653aSJeff Kirsher  *  stmmac_tx_timeout
36737ac6653aSJeff Kirsher  *  @dev : Pointer to net device structure
36747ac6653aSJeff Kirsher  *  Description: this function is called when a packet transmission fails to
36757284a3f1SGiuseppe CAVALLARO  *   complete within a reasonable time. The driver will mark the error in the
36767ac6653aSJeff Kirsher  *   netdev structure and arrange for the device to be reset to a sane state
36777ac6653aSJeff Kirsher  *   in order to transmit a new packet.
36787ac6653aSJeff Kirsher  */
36797ac6653aSJeff Kirsher static void stmmac_tx_timeout(struct net_device *dev)
36807ac6653aSJeff Kirsher {
36817ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
36827ac6653aSJeff Kirsher 
368334877a15SJose Abreu 	stmmac_global_err(priv);
36847ac6653aSJeff Kirsher }
36857ac6653aSJeff Kirsher 
36867ac6653aSJeff Kirsher /**
368701789349SJiri Pirko  *  stmmac_set_rx_mode - entry point for multicast addressing
36887ac6653aSJeff Kirsher  *  @dev : pointer to the device structure
36897ac6653aSJeff Kirsher  *  Description:
36907ac6653aSJeff Kirsher  *  This function is a driver entry point which gets called by the kernel
36917ac6653aSJeff Kirsher  *  whenever multicast addresses must be enabled/disabled.
36927ac6653aSJeff Kirsher  *  Return value:
36937ac6653aSJeff Kirsher  *  void.
36947ac6653aSJeff Kirsher  */
369501789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev)
36967ac6653aSJeff Kirsher {
36977ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
36987ac6653aSJeff Kirsher 
3699c10d4c82SJose Abreu 	stmmac_set_filter(priv, priv->hw, dev);
37007ac6653aSJeff Kirsher }
37017ac6653aSJeff Kirsher 
37027ac6653aSJeff Kirsher /**
37037ac6653aSJeff Kirsher  *  stmmac_change_mtu - entry point to change MTU size for the device.
37047ac6653aSJeff Kirsher  *  @dev : device pointer.
37057ac6653aSJeff Kirsher  *  @new_mtu : the new MTU size for the device.
37067ac6653aSJeff Kirsher  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
37077ac6653aSJeff Kirsher  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
37087ac6653aSJeff Kirsher  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
37097ac6653aSJeff Kirsher  *  Return value:
37107ac6653aSJeff Kirsher  *  0 on success and an appropriate (-)ve integer as defined in errno.h
37117ac6653aSJeff Kirsher  *  file on failure.
37127ac6653aSJeff Kirsher  */
37137ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
37147ac6653aSJeff Kirsher {
371538ddc59dSLABBE Corentin 	struct stmmac_priv *priv = netdev_priv(dev);
371638ddc59dSLABBE Corentin 
37177ac6653aSJeff Kirsher 	if (netif_running(dev)) {
371838ddc59dSLABBE Corentin 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
37197ac6653aSJeff Kirsher 		return -EBUSY;
37207ac6653aSJeff Kirsher 	}
37217ac6653aSJeff Kirsher 
37227ac6653aSJeff Kirsher 	dev->mtu = new_mtu;
3723f748be53SAlexandre TORGUE 
37247ac6653aSJeff Kirsher 	netdev_update_features(dev);
37257ac6653aSJeff Kirsher 
37267ac6653aSJeff Kirsher 	return 0;
37277ac6653aSJeff Kirsher }
37287ac6653aSJeff Kirsher 
3729c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev,
3730c8f44affSMichał Mirosław 					     netdev_features_t features)
37317ac6653aSJeff Kirsher {
37327ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
37337ac6653aSJeff Kirsher 
373438912bdbSDeepak SIKRI 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
37357ac6653aSJeff Kirsher 		features &= ~NETIF_F_RXCSUM;
3736d2afb5bdSGiuseppe CAVALLARO 
37377ac6653aSJeff Kirsher 	if (!priv->plat->tx_coe)
3738a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
37397ac6653aSJeff Kirsher 
37407ac6653aSJeff Kirsher 	/* Some GMAC devices have bugged Jumbo frame support that
37417ac6653aSJeff Kirsher 	 * needs the Tx COE disabled for oversized frames
37427ac6653aSJeff Kirsher 	 * (due to limited buffer sizes). In this case we disable
3743ceb69499SGiuseppe CAVALLARO 	 * the TX csum insertion in the TDES and do not use SF.
3744ceb69499SGiuseppe CAVALLARO 	 */
37457ac6653aSJeff Kirsher 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3746a188222bSTom Herbert 		features &= ~NETIF_F_CSUM_MASK;
37477ac6653aSJeff Kirsher 
3748f748be53SAlexandre TORGUE 	/* Disable tso if asked by ethtool */
3749f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3750f748be53SAlexandre TORGUE 		if (features & NETIF_F_TSO)
3751f748be53SAlexandre TORGUE 			priv->tso = true;
3752f748be53SAlexandre TORGUE 		else
3753f748be53SAlexandre TORGUE 			priv->tso = false;
3754f748be53SAlexandre TORGUE 	}
3755f748be53SAlexandre TORGUE 
37567ac6653aSJeff Kirsher 	return features;
37577ac6653aSJeff Kirsher }
37587ac6653aSJeff Kirsher 
3759d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev,
3760d2afb5bdSGiuseppe CAVALLARO 			       netdev_features_t features)
3761d2afb5bdSGiuseppe CAVALLARO {
3762d2afb5bdSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(netdev);
3763d2afb5bdSGiuseppe CAVALLARO 
3764d2afb5bdSGiuseppe CAVALLARO 	/* Keep the COE Type in case of csum is supporting */
3765d2afb5bdSGiuseppe CAVALLARO 	if (features & NETIF_F_RXCSUM)
3766d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
3767d2afb5bdSGiuseppe CAVALLARO 	else
3768d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = 0;
3769d2afb5bdSGiuseppe CAVALLARO 	/* No check needed because rx_coe has been set before and it will be
3770d2afb5bdSGiuseppe CAVALLARO 	 * fixed in case of issue.
3771d2afb5bdSGiuseppe CAVALLARO 	 */
3772c10d4c82SJose Abreu 	stmmac_rx_ipc(priv, priv->hw);
3773d2afb5bdSGiuseppe CAVALLARO 
3774d2afb5bdSGiuseppe CAVALLARO 	return 0;
3775d2afb5bdSGiuseppe CAVALLARO }
3776d2afb5bdSGiuseppe CAVALLARO 
377732ceabcaSGiuseppe CAVALLARO /**
377832ceabcaSGiuseppe CAVALLARO  *  stmmac_interrupt - main ISR
377932ceabcaSGiuseppe CAVALLARO  *  @irq: interrupt number.
378032ceabcaSGiuseppe CAVALLARO  *  @dev_id: to pass the net device pointer.
378132ceabcaSGiuseppe CAVALLARO  *  Description: this is the main driver interrupt service routine.
3782732fdf0eSGiuseppe CAVALLARO  *  It can call:
3783732fdf0eSGiuseppe CAVALLARO  *  o DMA service routine (to manage incoming frame reception and transmission
3784732fdf0eSGiuseppe CAVALLARO  *    status)
3785732fdf0eSGiuseppe CAVALLARO  *  o Core interrupts to manage: remote wake-up, management counter, LPI
378632ceabcaSGiuseppe CAVALLARO  *    interrupts.
378732ceabcaSGiuseppe CAVALLARO  */
37887ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
37897ac6653aSJeff Kirsher {
37907ac6653aSJeff Kirsher 	struct net_device *dev = (struct net_device *)dev_id;
37917ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(dev);
37927bac4e1eSJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
37937bac4e1eSJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
37947bac4e1eSJoao Pinto 	u32 queues_count;
37957bac4e1eSJoao Pinto 	u32 queue;
37967d9e6c5aSJose Abreu 	bool xmac;
37977bac4e1eSJoao Pinto 
37987d9e6c5aSJose Abreu 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
37997bac4e1eSJoao Pinto 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
38007ac6653aSJeff Kirsher 
380189f7f2cfSSrinivas Kandagatla 	if (priv->irq_wake)
380289f7f2cfSSrinivas Kandagatla 		pm_wakeup_event(priv->device, 0);
380389f7f2cfSSrinivas Kandagatla 
38047ac6653aSJeff Kirsher 	if (unlikely(!dev)) {
380538ddc59dSLABBE Corentin 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
38067ac6653aSJeff Kirsher 		return IRQ_NONE;
38077ac6653aSJeff Kirsher 	}
38087ac6653aSJeff Kirsher 
380934877a15SJose Abreu 	/* Check if adapter is up */
381034877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
381134877a15SJose Abreu 		return IRQ_HANDLED;
38128bf993a5SJose Abreu 	/* Check if a fatal error happened */
38138bf993a5SJose Abreu 	if (stmmac_safety_feat_interrupt(priv))
38148bf993a5SJose Abreu 		return IRQ_HANDLED;
381534877a15SJose Abreu 
38167ac6653aSJeff Kirsher 	/* To handle GMAC own interrupts */
38177d9e6c5aSJose Abreu 	if ((priv->plat->has_gmac) || xmac) {
3818c10d4c82SJose Abreu 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
381961fac60aSJose Abreu 		int mtl_status;
38208f71a88dSJoao Pinto 
3821d765955dSGiuseppe CAVALLARO 		if (unlikely(status)) {
3822d765955dSGiuseppe CAVALLARO 			/* For LPI we need to save the tx status */
38230982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3824d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = true;
38250982a0f6SGiuseppe CAVALLARO 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3826d765955dSGiuseppe CAVALLARO 				priv->tx_path_in_lpi_mode = false;
38277bac4e1eSJoao Pinto 		}
38287bac4e1eSJoao Pinto 
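		/* Handle the per-queue MTL interrupts; on an RX FIFO overflow
		 * the RX tail pointer is re-written to restart the DMA.
		 */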
38297bac4e1eSJoao Pinto 		for (queue = 0; queue < queues_count; queue++) {
383061fac60aSJose Abreu 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
383154139cf3SJoao Pinto 
383261fac60aSJose Abreu 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
383361fac60aSJose Abreu 								queue);
383461fac60aSJose Abreu 			if (mtl_status != -EINVAL)
383561fac60aSJose Abreu 				status |= mtl_status;
38367bac4e1eSJoao Pinto 
3837a4e887faSJose Abreu 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
383861fac60aSJose Abreu 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
383954139cf3SJoao Pinto 						       rx_q->rx_tail_addr,
38407bac4e1eSJoao Pinto 						       queue);
38417bac4e1eSJoao Pinto 		}
384270523e63SGiuseppe CAVALLARO 
384370523e63SGiuseppe CAVALLARO 		/* PCS link status */
38443fe5cadbSGiuseppe CAVALLARO 		if (priv->hw->pcs) {
384570523e63SGiuseppe CAVALLARO 			if (priv->xstats.pcs_link)
384670523e63SGiuseppe CAVALLARO 				netif_carrier_on(dev);
384770523e63SGiuseppe CAVALLARO 			else
384870523e63SGiuseppe CAVALLARO 				netif_carrier_off(dev);
384970523e63SGiuseppe CAVALLARO 		}
3850d765955dSGiuseppe CAVALLARO 	}
3851d765955dSGiuseppe CAVALLARO 
3852d765955dSGiuseppe CAVALLARO 	/* To handle DMA interrupts */
38537ac6653aSJeff Kirsher 	stmmac_dma_interrupt(priv);
38547ac6653aSJeff Kirsher 
38557ac6653aSJeff Kirsher 	return IRQ_HANDLED;
38567ac6653aSJeff Kirsher }
38577ac6653aSJeff Kirsher 
38587ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
38597ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools
3860ceb69499SGiuseppe CAVALLARO  * to allow network I/O with interrupts disabled.
3861ceb69499SGiuseppe CAVALLARO  */
38627ac6653aSJeff Kirsher static void stmmac_poll_controller(struct net_device *dev)
38637ac6653aSJeff Kirsher {
38647ac6653aSJeff Kirsher 	disable_irq(dev->irq);
38657ac6653aSJeff Kirsher 	stmmac_interrupt(dev->irq, dev);
38667ac6653aSJeff Kirsher 	enable_irq(dev->irq);
38677ac6653aSJeff Kirsher }
38687ac6653aSJeff Kirsher #endif
38697ac6653aSJeff Kirsher 
38707ac6653aSJeff Kirsher /**
38717ac6653aSJeff Kirsher  *  stmmac_ioctl - Entry point for the Ioctl
38727ac6653aSJeff Kirsher  *  @dev: Device pointer.
38737ac6653aSJeff Kirsher  *  @rq: An IOCTL-specific structure that can contain a pointer to
38747ac6653aSJeff Kirsher  *  a proprietary structure used to pass information to the driver.
38757ac6653aSJeff Kirsher  *  @cmd: IOCTL command
38767ac6653aSJeff Kirsher  *  Description:
387732ceabcaSGiuseppe CAVALLARO  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
38787ac6653aSJeff Kirsher  */
38797ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38807ac6653aSJeff Kirsher {
3881891434b1SRayagond Kokatanur 	int ret = -EOPNOTSUPP;
38827ac6653aSJeff Kirsher 
38837ac6653aSJeff Kirsher 	if (!netif_running(dev))
38847ac6653aSJeff Kirsher 		return -EINVAL;
38857ac6653aSJeff Kirsher 
3886891434b1SRayagond Kokatanur 	switch (cmd) {
3887891434b1SRayagond Kokatanur 	case SIOCGMIIPHY:
3888891434b1SRayagond Kokatanur 	case SIOCGMIIREG:
3889891434b1SRayagond Kokatanur 	case SIOCSMIIREG:
3890d6d50c7eSPhilippe Reynes 		if (!dev->phydev)
38917ac6653aSJeff Kirsher 			return -EINVAL;
3892d6d50c7eSPhilippe Reynes 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3893891434b1SRayagond Kokatanur 		break;
3894891434b1SRayagond Kokatanur 	case SIOCSHWTSTAMP:
3895d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_set(dev, rq);
3896d6228b7cSArtem Panfilov 		break;
3897d6228b7cSArtem Panfilov 	case SIOCGHWTSTAMP:
3898d6228b7cSArtem Panfilov 		ret = stmmac_hwtstamp_get(dev, rq);
3899891434b1SRayagond Kokatanur 		break;
3900891434b1SRayagond Kokatanur 	default:
3901891434b1SRayagond Kokatanur 		break;
3902891434b1SRayagond Kokatanur 	}
39037ac6653aSJeff Kirsher 
39047ac6653aSJeff Kirsher 	return ret;
39057ac6653aSJeff Kirsher }
39067ac6653aSJeff Kirsher 
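/* tc block callback: the queues are quiesced while the classifier offload
 * is programmed into (or removed from) the hardware.
 */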
39074dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
39084dbbe8ddSJose Abreu 				    void *cb_priv)
39094dbbe8ddSJose Abreu {
39104dbbe8ddSJose Abreu 	struct stmmac_priv *priv = cb_priv;
39114dbbe8ddSJose Abreu 	int ret = -EOPNOTSUPP;
39124dbbe8ddSJose Abreu 
39134dbbe8ddSJose Abreu 	stmmac_disable_all_queues(priv);
39144dbbe8ddSJose Abreu 
39154dbbe8ddSJose Abreu 	switch (type) {
39164dbbe8ddSJose Abreu 	case TC_SETUP_CLSU32:
39174dbbe8ddSJose Abreu 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
39184dbbe8ddSJose Abreu 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
39194dbbe8ddSJose Abreu 		break;
39204dbbe8ddSJose Abreu 	default:
39214dbbe8ddSJose Abreu 		break;
39224dbbe8ddSJose Abreu 	}
39234dbbe8ddSJose Abreu 
39244dbbe8ddSJose Abreu 	stmmac_enable_all_queues(priv);
39254dbbe8ddSJose Abreu 	return ret;
39264dbbe8ddSJose Abreu }
39274dbbe8ddSJose Abreu 
39284dbbe8ddSJose Abreu static int stmmac_setup_tc_block(struct stmmac_priv *priv,
39294dbbe8ddSJose Abreu 				 struct tc_block_offload *f)
39304dbbe8ddSJose Abreu {
39314dbbe8ddSJose Abreu 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
39324dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
39334dbbe8ddSJose Abreu 
39344dbbe8ddSJose Abreu 	switch (f->command) {
39354dbbe8ddSJose Abreu 	case TC_BLOCK_BIND:
39364dbbe8ddSJose Abreu 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
393760513bd8SJohn Hurley 				priv, priv, f->extack);
39384dbbe8ddSJose Abreu 	case TC_BLOCK_UNBIND:
39394dbbe8ddSJose Abreu 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
39404dbbe8ddSJose Abreu 		return 0;
39414dbbe8ddSJose Abreu 	default:
39424dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
39434dbbe8ddSJose Abreu 	}
39444dbbe8ddSJose Abreu }
39454dbbe8ddSJose Abreu 
39464dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
39474dbbe8ddSJose Abreu 			   void *type_data)
39484dbbe8ddSJose Abreu {
39494dbbe8ddSJose Abreu 	struct stmmac_priv *priv = netdev_priv(ndev);
39504dbbe8ddSJose Abreu 
39514dbbe8ddSJose Abreu 	switch (type) {
39524dbbe8ddSJose Abreu 	case TC_SETUP_BLOCK:
39534dbbe8ddSJose Abreu 		return stmmac_setup_tc_block(priv, type_data);
39541f705bc6SJose Abreu 	case TC_SETUP_QDISC_CBS:
39551f705bc6SJose Abreu 		return stmmac_tc_setup_cbs(priv, priv, type_data);
39564dbbe8ddSJose Abreu 	default:
39574dbbe8ddSJose Abreu 		return -EOPNOTSUPP;
39584dbbe8ddSJose Abreu 	}
39594dbbe8ddSJose Abreu }
39604dbbe8ddSJose Abreu 
3961a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3962a830405eSBhadram Varka {
3963a830405eSBhadram Varka 	struct stmmac_priv *priv = netdev_priv(ndev);
3964a830405eSBhadram Varka 	int ret = 0;
3965a830405eSBhadram Varka 
3966a830405eSBhadram Varka 	ret = eth_mac_addr(ndev, addr);
3967a830405eSBhadram Varka 	if (ret)
3968a830405eSBhadram Varka 		return ret;
3969a830405eSBhadram Varka 
3970c10d4c82SJose Abreu 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3971a830405eSBhadram Varka 
3972a830405eSBhadram Varka 	return ret;
3973a830405eSBhadram Varka }
3974a830405eSBhadram Varka 
397550fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS
39767ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir;
39777ac29055SGiuseppe CAVALLARO 
3978c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc,
3979c24602efSGiuseppe CAVALLARO 			       struct seq_file *seq)
39807ac29055SGiuseppe CAVALLARO {
39817ac29055SGiuseppe CAVALLARO 	int i;
3982c24602efSGiuseppe CAVALLARO 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3983c24602efSGiuseppe CAVALLARO 	struct dma_desc *p = (struct dma_desc *)head;
39847ac29055SGiuseppe CAVALLARO 
3985c24602efSGiuseppe CAVALLARO 	for (i = 0; i < size; i++) {
3986c24602efSGiuseppe CAVALLARO 		if (extend_desc) {
3987c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3988c24602efSGiuseppe CAVALLARO 				   i, (unsigned int)virt_to_phys(ep),
3989f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des0),
3990f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des1),
3991f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des2),
3992f8be0d78SMichael Weiser 				   le32_to_cpu(ep->basic.des3));
3993c24602efSGiuseppe CAVALLARO 			ep++;
3994c24602efSGiuseppe CAVALLARO 		} else {
3995c24602efSGiuseppe CAVALLARO 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
399666c25f6eSNiklas Cassel 				   i, (unsigned int)virt_to_phys(p),
3997f8be0d78SMichael Weiser 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3998f8be0d78SMichael Weiser 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3999c24602efSGiuseppe CAVALLARO 			p++;
4000c24602efSGiuseppe CAVALLARO 		}
40017ac29055SGiuseppe CAVALLARO 		seq_printf(seq, "\n");
40027ac29055SGiuseppe CAVALLARO 	}
4003c24602efSGiuseppe CAVALLARO }
40047ac29055SGiuseppe CAVALLARO 
4005fb0d9c63SYangtao Li static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4006c24602efSGiuseppe CAVALLARO {
4007c24602efSGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
4008c24602efSGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
400954139cf3SJoao Pinto 	u32 rx_count = priv->plat->rx_queues_to_use;
4010ce736788SJoao Pinto 	u32 tx_count = priv->plat->tx_queues_to_use;
401154139cf3SJoao Pinto 	u32 queue;
401254139cf3SJoao Pinto 
40135f2b8b62SThierry Reding 	if ((dev->flags & IFF_UP) == 0)
40145f2b8b62SThierry Reding 		return 0;
40155f2b8b62SThierry Reding 
401654139cf3SJoao Pinto 	for (queue = 0; queue < rx_count; queue++) {
401754139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
401854139cf3SJoao Pinto 
401954139cf3SJoao Pinto 		seq_printf(seq, "RX Queue %d:\n", queue);
40207ac29055SGiuseppe CAVALLARO 
4021c24602efSGiuseppe CAVALLARO 		if (priv->extend_desc) {
402254139cf3SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
402354139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_erx,
402454139cf3SJoao Pinto 					   DMA_RX_SIZE, 1, seq);
402554139cf3SJoao Pinto 		} else {
402654139cf3SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
402754139cf3SJoao Pinto 			sysfs_display_ring((void *)rx_q->dma_rx,
402854139cf3SJoao Pinto 					   DMA_RX_SIZE, 0, seq);
402954139cf3SJoao Pinto 		}
403054139cf3SJoao Pinto 	}
403154139cf3SJoao Pinto 
4032ce736788SJoao Pinto 	for (queue = 0; queue < tx_count; queue++) {
4033ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4034ce736788SJoao Pinto 
4035ce736788SJoao Pinto 		seq_printf(seq, "TX Queue %d:\n", queue);
4036ce736788SJoao Pinto 
403754139cf3SJoao Pinto 		if (priv->extend_desc) {
4038ce736788SJoao Pinto 			seq_printf(seq, "Extended descriptor ring:\n");
4039ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_etx,
4040ce736788SJoao Pinto 					   DMA_TX_SIZE, 1, seq);
4041c24602efSGiuseppe CAVALLARO 		} else {
4042ce736788SJoao Pinto 			seq_printf(seq, "Descriptor ring:\n");
4043ce736788SJoao Pinto 			sysfs_display_ring((void *)tx_q->dma_tx,
4044ce736788SJoao Pinto 					   DMA_TX_SIZE, 0, seq);
4045ce736788SJoao Pinto 		}
40467ac29055SGiuseppe CAVALLARO 	}
40477ac29055SGiuseppe CAVALLARO 
40487ac29055SGiuseppe CAVALLARO 	return 0;
40497ac29055SGiuseppe CAVALLARO }
4050fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
40517ac29055SGiuseppe CAVALLARO 
4052fb0d9c63SYangtao Li static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4053e7434821SGiuseppe CAVALLARO {
4054e7434821SGiuseppe CAVALLARO 	struct net_device *dev = seq->private;
4055e7434821SGiuseppe CAVALLARO 	struct stmmac_priv *priv = netdev_priv(dev);
4056e7434821SGiuseppe CAVALLARO 
405719e30c14SGiuseppe CAVALLARO 	if (!priv->hw_cap_support) {
4058e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "DMA HW features not supported\n");
4059e7434821SGiuseppe CAVALLARO 		return 0;
4060e7434821SGiuseppe CAVALLARO 	}
4061e7434821SGiuseppe CAVALLARO 
4062e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
4063e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tDMA HW features\n");
4064e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "==============================\n");
4065e7434821SGiuseppe CAVALLARO 
406622d3efe5SPavel Machek 	seq_printf(seq, "\t10/100 Mbps: %s\n",
4067e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
406822d3efe5SPavel Machek 	seq_printf(seq, "\t1000 Mbps: %s\n",
4069e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
407022d3efe5SPavel Machek 	seq_printf(seq, "\tHalf duplex: %s\n",
4071e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
4072e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tHash Filter: %s\n",
4073e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
4074e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4075e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
40768d45e42bSLABBE Corentin 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4077e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pcs) ? "Y" : "N");
4078e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4079e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
4080e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
4081e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4082e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
4083e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4084e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRMON module: %s\n",
4085e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rmon) ? "Y" : "N");
4086e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4087e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
4088e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4089e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
409022d3efe5SPavel Machek 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4091e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.eee) ? "Y" : "N");
4092e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4093e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4094e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
4095f748be53SAlexandre TORGUE 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4096f748be53SAlexandre TORGUE 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4097f748be53SAlexandre TORGUE 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
4098f748be53SAlexandre TORGUE 	} else {
4099e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4100e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4101e7434821SGiuseppe CAVALLARO 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4102e7434821SGiuseppe CAVALLARO 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4103f748be53SAlexandre TORGUE 	}
4104e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4105e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4106e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4107e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_rx_channel);
4108e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4109e7434821SGiuseppe CAVALLARO 		   priv->dma_cap.number_tx_channel);
4110e7434821SGiuseppe CAVALLARO 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4111e7434821SGiuseppe CAVALLARO 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
4112e7434821SGiuseppe CAVALLARO 
4113e7434821SGiuseppe CAVALLARO 	return 0;
4114e7434821SGiuseppe CAVALLARO }
4115fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4116e7434821SGiuseppe CAVALLARO 
41177ac29055SGiuseppe CAVALLARO static int stmmac_init_fs(struct net_device *dev)
41187ac29055SGiuseppe CAVALLARO {
4119466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
41207ac29055SGiuseppe CAVALLARO 
4121466c5ac8SMathieu Olivari 	/* Create per netdev entries */
4122466c5ac8SMathieu Olivari 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4123466c5ac8SMathieu Olivari 
4124466c5ac8SMathieu Olivari 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
412538ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
41267ac29055SGiuseppe CAVALLARO 
41277ac29055SGiuseppe CAVALLARO 		return -ENOMEM;
41287ac29055SGiuseppe CAVALLARO 	}
41297ac29055SGiuseppe CAVALLARO 
41307ac29055SGiuseppe CAVALLARO 	/* Entry to report DMA RX/TX rings */
4131466c5ac8SMathieu Olivari 	priv->dbgfs_rings_status =
4132d3757ba4SJoe Perches 		debugfs_create_file("descriptors_status", 0444,
4133466c5ac8SMathieu Olivari 				    priv->dbgfs_dir, dev,
41347ac29055SGiuseppe CAVALLARO 				    &stmmac_rings_status_fops);
41357ac29055SGiuseppe CAVALLARO 
4136466c5ac8SMathieu Olivari 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
413738ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4138466c5ac8SMathieu Olivari 		debugfs_remove_recursive(priv->dbgfs_dir);
41397ac29055SGiuseppe CAVALLARO 
41407ac29055SGiuseppe CAVALLARO 		return -ENOMEM;
41417ac29055SGiuseppe CAVALLARO 	}
41427ac29055SGiuseppe CAVALLARO 
4143e7434821SGiuseppe CAVALLARO 	/* Entry to report the DMA HW features */
4144d3757ba4SJoe Perches 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4145466c5ac8SMathieu Olivari 						  priv->dbgfs_dir,
4146e7434821SGiuseppe CAVALLARO 						  dev, &stmmac_dma_cap_fops);
4147e7434821SGiuseppe CAVALLARO 
4148466c5ac8SMathieu Olivari 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
414938ddc59dSLABBE Corentin 		netdev_err(priv->dev, "ERROR creating stmmac DMA HW features debugfs file\n");
4150466c5ac8SMathieu Olivari 		debugfs_remove_recursive(priv->dbgfs_dir);
4151e7434821SGiuseppe CAVALLARO 
4152e7434821SGiuseppe CAVALLARO 		return -ENOMEM;
4153e7434821SGiuseppe CAVALLARO 	}
4154e7434821SGiuseppe CAVALLARO 
41557ac29055SGiuseppe CAVALLARO 	return 0;
41567ac29055SGiuseppe CAVALLARO }
41577ac29055SGiuseppe CAVALLARO 
4158466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev)
41597ac29055SGiuseppe CAVALLARO {
4160466c5ac8SMathieu Olivari 	struct stmmac_priv *priv = netdev_priv(dev);
4161466c5ac8SMathieu Olivari 
4162466c5ac8SMathieu Olivari 	debugfs_remove_recursive(priv->dbgfs_dir);
41637ac29055SGiuseppe CAVALLARO }
416450fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */
41657ac29055SGiuseppe CAVALLARO 
41667ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = {
41677ac6653aSJeff Kirsher 	.ndo_open = stmmac_open,
41687ac6653aSJeff Kirsher 	.ndo_start_xmit = stmmac_xmit,
41697ac6653aSJeff Kirsher 	.ndo_stop = stmmac_release,
41707ac6653aSJeff Kirsher 	.ndo_change_mtu = stmmac_change_mtu,
41717ac6653aSJeff Kirsher 	.ndo_fix_features = stmmac_fix_features,
4172d2afb5bdSGiuseppe CAVALLARO 	.ndo_set_features = stmmac_set_features,
417301789349SJiri Pirko 	.ndo_set_rx_mode = stmmac_set_rx_mode,
41747ac6653aSJeff Kirsher 	.ndo_tx_timeout = stmmac_tx_timeout,
41757ac6653aSJeff Kirsher 	.ndo_do_ioctl = stmmac_ioctl,
41764dbbe8ddSJose Abreu 	.ndo_setup_tc = stmmac_setup_tc,
41777ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
41787ac6653aSJeff Kirsher 	.ndo_poll_controller = stmmac_poll_controller,
41797ac6653aSJeff Kirsher #endif
4180a830405eSBhadram Varka 	.ndo_set_mac_address = stmmac_set_mac_address,
41817ac6653aSJeff Kirsher };
41827ac6653aSJeff Kirsher 
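/* Error recovery path: stmmac_reset_subtask() runs from the service
 * workqueue once STMMAC_RESET_REQUESTED has been set by an error handler.
 * It serializes against concurrent resets via the STMMAC_RESETING bit and
 * simply bounces the interface with dev_close()/dev_open() under rtnl_lock.
 */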
418334877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv)
418434877a15SJose Abreu {
418534877a15SJose Abreu 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
418634877a15SJose Abreu 		return;
418734877a15SJose Abreu 	if (test_bit(STMMAC_DOWN, &priv->state))
418834877a15SJose Abreu 		return;
418934877a15SJose Abreu 
419034877a15SJose Abreu 	netdev_err(priv->dev, "Reset adapter.\n");
419134877a15SJose Abreu 
419234877a15SJose Abreu 	rtnl_lock();
419334877a15SJose Abreu 	netif_trans_update(priv->dev);
419434877a15SJose Abreu 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
419534877a15SJose Abreu 		usleep_range(1000, 2000);
419634877a15SJose Abreu 
419734877a15SJose Abreu 	set_bit(STMMAC_DOWN, &priv->state);
419834877a15SJose Abreu 	dev_close(priv->dev);
419900f54e68SPetr Machata 	dev_open(priv->dev, NULL);
420034877a15SJose Abreu 	clear_bit(STMMAC_DOWN, &priv->state);
420134877a15SJose Abreu 	clear_bit(STMMAC_RESETING, &priv->state);
420234877a15SJose Abreu 	rtnl_unlock();
420334877a15SJose Abreu }
420434877a15SJose Abreu 
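/* The service task is the single work item queued on priv->wq; it currently
 * only drives the reset subtask above and then clears STMMAC_SERVICE_SCHED
 * so that the task can be scheduled again.
 */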
420534877a15SJose Abreu static void stmmac_service_task(struct work_struct *work)
420634877a15SJose Abreu {
420734877a15SJose Abreu 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
420834877a15SJose Abreu 			service_task);
420934877a15SJose Abreu 
421034877a15SJose Abreu 	stmmac_reset_subtask(priv);
421134877a15SJose Abreu 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
421234877a15SJose Abreu }
421334877a15SJose Abreu 
42147ac6653aSJeff Kirsher /**
4215cf3f047bSGiuseppe CAVALLARO  *  stmmac_hw_init - Init the MAC device
421632ceabcaSGiuseppe CAVALLARO  *  @priv: driver private structure
4217732fdf0eSGiuseppe CAVALLARO  *  Description: this function configures the MAC device according to
4218732fdf0eSGiuseppe CAVALLARO  *  some platform parameters or the HW capability register. It prepares the
4219732fdf0eSGiuseppe CAVALLARO  *  driver to use either ring or chain mode and to set up either enhanced or
4220732fdf0eSGiuseppe CAVALLARO  *  normal descriptors.
4221cf3f047bSGiuseppe CAVALLARO  */
4222cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv)
4223cf3f047bSGiuseppe CAVALLARO {
42245f0456b4SJose Abreu 	int ret;
4225cf3f047bSGiuseppe CAVALLARO 
42269f93ac8dSLABBE Corentin 	/* dwmac-sun8i only works in chain mode */
42279f93ac8dSLABBE Corentin 	if (priv->plat->has_sun8i)
42289f93ac8dSLABBE Corentin 		chain_mode = 1;
42295f0456b4SJose Abreu 	priv->chain_mode = chain_mode;
42309f93ac8dSLABBE Corentin 
42315f0456b4SJose Abreu 	/* Initialize HW Interface */
42325f0456b4SJose Abreu 	ret = stmmac_hwif_init(priv);
42335f0456b4SJose Abreu 	if (ret)
42345f0456b4SJose Abreu 		return ret;
42354a7d666aSGiuseppe CAVALLARO 
4236cf3f047bSGiuseppe CAVALLARO 	/* Get the HW capability (GMAC cores newer than 3.50a) */
4237cf3f047bSGiuseppe CAVALLARO 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4238cf3f047bSGiuseppe CAVALLARO 	if (priv->hw_cap_support) {
423938ddc59dSLABBE Corentin 		dev_info(priv->device, "DMA HW capability register supported\n");
4240cf3f047bSGiuseppe CAVALLARO 
4241cf3f047bSGiuseppe CAVALLARO 		/* We can override some gmac/dma configuration fields
4242cf3f047bSGiuseppe CAVALLARO 		 * (e.g. enh_desc, tx_coe) that are passed through the
4243cf3f047bSGiuseppe CAVALLARO 		 * platform with the values from the HW capability
4244cf3f047bSGiuseppe CAVALLARO 		 * register (if supported).
4245cf3f047bSGiuseppe CAVALLARO 		 */
4246cf3f047bSGiuseppe CAVALLARO 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4247cf3f047bSGiuseppe CAVALLARO 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
42483fe5cadbSGiuseppe CAVALLARO 		priv->hw->pmt = priv->plat->pmt;
424938912bdbSDeepak SIKRI 
4250a8df35d4SEzequiel Garcia 		/* TXCOE doesn't work in thresh DMA mode */
4251a8df35d4SEzequiel Garcia 		if (priv->plat->force_thresh_dma_mode)
4252a8df35d4SEzequiel Garcia 			priv->plat->tx_coe = 0;
4253a8df35d4SEzequiel Garcia 		else
425438912bdbSDeepak SIKRI 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4255a8df35d4SEzequiel Garcia 
4256f748be53SAlexandre TORGUE 		/* In case of GMAC4 rx_coe is from HW cap register. */
4257f748be53SAlexandre TORGUE 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
425838912bdbSDeepak SIKRI 
425938912bdbSDeepak SIKRI 		if (priv->dma_cap.rx_coe_type2)
426038912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
426138912bdbSDeepak SIKRI 		else if (priv->dma_cap.rx_coe_type1)
426238912bdbSDeepak SIKRI 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
426338912bdbSDeepak SIKRI 
426438ddc59dSLABBE Corentin 	} else {
426538ddc59dSLABBE Corentin 		dev_info(priv->device, "No HW DMA feature register supported\n");
426638ddc59dSLABBE Corentin 	}
4267cf3f047bSGiuseppe CAVALLARO 
4268d2afb5bdSGiuseppe CAVALLARO 	if (priv->plat->rx_coe) {
4269d2afb5bdSGiuseppe CAVALLARO 		priv->hw->rx_csum = priv->plat->rx_coe;
427038ddc59dSLABBE Corentin 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4271f748be53SAlexandre TORGUE 		if (priv->synopsys_id < DWMAC_CORE_4_00)
427238ddc59dSLABBE Corentin 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4273d2afb5bdSGiuseppe CAVALLARO 	}
4274cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->tx_coe)
427538ddc59dSLABBE Corentin 		dev_info(priv->device, "TX Checksum insertion supported\n");
4276cf3f047bSGiuseppe CAVALLARO 
4277cf3f047bSGiuseppe CAVALLARO 	if (priv->plat->pmt) {
427838ddc59dSLABBE Corentin 		dev_info(priv->device, "Wake-Up On LAN supported\n");
4279cf3f047bSGiuseppe CAVALLARO 		device_set_wakeup_capable(priv->device, 1);
4280cf3f047bSGiuseppe CAVALLARO 	}
4281cf3f047bSGiuseppe CAVALLARO 
4282f748be53SAlexandre TORGUE 	if (priv->dma_cap.tsoen)
428338ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO supported\n");
4284f748be53SAlexandre TORGUE 
42857cfde0afSJose Abreu 	/* Run HW quirks, if any */
42867cfde0afSJose Abreu 	if (priv->hwif_quirks) {
42877cfde0afSJose Abreu 		ret = priv->hwif_quirks(priv);
42887cfde0afSJose Abreu 		if (ret)
42897cfde0afSJose Abreu 			return ret;
42907cfde0afSJose Abreu 	}
42917cfde0afSJose Abreu 
42923b509466SJose Abreu 	/* Rx Watchdog is available in the COREs newer than 3.40.
42933b509466SJose Abreu 	 * In some cases, for example on buggy HW, this feature
42943b509466SJose Abreu 	 * has to be disabled and this can be done by passing the
42953b509466SJose Abreu 	 * riwt_off field from the platform.
42963b509466SJose Abreu 	 */
42973b509466SJose Abreu 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
42983b509466SJose Abreu 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
42993b509466SJose Abreu 		priv->use_riwt = 1;
43003b509466SJose Abreu 		dev_info(priv->device,
43013b509466SJose Abreu 			 "Enable RX Mitigation via HW Watchdog Timer\n");
43023b509466SJose Abreu 	}
43033b509466SJose Abreu 
4304c24602efSGiuseppe CAVALLARO 	return 0;
4305cf3f047bSGiuseppe CAVALLARO }
4306cf3f047bSGiuseppe CAVALLARO 
4307cf3f047bSGiuseppe CAVALLARO /**
4308bfab27a1SGiuseppe CAVALLARO  * stmmac_dvr_probe
4309bfab27a1SGiuseppe CAVALLARO  * @device: device pointer
4310ff3dd78cSGiuseppe CAVALLARO  * @plat_dat: platform data pointer
4311e56788cfSJoachim Eastwood  * @res: stmmac resource pointer
4312bfab27a1SGiuseppe CAVALLARO  * Description: this is the main probe function used to
4313bfab27a1SGiuseppe CAVALLARO  * call alloc_etherdev and allocate the private data structure.
43149afec6efSAndy Shevchenko  * Return:
431515ffac73SJoachim Eastwood  * 0 on success, a negative errno otherwise.
43167ac6653aSJeff Kirsher  */
431715ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device,
4318cf3f047bSGiuseppe CAVALLARO 		     struct plat_stmmacenet_data *plat_dat,
4319e56788cfSJoachim Eastwood 		     struct stmmac_resources *res)
43207ac6653aSJeff Kirsher {
4321bfab27a1SGiuseppe CAVALLARO 	struct net_device *ndev = NULL;
4322bfab27a1SGiuseppe CAVALLARO 	struct stmmac_priv *priv;
43238fce3331SJose Abreu 	u32 queue, maxq;
4324c22a3f48SJoao Pinto 	int ret = 0;
43257ac6653aSJeff Kirsher 
43269737070cSJisheng Zhang 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
43279737070cSJisheng Zhang 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
432841de8d4cSJoe Perches 	if (!ndev)
432915ffac73SJoachim Eastwood 		return -ENOMEM;
43307ac6653aSJeff Kirsher 
4331bfab27a1SGiuseppe CAVALLARO 	SET_NETDEV_DEV(ndev, device);
43327ac6653aSJeff Kirsher 
4333bfab27a1SGiuseppe CAVALLARO 	priv = netdev_priv(ndev);
4334bfab27a1SGiuseppe CAVALLARO 	priv->device = device;
4335bfab27a1SGiuseppe CAVALLARO 	priv->dev = ndev;
4336bfab27a1SGiuseppe CAVALLARO 
4337bfab27a1SGiuseppe CAVALLARO 	stmmac_set_ethtool_ops(ndev);
4338cf3f047bSGiuseppe CAVALLARO 	priv->pause = pause;
4339cf3f047bSGiuseppe CAVALLARO 	priv->plat = plat_dat;
4340e56788cfSJoachim Eastwood 	priv->ioaddr = res->addr;
4341e56788cfSJoachim Eastwood 	priv->dev->base_addr = (unsigned long)res->addr;
4342e56788cfSJoachim Eastwood 
4343e56788cfSJoachim Eastwood 	priv->dev->irq = res->irq;
4344e56788cfSJoachim Eastwood 	priv->wol_irq = res->wol_irq;
4345e56788cfSJoachim Eastwood 	priv->lpi_irq = res->lpi_irq;
4346e56788cfSJoachim Eastwood 
4347a51645f7SPetr Štetiar 	if (!IS_ERR_OR_NULL(res->mac))
4348e56788cfSJoachim Eastwood 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4349bfab27a1SGiuseppe CAVALLARO 
4350a7a62685SJoachim Eastwood 	dev_set_drvdata(device, priv->dev);
4351803f8fc4SJoachim Eastwood 
4352cf3f047bSGiuseppe CAVALLARO 	/* Verify driver arguments */
4353cf3f047bSGiuseppe CAVALLARO 	stmmac_verify_args();
4354cf3f047bSGiuseppe CAVALLARO 
435534877a15SJose Abreu 	/* Allocate workqueue */
435634877a15SJose Abreu 	priv->wq = create_singlethread_workqueue("stmmac_wq");
435734877a15SJose Abreu 	if (!priv->wq) {
435834877a15SJose Abreu 		dev_err(priv->device, "failed to create workqueue\n");
43599737070cSJisheng Zhang 		return -ENOMEM;
436034877a15SJose Abreu 	}
436134877a15SJose Abreu 
436234877a15SJose Abreu 	INIT_WORK(&priv->service_task, stmmac_service_task);
436334877a15SJose Abreu 
4364cf3f047bSGiuseppe CAVALLARO 	/* Override with kernel parameters if supplied XXX CRS XXX
4365ceb69499SGiuseppe CAVALLARO 	 * this needs to have multiple instances
4366ceb69499SGiuseppe CAVALLARO 	 */
4367cf3f047bSGiuseppe CAVALLARO 	if ((phyaddr >= 0) && (phyaddr <= 31))
4368cf3f047bSGiuseppe CAVALLARO 		priv->plat->phy_addr = phyaddr;
4369cf3f047bSGiuseppe CAVALLARO 
437090f522a2SEugeniy Paltsev 	if (priv->plat->stmmac_rst) {
437190f522a2SEugeniy Paltsev 		ret = reset_control_assert(priv->plat->stmmac_rst);
4372f573c0b9Sjpinto 		reset_control_deassert(priv->plat->stmmac_rst);
437390f522a2SEugeniy Paltsev 		/* Some reset controllers have only a reset callback instead
437490f522a2SEugeniy Paltsev 		 * of an assert + deassert callback pair.
437590f522a2SEugeniy Paltsev 		 */
437690f522a2SEugeniy Paltsev 		if (ret == -ENOTSUPP)
437790f522a2SEugeniy Paltsev 			reset_control_reset(priv->plat->stmmac_rst);
437890f522a2SEugeniy Paltsev 	}
4379c5e4ddbdSChen-Yu Tsai 
4380cf3f047bSGiuseppe CAVALLARO 	/* Init MAC and get the capabilities */
4381c24602efSGiuseppe CAVALLARO 	ret = stmmac_hw_init(priv);
4382c24602efSGiuseppe CAVALLARO 	if (ret)
438362866e98SChen-Yu Tsai 		goto error_hw_init;
4384cf3f047bSGiuseppe CAVALLARO 
4385b561af36SVinod Koul 	stmmac_check_ether_addr(priv);
4386b561af36SVinod Koul 
4387c22a3f48SJoao Pinto 	/* Configure real RX and TX queues */
4388c02b7a91SJoao Pinto 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4389c02b7a91SJoao Pinto 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4390c22a3f48SJoao Pinto 
4391cf3f047bSGiuseppe CAVALLARO 	ndev->netdev_ops = &stmmac_netdev_ops;
4392cf3f047bSGiuseppe CAVALLARO 
4393cf3f047bSGiuseppe CAVALLARO 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4394cf3f047bSGiuseppe CAVALLARO 			    NETIF_F_RXCSUM;
4395f748be53SAlexandre TORGUE 
43964dbbe8ddSJose Abreu 	ret = stmmac_tc_init(priv, priv);
43974dbbe8ddSJose Abreu 	if (!ret) {
43984dbbe8ddSJose Abreu 		ndev->hw_features |= NETIF_F_HW_TC;
43994dbbe8ddSJose Abreu 	}
44004dbbe8ddSJose Abreu 
4401f748be53SAlexandre TORGUE 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
44029edfa7daSNiklas Cassel 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4403f748be53SAlexandre TORGUE 		priv->tso = true;
440438ddc59dSLABBE Corentin 		dev_info(priv->device, "TSO feature enabled\n");
4405f748be53SAlexandre TORGUE 	}
4406bfab27a1SGiuseppe CAVALLARO 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4407bfab27a1SGiuseppe CAVALLARO 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
44087ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED
44097ac6653aSJeff Kirsher 	/* Both mac100 and gmac support receive VLAN tag detection */
4410ab188e8fSElad Nachman 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
44117ac6653aSJeff Kirsher #endif
44127ac6653aSJeff Kirsher 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
44137ac6653aSJeff Kirsher 
441444770e11SJarod Wilson 	/* MTU range: 46 - hw-specific max */
441544770e11SJarod Wilson 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
441644770e11SJarod Wilson 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
441744770e11SJarod Wilson 		ndev->max_mtu = JUMBO_LEN;
44187d9e6c5aSJose Abreu 	else if (priv->plat->has_xgmac)
44197d9e6c5aSJose Abreu 		ndev->max_mtu = XGMAC_JUMBO_LEN;
442044770e11SJarod Wilson 	else
442144770e11SJarod Wilson 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4422a2cd64f3SKweh, Hock Leong 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4423a2cd64f3SKweh, Hock Leong 	 * as well as plat->maxmtu < ndev->min_mtu, which is an invalid range.
4424a2cd64f3SKweh, Hock Leong 	 */
4425a2cd64f3SKweh, Hock Leong 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4426a2cd64f3SKweh, Hock Leong 	    (priv->plat->maxmtu >= ndev->min_mtu))
442744770e11SJarod Wilson 		ndev->max_mtu = priv->plat->maxmtu;
4428a2cd64f3SKweh, Hock Leong 	else if (priv->plat->maxmtu < ndev->min_mtu)
4429b618ab45SHeiner Kallweit 		dev_warn(priv->device,
4430a2cd64f3SKweh, Hock Leong 			 "%s: warning: maxmtu having invalid value (%d)\n",
4431a2cd64f3SKweh, Hock Leong 			 __func__, priv->plat->maxmtu);
443244770e11SJarod Wilson 
44337ac6653aSJeff Kirsher 	if (flow_ctrl)
44347ac6653aSJeff Kirsher 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
44357ac6653aSJeff Kirsher 
44368fce3331SJose Abreu 	/* Setup channels NAPI */
44378fce3331SJose Abreu 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4438c22a3f48SJoao Pinto 
44398fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
44408fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
44418fce3331SJose Abreu 
44428fce3331SJose Abreu 		ch->priv_data = priv;
44438fce3331SJose Abreu 		ch->index = queue;
44448fce3331SJose Abreu 
44454ccb4585SJose Abreu 		if (queue < priv->plat->rx_queues_to_use) {
44464ccb4585SJose Abreu 			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
44478fce3331SJose Abreu 				       NAPI_POLL_WEIGHT);
4448c22a3f48SJoao Pinto 		}
44494ccb4585SJose Abreu 		if (queue < priv->plat->tx_queues_to_use) {
44504ccb4585SJose Abreu 			netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
44514ccb4585SJose Abreu 				       NAPI_POLL_WEIGHT);
44524ccb4585SJose Abreu 		}
44534ccb4585SJose Abreu 	}
44547ac6653aSJeff Kirsher 
445529555fa3SThierry Reding 	mutex_init(&priv->lock);
44567ac6653aSJeff Kirsher 
4457cd7201f4SGiuseppe CAVALLARO 	/* If a specific clk_csr value is passed from the platform
4458cd7201f4SGiuseppe CAVALLARO 	 * this means that the CSR Clock Range selection cannot be
4459cd7201f4SGiuseppe CAVALLARO 	 * changed at run-time and it is fixed. Otherwise the driver
4460cd7201f4SGiuseppe CAVALLARO 	 * will try to set the MDC clock dynamically according to the
4461cd7201f4SGiuseppe CAVALLARO 	 * actual csr clock input.
4462cd7201f4SGiuseppe CAVALLARO 	 */
44635e7f7fc5SBiao Huang 	if (priv->plat->clk_csr >= 0)
4464cd7201f4SGiuseppe CAVALLARO 		priv->clk_csr = priv->plat->clk_csr;
44655e7f7fc5SBiao Huang 	else
44665e7f7fc5SBiao Huang 		stmmac_clk_csr_set(priv);
4467cd7201f4SGiuseppe CAVALLARO 
4468e58bb43fSGiuseppe CAVALLARO 	stmmac_check_pcs_mode(priv);
4469e58bb43fSGiuseppe CAVALLARO 
44703fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
44713fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
44723fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
44734bfcbd7aSFrancesco Virlinzi 		/* MDIO bus Registration */
44744bfcbd7aSFrancesco Virlinzi 		ret = stmmac_mdio_register(ndev);
44754bfcbd7aSFrancesco Virlinzi 		if (ret < 0) {
4476b618ab45SHeiner Kallweit 			dev_err(priv->device,
447738ddc59dSLABBE Corentin 				"%s: MDIO bus (id: %d) registration failed",
44784bfcbd7aSFrancesco Virlinzi 				__func__, priv->plat->bus_id);
44796a81c26fSViresh Kumar 			goto error_mdio_register;
44804bfcbd7aSFrancesco Virlinzi 		}
4481e58bb43fSGiuseppe CAVALLARO 	}
44824bfcbd7aSFrancesco Virlinzi 
448357016590SFlorian Fainelli 	ret = register_netdev(ndev);
4484b2eb09afSFlorian Fainelli 	if (ret) {
4485b618ab45SHeiner Kallweit 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
448657016590SFlorian Fainelli 			__func__, ret);
4487b2eb09afSFlorian Fainelli 		goto error_netdev_register;
4488b2eb09afSFlorian Fainelli 	}
44897ac6653aSJeff Kirsher 
44905f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
44915f2b8b62SThierry Reding 	ret = stmmac_init_fs(ndev);
44925f2b8b62SThierry Reding 	if (ret < 0)
44935f2b8b62SThierry Reding 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
44945f2b8b62SThierry Reding 			    __func__);
44955f2b8b62SThierry Reding #endif
44965f2b8b62SThierry Reding 
449757016590SFlorian Fainelli 	return ret;
44987ac6653aSJeff Kirsher 
44996a81c26fSViresh Kumar error_netdev_register:
4500b2eb09afSFlorian Fainelli 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4501b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4502b2eb09afSFlorian Fainelli 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4503b2eb09afSFlorian Fainelli 		stmmac_mdio_unregister(ndev);
45047ac6653aSJeff Kirsher error_mdio_register:
45058fce3331SJose Abreu 	for (queue = 0; queue < maxq; queue++) {
45068fce3331SJose Abreu 		struct stmmac_channel *ch = &priv->channel[queue];
4507c22a3f48SJoao Pinto 
45084ccb4585SJose Abreu 		if (queue < priv->plat->rx_queues_to_use)
45094ccb4585SJose Abreu 			netif_napi_del(&ch->rx_napi);
45104ccb4585SJose Abreu 		if (queue < priv->plat->tx_queues_to_use)
45114ccb4585SJose Abreu 			netif_napi_del(&ch->tx_napi);
4512c22a3f48SJoao Pinto 	}
451362866e98SChen-Yu Tsai error_hw_init:
451434877a15SJose Abreu 	destroy_workqueue(priv->wq);
45157ac6653aSJeff Kirsher 
451615ffac73SJoachim Eastwood 	return ret;
45177ac6653aSJeff Kirsher }
4518b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
45197ac6653aSJeff Kirsher 
45207ac6653aSJeff Kirsher /**
45217ac6653aSJeff Kirsher  * stmmac_dvr_remove
4522f4e7bd81SJoachim Eastwood  * @dev: device pointer
45237ac6653aSJeff Kirsher  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4524bfab27a1SGiuseppe CAVALLARO  * changes the link status and releases the DMA descriptor rings.
45257ac6653aSJeff Kirsher  */
4526f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev)
45277ac6653aSJeff Kirsher {
4528f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
45297ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
45307ac6653aSJeff Kirsher 
453138ddc59dSLABBE Corentin 	netdev_info(priv->dev, "%s: removing driver", __func__);
45327ac6653aSJeff Kirsher 
45335f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS
45345f2b8b62SThierry Reding 	stmmac_exit_fs(ndev);
45355f2b8b62SThierry Reding #endif
4536ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
45377ac6653aSJeff Kirsher 
4538c10d4c82SJose Abreu 	stmmac_mac_set(priv, priv->ioaddr, false);
45397ac6653aSJeff Kirsher 	netif_carrier_off(ndev);
45407ac6653aSJeff Kirsher 	unregister_netdev(ndev);
4541f573c0b9Sjpinto 	if (priv->plat->stmmac_rst)
4542f573c0b9Sjpinto 		reset_control_assert(priv->plat->stmmac_rst);
4543f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->pclk);
4544f573c0b9Sjpinto 	clk_disable_unprepare(priv->plat->stmmac_clk);
45453fe5cadbSGiuseppe CAVALLARO 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
45463fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_TBI &&
45473fe5cadbSGiuseppe CAVALLARO 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4548e743471fSBryan O'Donoghue 		stmmac_mdio_unregister(ndev);
454934877a15SJose Abreu 	destroy_workqueue(priv->wq);
455029555fa3SThierry Reding 	mutex_destroy(&priv->lock);
45517ac6653aSJeff Kirsher 
45527ac6653aSJeff Kirsher 	return 0;
45537ac6653aSJeff Kirsher }
4554b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
45557ac6653aSJeff Kirsher 
4556732fdf0eSGiuseppe CAVALLARO /**
4557732fdf0eSGiuseppe CAVALLARO  * stmmac_suspend - suspend callback
4558f4e7bd81SJoachim Eastwood  * @dev: device pointer
4559732fdf0eSGiuseppe CAVALLARO  * Description: this function suspends the device and is called by the
4560732fdf0eSGiuseppe CAVALLARO  * platform driver to stop the network queues, program the PMT register
4561732fdf0eSGiuseppe CAVALLARO  * (for WoL) and clean up and release the driver resources.
4562732fdf0eSGiuseppe CAVALLARO  */
4563f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev)
45647ac6653aSJeff Kirsher {
4565f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
45667ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
45677ac6653aSJeff Kirsher 
45687ac6653aSJeff Kirsher 	if (!ndev || !netif_running(ndev))
45697ac6653aSJeff Kirsher 		return 0;
45707ac6653aSJeff Kirsher 
4571d6d50c7eSPhilippe Reynes 	if (ndev->phydev)
4572d6d50c7eSPhilippe Reynes 		phy_stop(ndev->phydev);
4573102463b1SFrancesco Virlinzi 
457429555fa3SThierry Reding 	mutex_lock(&priv->lock);
45757ac6653aSJeff Kirsher 
45767ac6653aSJeff Kirsher 	netif_device_detach(ndev);
4577c22a3f48SJoao Pinto 	stmmac_stop_all_queues(priv);
45787ac6653aSJeff Kirsher 
4579c22a3f48SJoao Pinto 	stmmac_disable_all_queues(priv);
45807ac6653aSJeff Kirsher 
45817ac6653aSJeff Kirsher 	/* Stop TX/RX DMA */
4582ae4f0d46SJoao Pinto 	stmmac_stop_all_dma(priv);
4583c24602efSGiuseppe CAVALLARO 
45847ac6653aSJeff Kirsher 	/* Enable Power down mode by programming the PMT regs */
458589f7f2cfSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
4586c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, priv->wolopts);
458789f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 1;
458889f7f2cfSSrinivas Kandagatla 	} else {
4589c10d4c82SJose Abreu 		stmmac_mac_set(priv, priv->ioaddr, false);
4590db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_sleep_state(priv->device);
4591ba1377ffSGiuseppe CAVALLARO 		/* Disable clocks in case PMT (WoL) is off */
4592f573c0b9Sjpinto 		clk_disable(priv->plat->pclk);
4593f573c0b9Sjpinto 		clk_disable(priv->plat->stmmac_clk);
4594ba1377ffSGiuseppe CAVALLARO 	}
459529555fa3SThierry Reding 	mutex_unlock(&priv->lock);
45962d871aa0SVince Bridgers 
45974d869b03SLABBE Corentin 	priv->oldlink = false;
4598bd00632cSLABBE Corentin 	priv->speed = SPEED_UNKNOWN;
4599bd00632cSLABBE Corentin 	priv->oldduplex = DUPLEX_UNKNOWN;
46007ac6653aSJeff Kirsher 	return 0;
46017ac6653aSJeff Kirsher }
4602b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend);
46037ac6653aSJeff Kirsher 
4604732fdf0eSGiuseppe CAVALLARO /**
460554139cf3SJoao Pinto  * stmmac_reset_queues_param - reset queue parameters
460654139cf3SJoao Pinto  * @priv: driver private structure
460754139cf3SJoao Pinto  */
460854139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv)
460954139cf3SJoao Pinto {
461054139cf3SJoao Pinto 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4611ce736788SJoao Pinto 	u32 tx_cnt = priv->plat->tx_queues_to_use;
461254139cf3SJoao Pinto 	u32 queue;
461354139cf3SJoao Pinto 
461454139cf3SJoao Pinto 	for (queue = 0; queue < rx_cnt; queue++) {
461554139cf3SJoao Pinto 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
461654139cf3SJoao Pinto 
461754139cf3SJoao Pinto 		rx_q->cur_rx = 0;
461854139cf3SJoao Pinto 		rx_q->dirty_rx = 0;
461954139cf3SJoao Pinto 	}
462054139cf3SJoao Pinto 
4621ce736788SJoao Pinto 	for (queue = 0; queue < tx_cnt; queue++) {
4622ce736788SJoao Pinto 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4623ce736788SJoao Pinto 
4624ce736788SJoao Pinto 		tx_q->cur_tx = 0;
4625ce736788SJoao Pinto 		tx_q->dirty_tx = 0;
46268d212a9eSNiklas Cassel 		tx_q->mss = 0;
4627ce736788SJoao Pinto 	}
462854139cf3SJoao Pinto }
462954139cf3SJoao Pinto 
463054139cf3SJoao Pinto /**
4631732fdf0eSGiuseppe CAVALLARO  * stmmac_resume - resume callback
4632f4e7bd81SJoachim Eastwood  * @dev: device pointer
4633732fdf0eSGiuseppe CAVALLARO  * Description: on resume this function is invoked to set up the DMA and CORE
4634732fdf0eSGiuseppe CAVALLARO  * in a usable state.
4635732fdf0eSGiuseppe CAVALLARO  */
4636f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev)
46377ac6653aSJeff Kirsher {
4638f4e7bd81SJoachim Eastwood 	struct net_device *ndev = dev_get_drvdata(dev);
46397ac6653aSJeff Kirsher 	struct stmmac_priv *priv = netdev_priv(ndev);
46407ac6653aSJeff Kirsher 
46417ac6653aSJeff Kirsher 	if (!netif_running(ndev))
46427ac6653aSJeff Kirsher 		return 0;
46437ac6653aSJeff Kirsher 
46447ac6653aSJeff Kirsher 	/* The Power Down bit, in the PM register, is cleared
46457ac6653aSJeff Kirsher 	 * automatically as soon as a magic packet or a Wake-up frame
46467ac6653aSJeff Kirsher 	 * is received. Even so, it's better to manually clear
46477ac6653aSJeff Kirsher 	 * this bit because it can generate problems while resuming
4648ceb69499SGiuseppe CAVALLARO 	 * from other devices (e.g. serial console).
4649ceb69499SGiuseppe CAVALLARO 	 */
4650623997fbSSrinivas Kandagatla 	if (device_may_wakeup(priv->device)) {
465129555fa3SThierry Reding 		mutex_lock(&priv->lock);
4652c10d4c82SJose Abreu 		stmmac_pmt(priv, priv->hw, 0);
465329555fa3SThierry Reding 		mutex_unlock(&priv->lock);
465489f7f2cfSSrinivas Kandagatla 		priv->irq_wake = 0;
4655623997fbSSrinivas Kandagatla 	} else {
4656db88f10aSSrinivas Kandagatla 		pinctrl_pm_select_default_state(priv->device);
46578d45e42bSLABBE Corentin 		/* enable the clk previously disabled */
4658f573c0b9Sjpinto 		clk_enable(priv->plat->stmmac_clk);
4659f573c0b9Sjpinto 		clk_enable(priv->plat->pclk);
4660623997fbSSrinivas Kandagatla 		/* reset the phy so that it's ready */
4661623997fbSSrinivas Kandagatla 		if (priv->mii)
4662623997fbSSrinivas Kandagatla 			stmmac_mdio_reset(priv->mii);
4663623997fbSSrinivas Kandagatla 	}
46647ac6653aSJeff Kirsher 
46657ac6653aSJeff Kirsher 	netif_device_attach(ndev);
46667ac6653aSJeff Kirsher 
466729555fa3SThierry Reding 	mutex_lock(&priv->lock);
4668f55d84b0SVincent Palatin 
466954139cf3SJoao Pinto 	stmmac_reset_queues_param(priv);
467054139cf3SJoao Pinto 
4671ae79a639SGiuseppe CAVALLARO 	stmmac_clear_descriptors(priv);
4672ae79a639SGiuseppe CAVALLARO 
4673fe131929SHuacai Chen 	stmmac_hw_setup(ndev, false);
4674777da230SGiuseppe CAVALLARO 	stmmac_init_tx_coalesce(priv);
4675ac316c78SGiuseppe CAVALLARO 	stmmac_set_rx_mode(ndev);
46767ac6653aSJeff Kirsher 
4677c22a3f48SJoao Pinto 	stmmac_enable_all_queues(priv);
46787ac6653aSJeff Kirsher 
4679c22a3f48SJoao Pinto 	stmmac_start_all_queues(priv);
46807ac6653aSJeff Kirsher 
468129555fa3SThierry Reding 	mutex_unlock(&priv->lock);
4682102463b1SFrancesco Virlinzi 
4683d6d50c7eSPhilippe Reynes 	if (ndev->phydev)
4684d6d50c7eSPhilippe Reynes 		phy_start(ndev->phydev);
4685102463b1SFrancesco Virlinzi 
46867ac6653aSJeff Kirsher 	return 0;
46877ac6653aSJeff Kirsher }
4688b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume);
4689ba27ec66SGiuseppe CAVALLARO 
46907ac6653aSJeff Kirsher #ifndef MODULE
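/* Built-in command line parsing for the "stmmaceth=" boot parameter. The
 * accepted keys correspond to the driver knobs handled below (debug, phyaddr,
 * buf_sz, tc, watchdog, flow_ctrl, pause, eee_timer, chain_mode), written as
 * comma-separated "key:value" pairs, e.g. (values purely illustrative):
 *
 *   stmmaceth=debug:16,phyaddr:1,eee_timer:1000
 *
 * Unrecognized keys are silently skipped; a value that fails kstrtoint()
 * aborts the parsing with -EINVAL.
 */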
46917ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str)
46927ac6653aSJeff Kirsher {
46937ac6653aSJeff Kirsher 	char *opt;
46947ac6653aSJeff Kirsher 
46957ac6653aSJeff Kirsher 	if (!str || !*str)
46967ac6653aSJeff Kirsher 		return -EINVAL;
46977ac6653aSJeff Kirsher 	while ((opt = strsep(&str, ",")) != NULL) {
46987ac6653aSJeff Kirsher 		if (!strncmp(opt, "debug:", 6)) {
4699ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &debug))
47007ac6653aSJeff Kirsher 				goto err;
47017ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4702ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 8, 0, &phyaddr))
47037ac6653aSJeff Kirsher 				goto err;
47047ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4705ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 7, 0, &buf_sz))
47067ac6653aSJeff Kirsher 				goto err;
47077ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "tc:", 3)) {
4708ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 3, 0, &tc))
47097ac6653aSJeff Kirsher 				goto err;
47107ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "watchdog:", 9)) {
4711ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 9, 0, &watchdog))
47127ac6653aSJeff Kirsher 				goto err;
47137ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4714ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
47157ac6653aSJeff Kirsher 				goto err;
47167ac6653aSJeff Kirsher 		} else if (!strncmp(opt, "pause:", 6)) {
4717ea2ab871SGiuseppe CAVALLARO 			if (kstrtoint(opt + 6, 0, &pause))
47187ac6653aSJeff Kirsher 				goto err;
4719506f669cSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4720d765955dSGiuseppe CAVALLARO 			if (kstrtoint(opt + 10, 0, &eee_timer))
4721d765955dSGiuseppe CAVALLARO 				goto err;
47224a7d666aSGiuseppe CAVALLARO 		} else if (!strncmp(opt, "chain_mode:", 11)) {
47234a7d666aSGiuseppe CAVALLARO 			if (kstrtoint(opt + 11, 0, &chain_mode))
47244a7d666aSGiuseppe CAVALLARO 				goto err;
47257ac6653aSJeff Kirsher 		}
47267ac6653aSJeff Kirsher 	}
47277ac6653aSJeff Kirsher 	return 0;
47287ac6653aSJeff Kirsher 
47297ac6653aSJeff Kirsher err:
47307ac6653aSJeff Kirsher 	pr_err("%s: ERROR broken module parameter conversion", __func__);
47317ac6653aSJeff Kirsher 	return -EINVAL;
47327ac6653aSJeff Kirsher }
47337ac6653aSJeff Kirsher 
47347ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt);
4735ceb69499SGiuseppe CAVALLARO #endif /* MODULE */
47366fc0d0f2SGiuseppe Cavallaro 
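/* Module init/exit only manage the shared debugfs root (when CONFIG_DEBUG_FS
 * is enabled); the per-device entries are created by stmmac_init_fs() at
 * probe time and removed again by stmmac_exit_fs().
 */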
4737466c5ac8SMathieu Olivari static int __init stmmac_init(void)
4738466c5ac8SMathieu Olivari {
4739466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4740466c5ac8SMathieu Olivari 	/* Create debugfs main directory if it doesn't exist yet */
4741466c5ac8SMathieu Olivari 	if (!stmmac_fs_dir) {
4742466c5ac8SMathieu Olivari 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4743466c5ac8SMathieu Olivari 
4744466c5ac8SMathieu Olivari 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4745466c5ac8SMathieu Olivari 			pr_err("ERROR %s, debugfs create directory failed\n",
4746466c5ac8SMathieu Olivari 			       STMMAC_RESOURCE_NAME);
4747466c5ac8SMathieu Olivari 
4748466c5ac8SMathieu Olivari 			return -ENOMEM;
4749466c5ac8SMathieu Olivari 		}
4750466c5ac8SMathieu Olivari 	}
4751466c5ac8SMathieu Olivari #endif
4752466c5ac8SMathieu Olivari 
4753466c5ac8SMathieu Olivari 	return 0;
4754466c5ac8SMathieu Olivari }
4755466c5ac8SMathieu Olivari 
4756466c5ac8SMathieu Olivari static void __exit stmmac_exit(void)
4757466c5ac8SMathieu Olivari {
4758466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS
4759466c5ac8SMathieu Olivari 	debugfs_remove_recursive(stmmac_fs_dir);
4760466c5ac8SMathieu Olivari #endif
4761466c5ac8SMathieu Olivari }
4762466c5ac8SMathieu Olivari 
4763466c5ac8SMathieu Olivari module_init(stmmac_init)
4764466c5ac8SMathieu Olivari module_exit(stmmac_exit)
4765466c5ac8SMathieu Olivari 
47666fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
47676fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
47686fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL");
4769